Merge branch 'for-2.6.26' of git://git.kernel.dk/linux-2.6-block

* 'for-2.6.26' of git://git.kernel.dk/linux-2.6-block:
  block: fix blk_register_queue() return value
  block: fix memory hotplug and bouncing in block layer
  block: replace remaining __FUNCTION__ occurrences
  Kconfig: clean up block/Kconfig help descriptions
  cciss: fix warning oops on rmmod of driver
  cciss: Fix race between disk-adding code and interrupt handler
  block: move the padding adjustment to blk_rq_map_sg
  block: add bio_copy_user_iov support to blk_rq_map_user_iov
  block: convert bio_copy_user to bio_copy_user_iov
  loop: manage partitions in disk image
  cdrom: use kmalloced buffers instead of buffers on stack
  cdrom: make unregister_cdrom() return void
  cdrom: use list_head for cdrom_device_info list
  cdrom: protect cdrom_device_info list by mutex
  cdrom: cleanup hardcoded error-code
  cdrom: remove ifdef CONFIG_SYSCTL
Linus Torvalds 2008-04-21 16:03:40 -07:00
commit 548453fd10
18 changed files with 461 additions and 238 deletions


@ -777,7 +777,7 @@ Note that a driver must have one static structure, $<device>_dops$, while
it may have as many structures $<device>_info$ as there are minor devices
active. $Register_cdrom()$ builds a linked list from these.
\subsection{$Int\ unregister_cdrom(struct\ cdrom_device_info * cdi)$}
\subsection{$Void\ unregister_cdrom(struct\ cdrom_device_info * cdi)$}
Unregistering device $cdi$ with minor number $MINOR(cdi\to dev)$ removes
the minor device from the list. If it was the last registered minor for


@ -5,14 +5,18 @@ menuconfig BLOCK
bool "Enable the block layer" if EMBEDDED
default y
help
This permits the block layer to be removed from the kernel if it's not
needed (on some embedded devices for example). If this option is
disabled, then blockdev files will become unusable and some
filesystems (such as ext3) will become unavailable.
Provide block layer support for the kernel.
This option will also disable SCSI character devices and USB storage
since they make use of various block layer definitions and
facilities.
Disable this option to remove the block layer support from the
kernel. This may be useful for embedded devices.
If this option is disabled:
- block device files will become unusable
- some filesystems (such as ext3) will become unavailable.
Also, SCSI character devices and USB storage will be disabled since
they make use of various block layer definitions and facilities.
Say Y here unless you know you really don't want to mount disks and
suchlike.
@ -23,9 +27,20 @@ config LBD
bool "Support for Large Block Devices"
depends on !64BIT
help
Say Y here if you want to attach large (bigger than 2TB) discs to
your machine, or if you want to have a raid or loopback device
bigger than 2TB. Otherwise say N.
Enable block devices of size 2TB and larger.
This option is required to support the full capacity of large
(2TB+) block devices, including RAID, disk, Network Block Device,
Logical Volume Manager (LVM) and loopback.
For example, RAID devices are frequently bigger than the capacity
of the largest individual hard drive.
This option is not required if you have individual disk drives
which total 2TB+ and you are not aggregating the capacity into
a large block device (e.g. using RAID or LVM).
If unsure, say N.
config BLK_DEV_IO_TRACE
bool "Support for tracing block io actions"
@ -33,19 +48,21 @@ config BLK_DEV_IO_TRACE
select RELAY
select DEBUG_FS
help
Say Y here, if you want to be able to trace the block layer actions
Say Y here if you want to be able to trace the block layer actions
on a given queue. Tracing allows you to see any traffic happening
on a block device queue. For more information (and the user space
support tools needed), fetch the blktrace app from:
on a block device queue. For more information (and the userspace
support tools needed), fetch the blktrace tools from:
git://git.kernel.dk/blktrace.git
If unsure, say N.
config LSF
bool "Support for Large Single Files"
depends on !64BIT
help
Say Y here if you want to be able to handle very large files (bigger
than 2TB), otherwise say N.
Say Y here if you want to be able to handle very large files (2TB
and larger), otherwise say N.
If unsure, say Y.
@ -53,14 +70,16 @@ config BLK_DEV_BSG
bool "Block layer SG support v4 (EXPERIMENTAL)"
depends on EXPERIMENTAL
---help---
Saying Y here will enable generic SG (SCSI generic) v4 support
for any block device.
Saying Y here will enable generic SG (SCSI generic) v4 support
for any block device.
Unlike SG v3 (aka block/scsi_ioctl.c drivers/scsi/sg.c), SG v4
can handle complicated SCSI commands: tagged variable length cdbs
with bidirectional data transfers and generic request/response
protocols (e.g. Task Management Functions and SMP in Serial
Attached SCSI).
Unlike SG v3 (aka block/scsi_ioctl.c drivers/scsi/sg.c), SG v4
can handle complicated SCSI commands: tagged variable length cdbs
with bidirectional data transfers and generic request/response
protocols (e.g. Task Management Functions and SMP in Serial
Attached SCSI).
If unsure, say N.
endif # BLOCK


@ -5,6 +5,7 @@
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h> /* for struct sg_iovec */
#include "blk.h"
@ -140,25 +141,8 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
ubuf += ret;
}
/*
* __blk_rq_map_user() copies the buffers if starting address
* or length isn't aligned to dma_pad_mask. As the copied
* buffer is always page aligned, we know that there's enough
* room for padding. Extend the last bio and update
* rq->data_len accordingly.
*
* On unmap, bio_uncopy_user() will use unmodified
* bio_map_data pointed to by bio->bi_private.
*/
if (len & q->dma_pad_mask) {
unsigned int pad_len = (q->dma_pad_mask & ~len) + 1;
struct bio *tail = rq->biotail;
tail->bi_io_vec[tail->bi_vcnt - 1].bv_len += pad_len;
tail->bi_size += pad_len;
rq->extra_len += pad_len;
}
if (!bio_flagged(bio, BIO_USER_MAPPED))
rq->cmd_flags |= REQ_COPY_USER;
rq->buffer = rq->data = NULL;
return 0;
@ -194,15 +178,26 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
struct sg_iovec *iov, int iov_count, unsigned int len)
{
struct bio *bio;
int i, read = rq_data_dir(rq) == READ;
int unaligned = 0;
if (!iov || iov_count <= 0)
return -EINVAL;
/* we don't allow misaligned data like bio_map_user() does. If the
* user is using sg, they're expected to know the alignment constraints
* and respect them accordingly */
bio = bio_map_user_iov(q, NULL, iov, iov_count,
rq_data_dir(rq) == READ);
for (i = 0; i < iov_count; i++) {
unsigned long uaddr = (unsigned long)iov[i].iov_base;
if (uaddr & queue_dma_alignment(q)) {
unaligned = 1;
break;
}
}
if (unaligned || (q->dma_pad_mask & len))
bio = bio_copy_user_iov(q, iov, iov_count, read);
else
bio = bio_map_user_iov(q, NULL, iov, iov_count, read);
if (IS_ERR(bio))
return PTR_ERR(bio);
@ -212,6 +207,9 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
return -EINVAL;
}
if (!bio_flagged(bio, BIO_USER_MAPPED))
rq->cmd_flags |= REQ_COPY_USER;
bio_get(bio);
blk_rq_bio_prep(q, rq, bio);
rq->buffer = rq->data = NULL;
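For illustration only, not part of the patch: the rewritten blk_rq_map_user_iov() bounces through kernel pages (bio_copy_user_iov) whenever any iovec base address violates the queue's DMA alignment, or the total length needs padding, and maps the user pages directly otherwise. A minimal standalone sketch of that decision in plain C, with hypothetical mask constants standing in for queue_dma_alignment(q) and q->dma_pad_mask, and struct iovec standing in for sg_iovec:

#include <stdio.h>
#include <sys/uio.h>	/* struct iovec */

/* Hypothetical per-queue constraints (0x3 == 4-byte alignment/padding). */
#define DMA_ALIGN_MASK 0x3u
#define DMA_PAD_MASK   0x3u

/* Return 1 if the request must be bounced through kernel pages. */
static int must_copy(const struct iovec *iov, int iov_count, unsigned long len)
{
	int i;

	for (i = 0; i < iov_count; i++)
		if ((unsigned long)iov[i].iov_base & DMA_ALIGN_MASK)
			return 1;	/* misaligned start address */

	return (len & DMA_PAD_MASK) != 0;	/* length needs padding */
}

int main(void)
{
	static char buf[16] __attribute__((aligned(4)));
	struct iovec iov = { .iov_base = buf + 1, .iov_len = 15 };

	printf("bounce through kernel pages: %d\n",
	       must_copy(&iov, 1, iov.iov_len));
	return 0;
}

Requests built via the copy path are flagged REQ_COPY_USER, so the blk_rq_map_sg() hunk that follows knows it may safely extend the last segment for padding.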


@ -220,6 +220,15 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
bvprv = bvec;
} /* segments in rq */
if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
(rq->data_len & q->dma_pad_mask)) {
unsigned int pad_len = (q->dma_pad_mask & ~rq->data_len) + 1;
sg->length += pad_len;
rq->extra_len += pad_len;
}
if (q->dma_drain_size && q->dma_drain_needed(rq)) {
if (rq->cmd_flags & REQ_RW)
memset(q->dma_drain_buffer, 0, q->dma_drain_size);
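The padding adjustment that used to live in blk_rq_map_user() now happens once, here, after the scatterlist is built: if the request carries copied user pages (REQ_COPY_USER) and its length is not already a multiple of the DMA pad granularity, the final sg entry is extended. The expression (mask & ~len) + 1 is simply the distance from len up to the next multiple of (mask + 1); a tiny standalone check, not part of the patch:

#include <stdio.h>

/* Bytes needed to round len up to the next multiple of (pad_mask + 1).
 * blk_rq_map_sg() only applies this when (len & pad_mask) != 0.        */
static unsigned int pad_to(unsigned int len, unsigned int pad_mask)
{
	return (pad_mask & ~len) + 1;
}

int main(void)
{
	printf("%u\n", pad_to(510, 3));	/* 2: 510 -> 512 */
	printf("%u\n", pad_to(509, 3));	/* 3: 509 -> 512 */
	return 0;
}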


@ -276,9 +276,12 @@ int blk_register_queue(struct gendisk *disk)
struct request_queue *q = disk->queue;
if (!q || !q->request_fn)
if (WARN_ON(!q))
return -ENXIO;
if (!q->request_fn)
return 0;
ret = kobject_add(&q->kobj, kobject_get(&disk->dev.kobj),
"%s", "queue");
if (ret < 0)
@ -300,7 +303,10 @@ void blk_unregister_queue(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
if (q && q->request_fn) {
if (WARN_ON(!q))
return;
if (q->request_fn) {
elv_unregister_queue(q);
kobject_uevent(&q->kobj, KOBJ_REMOVE);


@ -1349,6 +1349,10 @@ static void cciss_update_drive_info(int ctlr, int drv_index)
spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
h->drv[drv_index].busy_configuring = 1;
spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
/* deregister_disk sets h->drv[drv_index].queue = NULL */
/* which keeps the interrupt handler from starting */
/* the queue. */
ret = deregister_disk(h->gendisk[drv_index],
&h->drv[drv_index], 0);
h->drv[drv_index].busy_configuring = 0;
@ -1419,6 +1423,10 @@ static void cciss_update_drive_info(int ctlr, int drv_index)
blk_queue_hardsect_size(disk->queue,
hba[ctlr]->drv[drv_index].block_size);
/* Make sure all queue data is written out before */
/* setting h->drv[drv_index].queue, as setting this */
/* allows the interrupt handler to start the queue */
wmb();
h->drv[drv_index].queue = disk->queue;
add_disk(disk);
}
@ -3520,10 +3528,17 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
continue;
blk_queue_hardsect_size(q, drv->block_size);
set_capacity(disk, drv->nr_blocks);
add_disk(disk);
j++;
} while (j <= hba[i]->highest_lun);
/* Make sure all queue data is written out before */
/* interrupt handler, triggered by add_disk, */
/* is allowed to start them. */
wmb();
for (j = 0; j <= hba[i]->highest_lun; j++)
add_disk(hba[i]->gendisk[j]);
return 1;
clean4:
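Both cciss hunks apply the same publish-after-initialize pattern: finish every piece of setup, issue a write memory barrier, and only then perform the store (assigning h->drv[drv_index].queue, or calling add_disk()) that makes the disk visible to the interrupt handler. A purely illustrative kernel-style sketch of the writer side; the struct and function are invented, and a real reader would pair this with a read barrier or an address dependency:

#include <linux/blkdev.h>	/* struct request_queue; the barrier helpers
				 * come in via the arch headers this pulls in */

struct drv_slot {			/* illustrative, not the cciss struct */
	struct request_queue *queue;	/* NULL until the disk is usable      */
	unsigned int block_size;
};

static void publish_queue(struct drv_slot *drv, struct request_queue *q,
			  unsigned int bsize)
{
	drv->block_size = bsize;	/* 1. finish every initialization store */

	wmb();				/* 2. order those stores ...            */

	drv->queue = q;			/* 3. ... before the store the IRQ
					 *    handler uses as its "ready" flag  */
}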


@ -1349,9 +1349,9 @@ cciss_unregister_scsi(int ctlr)
/* set scsi_host to NULL so our detect routine will
find us on register */
sa->scsi_host = NULL;
spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
scsi_cmd_stack_free(ctlr);
kfree(sa);
spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
}
static int
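The cciss_scsi change is the usual rule that memory release and other heavier teardown should not run inside a spin_lock_irqsave() critical section; only the shared-state update needs the lock. A minimal kernel-style sketch of the resulting shape, using an invented per-adapter structure and lock rather than the driver's CCISS_LOCK():

#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(adapter_lock);	/* stand-in for CCISS_LOCK(ctlr) */

struct adapter {			/* hypothetical per-controller data */
	void *scsi_host;
};

static void adapter_teardown(struct adapter *sa)
{
	unsigned long flags;

	spin_lock_irqsave(&adapter_lock, flags);
	sa->scsi_host = NULL;		/* only the shared-state update is locked */
	spin_unlock_irqrestore(&adapter_lock, flags);

	kfree(sa);			/* heavier cleanup runs with the lock dropped */
}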


@ -82,6 +82,9 @@
static LIST_HEAD(loop_devices);
static DEFINE_MUTEX(loop_devices_mutex);
static int max_part;
static int part_shift;
/*
* Transfer functions
*/
@ -692,6 +695,8 @@ static int loop_change_fd(struct loop_device *lo, struct file *lo_file,
goto out_putf;
fput(old_file);
if (max_part > 0)
ioctl_by_bdev(bdev, BLKRRPART, 0);
return 0;
out_putf:
@ -819,6 +824,8 @@ static int loop_set_fd(struct loop_device *lo, struct file *lo_file,
}
lo->lo_state = Lo_bound;
wake_up_process(lo->lo_thread);
if (max_part > 0)
ioctl_by_bdev(bdev, BLKRRPART, 0);
return 0;
out_clr:
@ -919,6 +926,8 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
fput(filp);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
if (max_part > 0)
ioctl_by_bdev(bdev, BLKRRPART, 0);
return 0;
}
@ -1360,6 +1369,8 @@ static struct block_device_operations lo_fops = {
static int max_loop;
module_param(max_loop, int, 0);
MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
module_param(max_part, int, 0);
MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
@ -1412,7 +1423,7 @@ static struct loop_device *loop_alloc(int i)
if (!lo->lo_queue)
goto out_free_dev;
disk = lo->lo_disk = alloc_disk(1);
disk = lo->lo_disk = alloc_disk(1 << part_shift);
if (!disk)
goto out_free_queue;
@ -1422,7 +1433,7 @@ static struct loop_device *loop_alloc(int i)
init_waitqueue_head(&lo->lo_event);
spin_lock_init(&lo->lo_lock);
disk->major = LOOP_MAJOR;
disk->first_minor = i;
disk->first_minor = i << part_shift;
disk->fops = &lo_fops;
disk->private_data = lo;
disk->queue = lo->lo_queue;
@ -1502,7 +1513,12 @@ static int __init loop_init(void)
* themselves and have kernel automatically instantiate actual
* device on-demand.
*/
if (max_loop > 1UL << MINORBITS)
part_shift = 0;
if (max_part > 0)
part_shift = fls(max_part);
if (max_loop > 1UL << (MINORBITS - part_shift))
return -EINVAL;
if (max_loop) {
@ -1510,7 +1526,7 @@ static int __init loop_init(void)
range = max_loop;
} else {
nr = 8;
range = 1UL << MINORBITS;
range = 1UL << (MINORBITS - part_shift);
}
if (register_blkdev(LOOP_MAJOR, "loop"))
@ -1549,7 +1565,7 @@ static void __exit loop_exit(void)
unsigned long range;
struct loop_device *lo, *next;
range = max_loop ? max_loop : 1UL << MINORBITS;
range = max_loop ? max_loop : 1UL << (MINORBITS - part_shift);
list_for_each_entry_safe(lo, next, &loop_devices, lo_list)
loop_del_one(lo);
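With the new max_part parameter, every loop device reserves 1 << part_shift minor numbers so its partitions can receive device nodes, where part_shift = fls(max_part) rounds the request up to a power of two, and the maximum number of loop devices shrinks accordingly. A sketch of the minor-number arithmetic in plain C, not part of the patch (fls() is reimplemented locally since it is a kernel helper; MINORBITS is 20 as in <linux/kdev_t.h>):

#include <stdio.h>

#define MINORBITS 20

/* Minimal stand-in for the kernel's fls(): 1-based index of the highest set bit. */
static int fls_local(unsigned int x)
{
	int r = 0;
	while (x) { r++; x >>= 1; }
	return r;
}

int main(void)
{
	int max_part = 15;			/* e.g. "modprobe loop max_part=15" */
	int part_shift = fls_local(max_part);	/* 4 -> 16 minors per loop device   */
	int i;

	for (i = 0; i < 3; i++)
		printf("loop%d: first_minor=%d, minors=%d\n",
		       i, i << part_shift, 1 << part_shift);

	printf("max devices: %lu\n", 1UL << (MINORBITS - part_shift));
	return 0;
}

Loading the module with, say, modprobe loop max_part=15 and attaching a partitioned image should then expose the partitions as extra minors of the loop disk; the BLKRRPART calls added above ask the block layer to rescan the partition table whenever the backing file changes.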


@ -79,9 +79,9 @@ MODULE_PARM_DESC(max_queue, "Maximum number of queued commands. (min==1, max==30
/* note: prints function name for you */
#ifdef CARM_DEBUG
#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
#ifdef CARM_VERBOSE_DEBUG
#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
#else
#define VPRINTK(fmt, args...)
#endif /* CARM_VERBOSE_DEBUG */
@ -96,7 +96,7 @@ MODULE_PARM_DESC(max_queue, "Maximum number of queued commands. (min==1, max==30
#define assert(expr) \
if(unlikely(!(expr))) { \
printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
#expr,__FILE__,__FUNCTION__,__LINE__); \
#expr, __FILE__, __func__, __LINE__); \
}
#endif
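__FUNCTION__ is a GCC-specific spelling; __func__ is the standard C99 predefined identifier with the same value, which is why this replacement is purely mechanical. A two-line illustration using the same macro shape as the driver (named variadic macro arguments are themselves a GCC extension, just as in the original):

#include <stdio.h>

#define DPRINTK(fmt, args...) printf("%s: " fmt, __func__, ## args)

int main(void)
{
	DPRINTK("value=%d\n", 42);	/* prints "main: value=42" */
	return 0;
}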


@ -360,10 +360,9 @@ static int cdrom_mrw_exit(struct cdrom_device_info *cdi);
static int cdrom_get_disc_info(struct cdrom_device_info *cdi, disc_information *di);
#ifdef CONFIG_SYSCTL
static void cdrom_sysctl_register(void);
#endif /* CONFIG_SYSCTL */
static struct cdrom_device_info *topCdromPtr;
static LIST_HEAD(cdrom_list);
static int cdrom_dummy_generic_packet(struct cdrom_device_info *cdi,
struct packet_command *cgc)
@ -394,13 +393,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
cdinfo(CD_OPEN, "entering register_cdrom\n");
if (cdo->open == NULL || cdo->release == NULL)
return -2;
return -EINVAL;
if (!banner_printed) {
printk(KERN_INFO "Uniform CD-ROM driver " REVISION "\n");
banner_printed = 1;
#ifdef CONFIG_SYSCTL
cdrom_sysctl_register();
#endif /* CONFIG_SYSCTL */
}
ENSURE(drive_status, CDC_DRIVE_STATUS );
@ -439,35 +436,18 @@ int register_cdrom(struct cdrom_device_info *cdi)
cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
mutex_lock(&cdrom_mutex);
cdi->next = topCdromPtr;
topCdromPtr = cdi;
list_add(&cdi->list, &cdrom_list);
mutex_unlock(&cdrom_mutex);
return 0;
}
#undef ENSURE
int unregister_cdrom(struct cdrom_device_info *unreg)
void unregister_cdrom(struct cdrom_device_info *cdi)
{
struct cdrom_device_info *cdi, *prev;
cdinfo(CD_OPEN, "entering unregister_cdrom\n");
prev = NULL;
mutex_lock(&cdrom_mutex);
cdi = topCdromPtr;
while (cdi && cdi != unreg) {
prev = cdi;
cdi = cdi->next;
}
if (cdi == NULL) {
mutex_unlock(&cdrom_mutex);
return -2;
}
if (prev)
prev->next = cdi->next;
else
topCdromPtr = cdi->next;
list_del(&cdi->list);
mutex_unlock(&cdrom_mutex);
if (cdi->exit)
@ -475,34 +455,43 @@ int unregister_cdrom(struct cdrom_device_info *unreg)
cdi->ops->n_minors--;
cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
return 0;
}
int cdrom_get_media_event(struct cdrom_device_info *cdi,
struct media_event_desc *med)
{
struct packet_command cgc;
unsigned char buffer[8];
struct event_header *eh = (struct event_header *) buffer;
unsigned char *buffer;
struct event_header *eh;
int ret = 1;
init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
buffer = kmalloc(8, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
eh = (struct event_header *)buffer;
init_cdrom_command(&cgc, buffer, 8, CGC_DATA_READ);
cgc.cmd[0] = GPCMD_GET_EVENT_STATUS_NOTIFICATION;
cgc.cmd[1] = 1; /* IMMED */
cgc.cmd[4] = 1 << 4; /* media event */
cgc.cmd[8] = sizeof(buffer);
cgc.cmd[8] = 8;
cgc.quiet = 1;
if (cdi->ops->generic_packet(cdi, &cgc))
return 1;
goto err;
if (be16_to_cpu(eh->data_len) < sizeof(*med))
return 1;
goto err;
if (eh->nea || eh->notification_class != 0x4)
return 1;
goto err;
memcpy(med, &buffer[sizeof(*eh)], sizeof(*med));
return 0;
memcpy(med, buffer + sizeof(*eh), sizeof(*med));
ret = 0;
err:
kfree(buffer);
return ret;
}
/*
@ -512,68 +501,82 @@ int cdrom_get_media_event(struct cdrom_device_info *cdi,
static int cdrom_mrw_probe_pc(struct cdrom_device_info *cdi)
{
struct packet_command cgc;
char buffer[16];
char *buffer;
int ret = 1;
init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
buffer = kmalloc(16, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
init_cdrom_command(&cgc, buffer, 16, CGC_DATA_READ);
cgc.timeout = HZ;
cgc.quiet = 1;
if (!cdrom_mode_sense(cdi, &cgc, MRW_MODE_PC, 0)) {
cdi->mrw_mode_page = MRW_MODE_PC;
return 0;
ret = 0;
} else if (!cdrom_mode_sense(cdi, &cgc, MRW_MODE_PC_PRE1, 0)) {
cdi->mrw_mode_page = MRW_MODE_PC_PRE1;
return 0;
ret = 0;
}
return 1;
kfree(buffer);
return ret;
}
static int cdrom_is_mrw(struct cdrom_device_info *cdi, int *write)
{
struct packet_command cgc;
struct mrw_feature_desc *mfd;
unsigned char buffer[16];
unsigned char *buffer;
int ret;
*write = 0;
buffer = kmalloc(16, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
init_cdrom_command(&cgc, buffer, 16, CGC_DATA_READ);
cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
cgc.cmd[3] = CDF_MRW;
cgc.cmd[8] = sizeof(buffer);
cgc.cmd[8] = 16;
cgc.quiet = 1;
if ((ret = cdi->ops->generic_packet(cdi, &cgc)))
return ret;
goto err;
mfd = (struct mrw_feature_desc *)&buffer[sizeof(struct feature_header)];
if (be16_to_cpu(mfd->feature_code) != CDF_MRW)
return 1;
if (be16_to_cpu(mfd->feature_code) != CDF_MRW) {
ret = 1;
goto err;
}
*write = mfd->write;
if ((ret = cdrom_mrw_probe_pc(cdi))) {
*write = 0;
return ret;
}
return 0;
err:
kfree(buffer);
return ret;
}
static int cdrom_mrw_bgformat(struct cdrom_device_info *cdi, int cont)
{
struct packet_command cgc;
unsigned char buffer[12];
unsigned char *buffer;
int ret;
printk(KERN_INFO "cdrom: %sstarting format\n", cont ? "Re" : "");
buffer = kmalloc(12, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
/*
* FmtData bit set (bit 4), format type is 1
*/
init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_WRITE);
init_cdrom_command(&cgc, buffer, 12, CGC_DATA_WRITE);
cgc.cmd[0] = GPCMD_FORMAT_UNIT;
cgc.cmd[1] = (1 << 4) | 1;
@ -600,6 +603,7 @@ static int cdrom_mrw_bgformat(struct cdrom_device_info *cdi, int cont)
if (ret)
printk(KERN_INFO "cdrom: bgformat failed\n");
kfree(buffer);
return ret;
}
@ -659,16 +663,17 @@ static int cdrom_mrw_set_lba_space(struct cdrom_device_info *cdi, int space)
{
struct packet_command cgc;
struct mode_page_header *mph;
char buffer[16];
char *buffer;
int ret, offset, size;
init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
buffer = kmalloc(16, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
cgc.buffer = buffer;
cgc.buflen = sizeof(buffer);
init_cdrom_command(&cgc, buffer, 16, CGC_DATA_READ);
if ((ret = cdrom_mode_sense(cdi, &cgc, cdi->mrw_mode_page, 0)))
return ret;
goto err;
mph = (struct mode_page_header *) buffer;
offset = be16_to_cpu(mph->desc_length);
@ -678,55 +683,70 @@ static int cdrom_mrw_set_lba_space(struct cdrom_device_info *cdi, int space)
cgc.buflen = size;
if ((ret = cdrom_mode_select(cdi, &cgc)))
return ret;
goto err;
printk(KERN_INFO "cdrom: %s: mrw address space %s selected\n", cdi->name, mrw_address_space[space]);
return 0;
ret = 0;
err:
kfree(buffer);
return ret;
}
static int cdrom_get_random_writable(struct cdrom_device_info *cdi,
struct rwrt_feature_desc *rfd)
{
struct packet_command cgc;
char buffer[24];
char *buffer;
int ret;
init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
buffer = kmalloc(24, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
init_cdrom_command(&cgc, buffer, 24, CGC_DATA_READ);
cgc.cmd[0] = GPCMD_GET_CONFIGURATION; /* often 0x46 */
cgc.cmd[3] = CDF_RWRT; /* often 0x0020 */
cgc.cmd[8] = sizeof(buffer); /* often 0x18 */
cgc.cmd[8] = 24; /* often 0x18 */
cgc.quiet = 1;
if ((ret = cdi->ops->generic_packet(cdi, &cgc)))
return ret;
goto err;
memcpy(rfd, &buffer[sizeof(struct feature_header)], sizeof (*rfd));
return 0;
ret = 0;
err:
kfree(buffer);
return ret;
}
static int cdrom_has_defect_mgt(struct cdrom_device_info *cdi)
{
struct packet_command cgc;
char buffer[16];
char *buffer;
__be16 *feature_code;
int ret;
init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
buffer = kmalloc(16, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
init_cdrom_command(&cgc, buffer, 16, CGC_DATA_READ);
cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
cgc.cmd[3] = CDF_HWDM;
cgc.cmd[8] = sizeof(buffer);
cgc.cmd[8] = 16;
cgc.quiet = 1;
if ((ret = cdi->ops->generic_packet(cdi, &cgc)))
return ret;
goto err;
feature_code = (__be16 *) &buffer[sizeof(struct feature_header)];
if (be16_to_cpu(*feature_code) == CDF_HWDM)
return 0;
return 1;
ret = 0;
err:
kfree(buffer);
return ret;
}
@ -817,10 +837,14 @@ static int cdrom_mrw_open_write(struct cdrom_device_info *cdi)
static int mo_open_write(struct cdrom_device_info *cdi)
{
struct packet_command cgc;
char buffer[255];
char *buffer;
int ret;
init_cdrom_command(&cgc, &buffer, 4, CGC_DATA_READ);
buffer = kmalloc(255, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
init_cdrom_command(&cgc, buffer, 4, CGC_DATA_READ);
cgc.quiet = 1;
/*
@ -837,10 +861,15 @@ static int mo_open_write(struct cdrom_device_info *cdi)
}
/* drive gave us no info, let the user go ahead */
if (ret)
return 0;
if (ret) {
ret = 0;
goto err;
}
return buffer[3] & 0x80;
ret = buffer[3] & 0x80;
err:
kfree(buffer);
return ret;
}
static int cdrom_ram_open_write(struct cdrom_device_info *cdi)
@ -863,15 +892,19 @@ static int cdrom_ram_open_write(struct cdrom_device_info *cdi)
static void cdrom_mmc3_profile(struct cdrom_device_info *cdi)
{
struct packet_command cgc;
char buffer[32];
char *buffer;
int ret, mmc3_profile;
init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
buffer = kmalloc(32, GFP_KERNEL);
if (!buffer)
return;
init_cdrom_command(&cgc, buffer, 32, CGC_DATA_READ);
cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
cgc.cmd[1] = 0;
cgc.cmd[2] = cgc.cmd[3] = 0; /* Starting Feature Number */
cgc.cmd[8] = sizeof(buffer); /* Allocation Length */
cgc.cmd[8] = 32; /* Allocation Length */
cgc.quiet = 1;
if ((ret = cdi->ops->generic_packet(cdi, &cgc)))
@ -880,6 +913,7 @@ static void cdrom_mmc3_profile(struct cdrom_device_info *cdi)
mmc3_profile = (buffer[6] << 8) | buffer[7];
cdi->mmc3_profile = mmc3_profile;
kfree(buffer);
}
static int cdrom_is_dvd_rw(struct cdrom_device_info *cdi)
@ -1594,12 +1628,15 @@ static void setup_send_key(struct packet_command *cgc, unsigned agid, unsigned t
static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
{
int ret;
u_char buf[20];
u_char *buf;
struct packet_command cgc;
struct cdrom_device_ops *cdo = cdi->ops;
rpc_state_t rpc_state;
rpc_state_t *rpc_state;
buf = kzalloc(20, GFP_KERNEL);
if (!buf)
return -ENOMEM;
memset(buf, 0, sizeof(buf));
init_cdrom_command(&cgc, buf, 0, CGC_DATA_READ);
switch (ai->type) {
@ -1610,7 +1647,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
setup_report_key(&cgc, ai->lsa.agid, 0);
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
goto err;
ai->lsa.agid = buf[7] >> 6;
/* Returning data, let host change state */
@ -1621,7 +1658,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
setup_report_key(&cgc, ai->lsk.agid, 2);
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
goto err;
copy_key(ai->lsk.key, &buf[4]);
/* Returning data, let host change state */
@ -1632,7 +1669,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
setup_report_key(&cgc, ai->lsc.agid, 1);
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
goto err;
copy_chal(ai->lsc.chal, &buf[4]);
/* Returning data, let host change state */
@ -1649,7 +1686,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
cgc.cmd[2] = ai->lstk.lba >> 24;
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
goto err;
ai->lstk.cpm = (buf[4] >> 7) & 1;
ai->lstk.cp_sec = (buf[4] >> 6) & 1;
@ -1663,7 +1700,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
setup_report_key(&cgc, ai->lsasf.agid, 5);
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
goto err;
ai->lsasf.asf = buf[7] & 1;
break;
@ -1676,7 +1713,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
copy_chal(&buf[4], ai->hsc.chal);
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
goto err;
ai->type = DVD_LU_SEND_KEY1;
break;
@ -1689,7 +1726,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
if ((ret = cdo->generic_packet(cdi, &cgc))) {
ai->type = DVD_AUTH_FAILURE;
return ret;
goto err;
}
ai->type = DVD_AUTH_ESTABLISHED;
break;
@ -1700,24 +1737,23 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
cdinfo(CD_DVD, "entering DVD_INVALIDATE_AGID\n");
setup_report_key(&cgc, ai->lsa.agid, 0x3f);
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
goto err;
break;
/* Get region settings */
case DVD_LU_SEND_RPC_STATE:
cdinfo(CD_DVD, "entering DVD_LU_SEND_RPC_STATE\n");
setup_report_key(&cgc, 0, 8);
memset(&rpc_state, 0, sizeof(rpc_state_t));
cgc.buffer = (char *) &rpc_state;
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
goto err;
ai->lrpcs.type = rpc_state.type_code;
ai->lrpcs.vra = rpc_state.vra;
ai->lrpcs.ucca = rpc_state.ucca;
ai->lrpcs.region_mask = rpc_state.region_mask;
ai->lrpcs.rpc_scheme = rpc_state.rpc_scheme;
rpc_state = (rpc_state_t *)buf;
ai->lrpcs.type = rpc_state->type_code;
ai->lrpcs.vra = rpc_state->vra;
ai->lrpcs.ucca = rpc_state->ucca;
ai->lrpcs.region_mask = rpc_state->region_mask;
ai->lrpcs.rpc_scheme = rpc_state->rpc_scheme;
break;
/* Set region settings */
@ -1728,20 +1764,23 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
buf[4] = ai->hrpcs.pdrc;
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
goto err;
break;
default:
cdinfo(CD_WARNING, "Invalid DVD key ioctl (%d)\n", ai->type);
return -ENOTTY;
ret = -ENOTTY;
goto err;
}
return 0;
ret = 0;
err:
kfree(buf);
return ret;
}
static int dvd_read_physical(struct cdrom_device_info *cdi, dvd_struct *s)
{
unsigned char buf[21], *base;
unsigned char *buf, *base;
struct dvd_layer *layer;
struct packet_command cgc;
struct cdrom_device_ops *cdo = cdi->ops;
@ -1750,7 +1789,11 @@ static int dvd_read_physical(struct cdrom_device_info *cdi, dvd_struct *s)
if (layer_num >= DVD_LAYERS)
return -EINVAL;
init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
buf = kmalloc(21, GFP_KERNEL);
if (!buf)
return -ENOMEM;
init_cdrom_command(&cgc, buf, 21, CGC_DATA_READ);
cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE;
cgc.cmd[6] = layer_num;
cgc.cmd[7] = s->type;
@ -1762,7 +1805,7 @@ static int dvd_read_physical(struct cdrom_device_info *cdi, dvd_struct *s)
cgc.quiet = 1;
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
goto err;
base = &buf[4];
layer = &s->physical.layer[layer_num];
@ -1786,17 +1829,24 @@ static int dvd_read_physical(struct cdrom_device_info *cdi, dvd_struct *s)
layer->end_sector_l0 = base[13] << 16 | base[14] << 8 | base[15];
layer->bca = base[16] >> 7;
return 0;
ret = 0;
err:
kfree(buf);
return ret;
}
static int dvd_read_copyright(struct cdrom_device_info *cdi, dvd_struct *s)
{
int ret;
u_char buf[8];
u_char *buf;
struct packet_command cgc;
struct cdrom_device_ops *cdo = cdi->ops;
init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
buf = kmalloc(8, GFP_KERNEL);
if (!buf)
return -ENOMEM;
init_cdrom_command(&cgc, buf, 8, CGC_DATA_READ);
cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE;
cgc.cmd[6] = s->copyright.layer_num;
cgc.cmd[7] = s->type;
@ -1804,12 +1854,15 @@ static int dvd_read_copyright(struct cdrom_device_info *cdi, dvd_struct *s)
cgc.cmd[9] = cgc.buflen & 0xff;
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
goto err;
s->copyright.cpst = buf[4];
s->copyright.rmi = buf[5];
return 0;
ret = 0;
err:
kfree(buf);
return ret;
}
static int dvd_read_disckey(struct cdrom_device_info *cdi, dvd_struct *s)
@ -1841,26 +1894,33 @@ static int dvd_read_disckey(struct cdrom_device_info *cdi, dvd_struct *s)
static int dvd_read_bca(struct cdrom_device_info *cdi, dvd_struct *s)
{
int ret;
u_char buf[4 + 188];
u_char *buf;
struct packet_command cgc;
struct cdrom_device_ops *cdo = cdi->ops;
init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
buf = kmalloc(4 + 188, GFP_KERNEL);
if (!buf)
return -ENOMEM;
init_cdrom_command(&cgc, buf, 4 + 188, CGC_DATA_READ);
cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE;
cgc.cmd[7] = s->type;
cgc.cmd[9] = cgc.buflen & 0xff;
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
goto err;
s->bca.len = buf[0] << 8 | buf[1];
if (s->bca.len < 12 || s->bca.len > 188) {
cdinfo(CD_WARNING, "Received invalid BCA length (%d)\n", s->bca.len);
return -EIO;
ret = -EIO;
goto err;
}
memcpy(s->bca.value, &buf[4], s->bca.len);
return 0;
ret = 0;
err:
kfree(buf);
return ret;
}
static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s)
@ -1960,9 +2020,13 @@ static int cdrom_read_subchannel(struct cdrom_device_info *cdi,
{
struct cdrom_device_ops *cdo = cdi->ops;
struct packet_command cgc;
char buffer[32];
char *buffer;
int ret;
buffer = kmalloc(32, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
init_cdrom_command(&cgc, buffer, 16, CGC_DATA_READ);
cgc.cmd[0] = GPCMD_READ_SUBCHANNEL;
cgc.cmd[1] = 2; /* MSF addressing */
@ -1971,7 +2035,7 @@ static int cdrom_read_subchannel(struct cdrom_device_info *cdi,
cgc.cmd[8] = 16;
if ((ret = cdo->generic_packet(cdi, &cgc)))
return ret;
goto err;
subchnl->cdsc_audiostatus = cgc.buffer[1];
subchnl->cdsc_format = CDROM_MSF;
@ -1986,7 +2050,10 @@ static int cdrom_read_subchannel(struct cdrom_device_info *cdi,
subchnl->cdsc_absaddr.msf.second = cgc.buffer[10];
subchnl->cdsc_absaddr.msf.frame = cgc.buffer[11];
return 0;
ret = 0;
err:
kfree(buffer);
return ret;
}
/*
@ -3309,7 +3376,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
*pos += ret;
for (cdi = topCdromPtr; cdi; cdi = cdi->next) {
list_for_each_entry(cdi, &cdrom_list, list) {
switch (option) {
case CTL_NAME:
ret = scnprintf(info + *pos, max_size - *pos,
@ -3430,7 +3497,8 @@ static void cdrom_update_settings(void)
{
struct cdrom_device_info *cdi;
for (cdi = topCdromPtr; cdi != NULL; cdi = cdi->next) {
mutex_lock(&cdrom_mutex);
list_for_each_entry(cdi, &cdrom_list, list) {
if (autoclose && CDROM_CAN(CDC_CLOSE_TRAY))
cdi->options |= CDO_AUTO_CLOSE;
else if (!autoclose)
@ -3448,6 +3516,7 @@ static void cdrom_update_settings(void)
else
cdi->options &= ~CDO_CHECK_TYPE;
}
mutex_unlock(&cdrom_mutex);
}
static int cdrom_sysctl_handler(ctl_table *ctl, int write, struct file * filp,
@ -3571,22 +3640,29 @@ static void cdrom_sysctl_unregister(void)
unregister_sysctl_table(cdrom_sysctl_header);
}
#else /* CONFIG_SYSCTL */
static void cdrom_sysctl_register(void)
{
}
static void cdrom_sysctl_unregister(void)
{
}
#endif /* CONFIG_SYSCTL */
static int __init cdrom_init(void)
{
#ifdef CONFIG_SYSCTL
cdrom_sysctl_register();
#endif
return 0;
}
static void __exit cdrom_exit(void)
{
printk(KERN_INFO "Uniform CD-ROM driver unloaded\n");
#ifdef CONFIG_SYSCTL
cdrom_sysctl_unregister();
#endif
}
module_init(cdrom_init);
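Every cdrom.c hunk above is an instance of one transformation: command and response buffers move from the stack to kmalloc()'d memory because they are handed down to the block layer, where they may be used for DMA or bounced, which is not safe for on-stack storage; the sizeof(buffer) length arguments become explicit constants, since after the conversion sizeof(buffer) would be the size of a pointer; and early returns become goto err so the buffer is always freed. A condensed sketch of the pattern, with a hypothetical issue_cmd() helper standing in for init_cdrom_command() plus cdi->ops->generic_packet():

#include <linux/errno.h>
#include <linux/slab.h>

int issue_cmd(void *buf, int len);	/* hypothetical stand-in for the
					 * packet-command plumbing        */

static int query_device(void)
{
	unsigned char *buffer;
	int ret;

	buffer = kmalloc(16, GFP_KERNEL);	/* was: unsigned char buffer[16] */
	if (!buffer)
		return -ENOMEM;

	ret = issue_cmd(buffer, 16);	/* explicit 16: sizeof(buffer) would now
					 * be the size of a pointer              */
	if (ret)
		goto err;

	ret = buffer[0] ? -EIO : 0;	/* ... interpret the response ... */
err:
	kfree(buffer);			/* single exit point frees the buffer */
	return ret;
}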


@ -827,7 +827,9 @@ static int __devexit remove_gdrom(struct platform_device *devptr)
del_gendisk(gd.disk);
if (gdrom_major)
unregister_blkdev(gdrom_major, GDROM_DEV_NAME);
return unregister_cdrom(gd.cd_info);
unregister_cdrom(gd.cd_info);
return 0;
}
static struct platform_driver gdrom_driver = {


@ -650,10 +650,7 @@ static int viocd_remove(struct vio_dev *vdev)
{
struct disk_info *d = &viocd_diskinfo[vdev->unit_address];
if (unregister_cdrom(&d->viocd_info) != 0)
printk(VIOCD_KERN_WARNING
"Cannot unregister viocd CD-ROM %s!\n",
d->viocd_info.name);
unregister_cdrom(&d->viocd_info);
del_gendisk(d->viocd_disk);
blk_cleanup_queue(d->viocd_disk->queue);
put_disk(d->viocd_disk);


@ -2032,9 +2032,8 @@ static void ide_cd_release(struct kref *kref)
kfree(info->buffer);
kfree(info->toc);
if (devinfo->handle == drive && unregister_cdrom(devinfo))
printk(KERN_ERR "%s: %s failed to unregister device from the cdrom "
"driver.\n", __FUNCTION__, drive->name);
if (devinfo->handle == drive)
unregister_cdrom(devinfo);
drive->dsc_overlap = 0;
drive->driver_data = NULL;
blk_queue_prep_rq(drive->queue, NULL);


@ -852,7 +852,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
"Notifying upper driver of completion "
"(result %x)\n", cmd->result));
good_bytes = scsi_bufflen(cmd) + cmd->request->extra_len;
good_bytes = scsi_bufflen(cmd);
if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
drv = scsi_cmd_to_driver(cmd);
if (drv->done)

fs/bio.c

@ -444,22 +444,27 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
struct bio_map_data {
struct bio_vec *iovecs;
void __user *userptr;
int nr_sgvecs;
struct sg_iovec *sgvecs;
};
static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio)
static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
struct sg_iovec *iov, int iov_count)
{
memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
bmd->nr_sgvecs = iov_count;
bio->bi_private = bmd;
}
static void bio_free_map_data(struct bio_map_data *bmd)
{
kfree(bmd->iovecs);
kfree(bmd->sgvecs);
kfree(bmd);
}
static struct bio_map_data *bio_alloc_map_data(int nr_segs)
static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count)
{
struct bio_map_data *bmd = kmalloc(sizeof(*bmd), GFP_KERNEL);
@ -467,13 +472,71 @@ static struct bio_map_data *bio_alloc_map_data(int nr_segs)
return NULL;
bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, GFP_KERNEL);
if (bmd->iovecs)
if (!bmd->iovecs) {
kfree(bmd);
return NULL;
}
bmd->sgvecs = kmalloc(sizeof(struct sg_iovec) * iov_count, GFP_KERNEL);
if (bmd->sgvecs)
return bmd;
kfree(bmd->iovecs);
kfree(bmd);
return NULL;
}
static int __bio_copy_iov(struct bio *bio, struct sg_iovec *iov, int iov_count,
int uncopy)
{
int ret = 0, i;
struct bio_vec *bvec;
int iov_idx = 0;
unsigned int iov_off = 0;
int read = bio_data_dir(bio) == READ;
__bio_for_each_segment(bvec, bio, i, 0) {
char *bv_addr = page_address(bvec->bv_page);
unsigned int bv_len = bvec->bv_len;
while (bv_len && iov_idx < iov_count) {
unsigned int bytes;
char *iov_addr;
bytes = min_t(unsigned int,
iov[iov_idx].iov_len - iov_off, bv_len);
iov_addr = iov[iov_idx].iov_base + iov_off;
if (!ret) {
if (!read && !uncopy)
ret = copy_from_user(bv_addr, iov_addr,
bytes);
if (read && uncopy)
ret = copy_to_user(iov_addr, bv_addr,
bytes);
if (ret)
ret = -EFAULT;
}
bv_len -= bytes;
bv_addr += bytes;
iov_addr += bytes;
iov_off += bytes;
if (iov[iov_idx].iov_len == iov_off) {
iov_idx++;
iov_off = 0;
}
}
if (uncopy)
__free_page(bvec->bv_page);
}
return ret;
}
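__bio_copy_iov() above advances two cursors at once, one over the bio's pages and one over the user iovec, copying min(bytes left in this iovec element, bytes left in this page) at every step. The same two-cursor walk, reduced to plain userspace C over ordinary buffers and not part of the patch (the real function additionally picks copy_from_user() or copy_to_user() by direction and frees the bounce pages on the uncopy path):

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>		/* struct iovec */

/* Copy from a flat source buffer into a scattered iovec, chunk by chunk. */
static size_t scatter_copy(const char *src, size_t src_len,
			   const struct iovec *iov, int iov_count)
{
	size_t copied = 0, iov_off = 0;
	int iov_idx = 0;

	while (src_len && iov_idx < iov_count) {
		size_t bytes = iov[iov_idx].iov_len - iov_off;

		if (bytes > src_len)
			bytes = src_len;

		memcpy((char *)iov[iov_idx].iov_base + iov_off, src, bytes);

		src += bytes;
		src_len -= bytes;
		iov_off += bytes;
		copied += bytes;

		if (iov_off == iov[iov_idx].iov_len) {	/* element exhausted */
			iov_idx++;
			iov_off = 0;
		}
	}
	return copied;
}

int main(void)
{
	char a[4], b[8];
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = sizeof(a) },
		{ .iov_base = b, .iov_len = sizeof(b) },
	};
	size_t n = scatter_copy("hello world", 11, iov, 2);

	printf("copied %zu bytes: %.4s | %.7s\n", n, a, b);
	return 0;
}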
/**
* bio_uncopy_user - finish previously mapped bio
* @bio: bio being terminated
@ -484,55 +547,56 @@ static struct bio_map_data *bio_alloc_map_data(int nr_segs)
int bio_uncopy_user(struct bio *bio)
{
struct bio_map_data *bmd = bio->bi_private;
const int read = bio_data_dir(bio) == READ;
struct bio_vec *bvec;
int i, ret = 0;
int ret;
__bio_for_each_segment(bvec, bio, i, 0) {
char *addr = page_address(bvec->bv_page);
unsigned int len = bmd->iovecs[i].bv_len;
ret = __bio_copy_iov(bio, bmd->sgvecs, bmd->nr_sgvecs, 1);
if (read && !ret && copy_to_user(bmd->userptr, addr, len))
ret = -EFAULT;
__free_page(bvec->bv_page);
bmd->userptr += len;
}
bio_free_map_data(bmd);
bio_put(bio);
return ret;
}
/**
* bio_copy_user - copy user data to bio
* bio_copy_user_iov - copy user data to bio
* @q: destination block queue
* @uaddr: start of user address
* @len: length in bytes
* @iov: the iovec.
* @iov_count: number of elements in the iovec
* @write_to_vm: bool indicating writing to pages or not
*
* Prepares and returns a bio for indirect user io, bouncing data
* to/from kernel pages as necessary. Must be paired with
* call bio_uncopy_user() on io completion.
*/
struct bio *bio_copy_user(struct request_queue *q, unsigned long uaddr,
unsigned int len, int write_to_vm)
struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
int iov_count, int write_to_vm)
{
unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
unsigned long start = uaddr >> PAGE_SHIFT;
struct bio_map_data *bmd;
struct bio_vec *bvec;
struct page *page;
struct bio *bio;
int i, ret;
int nr_pages = 0;
unsigned int len = 0;
bmd = bio_alloc_map_data(end - start);
for (i = 0; i < iov_count; i++) {
unsigned long uaddr;
unsigned long end;
unsigned long start;
uaddr = (unsigned long)iov[i].iov_base;
end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
start = uaddr >> PAGE_SHIFT;
nr_pages += end - start;
len += iov[i].iov_len;
}
bmd = bio_alloc_map_data(nr_pages, iov_count);
if (!bmd)
return ERR_PTR(-ENOMEM);
bmd->userptr = (void __user *) uaddr;
ret = -ENOMEM;
bio = bio_alloc(GFP_KERNEL, end - start);
bio = bio_alloc(GFP_KERNEL, nr_pages);
if (!bio)
goto out_bmd;
@ -564,22 +628,12 @@ struct bio *bio_copy_user(struct request_queue *q, unsigned long uaddr,
* success
*/
if (!write_to_vm) {
char __user *p = (char __user *) uaddr;
/*
* for a write, copy in data to kernel pages
*/
ret = -EFAULT;
bio_for_each_segment(bvec, bio, i) {
char *addr = page_address(bvec->bv_page);
if (copy_from_user(addr, p, bvec->bv_len))
goto cleanup;
p += bvec->bv_len;
}
ret = __bio_copy_iov(bio, iov, iov_count, 0);
if (ret)
goto cleanup;
}
bio_set_map_data(bmd, bio);
bio_set_map_data(bmd, bio, iov, iov_count);
return bio;
cleanup:
bio_for_each_segment(bvec, bio, i)
@ -591,6 +645,28 @@ struct bio *bio_copy_user(struct request_queue *q, unsigned long uaddr,
return ERR_PTR(ret);
}
/**
* bio_copy_user - copy user data to bio
* @q: destination block queue
* @uaddr: start of user address
* @len: length in bytes
* @write_to_vm: bool indicating writing to pages or not
*
* Prepares and returns a bio for indirect user io, bouncing data
* to/from kernel pages as necessary. Must be paired with
* call bio_uncopy_user() on io completion.
*/
struct bio *bio_copy_user(struct request_queue *q, unsigned long uaddr,
unsigned int len, int write_to_vm)
{
struct sg_iovec iov;
iov.iov_base = (void __user *)uaddr;
iov.iov_len = len;
return bio_copy_user_iov(q, &iov, 1, write_to_vm);
}
static struct bio *__bio_map_user_iov(struct request_queue *q,
struct block_device *bdev,
struct sg_iovec *iov, int iov_count,


@ -327,6 +327,8 @@ extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int);
extern struct bio *bio_copy_user_iov(struct request_queue *, struct sg_iovec *,
int, int);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);


@ -112,6 +112,7 @@ enum rq_flag_bits {
__REQ_RW_SYNC, /* request is sync (O_DIRECT) */
__REQ_ALLOCED, /* request came from our alloc pool */
__REQ_RW_META, /* metadata io request */
__REQ_COPY_USER, /* contains copies of user pages */
__REQ_NR_BITS, /* stops here */
};
@ -133,6 +134,7 @@ enum rq_flag_bits {
#define REQ_RW_SYNC (1 << __REQ_RW_SYNC)
#define REQ_ALLOCED (1 << __REQ_ALLOCED)
#define REQ_RW_META (1 << __REQ_RW_META)
#define REQ_COPY_USER (1 << __REQ_COPY_USER)
#define BLK_MAX_CDB 16
@ -533,8 +535,13 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
* BLK_BOUNCE_ANY : don't bounce anything
* BLK_BOUNCE_ISA : bounce pages above ISA DMA boundary
*/
#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH ((u64)blk_max_low_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ANY ((u64)blk_max_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH -1ULL
#endif
#define BLK_BOUNCE_ANY (-1ULL)
#define BLK_BOUNCE_ISA (ISA_DMA_THRESHOLD)
/*


@ -910,6 +910,7 @@ struct mode_page_header {
#ifdef __KERNEL__
#include <linux/fs.h> /* not really needed, later.. */
#include <linux/device.h>
#include <linux/list.h>
struct packet_command
{
@ -934,7 +935,7 @@ struct packet_command
/* Uniform cdrom data structures for cdrom.c */
struct cdrom_device_info {
struct cdrom_device_ops *ops; /* link to device_ops */
struct cdrom_device_info *next; /* next device_info for this major */
struct list_head list; /* linked list of all device_info */
struct gendisk *disk; /* matching block layer disk */
void *handle; /* driver-dependent data */
/* specifications */
@ -994,7 +995,7 @@ extern int cdrom_ioctl(struct file *file, struct cdrom_device_info *cdi,
extern int cdrom_media_changed(struct cdrom_device_info *);
extern int register_cdrom(struct cdrom_device_info *cdi);
extern int unregister_cdrom(struct cdrom_device_info *cdi);
extern void unregister_cdrom(struct cdrom_device_info *cdi);
typedef struct {
int data;
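The device list is now a standard list_head protected by cdrom_mutex instead of a hand-rolled singly linked list, which is what lets unregister_cdrom() become void: list_del() cannot fail, so there is no longer a "device not found" error to report. A minimal hedged sketch of the pattern, with an invented structure in place of cdrom_device_info:

#include <linux/list.h>
#include <linux/mutex.h>

struct cd_info {			/* stand-in for cdrom_device_info */
	struct list_head list;
	const char *name;
};

static LIST_HEAD(cd_list);
static DEFINE_MUTEX(cd_mutex);

static void cd_register(struct cd_info *cdi)
{
	mutex_lock(&cd_mutex);
	list_add(&cdi->list, &cd_list);	/* O(1), cannot fail */
	mutex_unlock(&cd_mutex);
}

static void cd_unregister(struct cd_info *cdi)
{
	mutex_lock(&cd_mutex);
	list_del(&cdi->list);		/* no list walk, no "not found" case */
	mutex_unlock(&cd_mutex);
}

static void cd_for_each(void (*fn)(struct cd_info *))
{
	struct cd_info *cdi;

	mutex_lock(&cd_mutex);
	list_for_each_entry(cdi, &cd_list, list)
		fn(cdi);
	mutex_unlock(&cd_mutex);
}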