| author | Tejun Heo <tj@kernel.org> | 2009-08-14 09:41:02 +0400 |
|---|---|---|
| committer | Tejun Heo <tj@kernel.org> | 2009-08-14 09:45:31 +0400 |
| commit | 384be2b18a5f9475eab9ca2bdfa95cc1a04ef59c (patch) | |
| tree | 04c93f391a1b65c8bf8d7ba8643c07d26c26590a /include/linux/blkdev.h | |
| parent | a76761b621bcd8336065c4fe3a74f046858bc34c (diff) | |
| parent | 142d44b0dd6741a64a7bdbe029110e7c1dcf1d23 (diff) | |
| download | linux-384be2b18a5f9475eab9ca2bdfa95cc1a04ef59c.tar.xz | |
Merge branch 'percpu-for-linus' into percpu-for-next
Conflicts:
arch/sparc/kernel/smp_64.c
arch/x86/kernel/cpu/perf_counter.c
arch/x86/kernel/setup_percpu.c
drivers/cpufreq/cpufreq_ondemand.c
mm/percpu.c
Conflicts in the core and arch percpu code mostly stem from commit
ed78e1e078dd44249f88b1dd8c76dafb39567161, which replaced many uses of
num_possible_cpus() with nr_cpu_ids. Since the for-next branch has moved
all the first-chunk allocators into mm/percpu.c, those changes are carried
over from the arch code into mm/percpu.c.
Signed-off-by: Tejun Heo <tj@kernel.org>
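As background to the conflict resolution above: nr_cpu_ids is the highest possible CPU id plus one, while num_possible_cpus() only counts the bits set in cpu_possible_mask, so the latter undercounts when the possible map is sparse. The sketch below is illustrative only and uses a hypothetical helper name; it is not a hunk from the merged commit.

```c
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/cpumask.h>

/*
 * Illustrative sketch only (hypothetical helper, not from the merge):
 * per-CPU pointer arrays are indexed by CPU id, so they must be sized by
 * nr_cpu_ids rather than num_possible_cpus(), which is too small when
 * cpu_possible_mask has holes (e.g. CPUs 0 and 2 possible, 1 absent).
 */
static void __init *alloc_unit_map(void)
{
	/*
	 * Before the substitution this would have been
	 *	alloc_bootmem(num_possible_cpus() * sizeof(void *));
	 * which cannot be indexed by the highest possible CPU id.
	 */
	return alloc_bootmem(nr_cpu_ids * sizeof(void *));
}
```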
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r-- | include/linux/blkdev.h | 15 |
1 file changed, 6 insertions, 9 deletions
```diff
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 49ae07951d55..69103e053c92 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -70,11 +70,6 @@ enum rq_cmd_type_bits {
 	REQ_TYPE_ATA_PC,
 };
 
-enum {
-	BLK_RW_ASYNC	= 0,
-	BLK_RW_SYNC	= 1,
-};
-
 /*
  * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
  * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
@@ -723,6 +718,7 @@ struct rq_map_data {
 	int nr_entries;
 	unsigned long offset;
 	int null_mapped;
+	int from_user;
 };
 
 struct req_iterator {
@@ -779,18 +775,18 @@ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
  * congested queues, and wake up anyone who was waiting for requests to be
  * put back.
  */
-static inline void blk_clear_queue_congested(struct request_queue *q, int rw)
+static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
 {
-	clear_bdi_congested(&q->backing_dev_info, rw);
+	clear_bdi_congested(&q->backing_dev_info, sync);
 }
 
 /*
  * A queue has just entered congestion.  Flag that in the queue's VM-visible
  * state flags and increment the global gounter of congested queues.
  */
-static inline void blk_set_queue_congested(struct request_queue *q, int rw)
+static inline void blk_set_queue_congested(struct request_queue *q, int sync)
 {
-	set_bdi_congested(&q->backing_dev_info, rw);
+	set_bdi_congested(&q->backing_dev_info, sync);
 }
 
 extern void blk_start_queue(struct request_queue *q);
@@ -917,6 +913,7 @@ extern void blk_queue_logical_block_size(struct request_queue *, unsigned short)
 extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
 extern void blk_queue_alignment_offset(struct request_queue *q,
 				       unsigned int alignment);
+extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
 extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
 extern void blk_set_default_limits(struct queue_limits *lim);
```
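The hunks above rename the congestion helpers' rw parameter to sync, since block-layer congestion is tracked per sync/async direction rather than per read/write direction. A minimal caller sketch follows, assuming the BLK_RW_SYNC/BLK_RW_ASYNC constants removed from blkdev.h above remain available via linux/backing-dev.h in this series; the helper name is hypothetical and not from the tree.

```c
#include <linux/blkdev.h>
#include <linux/backing-dev.h>

/*
 * Illustrative sketch (hypothetical helper, not from this commit):
 * flag or clear congestion for both the sync and async directions of a
 * queue using the renamed helpers from the diff above.  BLK_RW_SYNC and
 * BLK_RW_ASYNC are assumed to come from linux/backing-dev.h here.
 */
static void example_throttle_queue(struct request_queue *q, bool congested)
{
	if (congested) {
		blk_set_queue_congested(q, BLK_RW_SYNC);
		blk_set_queue_congested(q, BLK_RW_ASYNC);
	} else {
		blk_clear_queue_congested(q, BLK_RW_SYNC);
		blk_clear_queue_congested(q, BLK_RW_ASYNC);
	}
}
```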