author | Steven Whitehouse <steve@men-an-tol.chygwyn.com> | 2006-02-23 12:49:43 +0300
---|---|---
committer | Steven Whitehouse <swhiteho@redhat.com> | 2006-02-23 12:49:43 +0300
commit | d35462b4bb847b68321c55e95c926aa485aecce2 (patch) |
tree | b08e18bf6e672633402871ee763102fdb5e63229 /block |
parent | 91ffd7db71e7451f89941a8f428b4daa2a7c1e38 (diff) |
parent | 9e956c2dac9bec602ed1ba29181b45ba6d2b6448 (diff) |
download | linux-d35462b4bb847b68321c55e95c926aa485aecce2.tar.xz |
Merge branch 'master'
Diffstat (limited to 'block')
-rw-r--r-- | block/elevator.c | 114
-rw-r--r-- | block/ll_rw_blk.c | 53
-rw-r--r-- | block/scsi_ioctl.c | 3
3 files changed, 82 insertions, 88 deletions
```diff
diff --git a/block/elevator.c b/block/elevator.c
index c9f424d5399c..24b702d649a9 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -139,35 +139,16 @@ static int elevator_attach(request_queue_t *q, struct elevator_type *e,
 
 static char chosen_elevator[16];
 
-static void elevator_setup_default(void)
+static int __init elevator_setup(char *str)
 {
-	struct elevator_type *e;
-
-	/*
-	 * If default has not been set, use the compiled-in selection.
-	 */
-	if (!chosen_elevator[0])
-		strcpy(chosen_elevator, CONFIG_DEFAULT_IOSCHED);
-
 	/*
 	 * Be backwards-compatible with previous kernels, so users
 	 * won't get the wrong elevator.
 	 */
-	if (!strcmp(chosen_elevator, "as"))
+	if (!strcmp(str, "as"))
 		strcpy(chosen_elevator, "anticipatory");
-
-	/*
-	 * If the given scheduler is not available, fall back to the default
-	 */
-	if ((e = elevator_find(chosen_elevator)))
-		elevator_put(e);
 	else
-		strcpy(chosen_elevator, CONFIG_DEFAULT_IOSCHED);
-}
-
-static int __init elevator_setup(char *str)
-{
-	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
+		strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
 	return 0;
 }
 
@@ -184,14 +165,16 @@ int elevator_init(request_queue_t *q, char *name)
 	q->end_sector = 0;
 	q->boundary_rq = NULL;
 
-	elevator_setup_default();
+	if (name && !(e = elevator_get(name)))
+		return -EINVAL;
 
-	if (!name)
-		name = chosen_elevator;
+	if (!e && *chosen_elevator && !(e = elevator_get(chosen_elevator)))
+		printk("I/O scheduler %s not found\n", chosen_elevator);
 
-	e = elevator_get(name);
-	if (!e)
-		return -EINVAL;
+	if (!e && !(e = elevator_get(CONFIG_DEFAULT_IOSCHED))) {
+		printk("Default I/O scheduler not found, using no-op\n");
+		e = elevator_get("noop");
+	}
 
 	eq = kmalloc(sizeof(struct elevator_queue), GFP_KERNEL);
 	if (!eq) {
@@ -310,7 +293,7 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
 
 	rq->flags &= ~REQ_STARTED;
 
-	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE, 0);
+	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
 }
 
 static void elv_drain_elevator(request_queue_t *q)
@@ -327,40 +310,11 @@ static void elv_drain_elevator(request_queue_t *q)
 	}
 }
 
-void __elv_add_request(request_queue_t *q, struct request *rq, int where,
-		       int plug)
+void elv_insert(request_queue_t *q, struct request *rq, int where)
 {
 	struct list_head *pos;
 	unsigned ordseq;
 
-	if (q->ordcolor)
-		rq->flags |= REQ_ORDERED_COLOR;
-
-	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
-		/*
-		 * toggle ordered color
-		 */
-		q->ordcolor ^= 1;
-
-		/*
-		 * barriers implicitly indicate back insertion
-		 */
-		if (where == ELEVATOR_INSERT_SORT)
-			where = ELEVATOR_INSERT_BACK;
-
-		/*
-		 * this request is scheduling boundary, update end_sector
-		 */
-		if (blk_fs_request(rq)) {
-			q->end_sector = rq_end_sector(rq);
-			q->boundary_rq = rq;
-		}
-	} else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
-		where = ELEVATOR_INSERT_BACK;
-
-	if (plug)
-		blk_plug_device(q);
-
 	rq->q = q;
 
 	switch (where) {
@@ -441,6 +395,42 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 	}
 }
 
+void __elv_add_request(request_queue_t *q, struct request *rq, int where,
+		       int plug)
+{
+	if (q->ordcolor)
+		rq->flags |= REQ_ORDERED_COLOR;
+
+	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+		/*
+		 * toggle ordered color
+		 */
+		if (blk_barrier_rq(rq))
+			q->ordcolor ^= 1;
+
+		/*
+		 * barriers implicitly indicate back insertion
+		 */
+		if (where == ELEVATOR_INSERT_SORT)
+			where = ELEVATOR_INSERT_BACK;
+
+		/*
+		 * this request is scheduling boundary, update
+		 * end_sector
+		 */
+		if (blk_fs_request(rq)) {
+			q->end_sector = rq_end_sector(rq);
+			q->boundary_rq = rq;
+		}
+	} else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
+		where = ELEVATOR_INSERT_BACK;
+
+	if (plug)
+		blk_plug_device(q);
+
+	elv_insert(q, rq, where);
+}
+
 void elv_add_request(request_queue_t *q, struct request *rq, int where,
 		     int plug)
 {
@@ -669,8 +659,10 @@ int elv_register(struct elevator_type *e)
 	spin_unlock_irq(&elv_list_lock);
 
 	printk(KERN_INFO "io scheduler %s registered", e->elevator_name);
-	if (!strcmp(e->elevator_name, chosen_elevator))
-		printk(" (default)");
+	if (!strcmp(e->elevator_name, chosen_elevator) ||
+	    (!*chosen_elevator &&
+	     !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
+		printk(" (default)");
 	printk("\n");
 	return 0;
 }
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 8e27d0ab0d7c..03d9c82b0fe7 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -304,6 +304,7 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
  * blk_queue_ordered - does this queue support ordered writes
  * @q: the request queue
  * @ordered: one of QUEUE_ORDERED_*
+ * @prepare_flush_fn: rq setup helper for cache flush ordered writes
  *
  * Description:
  *   For journalled file systems, doing ordered writes on a commit
@@ -332,6 +333,7 @@ int blk_queue_ordered(request_queue_t *q, unsigned ordered,
 		return -EINVAL;
 	}
 
+	q->ordered = ordered;
 	q->next_ordered = ordered;
 	q->prepare_flush_fn = prepare_flush_fn;
 
@@ -452,7 +454,7 @@ static void queue_flush(request_queue_t *q, unsigned which)
 	rq->end_io = end_io;
 	q->prepare_flush_fn(q, rq);
 
-	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 }
 
 static inline struct request *start_ordered(request_queue_t *q,
@@ -488,7 +490,7 @@ static inline struct request *start_ordered(request_queue_t *q,
 	else
 		q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
 
-	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 
 	if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
 		queue_flush(q, QUEUE_ORDERED_PREFLUSH);
@@ -506,7 +508,7 @@ static inline struct request *start_ordered(request_queue_t *q,
 
 int blk_do_ordered(request_queue_t *q, struct request **rqp)
 {
-	struct request *rq = *rqp, *allowed_rq;
+	struct request *rq = *rqp;
 	int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
 
 	if (!q->ordseq) {
@@ -530,32 +532,26 @@ int blk_do_ordered(request_queue_t *q, struct request **rqp)
 		}
 	}
 
+	/*
+	 * Ordered sequence in progress
+	 */
+
+	/* Special requests are not subject to ordering rules. */
+	if (!blk_fs_request(rq) &&
+	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
+		return 1;
+
 	if (q->ordered & QUEUE_ORDERED_TAG) {
+		/* Ordered by tag. Blocking the next barrier is enough. */
 		if (is_barrier && rq != &q->bar_rq)
 			*rqp = NULL;
-		return 1;
-	}
-
-	switch (blk_ordered_cur_seq(q)) {
-	case QUEUE_ORDSEQ_PREFLUSH:
-		allowed_rq = &q->pre_flush_rq;
-		break;
-	case QUEUE_ORDSEQ_BAR:
-		allowed_rq = &q->bar_rq;
-		break;
-	case QUEUE_ORDSEQ_POSTFLUSH:
-		allowed_rq = &q->post_flush_rq;
-		break;
-	default:
-		allowed_rq = NULL;
-		break;
+	} else {
+		/* Ordered by draining. Wait for turn. */
+		WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
+		if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
+			*rqp = NULL;
 	}
 
-	if (rq != allowed_rq &&
-	    (blk_fs_request(rq) || rq == &q->pre_flush_rq ||
-	     rq == &q->post_flush_rq))
-		*rqp = NULL;
-
 	return 1;
 }
 
@@ -662,7 +658,7 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
  *    Enables a low level driver to set an upper limit on the size of
  *    received requests.
  **/
-void blk_queue_max_sectors(request_queue_t *q, unsigned short max_sectors)
+void blk_queue_max_sectors(request_queue_t *q, unsigned int max_sectors)
 {
 	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
 		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
@@ -2577,6 +2573,8 @@ void disk_round_stats(struct gendisk *disk)
 	disk->stamp = now;
 }
 
+EXPORT_SYMBOL_GPL(disk_round_stats);
+
 /*
  * queue lock must be held
  */
@@ -2632,6 +2630,7 @@ EXPORT_SYMBOL(blk_put_request);
 /**
  * blk_end_sync_rq - executes a completion event on a request
  * @rq: request to complete
+ * @error: end io status of the request
  */
 void blk_end_sync_rq(struct request *rq, int error)
 {
@@ -3153,7 +3152,7 @@ static int __end_that_request_first(struct request *req, int uptodate,
 	if (blk_fs_request(req) && req->rq_disk) {
 		const int rw = rq_data_dir(req);
 
-		__disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9);
+		disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9);
 	}
 
 	total_bytes = bio_nbytes = 0;
@@ -3448,7 +3447,7 @@ int __init blk_dev_init(void)
 	iocontext_cachep = kmem_cache_create("blkdev_ioc",
 			sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
 
-	for (i = 0; i < NR_CPUS; i++)
+	for_each_cpu(i)
 		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
 
 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index cc72210687eb..24f7af9d0abc 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -310,6 +310,8 @@ static int sg_io(struct file *file, request_queue_t *q,
 	if (!rq->timeout)
 		rq->timeout = BLK_DEFAULT_TIMEOUT;
 
+	rq->retries = 0;
+
 	start_time = jiffies;
 
 	/* ignore return value. All information is passed back to caller
@@ -427,6 +429,7 @@ static int sg_scsi_ioctl(struct file *file, request_queue_t *q,
 	rq->data = buffer;
 	rq->data_len = bytes;
 	rq->flags |= REQ_BLOCK_PC;
+	rq->retries = 0;
 
 	blk_execute_rq(q, bd_disk, rq, 0);
 	err = rq->errors & 0xff;	/* only 8 bit SCSI status */
```
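The most consequential hunk above is the elevator.c rework: `elevator_setup_default()` goes away, and `elevator_init()` instead tries an explicitly requested scheduler first, then the boot-time `elevator=` choice, then `CONFIG_DEFAULT_IOSCHED`, and finally noop. The following is a minimal userspace sketch of that selection order, not kernel code: the `registered[]` table, `lookup()`, and `pick_elevator()` are hypothetical stand-ins for the elevator registry and `elevator_get()`, and reference counting and locking are deliberately omitted.

```c
#include <stdio.h>
#include <string.h>

/* Stand-in for the compiled-in default (CONFIG_DEFAULT_IOSCHED). */
#define DEFAULT_IOSCHED "anticipatory"

/* Hypothetical registry; models which schedulers are compiled in. */
static const char *registered[] = { "anticipatory", "deadline", "noop" };

/* Models elevator_get(): NULL when the named scheduler is absent. */
static const char *lookup(const char *name)
{
	size_t i;

	for (i = 0; i < sizeof(registered) / sizeof(registered[0]); i++)
		if (!strcmp(registered[i], name))
			return registered[i];
	return NULL;
}

/*
 * Mirrors the selection order of the reworked elevator_init():
 * explicit name -> boot-time "elevator=" choice -> compiled-in
 * default -> noop. Returning NULL models the -EINVAL case, where an
 * explicitly requested scheduler does not exist.
 */
static const char *pick_elevator(const char *name, const char *chosen)
{
	const char *e = NULL;

	if (name && !(e = lookup(name)))
		return NULL;

	if (!e && chosen && *chosen && !(e = lookup(chosen)))
		printf("I/O scheduler %s not found\n", chosen);

	if (!e && !(e = lookup(DEFAULT_IOSCHED))) {
		printf("Default I/O scheduler not found, using no-op\n");
		e = lookup("noop");
	}

	return e;
}

int main(void)
{
	/* Unknown boot-time choice falls back to the compiled-in default. */
	printf("-> %s\n", pick_elevator(NULL, "cfq"));
	/* An explicit, available name always wins. */
	printf("-> %s\n", pick_elevator("deadline", "cfq"));
	return 0;
}
```

The design point the sketch illustrates: after this change a misspelled or unavailable `elevator=` boot parameter no longer silently decides the scheduler up front; the decision is deferred to `elevator_init()`, which can report the miss per queue and still bring the queue up on a working scheduler.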