Diffstat (limited to 'drivers/block'): 32 files changed, 421 insertions(+), 2927 deletions(-)
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index fd236158f32d..63056cfd4b62 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -50,7 +50,7 @@ config MAC_FLOPPY config BLK_DEV_SWIM tristate "Support for SWIM Macintosh floppy" - depends on M68K && MAC + depends on M68K && MAC && !HIGHMEM help You should select this option if you want floppy support and you don't have a II, IIfx, Q900, Q950 or AV series. @@ -121,23 +121,6 @@ source "drivers/block/mtip32xx/Kconfig" source "drivers/block/zram/Kconfig" -config BLK_DEV_UMEM - tristate "Micro Memory MM5415 Battery Backed RAM support" - depends on PCI - help - Saying Y here will include support for the MM5415 family of - battery backed (Non-volatile) RAM cards. - <http://www.umem.com/> - - The cards appear as block devices that can be partitioned into - as many as 15 partitions. - - To compile this driver as a module, choose M here: the - module will be called umem. - - The umem driver has not yet been allocated a MAJOR number, so - one is chosen dynamically. - config BLK_DEV_UBD bool "Virtual block device" depends on UML @@ -378,12 +361,6 @@ config SUNVDC source "drivers/s390/block/Kconfig" -config XILINX_SYSACE - tristate "Xilinx SystemACE support" - depends on 4xx || MICROBLAZE - help - Include support for the Xilinx SystemACE CompactFlash interface - config XEN_BLKDEV_FRONTEND tristate "Xen virtual block device support" depends on XEN diff --git a/drivers/block/Makefile b/drivers/block/Makefile index e3e3f1c79a82..bc68817ef496 100644 --- a/drivers/block/Makefile +++ b/drivers/block/Makefile @@ -20,11 +20,9 @@ obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o obj-$(CONFIG_N64CART) += n64cart.o obj-$(CONFIG_BLK_DEV_RAM) += brd.o obj-$(CONFIG_BLK_DEV_LOOP) += loop.o -obj-$(CONFIG_XILINX_SYSACE) += xsysace.o obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o obj-$(CONFIG_SUNVDC) += sunvdc.o -obj-$(CONFIG_BLK_DEV_UMEM) += umem.o obj-$(CONFIG_BLK_DEV_NBD) += nbd.o obj-$(CONFIG_BLK_DEV_CRYPTOLOOP) += cryptoloop.o obj-$(CONFIG_VIRTIO_BLK) += virtio_blk.o diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index 104b713f4055..d601e49f80e0 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c @@ -729,8 +729,12 @@ static int do_format(int drive, int type, struct atari_format_descr *desc) unsigned long flags; int ret; - if (type) + if (type) { type--; + if (type >= NUM_DISK_MINORS || + minor2disktype[type].drive_types > DriveType) + return -EINVAL; + } q = unit[drive].disk[type]->queue; blk_mq_freeze_queue(q); @@ -742,11 +746,6 @@ static int do_format(int drive, int type, struct atari_format_descr *desc) local_irq_restore(flags); if (type) { - if (type >= NUM_DISK_MINORS || - minor2disktype[type].drive_types > DriveType) { - ret = -EINVAL; - goto out; - } type = minor2disktype[type].index; UDT = &atari_disk_type[type]; } @@ -2002,7 +2001,10 @@ static void ataflop_probe(dev_t dev) int drive = MINOR(dev) & 3; int type = MINOR(dev) >> 2; - if (drive >= FD_MAX_UNITS || type > NUM_DISK_MINORS) + if (type) + type--; + + if (drive >= FD_MAX_UNITS || type >= NUM_DISK_MINORS) return; mutex_lock(&ataflop_probe_lock); if (!unit[drive].disk[type]) { diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 18bf99906662..6e622c1327ee 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -22,6 +22,7 @@ #include <linux/fs.h> #include <linux/slab.h> #include <linux/backing-dev.h> +#include <linux/debugfs.h> #include <linux/uaccess.h> @@ -48,6 +49,7 @@ struct brd_device { */ spinlock_t brd_lock; struct radix_tree_root brd_pages; + 
u64 brd_nr_pages; }; /* @@ -116,6 +118,8 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector) page = radix_tree_lookup(&brd->brd_pages, idx); BUG_ON(!page); BUG_ON(page->index != idx); + } else { + brd->brd_nr_pages++; } spin_unlock(&brd->brd_lock); @@ -365,11 +369,13 @@ __setup("ramdisk_size=", ramdisk_size); */ static LIST_HEAD(brd_devices); static DEFINE_MUTEX(brd_devices_mutex); +static struct dentry *brd_debugfs_dir; static struct brd_device *brd_alloc(int i) { struct brd_device *brd; struct gendisk *disk; + char buf[DISK_NAME_LEN]; brd = kzalloc(sizeof(*brd), GFP_KERNEL); if (!brd) @@ -382,6 +388,11 @@ static struct brd_device *brd_alloc(int i) if (!brd->brd_queue) goto out_free_dev; + snprintf(buf, DISK_NAME_LEN, "ram%d", i); + if (!IS_ERR_OR_NULL(brd_debugfs_dir)) + debugfs_create_u64(buf, 0444, brd_debugfs_dir, + &brd->brd_nr_pages); + /* This is so fdisk will align partitions on 4k, because of * direct_access API needing 4k alignment, returning a PFN * (This is only a problem on very small devices <= 4M, @@ -397,7 +408,7 @@ static struct brd_device *brd_alloc(int i) disk->fops = &brd_fops; disk->private_data = brd; disk->flags = GENHD_FL_EXT_DEVT; - sprintf(disk->disk_name, "ram%d", i); + strlcpy(disk->disk_name, buf, DISK_NAME_LEN); set_capacity(disk, rd_size * 2); /* Tell the block layer that this is not a rotational device */ @@ -495,6 +506,8 @@ static int __init brd_init(void) brd_check_and_reset_par(); + brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL); + mutex_lock(&brd_devices_mutex); for (i = 0; i < rd_nr; i++) { brd = brd_alloc(i); @@ -519,6 +532,8 @@ static int __init brd_init(void) return 0; out_free: + debugfs_remove_recursive(brd_debugfs_dir); + list_for_each_entry_safe(brd, next, &brd_devices, brd_list) { list_del(&brd->brd_list); brd_free(brd); @@ -534,6 +549,8 @@ static void __exit brd_exit(void) { struct brd_device *brd, *next; + debugfs_remove_recursive(brd_debugfs_dir); + list_for_each_entry_safe(brd, next, &brd_devices, brd_list) brd_del_one(brd); diff --git a/drivers/block/drbd/drbd_interval.c b/drivers/block/drbd/drbd_interval.c index 651bd0236a99..f07b4378388b 100644 --- a/drivers/block/drbd/drbd_interval.c +++ b/drivers/block/drbd/drbd_interval.c @@ -3,7 +3,7 @@ #include <linux/rbtree_augmented.h> #include "drbd_interval.h" -/** +/* * interval_end - return end of @node */ static inline @@ -18,7 +18,7 @@ sector_t interval_end(struct rb_node *node) RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks, struct drbd_interval, rb, sector_t, end, NODE_END); -/** +/* * drbd_insert_interval - insert a new interval into a tree */ bool @@ -56,6 +56,7 @@ drbd_insert_interval(struct rb_root *root, struct drbd_interval *this) /** * drbd_contains_interval - check if a tree contains a given interval + * @root: red black tree root * @sector: start sector of @interval * @interval: may not be a valid pointer * @@ -88,7 +89,7 @@ drbd_contains_interval(struct rb_root *root, sector_t sector, return false; } -/** +/* * drbd_remove_interval - remove an interval from a tree */ void @@ -99,6 +100,7 @@ drbd_remove_interval(struct rb_root *root, struct drbd_interval *this) /** * drbd_find_overlap - search for an interval overlapping with [sector, sector + size) + * @root: red black tree root * @sector: start sector * @size: size, aligned to 512 bytes * diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 25cd8a2f729d..de463773b530 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ 
-125,7 +125,7 @@ struct bio_set drbd_io_bio_set; member of struct page. */ struct page *drbd_pp_pool; -spinlock_t drbd_pp_lock; +DEFINE_SPINLOCK(drbd_pp_lock); int drbd_pp_vacant; wait_queue_head_t drbd_pp_wait; @@ -268,7 +268,7 @@ void tl_restart(struct drbd_connection *connection, enum drbd_req_event what) /** * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL - * @device: DRBD device. + * @connection: DRBD connection. * * This is called after the connection to the peer was lost. The storage covered * by the requests on the transfer gets marked as our of sync. Called from the @@ -479,7 +479,7 @@ int conn_lowest_minor(struct drbd_connection *connection) } #ifdef CONFIG_SMP -/** +/* * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs * * Forces all threads of a resource onto the same CPU. This is beneficial for @@ -518,7 +518,6 @@ static void drbd_calc_cpu_mask(cpumask_var_t *cpu_mask) /** * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread - * @device: DRBD device. * @thi: drbd_thread object * * call in the "main loop" of _all_ threads, no need for any mutex, current won't die @@ -538,7 +537,7 @@ void drbd_thread_current_set_cpu(struct drbd_thread *thi) #define drbd_calc_cpu_mask(A) ({}) #endif -/** +/* * drbd_header_size - size of a packet header * * The header size is a multiple of 8, so any payload following the header is @@ -1193,7 +1192,7 @@ static int fill_bitmap_rle_bits(struct drbd_device *device, return len; } -/** +/* * send_bitmap_rle_or_plain * * Return 0 when done, 1 when another iteration is needed, and a negative error @@ -1324,11 +1323,11 @@ void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr, u32 set /** * _drbd_send_ack() - Sends an ack packet - * @device: DRBD device. - * @cmd: Packet command code. - * @sector: sector, needs to be in big endian byte order - * @blksize: size in byte, needs to be in big endian byte order - * @block_id: Id, big endian byte order + * @peer_device: DRBD peer device. + * @cmd: Packet command code. + * @sector: sector, needs to be in big endian byte order + * @blksize: size in byte, needs to be in big endian byte order + * @block_id: Id, big endian byte order */ static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd, u64 sector, u32 blksize, u64 block_id) @@ -1370,9 +1369,9 @@ void drbd_send_ack_rp(struct drbd_peer_device *peer_device, enum drbd_packet cmd /** * drbd_send_ack() - Sends an ack packet - * @device: DRBD device - * @cmd: packet command code - * @peer_req: peer request + * @peer_device: DRBD peer device + * @cmd: packet command code + * @peer_req: peer request */ int drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd, struct drbd_peer_request *peer_req) @@ -1882,7 +1881,7 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock, return sent; } -/** +/* * drbd_send_all - Send an entire buffer * * Returns 0 upon success and a negative error value otherwise. 
@@ -2161,9 +2160,6 @@ static int drbd_create_mempools(void) if (ret) goto Enomem; - /* drbd's page pool */ - spin_lock_init(&drbd_pp_lock); - for (i = 0; i < number; i++) { page = alloc_page(GFP_HIGHUSER); if (!page) @@ -3509,6 +3505,7 @@ static int w_bitmap_io(struct drbd_work *w, int unused) * @io_fn: IO callback to be called when bitmap IO is possible * @done: callback to be called after the bitmap IO was performed * @why: Descriptive text of the reason for doing the IO + * @flags: Bitmap flags * * While IO on the bitmap happens we freeze application IO thus we ensure * that drbd_set_out_of_sync() can not be called. This function MAY ONLY be @@ -3554,6 +3551,7 @@ void drbd_queue_bitmap_io(struct drbd_device *device, * @device: DRBD device. * @io_fn: IO callback to be called when bitmap IO is possible * @why: Descriptive text of the reason for doing the IO + * @flags: Bitmap flags * * freezes application IO while that the actual IO operations runs. This * functions MAY NOT be called from worker context. @@ -3657,7 +3655,6 @@ const char *cmdname(enum drbd_packet cmd) [P_RS_CANCEL] = "RSCancel", [P_CONN_ST_CHG_REQ] = "conn_st_chg_req", [P_CONN_ST_CHG_REPLY] = "conn_st_chg_reply", - [P_RETRY_WRITE] = "retry_write", [P_PROTOCOL_UPDATE] = "protocol_update", [P_RS_THIN_REQ] = "rs_thin_req", [P_RS_DEALLOCATED] = "rs_deallocated", diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index bf7de4c7b96c..e7d0e637e632 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -790,9 +790,11 @@ int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info) mutex_lock(&adm_ctx.resource->adm_mutex); if (info->genlhdr->cmd == DRBD_ADM_PRIMARY) - retcode = drbd_set_role(adm_ctx.device, R_PRIMARY, parms.assume_uptodate); + retcode = (enum drbd_ret_code)drbd_set_role(adm_ctx.device, + R_PRIMARY, parms.assume_uptodate); else - retcode = drbd_set_role(adm_ctx.device, R_SECONDARY, 0); + retcode = (enum drbd_ret_code)drbd_set_role(adm_ctx.device, + R_SECONDARY, 0); mutex_unlock(&adm_ctx.resource->adm_mutex); genl_lock(); @@ -916,7 +918,7 @@ void drbd_resume_io(struct drbd_device *device) wake_up(&device->misc_wait); } -/** +/* * drbd_determine_dev_size() - Sets the right device size obeying all constraints * @device: DRBD device. * @@ -1134,7 +1136,7 @@ drbd_new_dev_size(struct drbd_device *device, struct drbd_backing_dev *bdev, return size; } -/** +/* * drbd_check_al_size() - Ensures that the AL is of the right size * @device: DRBD device. * @@ -1962,7 +1964,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) drbd_flush_workqueue(&connection->sender_work); rv = _drbd_request_state(device, NS(disk, D_ATTACHING), CS_VERBOSE); - retcode = rv; /* FIXME: Type mismatch. */ + retcode = (enum drbd_ret_code)rv; drbd_resume_io(device); if (rv < SS_SUCCESS) goto fail; @@ -2687,7 +2689,8 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info) } rcu_read_unlock(); - retcode = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE); + retcode = (enum drbd_ret_code)conn_request_state(connection, + NS(conn, C_UNCONNECTED), CS_VERBOSE); conn_reconfig_done(connection); mutex_unlock(&adm_ctx.resource->adm_mutex); @@ -2800,7 +2803,7 @@ int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info) mutex_lock(&adm_ctx.resource->adm_mutex); rv = conn_try_disconnect(connection, parms.force_disconnect); if (rv < SS_SUCCESS) - retcode = rv; /* FIXME: Type mismatch. 
*/ + retcode = (enum drbd_ret_code)rv; else retcode = NO_ERROR; mutex_unlock(&adm_ctx.resource->adm_mutex); diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index c3f09a122f20..69284ebba786 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -242,9 +242,9 @@ static void conn_reclaim_net_peer_reqs(struct drbd_connection *connection) /** * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled) - * @device: DRBD device. - * @number: number of pages requested - * @retry: whether to retry, if not enough pages are available right now + * @peer_device: DRBD device. + * @number: number of pages requested + * @retry: whether to retry, if not enough pages are available right now * * Tries to allocate number pages, first from our own page pool, then from * the kernel. @@ -1352,7 +1352,7 @@ static void drbd_flush(struct drbd_connection *connection) /** * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it. - * @device: DRBD device. + * @connection: DRBD connection. * @epoch: Epoch object. * @ev: Epoch event. */ @@ -1441,9 +1441,8 @@ max_allowed_wo(struct drbd_backing_dev *bdev, enum write_ordering_e wo) return wo; } -/** +/* * drbd_bump_write_ordering() - Fall back to an other write ordering method - * @connection: DRBD connection. * @wo: Write ordering method to try. */ void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev, @@ -1619,11 +1618,10 @@ static void drbd_issue_peer_wsame(struct drbd_device *device, } -/** +/* * drbd_submit_peer_request() * @device: DRBD device. * @peer_req: peer request - * @rw: flag field, see bio->bi_opf * * May spread the pages to multiple bios, * depending on bio_add_page restrictions. 
@@ -3048,7 +3046,7 @@ out_free_e: return -EIO; } -/** +/* * drbd_asb_recover_0p - Recover after split-brain with no remaining primaries */ static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold(local) @@ -3131,7 +3129,7 @@ static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold return rv; } -/** +/* * drbd_asb_recover_1p - Recover after split-brain with one remaining primary */ static int drbd_asb_recover_1p(struct drbd_peer_device *peer_device) __must_hold(local) @@ -3188,7 +3186,7 @@ static int drbd_asb_recover_1p(struct drbd_peer_device *peer_device) __must_hold return rv; } -/** +/* * drbd_asb_recover_2p - Recover after split-brain with two remaining primaries */ static int drbd_asb_recover_2p(struct drbd_peer_device *peer_device) __must_hold(local) @@ -4672,7 +4670,7 @@ static int receive_sync_uuid(struct drbd_connection *connection, struct packet_i return 0; } -/** +/* * receive_bitmap_plain * * Return 0 when done, 1 when another iteration is needed, and a negative error @@ -4724,7 +4722,7 @@ static int dcbp_get_pad_bits(struct p_compressed_bm *p) return (p->encoding >> 4) & 0x7; } -/** +/* * recv_bm_rle_bits * * Return 0 when done, 1 when another iteration is needed, and a negative error @@ -4793,7 +4791,7 @@ recv_bm_rle_bits(struct drbd_peer_device *peer_device, return (s != c->bm_bits); } -/** +/* * decode_bitmap_c * * Return 0 when done, 1 when another iteration is needed, and a negative error @@ -5865,6 +5863,7 @@ static int got_NegRSDReply(struct drbd_connection *connection, struct packet_inf switch (pi->cmd) { case P_NEG_RS_DREPLY: drbd_rs_failed_io(device, sector, size); + break; case P_RS_CANCEL: break; default: diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 9398c2c2cb2d..13beb98a7c5a 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -753,6 +753,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, case WRITE_ACKED_BY_PEER_AND_SIS: req->rq_state |= RQ_NET_SIS; + fallthrough; case WRITE_ACKED_BY_PEER: /* Normal operation protocol C: successfully written on peer. * During resync, even in protocol != C, diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c index 0067d328f0b5..b8a27818ab3f 100644 --- a/drivers/block/drbd/drbd_state.c +++ b/drivers/block/drbd/drbd_state.c @@ -904,9 +904,9 @@ out: * is_valid_soft_transition() - Returns an SS_ error code if the state transition is not possible * This function limits state transitions that may be declined by DRBD. I.e. * user requests (aka soft transitions). - * @device: DRBD device. - * @ns: new state. * @os: old state. + * @ns: new state. + * @connection: DRBD connection. */ static enum drbd_state_rv is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_connection *connection) @@ -1044,7 +1044,7 @@ static void print_sanitize_warnings(struct drbd_device *device, enum sanitize_st * @device: DRBD device. * @os: old state. * @ns: new state. - * @warn_sync_abort: + * @warn: placeholder for returned state warning. * * When we loose connection, we have to set the state of the peers disk (pdsk) * to D_UNKNOWN. This rule and many more along those lines are in this function. @@ -1696,6 +1696,7 @@ static bool lost_contact_to_peer_data(enum drbd_disk_state os, enum drbd_disk_st * @os: old state. * @ns: new state. 
* @flags: Flags + * @state_change: state change to broadcast */ static void after_state_ch(struct drbd_device *device, union drbd_state os, union drbd_state ns, enum chg_state_flags flags, diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 4aa9683ee0c1..8a9d22207c59 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -145,8 +145,6 @@ * Better audit of register_blkdev. */ -#undef FLOPPY_SILENT_DCL_CLEAR - #define REALLY_SLOW_IO #define DEBUGT 2 @@ -2399,11 +2397,10 @@ static void rw_interrupt(void) probing = 0; } - if (CT(raw_cmd->cmd[COMMAND]) != FD_READ || - raw_cmd->kernel_data == bio_data(current_req->bio)) { + if (CT(raw_cmd->cmd[COMMAND]) != FD_READ) { /* transfer directly from buffer */ cont->done(1); - } else if (CT(raw_cmd->cmd[COMMAND]) == FD_READ) { + } else { buffer_track = raw_cmd->track; buffer_drive = current_drive; INFBOUND(buffer_max, nr_sectors + fsector_t); @@ -2411,27 +2408,6 @@ static void rw_interrupt(void) cont->redo(); } -/* Compute maximal contiguous buffer size. */ -static int buffer_chain_size(void) -{ - struct bio_vec bv; - int size; - struct req_iterator iter; - char *base; - - base = bio_data(current_req->bio); - size = 0; - - rq_for_each_segment(bv, current_req, iter) { - if (page_address(bv.bv_page) + bv.bv_offset != base + size) - break; - - size += bv.bv_len; - } - - return size >> 9; -} - /* Compute the maximal transfer size */ static int transfer_size(int ssize, int max_sector, int max_size) { @@ -2453,7 +2429,6 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2) { int remaining; /* number of transferred 512-byte sectors */ struct bio_vec bv; - char *buffer; char *dma_buffer; int size; struct req_iterator iter; @@ -2492,8 +2467,6 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2) size = bv.bv_len; SUPBOUND(size, remaining); - - buffer = page_address(bv.bv_page) + bv.bv_offset; if (dma_buffer + size > floppy_track_buffer + (max_buffer_sectors << 10) || dma_buffer < floppy_track_buffer) { @@ -2509,13 +2482,13 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2) pr_info("write\n"); break; } - if (((unsigned long)buffer) % 512) - DPRINT("%p buffer not aligned\n", buffer); if (CT(raw_cmd->cmd[COMMAND]) == FD_READ) - memcpy(buffer, dma_buffer, size); + memcpy_to_page(bv.bv_page, bv.bv_offset, dma_buffer, + size); else - memcpy(dma_buffer, buffer, size); + memcpy_from_page(dma_buffer, bv.bv_page, bv.bv_offset, + size); remaining -= size; dma_buffer += size; @@ -2690,54 +2663,6 @@ static int make_raw_rw_request(void) raw_cmd->flags &= ~FD_RAW_WRITE; raw_cmd->flags |= FD_RAW_READ; raw_cmd->cmd[COMMAND] = FM_MODE(_floppy, FD_READ); - } else if ((unsigned long)bio_data(current_req->bio) < MAX_DMA_ADDRESS) { - unsigned long dma_limit; - int direct, indirect; - - indirect = - transfer_size(ssize, max_sector, - max_buffer_sectors * 2) - fsector_t; - - /* - * Do NOT use minimum() here---MAX_DMA_ADDRESS is 64 bits wide - * on a 64 bit machine! - */ - max_size = buffer_chain_size(); - dma_limit = (MAX_DMA_ADDRESS - - ((unsigned long)bio_data(current_req->bio))) >> 9; - if ((unsigned long)max_size > dma_limit) - max_size = dma_limit; - /* 64 kb boundaries */ - if (CROSS_64KB(bio_data(current_req->bio), max_size << 9)) - max_size = (K_64 - - ((unsigned long)bio_data(current_req->bio)) % - K_64) >> 9; - direct = transfer_size(ssize, max_sector, max_size) - fsector_t; - /* - * We try to read tracks, but if we get too many errors, we - * go back to reading just one sector at a time. 
- * - * This means we should be able to read a sector even if there - * are other bad sectors on this track. - */ - if (!direct || - (indirect * 2 > direct * 3 && - *errors < drive_params[current_drive].max_errors.read_track && - ((!probing || - (drive_params[current_drive].read_track & (1 << drive_state[current_drive].probed_format)))))) { - max_size = blk_rq_sectors(current_req); - } else { - raw_cmd->kernel_data = bio_data(current_req->bio); - raw_cmd->length = current_count_sectors << 9; - if (raw_cmd->length == 0) { - DPRINT("%s: zero dma transfer attempted\n", __func__); - DPRINT("indirect=%d direct=%d fsector_t=%d\n", - indirect, direct, fsector_t); - return 0; - } - virtualdmabug_workaround(); - return 2; - } } if (CT(raw_cmd->cmd[COMMAND]) == FD_READ) @@ -2781,19 +2706,17 @@ static int make_raw_rw_request(void) raw_cmd->length = ((raw_cmd->length - 1) | (ssize - 1)) + 1; raw_cmd->length <<= 9; if ((raw_cmd->length < current_count_sectors << 9) || - (raw_cmd->kernel_data != bio_data(current_req->bio) && - CT(raw_cmd->cmd[COMMAND]) == FD_WRITE && + (CT(raw_cmd->cmd[COMMAND]) == FD_WRITE && (aligned_sector_t + (raw_cmd->length >> 9) > buffer_max || aligned_sector_t < buffer_min)) || raw_cmd->length % (128 << raw_cmd->cmd[SIZECODE]) || raw_cmd->length <= 0 || current_count_sectors <= 0) { DPRINT("fractionary current count b=%lx s=%lx\n", raw_cmd->length, current_count_sectors); - if (raw_cmd->kernel_data != bio_data(current_req->bio)) - pr_info("addr=%d, length=%ld\n", - (int)((raw_cmd->kernel_data - - floppy_track_buffer) >> 9), - current_count_sectors); + pr_info("addr=%d, length=%ld\n", + (int)((raw_cmd->kernel_data - + floppy_track_buffer) >> 9), + current_count_sectors); pr_info("st=%d ast=%d mse=%d msi=%d\n", fsector_t, aligned_sector_t, max_sector, max_size); pr_info("ssize=%x SIZECODE=%d\n", ssize, raw_cmd->cmd[SIZECODE]); @@ -2807,31 +2730,21 @@ static int make_raw_rw_request(void) return 0; } - if (raw_cmd->kernel_data != bio_data(current_req->bio)) { - if (raw_cmd->kernel_data < floppy_track_buffer || - current_count_sectors < 0 || - raw_cmd->length < 0 || - raw_cmd->kernel_data + raw_cmd->length > - floppy_track_buffer + (max_buffer_sectors << 10)) { - DPRINT("buffer overrun in schedule dma\n"); - pr_info("fsector_t=%d buffer_min=%d current_count=%ld\n", - fsector_t, buffer_min, raw_cmd->length >> 9); - pr_info("current_count_sectors=%ld\n", - current_count_sectors); - if (CT(raw_cmd->cmd[COMMAND]) == FD_READ) - pr_info("read\n"); - if (CT(raw_cmd->cmd[COMMAND]) == FD_WRITE) - pr_info("write\n"); - return 0; - } - } else if (raw_cmd->length > blk_rq_bytes(current_req) || - current_count_sectors > blk_rq_sectors(current_req)) { - DPRINT("buffer overrun in direct transfer\n"); + if (raw_cmd->kernel_data < floppy_track_buffer || + current_count_sectors < 0 || + raw_cmd->length < 0 || + raw_cmd->kernel_data + raw_cmd->length > + floppy_track_buffer + (max_buffer_sectors << 10)) { + DPRINT("buffer overrun in schedule dma\n"); + pr_info("fsector_t=%d buffer_min=%d current_count=%ld\n", + fsector_t, buffer_min, raw_cmd->length >> 9); + pr_info("current_count_sectors=%ld\n", + current_count_sectors); + if (CT(raw_cmd->cmd[COMMAND]) == FD_READ) + pr_info("read\n"); + if (CT(raw_cmd->cmd[COMMAND]) == FD_WRITE) + pr_info("write\n"); return 0; - } else if (raw_cmd->length < current_count_sectors << 9) { - DPRINT("more sectors than bytes\n"); - pr_info("bytes=%ld\n", raw_cmd->length >> 9); - pr_info("sectors=%ld\n", current_count_sectors); } if (raw_cmd->length == 0) { DPRINT("zero dma 
transfer attempted from make_raw_request\n"); @@ -3073,8 +2986,6 @@ static const char *drive_name(int type, int drive) /* raw commands */ static void raw_cmd_done(int flag) { - int i; - if (!flag) { raw_cmd->flags |= FD_RAW_FAILURE; raw_cmd->flags |= FD_RAW_HARDFAILURE; @@ -3082,8 +2993,7 @@ static void raw_cmd_done(int flag) raw_cmd->reply_count = inr; if (raw_cmd->reply_count > FD_RAW_REPLY_SIZE) raw_cmd->reply_count = 0; - for (i = 0; i < raw_cmd->reply_count; i++) - raw_cmd->reply[i] = reply_buffer[i]; + memcpy(raw_cmd->reply, reply_buffer, raw_cmd->reply_count); if (raw_cmd->flags & (FD_RAW_READ | FD_RAW_WRITE)) { unsigned long flags; @@ -3175,7 +3085,6 @@ static int raw_cmd_copyin(int cmd, void __user *param, { struct floppy_raw_cmd *ptr; int ret; - int i; *rcmd = NULL; @@ -3194,8 +3103,7 @@ loop: if (ptr->cmd_count > FD_RAW_CMD_FULLSIZE) return -EINVAL; - for (i = 0; i < FD_RAW_REPLY_SIZE; i++) - ptr->reply[i] = 0; + memset(ptr->reply, 0, FD_RAW_REPLY_SIZE); ptr->resultcode = 0; if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) { @@ -4317,7 +4225,7 @@ static char __init get_fdc_version(int fdc) r = result(fdc); if (r <= 0x00) return FDC_NONE; /* No FDC present ??? */ - if ((r == 1) && (reply_buffer[0] == 0x80)) { + if ((r == 1) && (reply_buffer[ST0] == 0x80)) { pr_info("FDC %d is an 8272A\n", fdc); return FDC_8272A; /* 8272a/765 don't know DUMPREGS */ } @@ -4342,12 +4250,12 @@ static char __init get_fdc_version(int fdc) output_byte(fdc, FD_UNLOCK); r = result(fdc); - if ((r == 1) && (reply_buffer[0] == 0x80)) { + if ((r == 1) && (reply_buffer[ST0] == 0x80)) { pr_info("FDC %d is a pre-1991 82077\n", fdc); return FDC_82077_ORIG; /* Pre-1991 82077, doesn't know * LOCK/UNLOCK */ } - if ((r != 1) || (reply_buffer[0] != 0x00)) { + if ((r != 1) || (reply_buffer[ST0] != 0x00)) { pr_info("FDC %d init: UNLOCK: unexpected return of %d bytes.\n", fdc, r); return FDC_UNKNOWN; @@ -4359,11 +4267,11 @@ static char __init get_fdc_version(int fdc) fdc, r); return FDC_UNKNOWN; } - if (reply_buffer[0] == 0x80) { + if (reply_buffer[ST0] == 0x80) { pr_info("FDC %d is a post-1991 82077\n", fdc); return FDC_82077; /* Revised 82077AA passes all the tests */ } - switch (reply_buffer[0] >> 5) { + switch (reply_buffer[ST0] >> 5) { case 0x0: /* Either a 82078-1 or a 82078SL running at 5Volt */ pr_info("FDC %d is an 82078.\n", fdc); @@ -4379,7 +4287,7 @@ static char __init get_fdc_version(int fdc) return FDC_87306; default: pr_info("FDC %d init: 82078 variant with unknown PARTID=%d.\n", - fdc, reply_buffer[0] >> 5); + fdc, reply_buffer[ST0] >> 5); return FDC_82078_UNKN; } } /* get_fdc_version */ @@ -4597,7 +4505,6 @@ static int floppy_alloc_disk(unsigned int drive, unsigned int type) return err; } - blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH); blk_queue_max_hw_sectors(disk->queue, 64); disk->major = FLOPPY_MAJOR; disk->first_minor = TOMINOR(drive) | (type << 2); diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 3be0dbc674bd..589cb0f1e030 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -95,9 +95,9 @@ /* Device instance number, incremented each time a device is probed. 
*/ static int instance; -static struct list_head online_list; -static struct list_head removing_list; -static spinlock_t dev_lock; +static LIST_HEAD(online_list); +static LIST_HEAD(removing_list); +static DEFINE_SPINLOCK(dev_lock); /* * Global variable used to hold the major block device number @@ -1213,7 +1213,7 @@ static int mtip_standby_immediate(struct mtip_port *port) { int rv; struct host_to_dev_fis fis; - unsigned long start; + unsigned long __maybe_unused start; unsigned int timeout; /* Build the FIS. */ @@ -4363,11 +4363,6 @@ static int __init mtip_init(void) pr_info(MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n"); - spin_lock_init(&dev_lock); - - INIT_LIST_HEAD(&online_list); - INIT_LIST_HEAD(&removing_list); - /* Allocate a major block device number to use with this driver. */ error = register_blkdev(0, MTIP_DRV_NAME); if (error <= 0) { diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c index 51bfd7737552..5f006d9e1472 100644 --- a/drivers/block/null_blk/main.c +++ b/drivers/block/null_blk/main.c @@ -84,6 +84,10 @@ enum { NULL_Q_MQ = 2, }; +static bool g_virt_boundary = false; +module_param_named(virt_boundary, g_virt_boundary, bool, 0444); +MODULE_PARM_DESC(virt_boundary, "Require a virtual boundary for the device. Default: False"); + static int g_no_sched; module_param_named(no_sched, g_no_sched, int, 0444); MODULE_PARM_DESC(no_sched, "No io scheduler"); @@ -366,6 +370,7 @@ NULLB_DEVICE_ATTR(zone_capacity, ulong, NULL); NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL); NULLB_DEVICE_ATTR(zone_max_open, uint, NULL); NULLB_DEVICE_ATTR(zone_max_active, uint, NULL); +NULLB_DEVICE_ATTR(virt_boundary, bool, NULL); static ssize_t nullb_device_power_show(struct config_item *item, char *page) { @@ -486,6 +491,7 @@ static struct configfs_attribute *nullb_device_attrs[] = { &nullb_device_attr_zone_nr_conv, &nullb_device_attr_zone_max_open, &nullb_device_attr_zone_max_active, + &nullb_device_attr_virt_boundary, NULL, }; @@ -539,7 +545,7 @@ nullb_group_drop_item(struct config_group *group, struct config_item *item) static ssize_t memb_group_features_show(struct config_item *item, char *page) { return snprintf(page, PAGE_SIZE, - "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size,zone_capacity,zone_nr_conv,zone_max_open,zone_max_active,blocksize,max_sectors\n"); + "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size,zone_capacity,zone_nr_conv,zone_max_open,zone_max_active,blocksize,max_sectors,virt_boundary\n"); } CONFIGFS_ATTR_RO(memb_group_, features); @@ -605,6 +611,7 @@ static struct nullb_device *null_alloc_dev(void) dev->zone_nr_conv = g_zone_nr_conv; dev->zone_max_open = g_zone_max_open; dev->zone_max_active = g_zone_max_active; + dev->virt_boundary = g_virt_boundary; return dev; } @@ -1896,6 +1903,9 @@ static int null_add_dev(struct nullb_device *dev) BLK_DEF_MAX_SECTORS); blk_queue_max_hw_sectors(nullb->q, dev->max_sectors); + if (dev->virt_boundary) + blk_queue_virt_boundary(nullb->q, PAGE_SIZE - 1); + null_config_discard(nullb); sprintf(nullb->disk_name, "nullb%d", nullb->index); diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h index 4876d5adb12d..64bef125d1df 100644 --- a/drivers/block/null_blk/null_blk.h +++ b/drivers/block/null_blk/null_blk.h @@ -97,6 +97,7 @@ struct nullb_device { bool memory_backed; /* if data is stored in memory */ bool discard; /* if support discard */ bool zoned; /* if device is zoned */ + bool virt_boundary; /* virtual boundary on/off for the device */ }; struct nullb { diff 
--git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c index bfcab1c782b5..dae54dd1aeac 100644 --- a/drivers/block/null_blk/zoned.c +++ b/drivers/block/null_blk/zoned.c @@ -180,6 +180,7 @@ int null_register_zoned_dev(struct nullb *nullb) void null_free_zoned_dev(struct nullb_device *dev) { kvfree(dev->zones); + dev->zones = NULL; } int null_report_zones(struct gendisk *disk, sector_t sector, diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index 897acda20ac8..828a45ffe0e7 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c @@ -859,16 +859,6 @@ static unsigned int pd_check_events(struct gendisk *p, unsigned int clearing) return r ? DISK_EVENT_MEDIA_CHANGE : 0; } -static int pd_revalidate(struct gendisk *p) -{ - struct pd_unit *disk = p->private_data; - if (pd_special_command(disk, pd_identify) == 0) - set_capacity(p, disk->capacity); - else - set_capacity(p, 0); - return 0; -} - static const struct block_device_operations pd_fops = { .owner = THIS_MODULE, .open = pd_open, @@ -877,7 +867,6 @@ static const struct block_device_operations pd_fops = { .compat_ioctl = pd_ioctl, .getgeo = pd_getgeo, .check_events = pd_check_events, - .revalidate_disk= pd_revalidate }; /* probing */ diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index fc4b0f1aa86d..bd3556585122 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -1199,6 +1199,42 @@ try_next_bio: return 1; } +/** + * bio_list_copy_data - copy contents of data buffers from one chain of bios to + * another + * @src: source bio list + * @dst: destination bio list + * + * Stops when it reaches the end of either the @src list or @dst list - that is, + * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of + * bios). + */ +static void bio_list_copy_data(struct bio *dst, struct bio *src) +{ + struct bvec_iter src_iter = src->bi_iter; + struct bvec_iter dst_iter = dst->bi_iter; + + while (1) { + if (!src_iter.bi_size) { + src = src->bi_next; + if (!src) + break; + + src_iter = src->bi_iter; + } + + if (!dst_iter.bi_size) { + dst = dst->bi_next; + if (!dst) + break; + + dst_iter = dst->bi_iter; + } + + bio_copy_data_iter(dst, &dst_iter, src, &src_iter); + } +} + /* * Assemble a bio to write one packet and queue the bio for processing * by the underlying block device. 
diff --git a/drivers/block/rnbd/rnbd-clt-sysfs.c b/drivers/block/rnbd/rnbd-clt-sysfs.c index d4aa6bfc9555..324afdd63a96 100644 --- a/drivers/block/rnbd/rnbd-clt-sysfs.c +++ b/drivers/block/rnbd/rnbd-clt-sysfs.c @@ -34,6 +34,7 @@ enum { RNBD_OPT_DEV_PATH = 1 << 2, RNBD_OPT_ACCESS_MODE = 1 << 3, RNBD_OPT_SESSNAME = 1 << 6, + RNBD_OPT_NR_POLL_QUEUES = 1 << 7, }; static const unsigned int rnbd_opt_mandatory[] = { @@ -42,12 +43,13 @@ static const unsigned int rnbd_opt_mandatory[] = { }; static const match_table_t rnbd_opt_tokens = { - {RNBD_OPT_PATH, "path=%s" }, - {RNBD_OPT_DEV_PATH, "device_path=%s"}, - {RNBD_OPT_DEST_PORT, "dest_port=%d" }, - {RNBD_OPT_ACCESS_MODE, "access_mode=%s"}, - {RNBD_OPT_SESSNAME, "sessname=%s" }, - {RNBD_OPT_ERR, NULL }, + {RNBD_OPT_PATH, "path=%s" }, + {RNBD_OPT_DEV_PATH, "device_path=%s" }, + {RNBD_OPT_DEST_PORT, "dest_port=%d" }, + {RNBD_OPT_ACCESS_MODE, "access_mode=%s" }, + {RNBD_OPT_SESSNAME, "sessname=%s" }, + {RNBD_OPT_NR_POLL_QUEUES, "nr_poll_queues=%d" }, + {RNBD_OPT_ERR, NULL }, }; struct rnbd_map_options { @@ -57,6 +59,7 @@ struct rnbd_map_options { char *pathname; u16 *dest_port; enum rnbd_access_mode *access_mode; + u32 *nr_poll_queues; }; static int rnbd_clt_parse_map_options(const char *buf, size_t max_path_cnt, @@ -68,7 +71,7 @@ static int rnbd_clt_parse_map_options(const char *buf, size_t max_path_cnt, int opt_mask = 0; int token; int ret = -EINVAL; - int i, dest_port; + int i, dest_port, nr_poll_queues; int p_cnt = 0; options = kstrdup(buf, GFP_KERNEL); @@ -96,7 +99,7 @@ static int rnbd_clt_parse_map_options(const char *buf, size_t max_path_cnt, kfree(p); goto out; } - strlcpy(opt->sessname, p, NAME_MAX); + strscpy(opt->sessname, p, NAME_MAX); kfree(p); break; @@ -139,7 +142,7 @@ static int rnbd_clt_parse_map_options(const char *buf, size_t max_path_cnt, kfree(p); goto out; } - strlcpy(opt->pathname, p, NAME_MAX); + strscpy(opt->pathname, p, NAME_MAX); kfree(p); break; @@ -178,6 +181,19 @@ static int rnbd_clt_parse_map_options(const char *buf, size_t max_path_cnt, kfree(p); break; + case RNBD_OPT_NR_POLL_QUEUES: + if (match_int(args, &nr_poll_queues) || nr_poll_queues < -1 || + nr_poll_queues > (int)nr_cpu_ids) { + pr_err("bad nr_poll_queues parameter '%d'\n", + nr_poll_queues); + ret = -EINVAL; + goto out; + } + if (nr_poll_queues == -1) + nr_poll_queues = nr_cpu_ids; + *opt->nr_poll_queues = nr_poll_queues; + break; + default: pr_err("map_device: Unknown parameter or missing value '%s'\n", p); @@ -227,6 +243,19 @@ static ssize_t state_show(struct kobject *kobj, static struct kobj_attribute rnbd_clt_state_attr = __ATTR_RO(state); +static ssize_t nr_poll_queues_show(struct kobject *kobj, + struct kobj_attribute *attr, char *page) +{ + struct rnbd_clt_dev *dev; + + dev = container_of(kobj, struct rnbd_clt_dev, kobj); + + return sysfs_emit(page, "%d\n", dev->nr_poll_queues); +} + +static struct kobj_attribute rnbd_clt_nr_poll_queues = + __ATTR_RO(nr_poll_queues); + static ssize_t mapping_path_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) { @@ -421,6 +450,7 @@ static struct attribute *rnbd_dev_attrs[] = { &rnbd_clt_state_attr.attr, &rnbd_clt_session_attr.attr, &rnbd_clt_access_mode.attr, + &rnbd_clt_nr_poll_queues.attr, NULL, }; @@ -432,10 +462,14 @@ void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev) * i.e. rnbd_clt_unmap_dev_store() leading to a sysfs warning because * of sysfs link already was removed already. 
*/ - if (dev->blk_symlink_name && try_module_get(THIS_MODULE)) { - sysfs_remove_link(rnbd_devs_kobj, dev->blk_symlink_name); + if (dev->blk_symlink_name) { + if (try_module_get(THIS_MODULE)) { + sysfs_remove_link(rnbd_devs_kobj, dev->blk_symlink_name); + module_put(THIS_MODULE); + } + /* It should be freed always. */ kfree(dev->blk_symlink_name); - module_put(THIS_MODULE); + dev->blk_symlink_name = NULL; } } @@ -456,6 +490,7 @@ static int rnbd_clt_add_dev_kobj(struct rnbd_clt_dev *dev) ret); kobject_put(&dev->kobj); } + kobject_uevent(gd_kobj, KOBJ_ONLINE); return ret; } @@ -465,7 +500,7 @@ static ssize_t rnbd_clt_map_device_show(struct kobject *kobj, char *page) { return scnprintf(page, PAGE_SIZE, - "Usage: echo \"[dest_port=server port number] sessname=<name of the rtrs session> path=<[srcaddr@]dstaddr> [path=<[srcaddr@]dstaddr>] device_path=<full path on remote side> [access_mode=<ro|rw|migration>]\" > %s\n\naddr ::= [ ip:<ipv4> | ip:<ipv6> | gid:<gid> ]\n", + "Usage: echo \"[dest_port=server port number] sessname=<name of the rtrs session> path=<[srcaddr@]dstaddr> [path=<[srcaddr@]dstaddr>] device_path=<full path on remote side> [access_mode=<ro|rw|migration>] [nr_poll_queues=<number of queues>]\" > %s\n\naddr ::= [ ip:<ipv4> | ip:<ipv6> | gid:<gid> ]\n", attr->attr.name); } @@ -475,15 +510,11 @@ static int rnbd_clt_get_path_name(struct rnbd_clt_dev *dev, char *buf, int ret; char pathname[NAME_MAX], *s; - strlcpy(pathname, dev->pathname, sizeof(pathname)); + strscpy(pathname, dev->pathname, sizeof(pathname)); while ((s = strchr(pathname, '/'))) s[0] = '!'; - ret = snprintf(buf, len, "%s", pathname); - if (ret >= len) - return -ENAMETOOLONG; - - ret = snprintf(buf, len, "%s@%s", buf, dev->sess->sessname); + ret = snprintf(buf, len, "%s@%s", pathname, dev->sess->sessname); if (ret >= len) return -ENAMETOOLONG; @@ -537,6 +568,7 @@ static ssize_t rnbd_clt_map_device_store(struct kobject *kobj, char sessname[NAME_MAX]; enum rnbd_access_mode access_mode = RNBD_ACCESS_RW; u16 port_nr = RTRS_PORT; + u32 nr_poll_queues = 0; struct sockaddr_storage *addrs; struct rtrs_addr paths[6]; @@ -548,6 +580,7 @@ static ssize_t rnbd_clt_map_device_store(struct kobject *kobj, opt.pathname = pathname; opt.dest_port = &port_nr; opt.access_mode = &access_mode; + opt.nr_poll_queues = &nr_poll_queues; addrs = kcalloc(ARRAY_SIZE(paths) * 2, sizeof(*addrs), GFP_KERNEL); if (!addrs) return -ENOMEM; @@ -561,12 +594,13 @@ static ssize_t rnbd_clt_map_device_store(struct kobject *kobj, if (ret) goto out; - pr_info("Mapping device %s on session %s, (access_mode: %s)\n", + pr_info("Mapping device %s on session %s, (access_mode: %s, nr_poll_queues: %d)\n", pathname, sessname, - rnbd_access_mode_str(access_mode)); + rnbd_access_mode_str(access_mode), + nr_poll_queues); dev = rnbd_clt_map_device(sessname, paths, path_cnt, port_nr, pathname, - access_mode); + access_mode, nr_poll_queues); if (IS_ERR(dev)) { ret = PTR_ERR(dev); goto out; @@ -639,13 +673,9 @@ cls_destroy: return err; } -void rnbd_clt_destroy_default_group(void) -{ - sysfs_remove_group(&rnbd_dev->kobj, &default_attr_group); -} - void rnbd_clt_destroy_sysfs_files(void) { + sysfs_remove_group(&rnbd_dev->kobj, &default_attr_group); kobject_del(rnbd_devs_kobj); kobject_put(rnbd_devs_kobj); device_destroy(rnbd_dev_class, MKDEV(0, 0)); diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c index 45a470076652..c01786afe1b1 100644 --- a/drivers/block/rnbd/rnbd-clt.c +++ b/drivers/block/rnbd/rnbd-clt.c @@ -110,6 +110,7 @@ static int 
rnbd_clt_change_capacity(struct rnbd_clt_dev *dev, static int process_msg_open_rsp(struct rnbd_clt_dev *dev, struct rnbd_msg_open_rsp *rsp) { + struct kobject *gd_kobj; int err = 0; mutex_lock(&dev->lock); @@ -128,6 +129,8 @@ static int process_msg_open_rsp(struct rnbd_clt_dev *dev, */ if (dev->nsectors != nsectors) rnbd_clt_change_capacity(dev, nsectors); + gd_kobj = &disk_to_dev(dev->gd)->kobj; + kobject_uevent(gd_kobj, KOBJ_ONLINE); rnbd_clt_info(dev, "Device online, device remapped successfully\n"); } err = rnbd_clt_set_dev_attr(dev, rsp); @@ -312,13 +315,11 @@ static void rnbd_rerun_all_if_idle(struct rnbd_clt_session *sess) static struct rtrs_permit *rnbd_get_permit(struct rnbd_clt_session *sess, enum rtrs_clt_con_type con_type, - int wait) + enum wait_type wait) { struct rtrs_permit *permit; - permit = rtrs_clt_get_permit(sess->rtrs, con_type, - wait ? RTRS_PERMIT_WAIT : - RTRS_PERMIT_NOWAIT); + permit = rtrs_clt_get_permit(sess->rtrs, con_type, wait); if (likely(permit)) /* We have a subtle rare case here, when all permits can be * consumed before busy counter increased. This is safe, @@ -344,7 +345,7 @@ static void rnbd_put_permit(struct rnbd_clt_session *sess, static struct rnbd_iu *rnbd_get_iu(struct rnbd_clt_session *sess, enum rtrs_clt_con_type con_type, - int wait) + enum wait_type wait) { struct rnbd_iu *iu; struct rtrs_permit *permit; @@ -354,9 +355,7 @@ static struct rnbd_iu *rnbd_get_iu(struct rnbd_clt_session *sess, return NULL; } - permit = rnbd_get_permit(sess, con_type, - wait ? RTRS_PERMIT_WAIT : - RTRS_PERMIT_NOWAIT); + permit = rnbd_get_permit(sess, con_type, wait); if (unlikely(!permit)) { kfree(iu); return NULL; @@ -435,16 +434,11 @@ static void msg_conf(void *priv, int errno) schedule_work(&iu->work); } -enum wait_type { - NO_WAIT = 0, - WAIT = 1 -}; - static int send_usr_msg(struct rtrs_clt *rtrs, int dir, struct rnbd_iu *iu, struct kvec *vec, size_t len, struct scatterlist *sg, unsigned int sg_len, void (*conf)(struct work_struct *work), - int *errno, enum wait_type wait) + int *errno, int wait) { int err; struct rtrs_clt_req_ops req_ops; @@ -476,7 +470,8 @@ static void msg_close_conf(struct work_struct *work) rnbd_clt_put_dev(dev); } -static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, bool wait) +static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, + enum wait_type wait) { struct rnbd_clt_session *sess = dev->sess; struct rnbd_msg_close msg; @@ -530,7 +525,7 @@ static void msg_open_conf(struct work_struct *work) * If server thinks its fine, but we fail to process * then be nice and send a close to server. 
*/ - (void)send_msg_close(dev, device_id, NO_WAIT); + send_msg_close(dev, device_id, RTRS_PERMIT_NOWAIT); } } kfree(rsp); @@ -554,7 +549,7 @@ static void msg_sess_info_conf(struct work_struct *work) rnbd_clt_put_sess(sess); } -static int send_msg_open(struct rnbd_clt_dev *dev, bool wait) +static int send_msg_open(struct rnbd_clt_dev *dev, enum wait_type wait) { struct rnbd_clt_session *sess = dev->sess; struct rnbd_msg_open_rsp *rsp; @@ -583,7 +578,7 @@ static int send_msg_open(struct rnbd_clt_dev *dev, bool wait) msg.hdr.type = cpu_to_le16(RNBD_MSG_OPEN); msg.access_mode = dev->access_mode; - strlcpy(msg.dev_name, dev->pathname, sizeof(msg.dev_name)); + strscpy(msg.dev_name, dev->pathname, sizeof(msg.dev_name)); WARN_ON(!rnbd_clt_get_dev(dev)); err = send_usr_msg(sess->rtrs, READ, iu, @@ -601,7 +596,7 @@ static int send_msg_open(struct rnbd_clt_dev *dev, bool wait) return err; } -static int send_msg_sess_info(struct rnbd_clt_session *sess, bool wait) +static int send_msg_sess_info(struct rnbd_clt_session *sess, enum wait_type wait) { struct rnbd_msg_sess_info_rsp *rsp; struct rnbd_msg_sess_info msg; @@ -657,14 +652,18 @@ put_iu: static void set_dev_states_to_disconnected(struct rnbd_clt_session *sess) { struct rnbd_clt_dev *dev; + struct kobject *gd_kobj; mutex_lock(&sess->lock); list_for_each_entry(dev, &sess->devs_list, list) { rnbd_clt_err(dev, "Device disconnected.\n"); mutex_lock(&dev->lock); - if (dev->dev_state == DEV_STATE_MAPPED) + if (dev->dev_state == DEV_STATE_MAPPED) { dev->dev_state = DEV_STATE_MAPPED_DISCONNECTED; + gd_kobj = &disk_to_dev(dev->gd)->kobj; + kobject_uevent(gd_kobj, KOBJ_OFFLINE); + } mutex_unlock(&dev->lock); } mutex_unlock(&sess->lock); @@ -687,7 +686,7 @@ static void remap_devs(struct rnbd_clt_session *sess) * be asynchronous. 
*/ - err = send_msg_sess_info(sess, NO_WAIT); + err = send_msg_sess_info(sess, RTRS_PERMIT_NOWAIT); if (err) { pr_err("send_msg_sess_info(\"%s\"): %d\n", sess->sessname, err); return; @@ -711,7 +710,7 @@ static void remap_devs(struct rnbd_clt_session *sess) continue; rnbd_clt_info(dev, "session reconnected, remapping device\n"); - err = send_msg_open(dev, NO_WAIT); + err = send_msg_open(dev, RTRS_PERMIT_NOWAIT); if (err) { rnbd_clt_err(dev, "send_msg_open(): %d\n", err); break; @@ -801,7 +800,7 @@ static struct rnbd_clt_session *alloc_sess(const char *sessname) sess = kzalloc_node(sizeof(*sess), GFP_KERNEL, NUMA_NO_NODE); if (!sess) return ERR_PTR(-ENOMEM); - strlcpy(sess->sessname, sessname, sizeof(sess->sessname)); + strscpy(sess->sessname, sessname, sizeof(sess->sessname)); atomic_set(&sess->busy, 0); mutex_init(&sess->lock); INIT_LIST_HEAD(&sess->devs_list); @@ -918,6 +917,7 @@ again: return NULL; } +/* caller is responsible for initializing 'first' to false */ static struct rnbd_clt_session *find_or_create_sess(const char *sessname, bool *first) { @@ -933,8 +933,7 @@ rnbd_clt_session *find_or_create_sess(const char *sessname, bool *first) } list_add(&sess->list, &sess_list); *first = true; - } else - *first = false; + } mutex_unlock(&sess_lock); return sess; @@ -1173,9 +1172,54 @@ static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx, return ret; } +static int rnbd_rdma_poll(struct blk_mq_hw_ctx *hctx) +{ + struct rnbd_queue *q = hctx->driver_data; + struct rnbd_clt_dev *dev = q->dev; + int cnt; + + cnt = rtrs_clt_rdma_cq_direct(dev->sess->rtrs, hctx->queue_num); + return cnt; +} + +static int rnbd_rdma_map_queues(struct blk_mq_tag_set *set) +{ + struct rnbd_clt_session *sess = set->driver_data; + + /* shared read/write queues */ + set->map[HCTX_TYPE_DEFAULT].nr_queues = num_online_cpus(); + set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; + set->map[HCTX_TYPE_READ].nr_queues = num_online_cpus(); + set->map[HCTX_TYPE_READ].queue_offset = 0; + blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); + blk_mq_map_queues(&set->map[HCTX_TYPE_READ]); + + if (sess->nr_poll_queues) { + /* dedicated queue for poll */ + set->map[HCTX_TYPE_POLL].nr_queues = sess->nr_poll_queues; + set->map[HCTX_TYPE_POLL].queue_offset = set->map[HCTX_TYPE_READ].queue_offset + + set->map[HCTX_TYPE_READ].nr_queues; + blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]); + pr_info("[session=%s] mapped %d/%d/%d default/read/poll queues.\n", + sess->sessname, + set->map[HCTX_TYPE_DEFAULT].nr_queues, + set->map[HCTX_TYPE_READ].nr_queues, + set->map[HCTX_TYPE_POLL].nr_queues); + } else { + pr_info("[session=%s] mapped %d/%d default/read queues.\n", + sess->sessname, + set->map[HCTX_TYPE_DEFAULT].nr_queues, + set->map[HCTX_TYPE_READ].nr_queues); + } + + return 0; +} + static struct blk_mq_ops rnbd_mq_ops = { .queue_rq = rnbd_queue_rq, .complete = rnbd_softirq_done_fn, + .map_queues = rnbd_rdma_map_queues, + .poll = rnbd_rdma_poll, }; static int setup_mq_tags(struct rnbd_clt_session *sess) @@ -1189,7 +1233,15 @@ static int setup_mq_tags(struct rnbd_clt_session *sess) tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_TAG_QUEUE_SHARED; tag_set->cmd_size = sizeof(struct rnbd_iu) + RNBD_RDMA_SGL_SIZE; - tag_set->nr_hw_queues = num_online_cpus(); + + /* for HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL */ + tag_set->nr_maps = sess->nr_poll_queues ? 
HCTX_MAX_TYPES : 2; + /* + * HCTX_TYPE_DEFAULT and HCTX_TYPE_READ share one set of queues + * others are for HCTX_TYPE_POLL + */ + tag_set->nr_hw_queues = num_online_cpus() + sess->nr_poll_queues; + tag_set->driver_data = sess; return blk_mq_alloc_tag_set(tag_set); } @@ -1197,18 +1249,27 @@ static int setup_mq_tags(struct rnbd_clt_session *sess) static struct rnbd_clt_session * find_and_get_or_create_sess(const char *sessname, const struct rtrs_addr *paths, - size_t path_cnt, u16 port_nr) + size_t path_cnt, u16 port_nr, u32 nr_poll_queues) { struct rnbd_clt_session *sess; struct rtrs_attrs attrs; int err; - bool first; + bool first = false; struct rtrs_clt_ops rtrs_ops; sess = find_or_create_sess(sessname, &first); if (sess == ERR_PTR(-ENOMEM)) return ERR_PTR(-ENOMEM); - else if (!first) + else if ((nr_poll_queues && !first) || (!nr_poll_queues && sess->nr_poll_queues)) { + /* + * A device MUST have its own session to use the polling-mode. + * It must fail to map new device with the same session. + */ + err = -EINVAL; + goto put_sess; + } + + if (!first) return sess; if (!path_cnt) { @@ -1228,8 +1289,7 @@ find_and_get_or_create_sess(const char *sessname, paths, path_cnt, port_nr, 0, /* Do not use pdu of rtrs */ RECONNECT_DELAY, BMAX_SEGMENTS, - BLK_MAX_SEGMENT_SIZE, - MAX_RECONNECTS); + MAX_RECONNECTS, nr_poll_queues); if (IS_ERR(sess->rtrs)) { err = PTR_ERR(sess->rtrs); goto wake_up_and_put; @@ -1237,12 +1297,13 @@ find_and_get_or_create_sess(const char *sessname, rtrs_clt_query(sess->rtrs, &attrs); sess->max_io_size = attrs.max_io_size; sess->queue_depth = attrs.queue_depth; + sess->nr_poll_queues = nr_poll_queues; err = setup_mq_tags(sess); if (err) goto close_rtrs; - err = send_msg_sess_info(sess, WAIT); + err = send_msg_sess_info(sess, RTRS_PERMIT_WAIT); if (err) goto close_rtrs; @@ -1352,12 +1413,12 @@ static void rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx) if (!dev->rotational) blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->queue); + add_disk(dev->gd); } -static int rnbd_client_setup_device(struct rnbd_clt_session *sess, - struct rnbd_clt_dev *dev, int idx) +static int rnbd_client_setup_device(struct rnbd_clt_dev *dev) { - int err; + int err, idx = dev->clt_device_id; dev->size = dev->nsectors * dev->logical_block_size; @@ -1380,7 +1441,8 @@ static int rnbd_client_setup_device(struct rnbd_clt_session *sess, static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess, enum rnbd_access_mode access_mode, - const char *pathname) + const char *pathname, + u32 nr_poll_queues) { struct rnbd_clt_dev *dev; int ret; @@ -1389,7 +1451,12 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess, if (!dev) return ERR_PTR(-ENOMEM); - dev->hw_queues = kcalloc(nr_cpu_ids, sizeof(*dev->hw_queues), + /* + * nr_cpu_ids: the number of softirq queues + * nr_poll_queues: the number of polling queues + */ + dev->hw_queues = kcalloc(nr_cpu_ids + nr_poll_queues, + sizeof(*dev->hw_queues), GFP_KERNEL); if (!dev->hw_queues) { ret = -ENOMEM; @@ -1415,6 +1482,7 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess, dev->clt_device_id = ret; dev->sess = sess; dev->access_mode = access_mode; + dev->nr_poll_queues = nr_poll_queues; mutex_init(&dev->lock); refcount_set(&dev->refcount, 1); dev->dev_state = DEV_STATE_INIT; @@ -1471,14 +1539,13 @@ static bool exists_devpath(const char *pathname, const char *sessname) return found; } -static bool insert_dev_if_not_exists_devpath(const char *pathname, - struct rnbd_clt_session *sess, - struct rnbd_clt_dev *dev) +static bool 
insert_dev_if_not_exists_devpath(struct rnbd_clt_dev *dev) { bool found; + struct rnbd_clt_session *sess = dev->sess; mutex_lock(&sess_lock); - found = __exists_dev(pathname, sess->sessname); + found = __exists_dev(dev->pathname, sess->sessname); if (!found) { mutex_lock(&sess->lock); list_add_tail(&dev->list, &sess->devs_list); @@ -1502,7 +1569,8 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname, struct rtrs_addr *paths, size_t path_cnt, u16 port_nr, const char *pathname, - enum rnbd_access_mode access_mode) + enum rnbd_access_mode access_mode, + u32 nr_poll_queues) { struct rnbd_clt_session *sess; struct rnbd_clt_dev *dev; @@ -1511,22 +1579,22 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname, if (unlikely(exists_devpath(pathname, sessname))) return ERR_PTR(-EEXIST); - sess = find_and_get_or_create_sess(sessname, paths, path_cnt, port_nr); + sess = find_and_get_or_create_sess(sessname, paths, path_cnt, port_nr, nr_poll_queues); if (IS_ERR(sess)) return ERR_CAST(sess); - dev = init_dev(sess, access_mode, pathname); + dev = init_dev(sess, access_mode, pathname, nr_poll_queues); if (IS_ERR(dev)) { pr_err("map_device: failed to map device '%s' from session %s, can't initialize device, err: %ld\n", pathname, sess->sessname, PTR_ERR(dev)); ret = PTR_ERR(dev); goto put_sess; } - if (insert_dev_if_not_exists_devpath(pathname, sess, dev)) { + if (insert_dev_if_not_exists_devpath(dev)) { ret = -EEXIST; goto put_dev; } - ret = send_msg_open(dev, WAIT); + ret = send_msg_open(dev, RTRS_PERMIT_WAIT); if (ret) { rnbd_clt_err(dev, "map_device: failed, can't open remote device, err: %d\n", @@ -1536,7 +1604,7 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname, mutex_lock(&dev->lock); pr_debug("Opened remote device: session=%s, path='%s'\n", sess->sessname, pathname); - ret = rnbd_client_setup_device(sess, dev, dev->clt_device_id); + ret = rnbd_client_setup_device(dev); if (ret) { rnbd_clt_err(dev, "map_device: Failed to configure device, err: %d\n", @@ -1555,14 +1623,12 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname, dev->max_hw_sectors, dev->rotational, dev->wc, dev->fua); mutex_unlock(&dev->lock); - - add_disk(dev->gd); rnbd_clt_put_sess(sess); return dev; send_close: - send_msg_close(dev, dev->device_id, WAIT); + send_msg_close(dev, dev->device_id, RTRS_PERMIT_WAIT); del_dev: delete_dev(dev); put_dev: @@ -1622,7 +1688,7 @@ int rnbd_clt_unmap_device(struct rnbd_clt_dev *dev, bool force, destroy_sysfs(dev, sysfs_self); destroy_gen_disk(dev); if (was_mapped && sess->rtrs) - send_msg_close(dev, dev->device_id, WAIT); + send_msg_close(dev, dev->device_id, RTRS_PERMIT_WAIT); rnbd_clt_info(dev, "Device is unmapped\n"); @@ -1656,7 +1722,7 @@ int rnbd_clt_remap_device(struct rnbd_clt_dev *dev) mutex_unlock(&dev->lock); if (!err) { rnbd_clt_info(dev, "Remapping device.\n"); - err = send_msg_open(dev, WAIT); + err = send_msg_open(dev, RTRS_PERMIT_WAIT); if (err) rnbd_clt_err(dev, "remap_device: %d\n", err); } @@ -1678,7 +1744,6 @@ static void rnbd_destroy_sessions(void) struct rnbd_clt_dev *dev, *tn; /* Firstly forbid access through sysfs interface */ - rnbd_clt_destroy_default_group(); rnbd_clt_destroy_sysfs_files(); /* diff --git a/drivers/block/rnbd/rnbd-clt.h b/drivers/block/rnbd/rnbd-clt.h index 537d499dad3b..451e7383738f 100644 --- a/drivers/block/rnbd/rnbd-clt.h +++ b/drivers/block/rnbd/rnbd-clt.h @@ -90,6 +90,7 @@ struct rnbd_clt_session { int queue_depth; u32 max_io_size; struct blk_mq_tag_set tag_set; + u32 nr_poll_queues; struct mutex lock; 
/* protects state and devs_list */ struct list_head devs_list; /* list of struct rnbd_clt_dev */ refcount_t refcount; @@ -118,6 +119,7 @@ struct rnbd_clt_dev { enum rnbd_clt_dev_state dev_state; char *pathname; enum rnbd_access_mode access_mode; + u32 nr_poll_queues; bool read_only; bool rotational; bool wc; @@ -147,7 +149,8 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname, struct rtrs_addr *paths, size_t path_cnt, u16 port_nr, const char *pathname, - enum rnbd_access_mode access_mode); + enum rnbd_access_mode access_mode, + u32 nr_poll_queues); int rnbd_clt_unmap_device(struct rnbd_clt_dev *dev, bool force, const struct attribute *sysfs_self); @@ -159,7 +162,6 @@ int rnbd_clt_resize_disk(struct rnbd_clt_dev *dev, size_t newsize); int rnbd_clt_create_sysfs_files(void); void rnbd_clt_destroy_sysfs_files(void); -void rnbd_clt_destroy_default_group(void); void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev); diff --git a/drivers/block/rnbd/rnbd-srv-sysfs.c b/drivers/block/rnbd/rnbd-srv-sysfs.c index 05ffe488ddc6..acf5fced11ef 100644 --- a/drivers/block/rnbd/rnbd-srv-sysfs.c +++ b/drivers/block/rnbd/rnbd-srv-sysfs.c @@ -147,10 +147,7 @@ static ssize_t rnbd_srv_dev_session_force_close_store(struct kobject *kobj, } rnbd_srv_info(sess_dev, "force close requested\n"); - - /* first remove sysfs itself to avoid deadlock */ - sysfs_remove_file_self(&sess_dev->kobj, &attr->attr); - rnbd_srv_sess_dev_force_close(sess_dev); + rnbd_srv_sess_dev_force_close(sess_dev, attr); return count; } diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c index a6a68d44f517..899dd9d7c10b 100644 --- a/drivers/block/rnbd/rnbd-srv.c +++ b/drivers/block/rnbd/rnbd-srv.c @@ -114,8 +114,7 @@ rnbd_get_sess_dev(int dev_id, struct rnbd_srv_session *srv_sess) return sess_dev; } -static int process_rdma(struct rtrs_srv *sess, - struct rnbd_srv_session *srv_sess, +static int process_rdma(struct rnbd_srv_session *srv_sess, struct rtrs_srv_op *id, void *data, u32 datalen, const void *usr, size_t usrlen) { @@ -178,8 +177,10 @@ err: return err; } -static void destroy_device(struct rnbd_srv_dev *dev) +static void destroy_device(struct kref *kref) { + struct rnbd_srv_dev *dev = container_of(kref, struct rnbd_srv_dev, kref); + WARN_ONCE(!list_empty(&dev->sess_dev_list), "Device %s is being destroyed but still in use!\n", dev->id); @@ -198,18 +199,9 @@ static void destroy_device(struct rnbd_srv_dev *dev) kfree(dev); } -static void destroy_device_cb(struct kref *kref) -{ - struct rnbd_srv_dev *dev; - - dev = container_of(kref, struct rnbd_srv_dev, kref); - - destroy_device(dev); -} - static void rnbd_put_srv_dev(struct rnbd_srv_dev *dev) { - kref_put(&dev->kref, destroy_device_cb); + kref_put(&dev->kref, destroy_device); } void rnbd_destroy_sess_dev(struct rnbd_srv_sess_dev *sess_dev, bool keep_id) @@ -306,7 +298,7 @@ static int create_sess(struct rtrs_srv *rtrs) mutex_unlock(&sess_lock); srv_sess->rtrs = rtrs; - strlcpy(srv_sess->sessname, sessname, sizeof(srv_sess->sessname)); + strscpy(srv_sess->sessname, sessname, sizeof(srv_sess->sessname)); rtrs_srv_set_sess_priv(rtrs, srv_sess); @@ -336,18 +328,22 @@ static int rnbd_srv_link_ev(struct rtrs_srv *rtrs, } } -void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev) +void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev, + struct kobj_attribute *attr) { struct rnbd_srv_session *sess = sess_dev->sess; sess_dev->keep_id = true; - mutex_lock(&sess->lock); + /* It is already started to close by client's close message. 
*/ + if (!mutex_trylock(&sess->lock)) + return; + /* first remove sysfs itself to avoid deadlock */ + sysfs_remove_file_self(&sess_dev->kobj, &attr->attr); rnbd_srv_destroy_dev_session_sysfs(sess_dev); mutex_unlock(&sess->lock); } -static int process_msg_close(struct rtrs_srv *rtrs, - struct rnbd_srv_session *srv_sess, +static int process_msg_close(struct rnbd_srv_session *srv_sess, void *data, size_t datalen, const void *usr, size_t usrlen) { @@ -366,20 +362,18 @@ static int process_msg_close(struct rtrs_srv *rtrs, return 0; } -static int process_msg_open(struct rtrs_srv *rtrs, - struct rnbd_srv_session *srv_sess, +static int process_msg_open(struct rnbd_srv_session *srv_sess, const void *msg, size_t len, void *data, size_t datalen); -static int process_msg_sess_info(struct rtrs_srv *rtrs, - struct rnbd_srv_session *srv_sess, +static int process_msg_sess_info(struct rnbd_srv_session *srv_sess, const void *msg, size_t len, void *data, size_t datalen); -static int rnbd_srv_rdma_ev(struct rtrs_srv *rtrs, void *priv, - struct rtrs_srv_op *id, int dir, - void *data, size_t datalen, const void *usr, - size_t usrlen) +static int rnbd_srv_rdma_ev(void *priv, + struct rtrs_srv_op *id, int dir, + void *data, size_t datalen, const void *usr, + size_t usrlen) { struct rnbd_srv_session *srv_sess = priv; const struct rnbd_msg_hdr *hdr = usr; @@ -393,19 +387,16 @@ static int rnbd_srv_rdma_ev(struct rtrs_srv *rtrs, void *priv, switch (type) { case RNBD_MSG_IO: - return process_rdma(rtrs, srv_sess, id, data, datalen, usr, - usrlen); + return process_rdma(srv_sess, id, data, datalen, usr, usrlen); case RNBD_MSG_CLOSE: - ret = process_msg_close(rtrs, srv_sess, data, datalen, - usr, usrlen); + ret = process_msg_close(srv_sess, data, datalen, usr, usrlen); break; case RNBD_MSG_OPEN: - ret = process_msg_open(rtrs, srv_sess, usr, usrlen, - data, datalen); + ret = process_msg_open(srv_sess, usr, usrlen, data, datalen); break; case RNBD_MSG_SESS_INFO: - ret = process_msg_sess_info(rtrs, srv_sess, usr, usrlen, - data, datalen); + ret = process_msg_sess_info(srv_sess, usr, usrlen, data, + datalen); break; default: pr_warn("Received unexpected message type %d with dir %d from session %s\n", @@ -446,7 +437,7 @@ static struct rnbd_srv_dev *rnbd_srv_init_srv_dev(const char *id) if (!dev) return ERR_PTR(-ENOMEM); - strlcpy(dev->id, id, sizeof(dev->id)); + strscpy(dev->id, id, sizeof(dev->id)); kref_init(&dev->kref); INIT_LIST_HEAD(&dev->sess_dev_list); mutex_init(&dev->lock); @@ -598,7 +589,7 @@ rnbd_srv_create_set_sess_dev(struct rnbd_srv_session *srv_sess, kref_init(&sdev->kref); - strlcpy(sdev->pathname, open_msg->dev_name, sizeof(sdev->pathname)); + strscpy(sdev->pathname, open_msg->dev_name, sizeof(sdev->pathname)); sdev->rnbd_dev = rnbd_dev; sdev->sess = srv_sess; @@ -658,8 +649,7 @@ static char *rnbd_srv_get_full_path(struct rnbd_srv_session *srv_sess, return full_path; } -static int process_msg_sess_info(struct rtrs_srv *rtrs, - struct rnbd_srv_session *srv_sess, +static int process_msg_sess_info(struct rnbd_srv_session *srv_sess, const void *msg, size_t len, void *data, size_t datalen) { @@ -700,8 +690,7 @@ find_srv_sess_dev(struct rnbd_srv_session *srv_sess, const char *dev_name) return NULL; } -static int process_msg_open(struct rtrs_srv *rtrs, - struct rnbd_srv_session *srv_sess, +static int process_msg_open(struct rnbd_srv_session *srv_sess, const void *msg, size_t len, void *data, size_t datalen) { diff --git a/drivers/block/rnbd/rnbd-srv.h b/drivers/block/rnbd/rnbd-srv.h index b157371c25ed..98ddc31eb408 
100644 --- a/drivers/block/rnbd/rnbd-srv.h +++ b/drivers/block/rnbd/rnbd-srv.h @@ -64,7 +64,8 @@ struct rnbd_srv_sess_dev { enum rnbd_access_mode access_mode; }; -void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev); +void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev, + struct kobj_attribute *attr); /* rnbd-srv-sysfs.c */ int rnbd_srv_create_dev_sysfs(struct rnbd_srv_dev *dev, diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c index 227e1be4c6f9..83636714b8d7 100644 --- a/drivers/block/rsxx/core.c +++ b/drivers/block/rsxx/core.c @@ -392,7 +392,7 @@ static irqreturn_t rsxx_isr(int irq, void *pdata) } /*----------------- Card Event Handler -------------------*/ -static const char * const rsxx_card_state_to_str(unsigned int state) +static const char *rsxx_card_state_to_str(unsigned int state) { static const char * const state_strings[] = { "Unknown", "Shutdown", "Starting", "Formatting", diff --git a/drivers/block/swim.c b/drivers/block/swim.c index cc6a0bc6c005..2917b21f48ff 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c @@ -816,8 +816,6 @@ static int swim_floppy_init(struct swim_priv *swd) } swd->unit[drive].disk->queue = q; - blk_queue_bounce_limit(swd->unit[drive].disk->queue, - BLK_BOUNCE_HIGH); swd->unit[drive].disk->queue->queuedata = &swd->unit[drive]; swd->unit[drive].swd = swd; } diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index c2d922d125e2..a515d0c1d2cb 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c @@ -234,7 +234,6 @@ static unsigned short write_postamble[] = { }; static void seek_track(struct floppy_state *fs, int n); -static void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count); static void act(struct floppy_state *fs); static void scan_timeout(struct timer_list *t); static void seek_timeout(struct timer_list *t); @@ -404,12 +403,28 @@ static inline void seek_track(struct floppy_state *fs, int n) fs->settle_time = 0; } +/* + * XXX: this is a horrible hack, but at least allows ppc32 to get + * out of defining virt_to_bus, and this driver out of using the + * deprecated block layer bounce buffering for highmem addresses + * for no good reason. 
+ */ +static unsigned long swim3_phys_to_bus(phys_addr_t paddr) +{ + return paddr + PCI_DRAM_OFFSET; +} + +static phys_addr_t swim3_bio_phys(struct bio *bio) +{ + return page_to_phys(bio_page(bio)) + bio_offset(bio); +} + static inline void init_dma(struct dbdma_cmd *cp, int cmd, - void *buf, int count) + phys_addr_t paddr, int count) { cp->req_count = cpu_to_le16(count); cp->command = cpu_to_le16(cmd); - cp->phy_addr = cpu_to_le32(virt_to_bus(buf)); + cp->phy_addr = cpu_to_le32(swim3_phys_to_bus(paddr)); cp->xfer_status = 0; } @@ -441,16 +456,18 @@ static inline void setup_transfer(struct floppy_state *fs) out_8(&sw->sector, fs->req_sector); out_8(&sw->nsect, n); out_8(&sw->gap3, 0); - out_le32(&dr->cmdptr, virt_to_bus(cp)); + out_le32(&dr->cmdptr, swim3_phys_to_bus(virt_to_phys(cp))); if (rq_data_dir(req) == WRITE) { /* Set up 3 dma commands: write preamble, data, postamble */ - init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble)); + init_dma(cp, OUTPUT_MORE, virt_to_phys(write_preamble), + sizeof(write_preamble)); ++cp; - init_dma(cp, OUTPUT_MORE, bio_data(req->bio), 512); + init_dma(cp, OUTPUT_MORE, swim3_bio_phys(req->bio), 512); ++cp; - init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble)); + init_dma(cp, OUTPUT_LAST, virt_to_phys(write_postamble), + sizeof(write_postamble)); } else { - init_dma(cp, INPUT_LAST, bio_data(req->bio), n * 512); + init_dma(cp, INPUT_LAST, swim3_bio_phys(req->bio), n * 512); } ++cp; out_le16(&cp->command, DBDMA_STOP); @@ -1201,7 +1218,6 @@ static int swim3_attach(struct macio_dev *mdev, disk->queue = NULL; goto out_put_disk; } - blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH); disk->queue->queuedata = fs; rc = swim3_add_device(mdev, floppy_count); diff --git a/drivers/block/umem.c b/drivers/block/umem.c deleted file mode 100644 index 664280f23bee..000000000000 --- a/drivers/block/umem.c +++ /dev/null @@ -1,1130 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * mm.c - Micro Memory(tm) PCI memory board block device driver - v2.3 - * - * (C) 2001 San Mehat <nettwerk@valinux.com> - * (C) 2001 Johannes Erdfelt <jerdfelt@valinux.com> - * (C) 2001 NeilBrown <neilb@cse.unsw.edu.au> - * - * This driver for the Micro Memory PCI Memory Module with Battery Backup - * is Copyright Micro Memory Inc 2001-2002. All rights reserved. - * - * This driver provides a standard block device interface for Micro Memory(tm) - * PCI based RAM boards. - * 10/05/01: Phap Nguyen - Rebuilt the driver - * 10/22/01: Phap Nguyen - v2.1 Added disk partitioning - * 29oct2001:NeilBrown - Use make_request_fn instead of request_fn - * - use stand disk partitioning (so fdisk works). - * 08nov2001:NeilBrown - change driver name from "mm" to "umem" - * - incorporate into main kernel - * 08apr2002:NeilBrown - Move some of interrupt handle to tasklet - * - use spin_lock_bh instead of _irq - * - Never block on make_request. queue - * bh's instead. - * - unregister umem from devfs at mod unload - * - Change version to 2.3 - * 07Nov2001:Phap Nguyen - Select pci read command: 06, 12, 15 (Decimal) - * 07Jan2002: P. Nguyen - Used PCI Memory Write & Invalidate for DMA - * 15May2002:NeilBrown - convert to bio for 2.5 - * 17May2002:NeilBrown - remove init_mem initialisation. Instead detect - * - a sequence of writes that cover the card, and - * - set initialised bit then. 
- */ - -#undef DEBUG /* #define DEBUG if you want debugging info (pr_debug) */ -#include <linux/fs.h> -#include <linux/bio.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/mman.h> -#include <linux/gfp.h> -#include <linux/ioctl.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/timer.h> -#include <linux/pci.h> -#include <linux/dma-mapping.h> - -#include <linux/fcntl.h> /* O_ACCMODE */ -#include <linux/hdreg.h> /* HDIO_GETGEO */ - -#include "umem.h" - -#include <linux/uaccess.h> -#include <asm/io.h> - -#define MM_MAXCARDS 4 -#define MM_RAHEAD 2 /* two sectors */ -#define MM_BLKSIZE 1024 /* 1k blocks */ -#define MM_HARDSECT 512 /* 512-byte hardware sectors */ -#define MM_SHIFT 6 /* max 64 partitions on 4 cards */ - -/* - * Version Information - */ - -#define DRIVER_NAME "umem" -#define DRIVER_VERSION "v2.3" -#define DRIVER_AUTHOR "San Mehat, Johannes Erdfelt, NeilBrown" -#define DRIVER_DESC "Micro Memory(tm) PCI memory board block driver" - -static int debug; -/* #define HW_TRACE(x) writeb(x,cards[0].csr_remap + MEMCTRLSTATUS_MAGIC) */ -#define HW_TRACE(x) - -#define DEBUG_LED_ON_TRANSFER 0x01 -#define DEBUG_BATTERY_POLLING 0x02 - -module_param(debug, int, 0644); -MODULE_PARM_DESC(debug, "Debug bitmask"); - -static int pci_read_cmd = 0x0C; /* Read Multiple */ -module_param(pci_read_cmd, int, 0); -MODULE_PARM_DESC(pci_read_cmd, "PCI read command"); - -static int pci_write_cmd = 0x0F; /* Write and Invalidate */ -module_param(pci_write_cmd, int, 0); -MODULE_PARM_DESC(pci_write_cmd, "PCI write command"); - -static int pci_cmds; - -static int major_nr; - -#include <linux/blkdev.h> -#include <linux/blkpg.h> - -struct cardinfo { - struct pci_dev *dev; - - unsigned char __iomem *csr_remap; - unsigned int mm_size; /* size in kbytes */ - - unsigned int init_size; /* initial segment, in sectors, - * that we know to - * have been written - */ - struct bio *bio, *currentbio, **biotail; - struct bvec_iter current_iter; - - struct request_queue *queue; - - struct mm_page { - dma_addr_t page_dma; - struct mm_dma_desc *desc; - int cnt, headcnt; - struct bio *bio, **biotail; - struct bvec_iter iter; - } mm_pages[2]; -#define DESC_PER_PAGE ((PAGE_SIZE*2)/sizeof(struct mm_dma_desc)) - - int Active, Ready; - - struct tasklet_struct tasklet; - unsigned int dma_status; - - struct { - int good; - int warned; - unsigned long last_change; - } battery[2]; - - spinlock_t lock; - int check_batteries; - - int flags; -}; - -static struct cardinfo cards[MM_MAXCARDS]; -static struct timer_list battery_timer; - -static int num_cards; - -static struct gendisk *mm_gendisk[MM_MAXCARDS]; - -static void check_batteries(struct cardinfo *card); - -static int get_userbit(struct cardinfo *card, int bit) -{ - unsigned char led; - - led = readb(card->csr_remap + MEMCTRLCMD_LEDCTRL); - return led & bit; -} - -static int set_userbit(struct cardinfo *card, int bit, unsigned char state) -{ - unsigned char led; - - led = readb(card->csr_remap + MEMCTRLCMD_LEDCTRL); - if (state) - led |= bit; - else - led &= ~bit; - writeb(led, card->csr_remap + MEMCTRLCMD_LEDCTRL); - - return 0; -} - -/* - * NOTE: For the power LED, use the LED_POWER_* macros since they differ - */ -static void set_led(struct cardinfo *card, int shift, unsigned char state) -{ - unsigned char led; - - led = readb(card->csr_remap + MEMCTRLCMD_LEDCTRL); - if (state == LED_FLIP) - led ^= (1<<shift); - else { - led &= ~(0x03 << shift); - led |= (state << shift); - } - writeb(led, card->csr_remap + 
MEMCTRLCMD_LEDCTRL); - -} - -#ifdef MM_DIAG -static void dump_regs(struct cardinfo *card) -{ - unsigned char *p; - int i, i1; - - p = card->csr_remap; - for (i = 0; i < 8; i++) { - printk(KERN_DEBUG "%p ", p); - - for (i1 = 0; i1 < 16; i1++) - printk("%02x ", *p++); - - printk("\n"); - } -} -#endif - -static void dump_dmastat(struct cardinfo *card, unsigned int dmastat) -{ - dev_printk(KERN_DEBUG, &card->dev->dev, "DMAstat - "); - if (dmastat & DMASCR_ANY_ERR) - printk(KERN_CONT "ANY_ERR "); - if (dmastat & DMASCR_MBE_ERR) - printk(KERN_CONT "MBE_ERR "); - if (dmastat & DMASCR_PARITY_ERR_REP) - printk(KERN_CONT "PARITY_ERR_REP "); - if (dmastat & DMASCR_PARITY_ERR_DET) - printk(KERN_CONT "PARITY_ERR_DET "); - if (dmastat & DMASCR_SYSTEM_ERR_SIG) - printk(KERN_CONT "SYSTEM_ERR_SIG "); - if (dmastat & DMASCR_TARGET_ABT) - printk(KERN_CONT "TARGET_ABT "); - if (dmastat & DMASCR_MASTER_ABT) - printk(KERN_CONT "MASTER_ABT "); - if (dmastat & DMASCR_CHAIN_COMPLETE) - printk(KERN_CONT "CHAIN_COMPLETE "); - if (dmastat & DMASCR_DMA_COMPLETE) - printk(KERN_CONT "DMA_COMPLETE "); - printk("\n"); -} - -/* - * Theory of request handling - * - * Each bio is assigned to one mm_dma_desc - which may not be enough FIXME - * We have two pages of mm_dma_desc, holding about 64 descriptors - * each. These are allocated at init time. - * One page is "Ready" and is either full, or can have request added. - * The other page might be "Active", which DMA is happening on it. - * - * Whenever IO on the active page completes, the Ready page is activated - * and the ex-Active page is clean out and made Ready. - * Otherwise the Ready page is only activated when it becomes full. - * - * If a request arrives while both pages a full, it is queued, and b_rdev is - * overloaded to record whether it was a read or a write. - * - * The interrupt handler only polls the device to clear the interrupt. - * The processing of the result is done in a tasklet. - */ - -static void mm_start_io(struct cardinfo *card) -{ - /* we have the lock, we know there is - * no IO active, and we know that card->Active - * is set - */ - struct mm_dma_desc *desc; - struct mm_page *page; - int offset; - - /* make the last descriptor end the chain */ - page = &card->mm_pages[card->Active]; - pr_debug("start_io: %d %d->%d\n", - card->Active, page->headcnt, page->cnt - 1); - desc = &page->desc[page->cnt-1]; - - desc->control_bits |= cpu_to_le32(DMASCR_CHAIN_COMP_EN); - desc->control_bits &= ~cpu_to_le32(DMASCR_CHAIN_EN); - desc->sem_control_bits = desc->control_bits; - - - if (debug & DEBUG_LED_ON_TRANSFER) - set_led(card, LED_REMOVE, LED_ON); - - desc = &page->desc[page->headcnt]; - writel(0, card->csr_remap + DMA_PCI_ADDR); - writel(0, card->csr_remap + DMA_PCI_ADDR + 4); - - writel(0, card->csr_remap + DMA_LOCAL_ADDR); - writel(0, card->csr_remap + DMA_LOCAL_ADDR + 4); - - writel(0, card->csr_remap + DMA_TRANSFER_SIZE); - writel(0, card->csr_remap + DMA_TRANSFER_SIZE + 4); - - writel(0, card->csr_remap + DMA_SEMAPHORE_ADDR); - writel(0, card->csr_remap + DMA_SEMAPHORE_ADDR + 4); - - offset = ((char *)desc) - ((char *)page->desc); - writel(cpu_to_le32((page->page_dma+offset) & 0xffffffff), - card->csr_remap + DMA_DESCRIPTOR_ADDR); - /* Force the value to u64 before shifting otherwise >> 32 is undefined C - * and on some ports will do nothing ! 
*/ - writel(cpu_to_le32(((u64)page->page_dma)>>32), - card->csr_remap + DMA_DESCRIPTOR_ADDR + 4); - - /* Go, go, go */ - writel(cpu_to_le32(DMASCR_GO | DMASCR_CHAIN_EN | pci_cmds), - card->csr_remap + DMA_STATUS_CTRL); -} - -static int add_bio(struct cardinfo *card); - -static void activate(struct cardinfo *card) -{ - /* if No page is Active, and Ready is - * not empty, then switch Ready page - * to active and start IO. - * Then add any bh's that are available to Ready - */ - - do { - while (add_bio(card)) - ; - - if (card->Active == -1 && - card->mm_pages[card->Ready].cnt > 0) { - card->Active = card->Ready; - card->Ready = 1-card->Ready; - mm_start_io(card); - } - - } while (card->Active == -1 && add_bio(card)); -} - -static inline void reset_page(struct mm_page *page) -{ - page->cnt = 0; - page->headcnt = 0; - page->bio = NULL; - page->biotail = &page->bio; -} - -/* - * If there is room on Ready page, take - * one bh off list and add it. - * return 1 if there was room, else 0. - */ -static int add_bio(struct cardinfo *card) -{ - struct mm_page *p; - struct mm_dma_desc *desc; - dma_addr_t dma_handle; - int offset; - struct bio *bio; - struct bio_vec vec; - - bio = card->currentbio; - if (!bio && card->bio) { - card->currentbio = card->bio; - card->current_iter = card->bio->bi_iter; - card->bio = card->bio->bi_next; - if (card->bio == NULL) - card->biotail = &card->bio; - card->currentbio->bi_next = NULL; - return 1; - } - if (!bio) - return 0; - - if (card->mm_pages[card->Ready].cnt >= DESC_PER_PAGE) - return 0; - - vec = bio_iter_iovec(bio, card->current_iter); - - dma_handle = dma_map_page(&card->dev->dev, - vec.bv_page, - vec.bv_offset, - vec.bv_len, - bio_op(bio) == REQ_OP_READ ? - DMA_FROM_DEVICE : DMA_TO_DEVICE); - - p = &card->mm_pages[card->Ready]; - desc = &p->desc[p->cnt]; - p->cnt++; - if (p->bio == NULL) - p->iter = card->current_iter; - if ((p->biotail) != &bio->bi_next) { - *(p->biotail) = bio; - p->biotail = &(bio->bi_next); - bio->bi_next = NULL; - } - - desc->data_dma_handle = dma_handle; - - desc->pci_addr = cpu_to_le64((u64)desc->data_dma_handle); - desc->local_addr = cpu_to_le64(card->current_iter.bi_sector << 9); - desc->transfer_size = cpu_to_le32(vec.bv_len); - offset = (((char *)&desc->sem_control_bits) - ((char *)p->desc)); - desc->sem_addr = cpu_to_le64((u64)(p->page_dma+offset)); - desc->zero1 = desc->zero2 = 0; - offset = (((char *)(desc+1)) - ((char *)p->desc)); - desc->next_desc_addr = cpu_to_le64(p->page_dma+offset); - desc->control_bits = cpu_to_le32(DMASCR_GO|DMASCR_ERR_INT_EN| - DMASCR_PARITY_INT_EN| - DMASCR_CHAIN_EN | - DMASCR_SEM_EN | - pci_cmds); - if (bio_op(bio) == REQ_OP_WRITE) - desc->control_bits |= cpu_to_le32(DMASCR_TRANSFER_READ); - desc->sem_control_bits = desc->control_bits; - - - bio_advance_iter(bio, &card->current_iter, vec.bv_len); - if (!card->current_iter.bi_size) - card->currentbio = NULL; - - return 1; -} - -static void process_page(unsigned long data) -{ - /* check if any of the requests in the page are DMA_COMPLETE, - * and deal with them appropriately. - * If we find a descriptor without DMA_COMPLETE in the semaphore, then - * dma must have hit an error on that descriptor, so use dma_status - * instead and assume that all following descriptors must be re-tried. 
- */ - struct mm_page *page; - struct bio *return_bio = NULL; - struct cardinfo *card = (struct cardinfo *)data; - unsigned int dma_status = card->dma_status; - - spin_lock(&card->lock); - if (card->Active < 0) - goto out_unlock; - page = &card->mm_pages[card->Active]; - - while (page->headcnt < page->cnt) { - struct bio *bio = page->bio; - struct mm_dma_desc *desc = &page->desc[page->headcnt]; - int control = le32_to_cpu(desc->sem_control_bits); - int last = 0; - struct bio_vec vec; - - if (!(control & DMASCR_DMA_COMPLETE)) { - control = dma_status; - last = 1; - } - - page->headcnt++; - vec = bio_iter_iovec(bio, page->iter); - bio_advance_iter(bio, &page->iter, vec.bv_len); - - if (!page->iter.bi_size) { - page->bio = bio->bi_next; - if (page->bio) - page->iter = page->bio->bi_iter; - } - - dma_unmap_page(&card->dev->dev, desc->data_dma_handle, - vec.bv_len, - (control & DMASCR_TRANSFER_READ) ? - DMA_TO_DEVICE : DMA_FROM_DEVICE); - if (control & DMASCR_HARD_ERROR) { - /* error */ - bio->bi_status = BLK_STS_IOERR; - dev_printk(KERN_WARNING, &card->dev->dev, - "I/O error on sector %d/%d\n", - le32_to_cpu(desc->local_addr)>>9, - le32_to_cpu(desc->transfer_size)); - dump_dmastat(card, control); - } else if (op_is_write(bio_op(bio)) && - le32_to_cpu(desc->local_addr) >> 9 == - card->init_size) { - card->init_size += le32_to_cpu(desc->transfer_size) >> 9; - if (card->init_size >> 1 >= card->mm_size) { - dev_printk(KERN_INFO, &card->dev->dev, - "memory now initialised\n"); - set_userbit(card, MEMORY_INITIALIZED, 1); - } - } - if (bio != page->bio) { - bio->bi_next = return_bio; - return_bio = bio; - } - - if (last) - break; - } - - if (debug & DEBUG_LED_ON_TRANSFER) - set_led(card, LED_REMOVE, LED_OFF); - - if (card->check_batteries) { - card->check_batteries = 0; - check_batteries(card); - } - if (page->headcnt >= page->cnt) { - reset_page(page); - card->Active = -1; - activate(card); - } else { - /* haven't finished with this one yet */ - pr_debug("do some more\n"); - mm_start_io(card); - } - out_unlock: - spin_unlock(&card->lock); - - while (return_bio) { - struct bio *bio = return_bio; - - return_bio = bio->bi_next; - bio->bi_next = NULL; - bio_endio(bio); - } -} - -static void mm_unplug(struct blk_plug_cb *cb, bool from_schedule) -{ - struct cardinfo *card = cb->data; - - spin_lock_irq(&card->lock); - activate(card); - spin_unlock_irq(&card->lock); - kfree(cb); -} - -static int mm_check_plugged(struct cardinfo *card) -{ - return !!blk_check_plugged(mm_unplug, card, sizeof(struct blk_plug_cb)); -} - -static blk_qc_t mm_submit_bio(struct bio *bio) -{ - struct cardinfo *card = bio->bi_bdev->bd_disk->private_data; - - pr_debug("mm_make_request %llu %u\n", - (unsigned long long)bio->bi_iter.bi_sector, - bio->bi_iter.bi_size); - - blk_queue_split(&bio); - - spin_lock_irq(&card->lock); - *card->biotail = bio; - bio->bi_next = NULL; - card->biotail = &bio->bi_next; - if (op_is_sync(bio->bi_opf) || !mm_check_plugged(card)) - activate(card); - spin_unlock_irq(&card->lock); - - return BLK_QC_T_NONE; -} - -static irqreturn_t mm_interrupt(int irq, void *__card) -{ - struct cardinfo *card = (struct cardinfo *) __card; - unsigned int dma_status; - unsigned short cfg_status; - -HW_TRACE(0x30); - - dma_status = le32_to_cpu(readl(card->csr_remap + DMA_STATUS_CTRL)); - - if (!(dma_status & (DMASCR_ERROR_MASK | DMASCR_CHAIN_COMPLETE))) { - /* interrupt wasn't for me ... 
*/ - return IRQ_NONE; - } - - /* clear COMPLETION interrupts */ - if (card->flags & UM_FLAG_NO_BYTE_STATUS) - writel(cpu_to_le32(DMASCR_DMA_COMPLETE|DMASCR_CHAIN_COMPLETE), - card->csr_remap + DMA_STATUS_CTRL); - else - writeb((DMASCR_DMA_COMPLETE|DMASCR_CHAIN_COMPLETE) >> 16, - card->csr_remap + DMA_STATUS_CTRL + 2); - - /* log errors and clear interrupt status */ - if (dma_status & DMASCR_ANY_ERR) { - unsigned int data_log1, data_log2; - unsigned int addr_log1, addr_log2; - unsigned char stat, count, syndrome, check; - - stat = readb(card->csr_remap + MEMCTRLCMD_ERRSTATUS); - - data_log1 = le32_to_cpu(readl(card->csr_remap + - ERROR_DATA_LOG)); - data_log2 = le32_to_cpu(readl(card->csr_remap + - ERROR_DATA_LOG + 4)); - addr_log1 = le32_to_cpu(readl(card->csr_remap + - ERROR_ADDR_LOG)); - addr_log2 = readb(card->csr_remap + ERROR_ADDR_LOG + 4); - - count = readb(card->csr_remap + ERROR_COUNT); - syndrome = readb(card->csr_remap + ERROR_SYNDROME); - check = readb(card->csr_remap + ERROR_CHECK); - - dump_dmastat(card, dma_status); - - if (stat & 0x01) - dev_printk(KERN_ERR, &card->dev->dev, - "Memory access error detected (err count %d)\n", - count); - if (stat & 0x02) - dev_printk(KERN_ERR, &card->dev->dev, - "Multi-bit EDC error\n"); - - dev_printk(KERN_ERR, &card->dev->dev, - "Fault Address 0x%02x%08x, Fault Data 0x%08x%08x\n", - addr_log2, addr_log1, data_log2, data_log1); - dev_printk(KERN_ERR, &card->dev->dev, - "Fault Check 0x%02x, Fault Syndrome 0x%02x\n", - check, syndrome); - - writeb(0, card->csr_remap + ERROR_COUNT); - } - - if (dma_status & DMASCR_PARITY_ERR_REP) { - dev_printk(KERN_ERR, &card->dev->dev, - "PARITY ERROR REPORTED\n"); - pci_read_config_word(card->dev, PCI_STATUS, &cfg_status); - pci_write_config_word(card->dev, PCI_STATUS, cfg_status); - } - - if (dma_status & DMASCR_PARITY_ERR_DET) { - dev_printk(KERN_ERR, &card->dev->dev, - "PARITY ERROR DETECTED\n"); - pci_read_config_word(card->dev, PCI_STATUS, &cfg_status); - pci_write_config_word(card->dev, PCI_STATUS, cfg_status); - } - - if (dma_status & DMASCR_SYSTEM_ERR_SIG) { - dev_printk(KERN_ERR, &card->dev->dev, "SYSTEM ERROR\n"); - pci_read_config_word(card->dev, PCI_STATUS, &cfg_status); - pci_write_config_word(card->dev, PCI_STATUS, cfg_status); - } - - if (dma_status & DMASCR_TARGET_ABT) { - dev_printk(KERN_ERR, &card->dev->dev, "TARGET ABORT\n"); - pci_read_config_word(card->dev, PCI_STATUS, &cfg_status); - pci_write_config_word(card->dev, PCI_STATUS, cfg_status); - } - - if (dma_status & DMASCR_MASTER_ABT) { - dev_printk(KERN_ERR, &card->dev->dev, "MASTER ABORT\n"); - pci_read_config_word(card->dev, PCI_STATUS, &cfg_status); - pci_write_config_word(card->dev, PCI_STATUS, cfg_status); - } - - /* and process the DMA descriptors */ - card->dma_status = dma_status; - tasklet_schedule(&card->tasklet); - -HW_TRACE(0x36); - - return IRQ_HANDLED; -} - -/* - * If both batteries are good, no LED - * If either battery has been warned, solid LED - * If both batteries are bad, flash the LED quickly - * If either battery is bad, flash the LED semi quickly - */ -static void set_fault_to_battery_status(struct cardinfo *card) -{ - if (card->battery[0].good && card->battery[1].good) - set_led(card, LED_FAULT, LED_OFF); - else if (card->battery[0].warned || card->battery[1].warned) - set_led(card, LED_FAULT, LED_ON); - else if (!card->battery[0].good && !card->battery[1].good) - set_led(card, LED_FAULT, LED_FLASH_7_0); - else - set_led(card, LED_FAULT, LED_FLASH_3_5); -} - -static void init_battery_timer(void); - -static int 
check_battery(struct cardinfo *card, int battery, int status) -{ - if (status != card->battery[battery].good) { - card->battery[battery].good = !card->battery[battery].good; - card->battery[battery].last_change = jiffies; - - if (card->battery[battery].good) { - dev_printk(KERN_ERR, &card->dev->dev, - "Battery %d now good\n", battery + 1); - card->battery[battery].warned = 0; - } else - dev_printk(KERN_ERR, &card->dev->dev, - "Battery %d now FAILED\n", battery + 1); - - return 1; - } else if (!card->battery[battery].good && - !card->battery[battery].warned && - time_after_eq(jiffies, card->battery[battery].last_change + - (HZ * 60 * 60 * 5))) { - dev_printk(KERN_ERR, &card->dev->dev, - "Battery %d still FAILED after 5 hours\n", battery + 1); - card->battery[battery].warned = 1; - - return 1; - } - - return 0; -} - -static void check_batteries(struct cardinfo *card) -{ - /* NOTE: this must *never* be called while the card - * is doing (bus-to-card) DMA, or you will need the - * reset switch - */ - unsigned char status; - int ret1, ret2; - - status = readb(card->csr_remap + MEMCTRLSTATUS_BATTERY); - if (debug & DEBUG_BATTERY_POLLING) - dev_printk(KERN_DEBUG, &card->dev->dev, - "checking battery status, 1 = %s, 2 = %s\n", - (status & BATTERY_1_FAILURE) ? "FAILURE" : "OK", - (status & BATTERY_2_FAILURE) ? "FAILURE" : "OK"); - - ret1 = check_battery(card, 0, !(status & BATTERY_1_FAILURE)); - ret2 = check_battery(card, 1, !(status & BATTERY_2_FAILURE)); - - if (ret1 || ret2) - set_fault_to_battery_status(card); -} - -static void check_all_batteries(struct timer_list *unused) -{ - int i; - - for (i = 0; i < num_cards; i++) - if (!(cards[i].flags & UM_FLAG_NO_BATT)) { - struct cardinfo *card = &cards[i]; - spin_lock_bh(&card->lock); - if (card->Active >= 0) - card->check_batteries = 1; - else - check_batteries(card); - spin_unlock_bh(&card->lock); - } - - init_battery_timer(); -} - -static void init_battery_timer(void) -{ - timer_setup(&battery_timer, check_all_batteries, 0); - battery_timer.expires = jiffies + (HZ * 60); - add_timer(&battery_timer); -} - -static void del_battery_timer(void) -{ - del_timer(&battery_timer); -} - -/* - * Note no locks taken out here. In a worst case scenario, we could drop - * a chunk of system memory. But that should never happen, since validation - * happens at open or mount time, when locks are held. - * - * That's crap, since doing that while some partitions are opened - * or mounted will give you really nasty results. - */ -static int mm_revalidate(struct gendisk *disk) -{ - struct cardinfo *card = disk->private_data; - set_capacity(disk, card->mm_size << 1); - return 0; -} - -static int mm_getgeo(struct block_device *bdev, struct hd_geometry *geo) -{ - struct cardinfo *card = bdev->bd_disk->private_data; - int size = card->mm_size * (1024 / MM_HARDSECT); - - /* - * get geometry: we have to fake one... trim the size to a - * multiple of 2048 (1M): tell we have 32 sectors, 64 heads, - * whatever cylinders. 
- */ - geo->heads = 64; - geo->sectors = 32; - geo->cylinders = size / (geo->heads * geo->sectors); - return 0; -} - -static const struct block_device_operations mm_fops = { - .owner = THIS_MODULE, - .submit_bio = mm_submit_bio, - .getgeo = mm_getgeo, - .revalidate_disk = mm_revalidate, -}; - -static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) -{ - int ret; - struct cardinfo *card = &cards[num_cards]; - unsigned char mem_present; - unsigned char batt_status; - unsigned int saved_bar, data; - unsigned long csr_base; - unsigned long csr_len; - int magic_number; - static int printed_version; - - if (!printed_version++) - printk(KERN_INFO DRIVER_VERSION " : " DRIVER_DESC "\n"); - - ret = pci_enable_device(dev); - if (ret) - return ret; - - pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xF8); - pci_set_master(dev); - - card->dev = dev; - - csr_base = pci_resource_start(dev, 0); - csr_len = pci_resource_len(dev, 0); - if (!csr_base || !csr_len) - return -ENODEV; - - dev_printk(KERN_INFO, &dev->dev, - "Micro Memory(tm) controller found (PCI Mem Module (Battery Backup))\n"); - - if (dma_set_mask(&dev->dev, DMA_BIT_MASK(64)) && - dma_set_mask(&dev->dev, DMA_BIT_MASK(32))) { - dev_printk(KERN_WARNING, &dev->dev, "NO suitable DMA found\n"); - return -ENOMEM; - } - - ret = pci_request_regions(dev, DRIVER_NAME); - if (ret) { - dev_printk(KERN_ERR, &card->dev->dev, - "Unable to request memory region\n"); - goto failed_req_csr; - } - - card->csr_remap = ioremap(csr_base, csr_len); - if (!card->csr_remap) { - dev_printk(KERN_ERR, &card->dev->dev, - "Unable to remap memory region\n"); - ret = -ENOMEM; - - goto failed_remap_csr; - } - - dev_printk(KERN_INFO, &card->dev->dev, - "CSR 0x%08lx -> 0x%p (0x%lx)\n", - csr_base, card->csr_remap, csr_len); - - switch (card->dev->device) { - case 0x5415: - card->flags |= UM_FLAG_NO_BYTE_STATUS | UM_FLAG_NO_BATTREG; - magic_number = 0x59; - break; - - case 0x5425: - card->flags |= UM_FLAG_NO_BYTE_STATUS; - magic_number = 0x5C; - break; - - case 0x6155: - card->flags |= UM_FLAG_NO_BYTE_STATUS | - UM_FLAG_NO_BATTREG | UM_FLAG_NO_BATT; - magic_number = 0x99; - break; - - default: - magic_number = 0x100; - break; - } - - if (readb(card->csr_remap + MEMCTRLSTATUS_MAGIC) != magic_number) { - dev_printk(KERN_ERR, &card->dev->dev, "Magic number invalid\n"); - ret = -ENOMEM; - goto failed_magic; - } - - card->mm_pages[0].desc = dma_alloc_coherent(&card->dev->dev, - PAGE_SIZE * 2, &card->mm_pages[0].page_dma, GFP_KERNEL); - card->mm_pages[1].desc = dma_alloc_coherent(&card->dev->dev, - PAGE_SIZE * 2, &card->mm_pages[1].page_dma, GFP_KERNEL); - if (card->mm_pages[0].desc == NULL || - card->mm_pages[1].desc == NULL) { - dev_printk(KERN_ERR, &card->dev->dev, "alloc failed\n"); - ret = -ENOMEM; - goto failed_alloc; - } - reset_page(&card->mm_pages[0]); - reset_page(&card->mm_pages[1]); - card->Ready = 0; /* page 0 is ready */ - card->Active = -1; /* no page is active */ - card->bio = NULL; - card->biotail = &card->bio; - spin_lock_init(&card->lock); - - card->queue = blk_alloc_queue(NUMA_NO_NODE); - if (!card->queue) { - ret = -ENOMEM; - goto failed_alloc; - } - - tasklet_init(&card->tasklet, process_page, (unsigned long)card); - - card->check_batteries = 0; - - mem_present = readb(card->csr_remap + MEMCTRLSTATUS_MEMORY); - switch (mem_present) { - case MEM_128_MB: - card->mm_size = 1024 * 128; - break; - case MEM_256_MB: - card->mm_size = 1024 * 256; - break; - case MEM_512_MB: - card->mm_size = 1024 * 512; - break; - case MEM_1_GB: - card->mm_size = 1024 
* 1024; - break; - case MEM_2_GB: - card->mm_size = 1024 * 2048; - break; - default: - card->mm_size = 0; - break; - } - - /* Clear the LED's we control */ - set_led(card, LED_REMOVE, LED_OFF); - set_led(card, LED_FAULT, LED_OFF); - - batt_status = readb(card->csr_remap + MEMCTRLSTATUS_BATTERY); - - card->battery[0].good = !(batt_status & BATTERY_1_FAILURE); - card->battery[1].good = !(batt_status & BATTERY_2_FAILURE); - card->battery[0].last_change = card->battery[1].last_change = jiffies; - - if (card->flags & UM_FLAG_NO_BATT) - dev_printk(KERN_INFO, &card->dev->dev, - "Size %d KB\n", card->mm_size); - else { - dev_printk(KERN_INFO, &card->dev->dev, - "Size %d KB, Battery 1 %s (%s), Battery 2 %s (%s)\n", - card->mm_size, - batt_status & BATTERY_1_DISABLED ? "Disabled" : "Enabled", - card->battery[0].good ? "OK" : "FAILURE", - batt_status & BATTERY_2_DISABLED ? "Disabled" : "Enabled", - card->battery[1].good ? "OK" : "FAILURE"); - - set_fault_to_battery_status(card); - } - - pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &saved_bar); - data = 0xffffffff; - pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, data); - pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &data); - pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, saved_bar); - data &= 0xfffffff0; - data = ~data; - data += 1; - - if (request_irq(dev->irq, mm_interrupt, IRQF_SHARED, DRIVER_NAME, - card)) { - dev_printk(KERN_ERR, &card->dev->dev, - "Unable to allocate IRQ\n"); - ret = -ENODEV; - goto failed_req_irq; - } - - dev_printk(KERN_INFO, &card->dev->dev, - "Window size %d bytes, IRQ %d\n", data, dev->irq); - - pci_set_drvdata(dev, card); - - if (pci_write_cmd != 0x0F) /* If not Memory Write & Invalidate */ - pci_write_cmd = 0x07; /* then Memory Write command */ - - if (pci_write_cmd & 0x08) { /* use Memory Write and Invalidate */ - unsigned short cfg_command; - pci_read_config_word(dev, PCI_COMMAND, &cfg_command); - cfg_command |= 0x10; /* Memory Write & Invalidate Enable */ - pci_write_config_word(dev, PCI_COMMAND, cfg_command); - } - pci_cmds = (pci_read_cmd << 28) | (pci_write_cmd << 24); - - num_cards++; - - if (!get_userbit(card, MEMORY_INITIALIZED)) { - dev_printk(KERN_INFO, &card->dev->dev, - "memory NOT initialized. 
Consider over-writing whole device.\n"); - card->init_size = 0; - } else { - dev_printk(KERN_INFO, &card->dev->dev, - "memory already initialized\n"); - card->init_size = card->mm_size; - } - - /* Enable ECC */ - writeb(EDC_STORE_CORRECT, card->csr_remap + MEMCTRLCMD_ERRCTRL); - - return 0; - - failed_req_irq: - failed_alloc: - if (card->mm_pages[0].desc) - dma_free_coherent(&card->dev->dev, PAGE_SIZE * 2, - card->mm_pages[0].desc, - card->mm_pages[0].page_dma); - if (card->mm_pages[1].desc) - dma_free_coherent(&card->dev->dev, PAGE_SIZE * 2, - card->mm_pages[1].desc, - card->mm_pages[1].page_dma); - failed_magic: - iounmap(card->csr_remap); - failed_remap_csr: - pci_release_regions(dev); - failed_req_csr: - - return ret; -} - -static void mm_pci_remove(struct pci_dev *dev) -{ - struct cardinfo *card = pci_get_drvdata(dev); - - tasklet_kill(&card->tasklet); - free_irq(dev->irq, card); - iounmap(card->csr_remap); - - if (card->mm_pages[0].desc) - dma_free_coherent(&card->dev->dev, PAGE_SIZE * 2, - card->mm_pages[0].desc, - card->mm_pages[0].page_dma); - if (card->mm_pages[1].desc) - dma_free_coherent(&card->dev->dev, PAGE_SIZE * 2, - card->mm_pages[1].desc, - card->mm_pages[1].page_dma); - blk_cleanup_queue(card->queue); - - pci_release_regions(dev); - pci_disable_device(dev); -} - -static const struct pci_device_id mm_pci_ids[] = { - {PCI_DEVICE(PCI_VENDOR_ID_MICRO_MEMORY, PCI_DEVICE_ID_MICRO_MEMORY_5415CN)}, - {PCI_DEVICE(PCI_VENDOR_ID_MICRO_MEMORY, PCI_DEVICE_ID_MICRO_MEMORY_5425CN)}, - {PCI_DEVICE(PCI_VENDOR_ID_MICRO_MEMORY, PCI_DEVICE_ID_MICRO_MEMORY_6155)}, - { - .vendor = 0x8086, - .device = 0xB555, - .subvendor = 0x1332, - .subdevice = 0x5460, - .class = 0x050000, - .class_mask = 0, - }, { /* end: all zeroes */ } -}; - -MODULE_DEVICE_TABLE(pci, mm_pci_ids); - -static struct pci_driver mm_pci_driver = { - .name = DRIVER_NAME, - .id_table = mm_pci_ids, - .probe = mm_pci_probe, - .remove = mm_pci_remove, -}; - -static int __init mm_init(void) -{ - int retval, i; - int err; - - retval = pci_register_driver(&mm_pci_driver); - if (retval) - return -ENOMEM; - - err = major_nr = register_blkdev(0, DRIVER_NAME); - if (err < 0) { - pci_unregister_driver(&mm_pci_driver); - return -EIO; - } - - for (i = 0; i < num_cards; i++) { - mm_gendisk[i] = alloc_disk(1 << MM_SHIFT); - if (!mm_gendisk[i]) - goto out; - } - - for (i = 0; i < num_cards; i++) { - struct gendisk *disk = mm_gendisk[i]; - sprintf(disk->disk_name, "umem%c", 'a'+i); - spin_lock_init(&cards[i].lock); - disk->major = major_nr; - disk->first_minor = i << MM_SHIFT; - disk->fops = &mm_fops; - disk->private_data = &cards[i]; - disk->queue = cards[i].queue; - set_capacity(disk, cards[i].mm_size << 1); - add_disk(disk); - } - - init_battery_timer(); - printk(KERN_INFO "MM: desc_per_page = %ld\n", DESC_PER_PAGE); -/* printk("mm_init: Done. 
10-19-01 9:00\n"); */ - return 0; - -out: - pci_unregister_driver(&mm_pci_driver); - unregister_blkdev(major_nr, DRIVER_NAME); - while (i--) - put_disk(mm_gendisk[i]); - return -ENOMEM; -} - -static void __exit mm_cleanup(void) -{ - int i; - - del_battery_timer(); - - for (i = 0; i < num_cards ; i++) { - del_gendisk(mm_gendisk[i]); - put_disk(mm_gendisk[i]); - } - - pci_unregister_driver(&mm_pci_driver); - - unregister_blkdev(major_nr, DRIVER_NAME); -} - -module_init(mm_init); -module_exit(mm_cleanup); - -MODULE_AUTHOR(DRIVER_AUTHOR); -MODULE_DESCRIPTION(DRIVER_DESC); -MODULE_LICENSE("GPL"); diff --git a/drivers/block/umem.h b/drivers/block/umem.h deleted file mode 100644 index 58384978ff05..000000000000 --- a/drivers/block/umem.h +++ /dev/null @@ -1,132 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ - -/* - * This file contains defines for the - * Micro Memory MM5415 - * family PCI Memory Module with Battery Backup. - * - * Copyright Micro Memory INC 2001. All rights reserved. - */ - -#ifndef _DRIVERS_BLOCK_MM_H -#define _DRIVERS_BLOCK_MM_H - - -#define IRQ_TIMEOUT (1 * HZ) - -/* CSR register definition */ -#define MEMCTRLSTATUS_MAGIC 0x00 -#define MM_MAGIC_VALUE (unsigned char)0x59 - -#define MEMCTRLSTATUS_BATTERY 0x04 -#define BATTERY_1_DISABLED 0x01 -#define BATTERY_1_FAILURE 0x02 -#define BATTERY_2_DISABLED 0x04 -#define BATTERY_2_FAILURE 0x08 - -#define MEMCTRLSTATUS_MEMORY 0x07 -#define MEM_128_MB 0xfe -#define MEM_256_MB 0xfc -#define MEM_512_MB 0xf8 -#define MEM_1_GB 0xf0 -#define MEM_2_GB 0xe0 - -#define MEMCTRLCMD_LEDCTRL 0x08 -#define LED_REMOVE 2 -#define LED_FAULT 4 -#define LED_POWER 6 -#define LED_FLIP 255 -#define LED_OFF 0x00 -#define LED_ON 0x01 -#define LED_FLASH_3_5 0x02 -#define LED_FLASH_7_0 0x03 -#define LED_POWER_ON 0x00 -#define LED_POWER_OFF 0x01 -#define USER_BIT1 0x01 -#define USER_BIT2 0x02 - -#define MEMORY_INITIALIZED USER_BIT1 - -#define MEMCTRLCMD_ERRCTRL 0x0C -#define EDC_NONE_DEFAULT 0x00 -#define EDC_NONE 0x01 -#define EDC_STORE_READ 0x02 -#define EDC_STORE_CORRECT 0x03 - -#define MEMCTRLCMD_ERRCNT 0x0D -#define MEMCTRLCMD_ERRSTATUS 0x0E - -#define ERROR_DATA_LOG 0x20 -#define ERROR_ADDR_LOG 0x28 -#define ERROR_COUNT 0x3D -#define ERROR_SYNDROME 0x3E -#define ERROR_CHECK 0x3F - -#define DMA_PCI_ADDR 0x40 -#define DMA_LOCAL_ADDR 0x48 -#define DMA_TRANSFER_SIZE 0x50 -#define DMA_DESCRIPTOR_ADDR 0x58 -#define DMA_SEMAPHORE_ADDR 0x60 -#define DMA_STATUS_CTRL 0x68 -#define DMASCR_GO 0x00001 -#define DMASCR_TRANSFER_READ 0x00002 -#define DMASCR_CHAIN_EN 0x00004 -#define DMASCR_SEM_EN 0x00010 -#define DMASCR_DMA_COMP_EN 0x00020 -#define DMASCR_CHAIN_COMP_EN 0x00040 -#define DMASCR_ERR_INT_EN 0x00080 -#define DMASCR_PARITY_INT_EN 0x00100 -#define DMASCR_ANY_ERR 0x00800 -#define DMASCR_MBE_ERR 0x01000 -#define DMASCR_PARITY_ERR_REP 0x02000 -#define DMASCR_PARITY_ERR_DET 0x04000 -#define DMASCR_SYSTEM_ERR_SIG 0x08000 -#define DMASCR_TARGET_ABT 0x10000 -#define DMASCR_MASTER_ABT 0x20000 -#define DMASCR_DMA_COMPLETE 0x40000 -#define DMASCR_CHAIN_COMPLETE 0x80000 - -/* -3.SOME PCs HAVE HOST BRIDGES WHICH APPARENTLY DO NOT CORRECTLY HANDLE -READ-LINE (0xE) OR READ-MULTIPLE (0xC) PCI COMMAND CODES DURING DMA -TRANSFERS. IN OTHER SYSTEMS THESE COMMAND CODES WILL CAUSE THE HOST BRIDGE -TO ALLOW LONGER BURSTS DURING DMA READ OPERATIONS. THE UPPER FOUR BITS -(31..28) OF THE DMA CSR HAVE BEEN MADE PROGRAMMABLE, SO THAT EITHER A 0x6, -AN 0xE OR A 0xC CAN BE WRITTEN TO THEM TO SET THE COMMAND CODE USED DURING -DMA READ OPERATIONS. 
-*/ -#define DMASCR_READ 0x60000000 -#define DMASCR_READLINE 0xE0000000 -#define DMASCR_READMULTI 0xC0000000 - - -#define DMASCR_ERROR_MASK (DMASCR_MASTER_ABT | DMASCR_TARGET_ABT | DMASCR_SYSTEM_ERR_SIG | DMASCR_PARITY_ERR_DET | DMASCR_MBE_ERR | DMASCR_ANY_ERR) -#define DMASCR_HARD_ERROR (DMASCR_MASTER_ABT | DMASCR_TARGET_ABT | DMASCR_SYSTEM_ERR_SIG | DMASCR_PARITY_ERR_DET | DMASCR_MBE_ERR) - -#define WINDOWMAP_WINNUM 0x7B - -#define DMA_READ_FROM_HOST 0 -#define DMA_WRITE_TO_HOST 1 - -struct mm_dma_desc { - __le64 pci_addr; - __le64 local_addr; - __le32 transfer_size; - u32 zero1; - __le64 next_desc_addr; - __le64 sem_addr; - __le32 control_bits; - u32 zero2; - - dma_addr_t data_dma_handle; - - /* Copy of the bits */ - __le64 sem_control_bits; -} __attribute__((aligned(8))); - -/* bits for card->flags */ -#define UM_FLAG_DMA_IN_REGS 1 -#define UM_FLAG_NO_BYTE_STATUS 2 -#define UM_FLAG_NO_BATTREG 4 -#define UM_FLAG_NO_BATT 8 -#endif diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index b0c71d3a81a0..bda5c815e441 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h @@ -313,6 +313,7 @@ struct xen_blkif { struct work_struct free_work; unsigned int nr_ring_pages; + bool multi_ref; /* All rings for this device. */ struct xen_blkif_ring *rings; unsigned int nr_rings; diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index c2aaf690352c..125b22205d38 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -998,14 +998,17 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir) for (i = 0; i < nr_grefs; i++) { char ring_ref_name[RINGREF_NAME_LEN]; - snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i); + if (blkif->multi_ref) + snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i); + else { + WARN_ON(i != 0); + snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref"); + } + err = xenbus_scanf(XBT_NIL, dir, ring_ref_name, "%u", &ring_ref[i]); if (err != 1) { - if (nr_grefs == 1) - break; - err = -EINVAL; xenbus_dev_fatal(dev, err, "reading %s/%s", dir, ring_ref_name); @@ -1013,18 +1016,6 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir) } } - if (err != 1) { - WARN_ON(nr_grefs != 1); - - err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u", - &ring_ref[0]); - if (err != 1) { - err = -EINVAL; - xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir); - return err; - } - } - err = -ENOMEM; for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) { req = kzalloc(sizeof(*req), GFP_KERNEL); @@ -1129,10 +1120,15 @@ static int connect_ring(struct backend_info *be) blkif->nr_rings, blkif->blk_protocol, protocol, blkif->vbd.feature_gnt_persistent ? 
"persistent grants" : ""); - ring_page_order = xenbus_read_unsigned(dev->otherend, - "ring-page-order", 0); - - if (ring_page_order > xen_blkif_max_ring_order) { + err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u", + &ring_page_order); + if (err != 1) { + blkif->nr_ring_pages = 1; + blkif->multi_ref = false; + } else if (ring_page_order <= xen_blkif_max_ring_order) { + blkif->nr_ring_pages = 1 << ring_page_order; + blkif->multi_ref = true; + } else { err = -EINVAL; xenbus_dev_fatal(dev, err, "requested ring page order %d exceed max:%d", @@ -1141,8 +1137,6 @@ static int connect_ring(struct backend_info *be) return err; } - blkif->nr_ring_pages = 1 << ring_page_order; - if (blkif->nr_rings == 1) return read_per_ring_refs(&blkif->rings[0], dev->otherend); else { diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index e1c6798889f4..10df39a8b18d 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1949,7 +1949,7 @@ module_param(feature_persistent, bool, 0644); MODULE_PARM_DESC(feature_persistent, "Enables the persistent grants feature"); -/** +/* * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffer for communication with the backend, and * inform the backend of the appropriate details for those. Switch to @@ -2075,7 +2075,7 @@ static int blkif_recover(struct blkfront_info *info) return 0; } -/** +/* * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our blkif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the @@ -2397,7 +2397,7 @@ static void blkfront_connect(struct blkfront_info *info) } /* - * physcial-sector-size is a newer field, so old backends may not + * physical-sector-size is a newer field, so old backends may not * provide this. Assume physical sector size to be the same as * sector_size in that case. */ @@ -2440,7 +2440,7 @@ fail: return; } -/** +/* * Callback received when the backend's state changes. */ static void blkback_changed(struct xenbus_device *dev, diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c deleted file mode 100644 index eb8ef65778c3..000000000000 --- a/drivers/block/xsysace.c +++ /dev/null @@ -1,1273 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Xilinx SystemACE device driver - * - * Copyright 2007 Secret Lab Technologies Ltd. - */ - -/* - * The SystemACE chip is designed to configure FPGAs by loading an FPGA - * bitstream from a file on a CF card and squirting it into FPGAs connected - * to the SystemACE JTAG chain. It also has the advantage of providing an - * MPU interface which can be used to control the FPGA configuration process - * and to use the attached CF card for general purpose storage. - * - * This driver is a block device driver for the SystemACE. - * - * Initialization: - * The driver registers itself as a platform_device driver at module - * load time. The platform bus will take care of calling the - * ace_probe() method for all SystemACE instances in the system. Any - * number of SystemACE instances are supported. ace_probe() calls - * ace_setup() which initialized all data structures, reads the CF - * id structure and registers the device. - * - * Processing: - * Just about all of the heavy lifting in this driver is performed by - * a Finite State Machine (FSM). The driver needs to wait on a number - * of events; some raised by interrupts, some which need to be polled - * for. 
Describing all of the behaviour in a FSM seems to be the - * easiest way to keep the complexity low and make it easy to - * understand what the driver is doing. If the block ops or the - * request function need to interact with the hardware, then they - * simply need to flag the request and kick of FSM processing. - * - * The FSM itself is atomic-safe code which can be run from any - * context. The general process flow is: - * 1. obtain the ace->lock spinlock. - * 2. loop on ace_fsm_dostate() until the ace->fsm_continue flag is - * cleared. - * 3. release the lock. - * - * Individual states do not sleep in any way. If a condition needs to - * be waited for then the state much clear the fsm_continue flag and - * either schedule the FSM to be run again at a later time, or expect - * an interrupt to call the FSM when the desired condition is met. - * - * In normal operation, the FSM is processed at interrupt context - * either when the driver's tasklet is scheduled, or when an irq is - * raised by the hardware. The tasklet can be scheduled at any time. - * The request method in particular schedules the tasklet when a new - * request has been indicated by the block layer. Once started, the - * FSM proceeds as far as it can processing the request until it - * needs on a hardware event. At this point, it must yield execution. - * - * A state has two options when yielding execution: - * 1. ace_fsm_yield() - * - Call if need to poll for event. - * - clears the fsm_continue flag to exit the processing loop - * - reschedules the tasklet to run again as soon as possible - * 2. ace_fsm_yieldirq() - * - Call if an irq is expected from the HW - * - clears the fsm_continue flag to exit the processing loop - * - does not reschedule the tasklet so the FSM will not be processed - * again until an irq is received. - * After calling a yield function, the state must return control back - * to the FSM main loop. - * - * Additionally, the driver maintains a kernel timer which can process - * the FSM. If the FSM gets stalled, typically due to a missed - * interrupt, then the kernel timer will expire and the driver can - * continue where it left off. - * - * To Do: - * - Add FPGA configuration control interface. 
- * - Request major number from lanana - */ - -#undef DEBUG - -#include <linux/module.h> -#include <linux/ctype.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/errno.h> -#include <linux/kernel.h> -#include <linux/delay.h> -#include <linux/slab.h> -#include <linux/blk-mq.h> -#include <linux/mutex.h> -#include <linux/ata.h> -#include <linux/hdreg.h> -#include <linux/platform_device.h> -#if defined(CONFIG_OF) -#include <linux/of_address.h> -#include <linux/of_device.h> -#include <linux/of_platform.h> -#endif - -MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>"); -MODULE_DESCRIPTION("Xilinx SystemACE device driver"); -MODULE_LICENSE("GPL"); - -/* SystemACE register definitions */ -#define ACE_BUSMODE (0x00) - -#define ACE_STATUS (0x04) -#define ACE_STATUS_CFGLOCK (0x00000001) -#define ACE_STATUS_MPULOCK (0x00000002) -#define ACE_STATUS_CFGERROR (0x00000004) /* config controller error */ -#define ACE_STATUS_CFCERROR (0x00000008) /* CF controller error */ -#define ACE_STATUS_CFDETECT (0x00000010) -#define ACE_STATUS_DATABUFRDY (0x00000020) -#define ACE_STATUS_DATABUFMODE (0x00000040) -#define ACE_STATUS_CFGDONE (0x00000080) -#define ACE_STATUS_RDYFORCFCMD (0x00000100) -#define ACE_STATUS_CFGMODEPIN (0x00000200) -#define ACE_STATUS_CFGADDR_MASK (0x0000e000) -#define ACE_STATUS_CFBSY (0x00020000) -#define ACE_STATUS_CFRDY (0x00040000) -#define ACE_STATUS_CFDWF (0x00080000) -#define ACE_STATUS_CFDSC (0x00100000) -#define ACE_STATUS_CFDRQ (0x00200000) -#define ACE_STATUS_CFCORR (0x00400000) -#define ACE_STATUS_CFERR (0x00800000) - -#define ACE_ERROR (0x08) -#define ACE_CFGLBA (0x0c) -#define ACE_MPULBA (0x10) - -#define ACE_SECCNTCMD (0x14) -#define ACE_SECCNTCMD_RESET (0x0100) -#define ACE_SECCNTCMD_IDENTIFY (0x0200) -#define ACE_SECCNTCMD_READ_DATA (0x0300) -#define ACE_SECCNTCMD_WRITE_DATA (0x0400) -#define ACE_SECCNTCMD_ABORT (0x0600) - -#define ACE_VERSION (0x16) -#define ACE_VERSION_REVISION_MASK (0x00FF) -#define ACE_VERSION_MINOR_MASK (0x0F00) -#define ACE_VERSION_MAJOR_MASK (0xF000) - -#define ACE_CTRL (0x18) -#define ACE_CTRL_FORCELOCKREQ (0x0001) -#define ACE_CTRL_LOCKREQ (0x0002) -#define ACE_CTRL_FORCECFGADDR (0x0004) -#define ACE_CTRL_FORCECFGMODE (0x0008) -#define ACE_CTRL_CFGMODE (0x0010) -#define ACE_CTRL_CFGSTART (0x0020) -#define ACE_CTRL_CFGSEL (0x0040) -#define ACE_CTRL_CFGRESET (0x0080) -#define ACE_CTRL_DATABUFRDYIRQ (0x0100) -#define ACE_CTRL_ERRORIRQ (0x0200) -#define ACE_CTRL_CFGDONEIRQ (0x0400) -#define ACE_CTRL_RESETIRQ (0x0800) -#define ACE_CTRL_CFGPROG (0x1000) -#define ACE_CTRL_CFGADDR_MASK (0xe000) - -#define ACE_FATSTAT (0x1c) - -#define ACE_NUM_MINORS 16 -#define ACE_SECTOR_SIZE (512) -#define ACE_FIFO_SIZE (32) -#define ACE_BUF_PER_SECTOR (ACE_SECTOR_SIZE / ACE_FIFO_SIZE) - -#define ACE_BUS_WIDTH_8 0 -#define ACE_BUS_WIDTH_16 1 - -struct ace_reg_ops; - -struct ace_device { - /* driver state data */ - int id; - int media_change; - int users; - struct list_head list; - - /* finite state machine data */ - struct tasklet_struct fsm_tasklet; - uint fsm_task; /* Current activity (ACE_TASK_*) */ - uint fsm_state; /* Current state (ACE_FSM_STATE_*) */ - uint fsm_continue_flag; /* cleared to exit FSM mainloop */ - uint fsm_iter_num; - struct timer_list stall_timer; - - /* Transfer state/result, use for both id and block request */ - struct request *req; /* request being processed */ - void *data_ptr; /* pointer to I/O buffer */ - int data_count; /* number of buffers remaining */ - int data_result; /* Result of transfer; 0 := success */ - - 
int id_req_count; /* count of id requests */ - int id_result; - struct completion id_completion; /* used when id req finishes */ - int in_irq; - - /* Details of hardware device */ - resource_size_t physaddr; - void __iomem *baseaddr; - int irq; - int bus_width; /* 0 := 8 bit; 1 := 16 bit */ - struct ace_reg_ops *reg_ops; - int lock_count; - - /* Block device data structures */ - spinlock_t lock; - struct device *dev; - struct request_queue *queue; - struct gendisk *gd; - struct blk_mq_tag_set tag_set; - struct list_head rq_list; - - /* Inserted CF card parameters */ - u16 cf_id[ATA_ID_WORDS]; -}; - -static DEFINE_MUTEX(xsysace_mutex); -static int ace_major; - -/* --------------------------------------------------------------------- - * Low level register access - */ - -struct ace_reg_ops { - u16(*in) (struct ace_device * ace, int reg); - void (*out) (struct ace_device * ace, int reg, u16 val); - void (*datain) (struct ace_device * ace); - void (*dataout) (struct ace_device * ace); -}; - -/* 8 Bit bus width */ -static u16 ace_in_8(struct ace_device *ace, int reg) -{ - void __iomem *r = ace->baseaddr + reg; - return in_8(r) | (in_8(r + 1) << 8); -} - -static void ace_out_8(struct ace_device *ace, int reg, u16 val) -{ - void __iomem *r = ace->baseaddr + reg; - out_8(r, val); - out_8(r + 1, val >> 8); -} - -static void ace_datain_8(struct ace_device *ace) -{ - void __iomem *r = ace->baseaddr + 0x40; - u8 *dst = ace->data_ptr; - int i = ACE_FIFO_SIZE; - while (i--) - *dst++ = in_8(r++); - ace->data_ptr = dst; -} - -static void ace_dataout_8(struct ace_device *ace) -{ - void __iomem *r = ace->baseaddr + 0x40; - u8 *src = ace->data_ptr; - int i = ACE_FIFO_SIZE; - while (i--) - out_8(r++, *src++); - ace->data_ptr = src; -} - -static struct ace_reg_ops ace_reg_8_ops = { - .in = ace_in_8, - .out = ace_out_8, - .datain = ace_datain_8, - .dataout = ace_dataout_8, -}; - -/* 16 bit big endian bus attachment */ -static u16 ace_in_be16(struct ace_device *ace, int reg) -{ - return in_be16(ace->baseaddr + reg); -} - -static void ace_out_be16(struct ace_device *ace, int reg, u16 val) -{ - out_be16(ace->baseaddr + reg, val); -} - -static void ace_datain_be16(struct ace_device *ace) -{ - int i = ACE_FIFO_SIZE / 2; - u16 *dst = ace->data_ptr; - while (i--) - *dst++ = in_le16(ace->baseaddr + 0x40); - ace->data_ptr = dst; -} - -static void ace_dataout_be16(struct ace_device *ace) -{ - int i = ACE_FIFO_SIZE / 2; - u16 *src = ace->data_ptr; - while (i--) - out_le16(ace->baseaddr + 0x40, *src++); - ace->data_ptr = src; -} - -/* 16 bit little endian bus attachment */ -static u16 ace_in_le16(struct ace_device *ace, int reg) -{ - return in_le16(ace->baseaddr + reg); -} - -static void ace_out_le16(struct ace_device *ace, int reg, u16 val) -{ - out_le16(ace->baseaddr + reg, val); -} - -static void ace_datain_le16(struct ace_device *ace) -{ - int i = ACE_FIFO_SIZE / 2; - u16 *dst = ace->data_ptr; - while (i--) - *dst++ = in_be16(ace->baseaddr + 0x40); - ace->data_ptr = dst; -} - -static void ace_dataout_le16(struct ace_device *ace) -{ - int i = ACE_FIFO_SIZE / 2; - u16 *src = ace->data_ptr; - while (i--) - out_be16(ace->baseaddr + 0x40, *src++); - ace->data_ptr = src; -} - -static struct ace_reg_ops ace_reg_be16_ops = { - .in = ace_in_be16, - .out = ace_out_be16, - .datain = ace_datain_be16, - .dataout = ace_dataout_be16, -}; - -static struct ace_reg_ops ace_reg_le16_ops = { - .in = ace_in_le16, - .out = ace_out_le16, - .datain = ace_datain_le16, - .dataout = ace_dataout_le16, -}; - -static inline u16 ace_in(struct 
ace_device *ace, int reg) -{ - return ace->reg_ops->in(ace, reg); -} - -static inline u32 ace_in32(struct ace_device *ace, int reg) -{ - return ace_in(ace, reg) | (ace_in(ace, reg + 2) << 16); -} - -static inline void ace_out(struct ace_device *ace, int reg, u16 val) -{ - ace->reg_ops->out(ace, reg, val); -} - -static inline void ace_out32(struct ace_device *ace, int reg, u32 val) -{ - ace_out(ace, reg, val); - ace_out(ace, reg + 2, val >> 16); -} - -/* --------------------------------------------------------------------- - * Debug support functions - */ - -#if defined(DEBUG) -static void ace_dump_mem(void *base, int len) -{ - const char *ptr = base; - int i, j; - - for (i = 0; i < len; i += 16) { - printk(KERN_INFO "%.8x:", i); - for (j = 0; j < 16; j++) { - if (!(j % 4)) - printk(" "); - printk("%.2x", ptr[i + j]); - } - printk(" "); - for (j = 0; j < 16; j++) - printk("%c", isprint(ptr[i + j]) ? ptr[i + j] : '.'); - printk("\n"); - } -} -#else -static inline void ace_dump_mem(void *base, int len) -{ -} -#endif - -static void ace_dump_regs(struct ace_device *ace) -{ - dev_info(ace->dev, - " ctrl: %.8x seccnt/cmd: %.4x ver:%.4x\n" - " status:%.8x mpu_lba:%.8x busmode:%4x\n" - " error: %.8x cfg_lba:%.8x fatstat:%.4x\n", - ace_in32(ace, ACE_CTRL), - ace_in(ace, ACE_SECCNTCMD), - ace_in(ace, ACE_VERSION), - ace_in32(ace, ACE_STATUS), - ace_in32(ace, ACE_MPULBA), - ace_in(ace, ACE_BUSMODE), - ace_in32(ace, ACE_ERROR), - ace_in32(ace, ACE_CFGLBA), ace_in(ace, ACE_FATSTAT)); -} - -static void ace_fix_driveid(u16 *id) -{ -#if defined(__BIG_ENDIAN) - int i; - - /* All half words have wrong byte order; swap the bytes */ - for (i = 0; i < ATA_ID_WORDS; i++, id++) - *id = le16_to_cpu(*id); -#endif -} - -/* --------------------------------------------------------------------- - * Finite State Machine (FSM) implementation - */ - -/* FSM tasks; used to direct state transitions */ -#define ACE_TASK_IDLE 0 -#define ACE_TASK_IDENTIFY 1 -#define ACE_TASK_READ 2 -#define ACE_TASK_WRITE 3 -#define ACE_FSM_NUM_TASKS 4 - -/* FSM state definitions */ -#define ACE_FSM_STATE_IDLE 0 -#define ACE_FSM_STATE_REQ_LOCK 1 -#define ACE_FSM_STATE_WAIT_LOCK 2 -#define ACE_FSM_STATE_WAIT_CFREADY 3 -#define ACE_FSM_STATE_IDENTIFY_PREPARE 4 -#define ACE_FSM_STATE_IDENTIFY_TRANSFER 5 -#define ACE_FSM_STATE_IDENTIFY_COMPLETE 6 -#define ACE_FSM_STATE_REQ_PREPARE 7 -#define ACE_FSM_STATE_REQ_TRANSFER 8 -#define ACE_FSM_STATE_REQ_COMPLETE 9 -#define ACE_FSM_STATE_ERROR 10 -#define ACE_FSM_NUM_STATES 11 - -/* Set flag to exit FSM loop and reschedule tasklet */ -static inline void ace_fsm_yieldpoll(struct ace_device *ace) -{ - tasklet_schedule(&ace->fsm_tasklet); - ace->fsm_continue_flag = 0; -} - -static inline void ace_fsm_yield(struct ace_device *ace) -{ - dev_dbg(ace->dev, "%s()\n", __func__); - ace_fsm_yieldpoll(ace); -} - -/* Set flag to exit FSM loop and wait for IRQ to reschedule tasklet */ -static inline void ace_fsm_yieldirq(struct ace_device *ace) -{ - dev_dbg(ace->dev, "ace_fsm_yieldirq()\n"); - - if (ace->irq > 0) - ace->fsm_continue_flag = 0; - else - ace_fsm_yieldpoll(ace); -} - -static bool ace_has_next_request(struct request_queue *q) -{ - struct ace_device *ace = q->queuedata; - - return !list_empty(&ace->rq_list); -} - -/* Get the next read/write request; ending requests that we don't handle */ -static struct request *ace_get_next_request(struct request_queue *q) -{ - struct ace_device *ace = q->queuedata; - struct request *rq; - - rq = list_first_entry_or_null(&ace->rq_list, struct request, queuelist); - if (rq) 
{ - list_del_init(&rq->queuelist); - blk_mq_start_request(rq); - } - - return NULL; -} - -static void ace_fsm_dostate(struct ace_device *ace) -{ - struct request *req; - u32 status; - u16 val; - int count; - -#if defined(DEBUG) - dev_dbg(ace->dev, "fsm_state=%i, id_req_count=%i\n", - ace->fsm_state, ace->id_req_count); -#endif - - /* Verify that there is actually a CF in the slot. If not, then - * bail out back to the idle state and wake up all the waiters */ - status = ace_in32(ace, ACE_STATUS); - if ((status & ACE_STATUS_CFDETECT) == 0) { - ace->fsm_state = ACE_FSM_STATE_IDLE; - ace->media_change = 1; - set_capacity(ace->gd, 0); - dev_info(ace->dev, "No CF in slot\n"); - - /* Drop all in-flight and pending requests */ - if (ace->req) { - blk_mq_end_request(ace->req, BLK_STS_IOERR); - ace->req = NULL; - } - while ((req = ace_get_next_request(ace->queue)) != NULL) - blk_mq_end_request(req, BLK_STS_IOERR); - - /* Drop back to IDLE state and notify waiters */ - ace->fsm_state = ACE_FSM_STATE_IDLE; - ace->id_result = -EIO; - while (ace->id_req_count) { - complete(&ace->id_completion); - ace->id_req_count--; - } - } - - switch (ace->fsm_state) { - case ACE_FSM_STATE_IDLE: - /* See if there is anything to do */ - if (ace->id_req_count || ace_has_next_request(ace->queue)) { - ace->fsm_iter_num++; - ace->fsm_state = ACE_FSM_STATE_REQ_LOCK; - mod_timer(&ace->stall_timer, jiffies + HZ); - if (!timer_pending(&ace->stall_timer)) - add_timer(&ace->stall_timer); - break; - } - del_timer(&ace->stall_timer); - ace->fsm_continue_flag = 0; - break; - - case ACE_FSM_STATE_REQ_LOCK: - if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) { - /* Already have the lock, jump to next state */ - ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY; - break; - } - - /* Request the lock */ - val = ace_in(ace, ACE_CTRL); - ace_out(ace, ACE_CTRL, val | ACE_CTRL_LOCKREQ); - ace->fsm_state = ACE_FSM_STATE_WAIT_LOCK; - break; - - case ACE_FSM_STATE_WAIT_LOCK: - if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) { - /* got the lock; move to next state */ - ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY; - break; - } - - /* wait a bit for the lock */ - ace_fsm_yield(ace); - break; - - case ACE_FSM_STATE_WAIT_CFREADY: - status = ace_in32(ace, ACE_STATUS); - if (!(status & ACE_STATUS_RDYFORCFCMD) || - (status & ACE_STATUS_CFBSY)) { - /* CF card isn't ready; it needs to be polled */ - ace_fsm_yield(ace); - break; - } - - /* Device is ready for command; determine what to do next */ - if (ace->id_req_count) - ace->fsm_state = ACE_FSM_STATE_IDENTIFY_PREPARE; - else - ace->fsm_state = ACE_FSM_STATE_REQ_PREPARE; - break; - - case ACE_FSM_STATE_IDENTIFY_PREPARE: - /* Send identify command */ - ace->fsm_task = ACE_TASK_IDENTIFY; - ace->data_ptr = ace->cf_id; - ace->data_count = ACE_BUF_PER_SECTOR; - ace_out(ace, ACE_SECCNTCMD, ACE_SECCNTCMD_IDENTIFY); - - /* As per datasheet, put config controller in reset */ - val = ace_in(ace, ACE_CTRL); - ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET); - - /* irq handler takes over from this point; wait for the - * transfer to complete */ - ace->fsm_state = ACE_FSM_STATE_IDENTIFY_TRANSFER; - ace_fsm_yieldirq(ace); - break; - - case ACE_FSM_STATE_IDENTIFY_TRANSFER: - /* Check that the sysace is ready to receive data */ - status = ace_in32(ace, ACE_STATUS); - if (status & ACE_STATUS_CFBSY) { - dev_dbg(ace->dev, "CFBSY set; t=%i iter=%i dc=%i\n", - ace->fsm_task, ace->fsm_iter_num, - ace->data_count); - ace_fsm_yield(ace); - break; - } - if (!(status & ACE_STATUS_DATABUFRDY)) { - ace_fsm_yield(ace); - break; - } - 
- /* Transfer the next buffer */ - ace->reg_ops->datain(ace); - ace->data_count--; - - /* If there are still buffers to be transfers; jump out here */ - if (ace->data_count != 0) { - ace_fsm_yieldirq(ace); - break; - } - - /* transfer finished; kick state machine */ - dev_dbg(ace->dev, "identify finished\n"); - ace->fsm_state = ACE_FSM_STATE_IDENTIFY_COMPLETE; - break; - - case ACE_FSM_STATE_IDENTIFY_COMPLETE: - ace_fix_driveid(ace->cf_id); - ace_dump_mem(ace->cf_id, 512); /* Debug: Dump out disk ID */ - - if (ace->data_result) { - /* Error occurred, disable the disk */ - ace->media_change = 1; - set_capacity(ace->gd, 0); - dev_err(ace->dev, "error fetching CF id (%i)\n", - ace->data_result); - } else { - ace->media_change = 0; - - /* Record disk parameters */ - set_capacity(ace->gd, - ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY)); - dev_info(ace->dev, "capacity: %i sectors\n", - ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY)); - } - - /* We're done, drop to IDLE state and notify waiters */ - ace->fsm_state = ACE_FSM_STATE_IDLE; - ace->id_result = ace->data_result; - while (ace->id_req_count) { - complete(&ace->id_completion); - ace->id_req_count--; - } - break; - - case ACE_FSM_STATE_REQ_PREPARE: - req = ace_get_next_request(ace->queue); - if (!req) { - ace->fsm_state = ACE_FSM_STATE_IDLE; - break; - } - - /* Okay, it's a data request, set it up for transfer */ - dev_dbg(ace->dev, - "request: sec=%llx hcnt=%x, ccnt=%x, dir=%i\n", - (unsigned long long)blk_rq_pos(req), - blk_rq_sectors(req), blk_rq_cur_sectors(req), - rq_data_dir(req)); - - ace->req = req; - ace->data_ptr = bio_data(req->bio); - ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR; - ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF); - - count = blk_rq_sectors(req); - if (rq_data_dir(req)) { - /* Kick off write request */ - dev_dbg(ace->dev, "write data\n"); - ace->fsm_task = ACE_TASK_WRITE; - ace_out(ace, ACE_SECCNTCMD, - count | ACE_SECCNTCMD_WRITE_DATA); - } else { - /* Kick off read request */ - dev_dbg(ace->dev, "read data\n"); - ace->fsm_task = ACE_TASK_READ; - ace_out(ace, ACE_SECCNTCMD, - count | ACE_SECCNTCMD_READ_DATA); - } - - /* As per datasheet, put config controller in reset */ - val = ace_in(ace, ACE_CTRL); - ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET); - - /* Move to the transfer state. The systemace will raise - * an interrupt once there is something to do - */ - ace->fsm_state = ACE_FSM_STATE_REQ_TRANSFER; - if (ace->fsm_task == ACE_TASK_READ) - ace_fsm_yieldirq(ace); /* wait for data ready */ - break; - - case ACE_FSM_STATE_REQ_TRANSFER: - /* Check that the sysace is ready to receive data */ - status = ace_in32(ace, ACE_STATUS); - if (status & ACE_STATUS_CFBSY) { - dev_dbg(ace->dev, - "CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n", - ace->fsm_task, ace->fsm_iter_num, - blk_rq_cur_sectors(ace->req) * 16, - ace->data_count, ace->in_irq); - ace_fsm_yield(ace); /* need to poll CFBSY bit */ - break; - } - if (!(status & ACE_STATUS_DATABUFRDY)) { - dev_dbg(ace->dev, - "DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n", - ace->fsm_task, ace->fsm_iter_num, - blk_rq_cur_sectors(ace->req) * 16, - ace->data_count, ace->in_irq); - ace_fsm_yieldirq(ace); - break; - } - - /* Transfer the next buffer */ - if (ace->fsm_task == ACE_TASK_WRITE) - ace->reg_ops->dataout(ace); - else - ace->reg_ops->datain(ace); - ace->data_count--; - - /* If there are still buffers to be transfers; jump out here */ - if (ace->data_count != 0) { - ace_fsm_yieldirq(ace); - break; - } - - /* bio finished; is there another one? 
*/ - if (blk_update_request(ace->req, BLK_STS_OK, - blk_rq_cur_bytes(ace->req))) { - /* dev_dbg(ace->dev, "next block; h=%u c=%u\n", - * blk_rq_sectors(ace->req), - * blk_rq_cur_sectors(ace->req)); - */ - ace->data_ptr = bio_data(ace->req->bio); - ace->data_count = blk_rq_cur_sectors(ace->req) * 16; - ace_fsm_yieldirq(ace); - break; - } - - ace->fsm_state = ACE_FSM_STATE_REQ_COMPLETE; - break; - - case ACE_FSM_STATE_REQ_COMPLETE: - ace->req = NULL; - - /* Finished request; go to idle state */ - ace->fsm_state = ACE_FSM_STATE_IDLE; - break; - - default: - ace->fsm_state = ACE_FSM_STATE_IDLE; - break; - } -} - -static void ace_fsm_tasklet(unsigned long data) -{ - struct ace_device *ace = (void *)data; - unsigned long flags; - - spin_lock_irqsave(&ace->lock, flags); - - /* Loop over state machine until told to stop */ - ace->fsm_continue_flag = 1; - while (ace->fsm_continue_flag) - ace_fsm_dostate(ace); - - spin_unlock_irqrestore(&ace->lock, flags); -} - -static void ace_stall_timer(struct timer_list *t) -{ - struct ace_device *ace = from_timer(ace, t, stall_timer); - unsigned long flags; - - dev_warn(ace->dev, - "kicking stalled fsm; state=%i task=%i iter=%i dc=%i\n", - ace->fsm_state, ace->fsm_task, ace->fsm_iter_num, - ace->data_count); - spin_lock_irqsave(&ace->lock, flags); - - /* Rearm the stall timer *before* entering FSM (which may then - * delete the timer) */ - mod_timer(&ace->stall_timer, jiffies + HZ); - - /* Loop over state machine until told to stop */ - ace->fsm_continue_flag = 1; - while (ace->fsm_continue_flag) - ace_fsm_dostate(ace); - - spin_unlock_irqrestore(&ace->lock, flags); -} - -/* --------------------------------------------------------------------- - * Interrupt handling routines - */ -static int ace_interrupt_checkstate(struct ace_device *ace) -{ - u32 sreg = ace_in32(ace, ACE_STATUS); - u16 creg = ace_in(ace, ACE_CTRL); - - /* Check for error occurrence */ - if ((sreg & (ACE_STATUS_CFGERROR | ACE_STATUS_CFCERROR)) && - (creg & ACE_CTRL_ERRORIRQ)) { - dev_err(ace->dev, "transfer failure\n"); - ace_dump_regs(ace); - return -EIO; - } - - return 0; -} - -static irqreturn_t ace_interrupt(int irq, void *dev_id) -{ - u16 creg; - struct ace_device *ace = dev_id; - - /* be safe and get the lock */ - spin_lock(&ace->lock); - ace->in_irq = 1; - - /* clear the interrupt */ - creg = ace_in(ace, ACE_CTRL); - ace_out(ace, ACE_CTRL, creg | ACE_CTRL_RESETIRQ); - ace_out(ace, ACE_CTRL, creg); - - /* check for IO failures */ - if (ace_interrupt_checkstate(ace)) - ace->data_result = -EIO; - - if (ace->fsm_task == 0) { - dev_err(ace->dev, - "spurious irq; stat=%.8x ctrl=%.8x cmd=%.4x\n", - ace_in32(ace, ACE_STATUS), ace_in32(ace, ACE_CTRL), - ace_in(ace, ACE_SECCNTCMD)); - dev_err(ace->dev, "fsm_task=%i fsm_state=%i data_count=%i\n", - ace->fsm_task, ace->fsm_state, ace->data_count); - } - - /* Loop over state machine until told to stop */ - ace->fsm_continue_flag = 1; - while (ace->fsm_continue_flag) - ace_fsm_dostate(ace); - - /* done with interrupt; drop the lock */ - ace->in_irq = 0; - spin_unlock(&ace->lock); - - return IRQ_HANDLED; -} - -/* --------------------------------------------------------------------- - * Block ops - */ -static blk_status_t ace_queue_rq(struct blk_mq_hw_ctx *hctx, - const struct blk_mq_queue_data *bd) -{ - struct ace_device *ace = hctx->queue->queuedata; - struct request *req = bd->rq; - - if (blk_rq_is_passthrough(req)) { - blk_mq_start_request(req); - return BLK_STS_IOERR; - } - - spin_lock_irq(&ace->lock); - list_add_tail(&req->queuelist, 
&ace->rq_list); - spin_unlock_irq(&ace->lock); - - tasklet_schedule(&ace->fsm_tasklet); - return BLK_STS_OK; -} - -static unsigned int ace_check_events(struct gendisk *gd, unsigned int clearing) -{ - struct ace_device *ace = gd->private_data; - dev_dbg(ace->dev, "ace_check_events(): %i\n", ace->media_change); - - return ace->media_change ? DISK_EVENT_MEDIA_CHANGE : 0; -} - -static void ace_media_changed(struct ace_device *ace) -{ - unsigned long flags; - - dev_dbg(ace->dev, "requesting cf id and scheduling tasklet\n"); - - spin_lock_irqsave(&ace->lock, flags); - ace->id_req_count++; - spin_unlock_irqrestore(&ace->lock, flags); - - tasklet_schedule(&ace->fsm_tasklet); - wait_for_completion(&ace->id_completion); - - dev_dbg(ace->dev, "revalidate complete\n"); -} - -static int ace_open(struct block_device *bdev, fmode_t mode) -{ - struct ace_device *ace = bdev->bd_disk->private_data; - unsigned long flags; - - dev_dbg(ace->dev, "ace_open() users=%i\n", ace->users + 1); - - mutex_lock(&xsysace_mutex); - spin_lock_irqsave(&ace->lock, flags); - ace->users++; - spin_unlock_irqrestore(&ace->lock, flags); - - if (bdev_check_media_change(bdev) && ace->media_change) - ace_media_changed(ace); - mutex_unlock(&xsysace_mutex); - - return 0; -} - -static void ace_release(struct gendisk *disk, fmode_t mode) -{ - struct ace_device *ace = disk->private_data; - unsigned long flags; - u16 val; - - dev_dbg(ace->dev, "ace_release() users=%i\n", ace->users - 1); - - mutex_lock(&xsysace_mutex); - spin_lock_irqsave(&ace->lock, flags); - ace->users--; - if (ace->users == 0) { - val = ace_in(ace, ACE_CTRL); - ace_out(ace, ACE_CTRL, val & ~ACE_CTRL_LOCKREQ); - } - spin_unlock_irqrestore(&ace->lock, flags); - mutex_unlock(&xsysace_mutex); -} - -static int ace_getgeo(struct block_device *bdev, struct hd_geometry *geo) -{ - struct ace_device *ace = bdev->bd_disk->private_data; - u16 *cf_id = ace->cf_id; - - dev_dbg(ace->dev, "ace_getgeo()\n"); - - geo->heads = cf_id[ATA_ID_HEADS]; - geo->sectors = cf_id[ATA_ID_SECTORS]; - geo->cylinders = cf_id[ATA_ID_CYLS]; - - return 0; -} - -static const struct block_device_operations ace_fops = { - .owner = THIS_MODULE, - .open = ace_open, - .release = ace_release, - .check_events = ace_check_events, - .getgeo = ace_getgeo, -}; - -static const struct blk_mq_ops ace_mq_ops = { - .queue_rq = ace_queue_rq, -}; - -/* -------------------------------------------------------------------- - * SystemACE device setup/teardown code - */ -static int ace_setup(struct ace_device *ace) -{ - u16 version; - u16 val; - int rc; - - dev_dbg(ace->dev, "ace_setup(ace=0x%p)\n", ace); - dev_dbg(ace->dev, "physaddr=0x%llx irq=%i\n", - (unsigned long long)ace->physaddr, ace->irq); - - spin_lock_init(&ace->lock); - init_completion(&ace->id_completion); - INIT_LIST_HEAD(&ace->rq_list); - - /* - * Map the device - */ - ace->baseaddr = ioremap(ace->physaddr, 0x80); - if (!ace->baseaddr) - goto err_ioremap; - - /* - * Initialize the state machine tasklet and stall timer - */ - tasklet_init(&ace->fsm_tasklet, ace_fsm_tasklet, (unsigned long)ace); - timer_setup(&ace->stall_timer, ace_stall_timer, 0); - - /* - * Initialize the request queue - */ - ace->queue = blk_mq_init_sq_queue(&ace->tag_set, &ace_mq_ops, 2, - BLK_MQ_F_SHOULD_MERGE); - if (IS_ERR(ace->queue)) { - rc = PTR_ERR(ace->queue); - ace->queue = NULL; - goto err_blk_initq; - } - ace->queue->queuedata = ace; - - blk_queue_logical_block_size(ace->queue, 512); - blk_queue_bounce_limit(ace->queue, BLK_BOUNCE_HIGH); - - /* - * Allocate and initialize GD 
structure - */ - ace->gd = alloc_disk(ACE_NUM_MINORS); - if (!ace->gd) - goto err_alloc_disk; - - ace->gd->major = ace_major; - ace->gd->first_minor = ace->id * ACE_NUM_MINORS; - ace->gd->fops = &ace_fops; - ace->gd->events = DISK_EVENT_MEDIA_CHANGE; - ace->gd->queue = ace->queue; - ace->gd->private_data = ace; - snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a'); - - /* set bus width */ - if (ace->bus_width == ACE_BUS_WIDTH_16) { - /* 0x0101 should work regardless of endianess */ - ace_out_le16(ace, ACE_BUSMODE, 0x0101); - - /* read it back to determine endianess */ - if (ace_in_le16(ace, ACE_BUSMODE) == 0x0001) - ace->reg_ops = &ace_reg_le16_ops; - else - ace->reg_ops = &ace_reg_be16_ops; - } else { - ace_out_8(ace, ACE_BUSMODE, 0x00); - ace->reg_ops = &ace_reg_8_ops; - } - - /* Make sure version register is sane */ - version = ace_in(ace, ACE_VERSION); - if ((version == 0) || (version == 0xFFFF)) - goto err_read; - - /* Put sysace in a sane state by clearing most control reg bits */ - ace_out(ace, ACE_CTRL, ACE_CTRL_FORCECFGMODE | - ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ); - - /* Now we can hook up the irq handler */ - if (ace->irq > 0) { - rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace); - if (rc) { - /* Failure - fall back to polled mode */ - dev_err(ace->dev, "request_irq failed\n"); - ace->irq = rc; - } - } - - /* Enable interrupts */ - val = ace_in(ace, ACE_CTRL); - val |= ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ; - ace_out(ace, ACE_CTRL, val); - - /* Print the identification */ - dev_info(ace->dev, "Xilinx SystemACE revision %i.%i.%i\n", - (version >> 12) & 0xf, (version >> 8) & 0x0f, version & 0xff); - dev_dbg(ace->dev, "physaddr 0x%llx, mapped to 0x%p, irq=%i\n", - (unsigned long long) ace->physaddr, ace->baseaddr, ace->irq); - - ace->media_change = 1; - ace_media_changed(ace); - - /* Make the sysace device 'live' */ - add_disk(ace->gd); - - return 0; - -err_read: - /* prevent double queue cleanup */ - ace->gd->queue = NULL; - put_disk(ace->gd); -err_alloc_disk: - blk_cleanup_queue(ace->queue); - blk_mq_free_tag_set(&ace->tag_set); -err_blk_initq: - iounmap(ace->baseaddr); -err_ioremap: - dev_info(ace->dev, "xsysace: error initializing device at 0x%llx\n", - (unsigned long long) ace->physaddr); - return -ENOMEM; -} - -static void ace_teardown(struct ace_device *ace) -{ - if (ace->gd) { - del_gendisk(ace->gd); - put_disk(ace->gd); - } - - if (ace->queue) { - blk_cleanup_queue(ace->queue); - blk_mq_free_tag_set(&ace->tag_set); - } - - tasklet_kill(&ace->fsm_tasklet); - - if (ace->irq > 0) - free_irq(ace->irq, ace); - - iounmap(ace->baseaddr); -} - -static int ace_alloc(struct device *dev, int id, resource_size_t physaddr, - int irq, int bus_width) -{ - struct ace_device *ace; - int rc; - dev_dbg(dev, "ace_alloc(%p)\n", dev); - - /* Allocate and initialize the ace device structure */ - ace = kzalloc(sizeof(struct ace_device), GFP_KERNEL); - if (!ace) { - rc = -ENOMEM; - goto err_alloc; - } - - ace->dev = dev; - ace->id = id; - ace->physaddr = physaddr; - ace->irq = irq; - ace->bus_width = bus_width; - - /* Call the setup code */ - rc = ace_setup(ace); - if (rc) - goto err_setup; - - dev_set_drvdata(dev, ace); - return 0; - -err_setup: - dev_set_drvdata(dev, NULL); - kfree(ace); -err_alloc: - dev_err(dev, "could not initialize device, err=%i\n", rc); - return rc; -} - -static void ace_free(struct device *dev) -{ - struct ace_device *ace = dev_get_drvdata(dev); - dev_dbg(dev, "ace_free(%p)\n", dev); - - if (ace) { - ace_teardown(ace); - dev_set_drvdata(dev, 
NULL);
-		kfree(ace);
-	}
-}
-
-/* ---------------------------------------------------------------------
- * Platform Bus Support
- */
-
-static int ace_probe(struct platform_device *dev)
-{
-	int bus_width = ACE_BUS_WIDTH_16; /* FIXME: should not be hard coded */
-	resource_size_t physaddr;
-	struct resource *res;
-	u32 id = dev->id;
-	int irq;
-	int i;
-
-	dev_dbg(&dev->dev, "ace_probe(%p)\n", dev);
-
-	/* device id and bus width */
-	if (of_property_read_u32(dev->dev.of_node, "port-number", &id))
-		id = 0;
-	if (of_find_property(dev->dev.of_node, "8-bit", NULL))
-		bus_width = ACE_BUS_WIDTH_8;
-
-	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -EINVAL;
-
-	physaddr = res->start;
-	if (!physaddr)
-		return -ENODEV;
-
-	irq = platform_get_irq_optional(dev, 0);
-
-	/* Call the bus-independent setup code */
-	return ace_alloc(&dev->dev, id, physaddr, irq, bus_width);
-}
-
-/*
- * Platform bus remove() method
- */
-static int ace_remove(struct platform_device *dev)
-{
-	ace_free(&dev->dev);
-	return 0;
-}
-
-#if defined(CONFIG_OF)
-/* Match table for of_platform binding */
-static const struct of_device_id ace_of_match[] = {
-	{ .compatible = "xlnx,opb-sysace-1.00.b", },
-	{ .compatible = "xlnx,opb-sysace-1.00.c", },
-	{ .compatible = "xlnx,xps-sysace-1.00.a", },
-	{ .compatible = "xlnx,sysace", },
-	{},
-};
-MODULE_DEVICE_TABLE(of, ace_of_match);
-#else /* CONFIG_OF */
-#define ace_of_match NULL
-#endif /* CONFIG_OF */
-
-static struct platform_driver ace_platform_driver = {
-	.probe = ace_probe,
-	.remove = ace_remove,
-	.driver = {
-		.name = "xsysace",
-		.of_match_table = ace_of_match,
-	},
-};
-
-/* ---------------------------------------------------------------------
- * Module init/exit routines
- */
-static int __init ace_init(void)
-{
-	int rc;
-
-	ace_major = register_blkdev(ace_major, "xsysace");
-	if (ace_major <= 0) {
-		rc = -ENOMEM;
-		goto err_blk;
-	}
-
-	rc = platform_driver_register(&ace_platform_driver);
-	if (rc)
-		goto err_plat;
-
-	pr_info("Xilinx SystemACE device driver, major=%i\n", ace_major);
-	return 0;
-
-err_plat:
-	unregister_blkdev(ace_major, "xsysace");
-err_blk:
-	printk(KERN_ERR "xsysace: registration failed; err=%i\n", rc);
-	return rc;
-}
-module_init(ace_init);
-
-static void __exit ace_exit(void)
-{
-	pr_debug("Unregistering Xilinx SystemACE driver\n");
-	platform_driver_unregister(&ace_platform_driver);
-	unregister_blkdev(ace_major, "xsysace");
-}
-module_exit(ace_exit);