Diffstat (limited to 'drivers')
585 files changed, 20474 insertions, 9837 deletions
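A recurring cleanup in this changeset replaces IS_ERR_VALUE(ret) with a plain ret < 0 test on int-typed error returns (acpi_dbg.c, sata_highbank.c, clk-tegra210.c, omap-cpufreq.c, sun4i-dma.c, gpio-xlp.c below). IS_ERR_VALUE() is intended for unsigned long values that encode either a pointer or a negative errno; on a plain int it only works by accident of sign extension, and on a u32 under a 64-bit kernel the zero extension makes the test always false, silently swallowing errors. A minimal userspace sketch of the preferred pattern follows; get_value() is a hypothetical stand-in for the drivers' lock/convert helpers, not a kernel API:

#include <errno.h>
#include <stdio.h>

/* Hypothetical helper standing in for calls like acpi_aml_lock_write()
 * or convert_burst(): returns a non-negative count on success or a
 * negative errno on failure. */
static int get_value(int fail)
{
	return fail ? -EAGAIN : 42;
}

int main(void)
{
	int ret = get_value(0);

	/* The pattern adopted throughout this diff: a direct sign test
	 * on the int return value instead of IS_ERR_VALUE(ret). */
	if (ret < 0)
		return 1;
	printf("got %d\n", ret);
	return 0;
}

The open-coded comparison is both type-safe and clearer about intent, which is why the hunks below apply it uniformly.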
diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c index 15e4604efba7..1f4128487dd4 100644 --- a/drivers/acpi/acpi_dbg.c +++ b/drivers/acpi/acpi_dbg.c @@ -265,7 +265,7 @@ static int acpi_aml_write_kern(const char *buf, int len) char *p; ret = acpi_aml_lock_write(crc, ACPI_AML_OUT_KERN); - if (IS_ERR_VALUE(ret)) + if (ret < 0) return ret; /* sync tail before inserting logs */ smp_mb(); @@ -286,7 +286,7 @@ static int acpi_aml_readb_kern(void) char *p; ret = acpi_aml_lock_read(crc, ACPI_AML_IN_KERN); - if (IS_ERR_VALUE(ret)) + if (ret < 0) return ret; /* sync head before removing cmds */ smp_rmb(); @@ -330,7 +330,7 @@ again: goto again; break; } - if (IS_ERR_VALUE(ret)) + if (ret < 0) break; size += ret; count -= ret; @@ -373,7 +373,7 @@ again: if (ret == 0) goto again; } - if (IS_ERR_VALUE(ret)) + if (ret < 0) break; *(msg + size) = (char)ret; size++; @@ -526,7 +526,7 @@ static int acpi_aml_open(struct inode *inode, struct file *file) } acpi_aml_io.users++; err_lock: - if (IS_ERR_VALUE(ret)) { + if (ret < 0) { if (acpi_aml_active_reader == file) acpi_aml_active_reader = NULL; } @@ -587,7 +587,7 @@ static int acpi_aml_read_user(char __user *buf, int len) char *p; ret = acpi_aml_lock_read(crc, ACPI_AML_OUT_USER); - if (IS_ERR_VALUE(ret)) + if (ret < 0) return ret; /* sync head before removing logs */ smp_rmb(); @@ -602,7 +602,7 @@ static int acpi_aml_read_user(char __user *buf, int len) crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1); ret = n; out: - acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, !IS_ERR_VALUE(ret)); + acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, !ret); return ret; } @@ -634,7 +634,7 @@ again: goto again; } } - if (IS_ERR_VALUE(ret)) { + if (ret < 0) { if (!acpi_aml_running()) ret = 0; break; @@ -657,7 +657,7 @@ static int acpi_aml_write_user(const char __user *buf, int len) char *p; ret = acpi_aml_lock_write(crc, ACPI_AML_IN_USER); - if (IS_ERR_VALUE(ret)) + if (ret < 0) return ret; /* sync tail before inserting cmds */ smp_mb(); @@ -672,7 +672,7 @@ static int acpi_aml_write_user(const char __user *buf, int len) crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1); ret = n; out: - acpi_aml_unlock_fifo(ACPI_AML_IN_USER, !IS_ERR_VALUE(ret)); + acpi_aml_unlock_fifo(ACPI_AML_IN_USER, !ret); return n; } @@ -704,7 +704,7 @@ again: goto again; } } - if (IS_ERR_VALUE(ret)) { + if (ret < 0) { if (!acpi_aml_running()) ret = 0; break; diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index 0d92d0f915e9..c7ba948d253c 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c @@ -331,15 +331,6 @@ static int acpi_processor_get_info(struct acpi_device *device) pr->throttling.duty_width = acpi_gbl_FADT.duty_width; pr->pblk = object.processor.pblk_address; - - /* - * We don't care about error returns - we just try to mark - * these reserved so that nobody else is confused into thinking - * that this region might be unused.. 
- * - * (In particular, allocating the IO range for Cardbus) - */ - request_region(pr->throttling.address, 6, "ACPI CPU throttle"); } /* diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index 3d5b8a099351..c1d138e128cb 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -754,7 +754,8 @@ static int acpi_video_bqc_quirk(struct acpi_video_device *device, } int acpi_video_get_levels(struct acpi_device *device, - struct acpi_video_device_brightness **dev_br) + struct acpi_video_device_brightness **dev_br, + int *pmax_level) { union acpi_object *obj = NULL; int i, max_level = 0, count = 0, level_ac_battery = 0; @@ -841,6 +842,8 @@ int acpi_video_get_levels(struct acpi_device *device, br->count = count; *dev_br = br; + if (pmax_level) + *pmax_level = max_level; out: kfree(obj); @@ -869,7 +872,7 @@ acpi_video_init_brightness(struct acpi_video_device *device) struct acpi_video_device_brightness *br = NULL; int result = -EINVAL; - result = acpi_video_get_levels(device->dev, &br); + result = acpi_video_get_levels(device->dev, &br, &max_level); if (result) return result; device->brightness = br; @@ -1737,7 +1740,7 @@ static void acpi_video_run_bcl_for_osi(struct acpi_video_bus *video) mutex_lock(&video->device_list_lock); list_for_each_entry(dev, &video->video_device_list, entry) { - if (!acpi_video_device_lcd_query_levels(dev, &levels)) + if (!acpi_video_device_lcd_query_levels(dev->dev->handle, &levels)) kfree(levels); } mutex_unlock(&video->device_list_lock); diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c index 0f18dbc9a37f..daceb80022b0 100644 --- a/drivers/acpi/acpica/hwregs.c +++ b/drivers/acpi/acpica/hwregs.c @@ -83,27 +83,22 @@ acpi_hw_write_multiple(u32 value, static u8 acpi_hw_get_access_bit_width(struct acpi_generic_address *reg, u8 max_bit_width) { - u64 address; - if (!reg->access_width) { + if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { + max_bit_width = 32; + } + /* * Detect old register descriptors where only the bit_width field - * makes senses. The target address is copied to handle possible - * alignment issues. + * makes senses. 
*/ - ACPI_MOVE_64_TO_64(&address, ®->address); - if (!reg->bit_offset && reg->bit_width && + if (reg->bit_width < max_bit_width && + !reg->bit_offset && reg->bit_width && ACPI_IS_POWER_OF_TWO(reg->bit_width) && - ACPI_IS_ALIGNED(reg->bit_width, 8) && - ACPI_IS_ALIGNED(address, reg->bit_width)) { + ACPI_IS_ALIGNED(reg->bit_width, 8)) { return (reg->bit_width); - } else { - if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { - return (32); - } else { - return (max_bit_width); - } } + return (max_bit_width); } else { return (1 << (reg->access_width + 2)); } diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index b719ab3090bb..ab234791a0ba 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -1316,7 +1316,7 @@ static int __init acpi_battery_init(void) static void __exit acpi_battery_exit(void) { - async_synchronize_cookie(async_cookie); + async_synchronize_cookie(async_cookie + 1); acpi_bus_unregister_driver(&acpi_battery_driver); #ifdef CONFIG_ACPI_PROCFS_POWER acpi_unlock_battery_dir(acpi_battery_dir); diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index cd2c3d6d40e0..993fd31394c8 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -319,6 +319,7 @@ int acpi_device_fix_up_power(struct acpi_device *device) return ret; } +EXPORT_SYMBOL_GPL(acpi_device_fix_up_power); int acpi_device_update_power(struct acpi_device *device, int *state_p) { diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index f170d746336d..c72e64893d03 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c @@ -676,6 +676,15 @@ static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr) if (!pr->flags.throttling) return -ENODEV; + /* + * We don't care about error returns - we just try to mark + * these reserved so that nobody else is confused into thinking + * that this region might be unused.. + * + * (In particular, allocating the IO range for Cardbus) + */ + request_region(pr->throttling.address, 6, "ACPI CPU throttle"); + pr->throttling.state = 0; duty_mask = pr->throttling.state_count - 1; diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c index 8638d575b2b9..aafb8cc03523 100644 --- a/drivers/ata/sata_highbank.c +++ b/drivers/ata/sata_highbank.c @@ -197,7 +197,7 @@ static void highbank_set_em_messages(struct device *dev, for (i = 0; i < SGPIO_PINS; i++) { err = of_get_named_gpio(np, "calxeda,sgpio-gpio", i); - if (IS_ERR_VALUE(err)) + if (err < 0) return; pdata->sgpio_gpio[i] = err; diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index a969a7e443be..85aaf2222587 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c @@ -181,13 +181,17 @@ static char *res_strings[] = { "reserved 27", "reserved 28", "reserved 29", - "reserved 30", + "reserved 30", /* FIXME: The strings between 30-40 might be wrong. 
*/ "reassembly abort: no buffers", "receive buffer overflow", "change in GFC", "receive buffer full", "low priority discard - no receive descriptor", "low priority discard - missing end of packet", + "reserved 37", + "reserved 38", + "reserved 39", + "reseverd 40", "reserved 41", "reserved 42", "reserved 43", diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index 7d00f2994738..809dd1e02091 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c @@ -1128,7 +1128,7 @@ static int rx_pkt(struct atm_dev *dev) /* make the ptr point to the corresponding buffer desc entry */ buf_desc_ptr += desc; if (!desc || (desc > iadev->num_rx_desc) || - ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) { + ((buf_desc_ptr->vc_index & 0xffff) >= iadev->num_vc)) { free_desc(dev, desc); IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);) return -1; diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index c81667d4bb60..e44944f4be77 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -1267,14 +1267,15 @@ int dpm_suspend_late(pm_message_t state) error = device_suspend_late(dev); mutex_lock(&dpm_list_mtx); + if (!list_empty(&dev->power.entry)) + list_move(&dev->power.entry, &dpm_late_early_list); + if (error) { pm_dev_err(dev, state, " late", error); dpm_save_failed_dev(dev_name(dev)); put_device(dev); break; } - if (!list_empty(&dev->power.entry)) - list_move(&dev->power.entry, &dpm_late_early_list); put_device(dev); if (async_error) diff --git a/drivers/bcma/driver_chipcommon_sflash.c b/drivers/bcma/driver_chipcommon_sflash.c index 04d706ca5f43..35b13a08ca3e 100644 --- a/drivers/bcma/driver_chipcommon_sflash.c +++ b/drivers/bcma/driver_chipcommon_sflash.c @@ -146,7 +146,6 @@ int bcma_sflash_init(struct bcma_drv_cc *cc) return -ENOTSUPP; } - sflash->window = BCMA_SOC_FLASH2; sflash->blocksize = e->blocksize; sflash->numblocks = e->numblocks; sflash->size = sflash->blocksize * sflash->numblocks; diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 51a071e32221..c04bd9bc39fd 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -381,7 +381,7 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector, #ifdef CONFIG_BLK_DEV_RAM_DAX static long brd_direct_access(struct block_device *bdev, sector_t sector, - void __pmem **kaddr, pfn_t *pfn) + void __pmem **kaddr, pfn_t *pfn, long size) { struct brd_device *brd = bdev->bd_disk->private_data; struct page *page; diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 0ede6d7e2568..81666a56415e 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -350,12 +350,12 @@ struct rbd_device { struct rbd_spec *spec; struct rbd_options *opts; - char *header_name; + struct ceph_object_id header_oid; + struct ceph_object_locator header_oloc; struct ceph_file_layout layout; - struct ceph_osd_event *watch_event; - struct rbd_obj_request *watch_request; + struct ceph_osd_linger_request *watch_handle; struct rbd_spec *parent_spec; u64 parent_overlap; @@ -1596,12 +1596,6 @@ static int rbd_obj_request_wait(struct rbd_obj_request *obj_request) return __rbd_obj_request_wait(obj_request, 0); } -static int rbd_obj_request_wait_timeout(struct rbd_obj_request *obj_request, - unsigned long timeout) -{ - return __rbd_obj_request_wait(obj_request, timeout); -} - static void rbd_img_request_complete(struct rbd_img_request *img_request) { @@ -1751,12 +1745,6 @@ static void rbd_obj_request_complete(struct rbd_obj_request *obj_request) complete_all(&obj_request->completion); } -static void 
rbd_osd_trivial_callback(struct rbd_obj_request *obj_request) -{ - dout("%s: obj %p\n", __func__, obj_request); - obj_request_done_set(obj_request); -} - static void rbd_osd_read_callback(struct rbd_obj_request *obj_request) { struct rbd_img_request *img_request = NULL; @@ -1828,13 +1816,12 @@ static void rbd_osd_call_callback(struct rbd_obj_request *obj_request) obj_request_done_set(obj_request); } -static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, - struct ceph_msg *msg) +static void rbd_osd_req_callback(struct ceph_osd_request *osd_req) { struct rbd_obj_request *obj_request = osd_req->r_priv; u16 opcode; - dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg); + dout("%s: osd_req %p\n", __func__, osd_req); rbd_assert(osd_req == obj_request->osd_req); if (obj_request_img_data_test(obj_request)) { rbd_assert(obj_request->img_request); @@ -1878,10 +1865,6 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, case CEPH_OSD_OP_CALL: rbd_osd_call_callback(obj_request); break; - case CEPH_OSD_OP_NOTIFY_ACK: - case CEPH_OSD_OP_WATCH: - rbd_osd_trivial_callback(obj_request); - break; default: rbd_warn(NULL, "%s: unsupported op %hu", obj_request->object_name, (unsigned short) opcode); @@ -1896,27 +1879,17 @@ static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request) { struct rbd_img_request *img_request = obj_request->img_request; struct ceph_osd_request *osd_req = obj_request->osd_req; - u64 snap_id; - rbd_assert(osd_req != NULL); - - snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP; - ceph_osdc_build_request(osd_req, obj_request->offset, - NULL, snap_id, NULL); + if (img_request) + osd_req->r_snapid = img_request->snap_id; } static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request) { - struct rbd_img_request *img_request = obj_request->img_request; struct ceph_osd_request *osd_req = obj_request->osd_req; - struct ceph_snap_context *snapc; - struct timespec mtime = CURRENT_TIME; - rbd_assert(osd_req != NULL); - - snapc = img_request ? 
img_request->snapc : NULL; - ceph_osdc_build_request(osd_req, obj_request->offset, - snapc, CEPH_NOSNAP, &mtime); + osd_req->r_mtime = CURRENT_TIME; + osd_req->r_data_offset = obj_request->offset; } /* @@ -1954,7 +1927,7 @@ static struct ceph_osd_request *rbd_osd_req_create( osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, GFP_NOIO); if (!osd_req) - return NULL; /* ENOMEM */ + goto fail; if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD) osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK; @@ -1965,9 +1938,18 @@ static struct ceph_osd_request *rbd_osd_req_create( osd_req->r_priv = obj_request; osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout); - ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name); + if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s", + obj_request->object_name)) + goto fail; + + if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO)) + goto fail; return osd_req; + +fail: + ceph_osdc_put_request(osd_req); + return NULL; } /* @@ -2003,16 +1985,25 @@ rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request) osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops, false, GFP_NOIO); if (!osd_req) - return NULL; /* ENOMEM */ + goto fail; osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK; osd_req->r_callback = rbd_osd_req_callback; osd_req->r_priv = obj_request; osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout); - ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name); + if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s", + obj_request->object_name)) + goto fail; + + if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO)) + goto fail; return osd_req; + +fail: + ceph_osdc_put_request(osd_req); + return NULL; } @@ -2973,17 +2964,20 @@ static int rbd_img_request_submit(struct rbd_img_request *img_request) { struct rbd_obj_request *obj_request; struct rbd_obj_request *next_obj_request; + int ret = 0; dout("%s: img %p\n", __func__, img_request); - for_each_obj_request_safe(img_request, obj_request, next_obj_request) { - int ret; + rbd_img_request_get(img_request); + for_each_obj_request_safe(img_request, obj_request, next_obj_request) { ret = rbd_img_obj_request_submit(obj_request); if (ret) - return ret; + goto out_put_ireq; } - return 0; +out_put_ireq: + rbd_img_request_put(img_request); + return ret; } static void rbd_img_parent_read_callback(struct rbd_img_request *img_request) @@ -3090,45 +3084,18 @@ out_err: obj_request_done_set(obj_request); } -static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id) -{ - struct rbd_obj_request *obj_request; - struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; - int ret; - - obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0, - OBJ_REQUEST_NODATA); - if (!obj_request) - return -ENOMEM; - - ret = -ENOMEM; - obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1, - obj_request); - if (!obj_request->osd_req) - goto out; - - osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK, - notify_id, 0, 0); - rbd_osd_req_format_read(obj_request); +static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev); +static void __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev); - ret = rbd_obj_request_submit(osdc, obj_request); - if (ret) - goto out; - ret = rbd_obj_request_wait(obj_request); -out: - rbd_obj_request_put(obj_request); - - return ret; -} - -static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data) +static void rbd_watch_cb(void *arg, 
u64 notify_id, u64 cookie, + u64 notifier_id, void *data, size_t data_len) { - struct rbd_device *rbd_dev = (struct rbd_device *)data; + struct rbd_device *rbd_dev = arg; + struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; int ret; - dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__, - rbd_dev->header_name, (unsigned long long)notify_id, - (unsigned int)opcode); + dout("%s rbd_dev %p cookie %llu notify_id %llu\n", __func__, rbd_dev, + cookie, notify_id); /* * Until adequate refresh error handling is in place, there is @@ -3140,63 +3107,31 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data) if (ret) rbd_warn(rbd_dev, "refresh failed: %d", ret); - ret = rbd_obj_notify_ack_sync(rbd_dev, notify_id); + ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid, + &rbd_dev->header_oloc, notify_id, cookie, + NULL, 0); if (ret) rbd_warn(rbd_dev, "notify_ack ret %d", ret); } -/* - * Send a (un)watch request and wait for the ack. Return a request - * with a ref held on success or error. - */ -static struct rbd_obj_request *rbd_obj_watch_request_helper( - struct rbd_device *rbd_dev, - bool watch) +static void rbd_watch_errcb(void *arg, u64 cookie, int err) { - struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; - struct ceph_options *opts = osdc->client->options; - struct rbd_obj_request *obj_request; + struct rbd_device *rbd_dev = arg; int ret; - obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0, - OBJ_REQUEST_NODATA); - if (!obj_request) - return ERR_PTR(-ENOMEM); - - obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_WRITE, 1, - obj_request); - if (!obj_request->osd_req) { - ret = -ENOMEM; - goto out; - } - - osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH, - rbd_dev->watch_event->cookie, 0, watch); - rbd_osd_req_format_write(obj_request); + rbd_warn(rbd_dev, "encountered watch error: %d", err); - if (watch) - ceph_osdc_set_request_linger(osdc, obj_request->osd_req); - - ret = rbd_obj_request_submit(osdc, obj_request); - if (ret) - goto out; + __rbd_dev_header_unwatch_sync(rbd_dev); - ret = rbd_obj_request_wait_timeout(obj_request, opts->mount_timeout); - if (ret) - goto out; - - ret = obj_request->result; + ret = rbd_dev_header_watch_sync(rbd_dev); if (ret) { - if (watch) - rbd_obj_request_end(obj_request); - goto out; + rbd_warn(rbd_dev, "failed to reregister watch: %d", ret); + return; } - return obj_request; - -out: - rbd_obj_request_put(obj_request); - return ERR_PTR(ret); + ret = rbd_dev_refresh(rbd_dev); + if (ret) + rbd_warn(rbd_dev, "reregisteration refresh failed: %d", ret); } /* @@ -3205,35 +3140,33 @@ out: static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev) { struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; - struct rbd_obj_request *obj_request; - int ret; + struct ceph_osd_linger_request *handle; - rbd_assert(!rbd_dev->watch_event); - rbd_assert(!rbd_dev->watch_request); + rbd_assert(!rbd_dev->watch_handle); - ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev, - &rbd_dev->watch_event); - if (ret < 0) - return ret; + handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid, + &rbd_dev->header_oloc, rbd_watch_cb, + rbd_watch_errcb, rbd_dev); + if (IS_ERR(handle)) + return PTR_ERR(handle); - obj_request = rbd_obj_watch_request_helper(rbd_dev, true); - if (IS_ERR(obj_request)) { - ceph_osdc_cancel_event(rbd_dev->watch_event); - rbd_dev->watch_event = NULL; - return PTR_ERR(obj_request); - } + rbd_dev->watch_handle = handle; + return 0; +} - /* - * A watch request 
is set to linger, so the underlying osd - * request won't go away until we unregister it. We retain - * a pointer to the object request during that time (in - * rbd_dev->watch_request), so we'll keep a reference to it. - * We'll drop that reference after we've unregistered it in - * rbd_dev_header_unwatch_sync(). - */ - rbd_dev->watch_request = obj_request; +static void __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev) +{ + struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; + int ret; - return 0; + if (!rbd_dev->watch_handle) + return; + + ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle); + if (ret) + rbd_warn(rbd_dev, "failed to unwatch: %d", ret); + + rbd_dev->watch_handle = NULL; } /* @@ -3241,24 +3174,7 @@ static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev) */ static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev) { - struct rbd_obj_request *obj_request; - - rbd_assert(rbd_dev->watch_event); - rbd_assert(rbd_dev->watch_request); - - rbd_obj_request_end(rbd_dev->watch_request); - rbd_obj_request_put(rbd_dev->watch_request); - rbd_dev->watch_request = NULL; - - obj_request = rbd_obj_watch_request_helper(rbd_dev, false); - if (!IS_ERR(obj_request)) - rbd_obj_request_put(obj_request); - else - rbd_warn(rbd_dev, "unable to tear down watch request (%ld)", - PTR_ERR(obj_request)); - - ceph_osdc_cancel_event(rbd_dev->watch_event); - rbd_dev->watch_event = NULL; + __rbd_dev_header_unwatch_sync(rbd_dev); dout("%s flushing notifies\n", __func__); ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc); @@ -3591,7 +3507,7 @@ static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev) if (!ondisk) return -ENOMEM; - ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name, + ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_oid.name, 0, size, ondisk); if (ret < 0) goto out; @@ -4033,6 +3949,8 @@ static void rbd_dev_release(struct device *dev) struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); bool need_put = !!rbd_dev->opts; + ceph_oid_destroy(&rbd_dev->header_oid); + rbd_put_client(rbd_dev->rbd_client); rbd_spec_put(rbd_dev->spec); kfree(rbd_dev->opts); @@ -4063,6 +3981,9 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc, INIT_LIST_HEAD(&rbd_dev->node); init_rwsem(&rbd_dev->header_rwsem); + ceph_oid_init(&rbd_dev->header_oid); + ceph_oloc_init(&rbd_dev->header_oloc); + rbd_dev->dev.bus = &rbd_bus_type; rbd_dev->dev.type = &rbd_device_type; rbd_dev->dev.parent = &rbd_root_dev; @@ -4111,7 +4032,7 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id, __le64 size; } __attribute__ ((packed)) size_buf = { 0 }; - ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, + ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name, "rbd", "get_size", &snapid, sizeof (snapid), &size_buf, sizeof (size_buf)); @@ -4151,7 +4072,7 @@ static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev) if (!reply_buf) return -ENOMEM; - ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, + ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name, "rbd", "get_object_prefix", NULL, 0, reply_buf, RBD_OBJ_PREFIX_LEN_MAX); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); @@ -4186,7 +4107,7 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id, u64 unsup; int ret; - ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, + ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name, "rbd", "get_features", &snapid, sizeof (snapid), &features_buf, sizeof (features_buf)); @@ -4248,7 
+4169,7 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) } snapid = cpu_to_le64(rbd_dev->spec->snap_id); - ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, + ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name, "rbd", "get_parent", &snapid, sizeof (snapid), reply_buf, size); @@ -4351,7 +4272,7 @@ static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev) u64 stripe_count; int ret; - ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, + ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name, "rbd", "get_stripe_unit_count", NULL, 0, (char *)&striping_info_buf, size); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); @@ -4599,7 +4520,7 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev) if (!reply_buf) return -ENOMEM; - ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, + ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name, "rbd", "get_snapcontext", NULL, 0, reply_buf, size); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); @@ -4664,7 +4585,7 @@ static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, return ERR_PTR(-ENOMEM); snapid = cpu_to_le64(snap_id); - ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, + ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name, "rbd", "get_snapshot_name", &snapid, sizeof (snapid), reply_buf, size); @@ -4975,13 +4896,13 @@ static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name) again: ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name); if (ret == -ENOENT && tries++ < 1) { - ret = ceph_monc_do_get_version(&rbdc->client->monc, "osdmap", - &newest_epoch); + ret = ceph_monc_get_version(&rbdc->client->monc, "osdmap", + &newest_epoch); if (ret < 0) return ret; if (rbdc->client->osdc.osdmap->epoch < newest_epoch) { - ceph_monc_request_next_osdmap(&rbdc->client->monc); + ceph_osdc_maybe_request_map(&rbdc->client->osdc); (void) ceph_monc_wait_osdmap(&rbdc->client->monc, newest_epoch, opts->mount_timeout); @@ -5260,35 +5181,26 @@ err_out_unlock: static int rbd_dev_header_name(struct rbd_device *rbd_dev) { struct rbd_spec *spec = rbd_dev->spec; - size_t size; + int ret; /* Record the header object name for this rbd image. 
*/ rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); + rbd_dev->header_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout); if (rbd_dev->image_format == 1) - size = strlen(spec->image_name) + sizeof (RBD_SUFFIX); + ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s", + spec->image_name, RBD_SUFFIX); else - size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id); - - rbd_dev->header_name = kmalloc(size, GFP_KERNEL); - if (!rbd_dev->header_name) - return -ENOMEM; + ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s", + RBD_HEADER_PREFIX, spec->image_id); - if (rbd_dev->image_format == 1) - sprintf(rbd_dev->header_name, "%s%s", - spec->image_name, RBD_SUFFIX); - else - sprintf(rbd_dev->header_name, "%s%s", - RBD_HEADER_PREFIX, spec->image_id); - return 0; + return ret; } static void rbd_dev_image_release(struct rbd_device *rbd_dev) { rbd_dev_unprobe(rbd_dev); - kfree(rbd_dev->header_name); - rbd_dev->header_name = NULL; rbd_dev->image_format = 0; kfree(rbd_dev->spec->image_id); rbd_dev->spec->image_id = NULL; @@ -5327,7 +5239,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth) pr_info("image %s/%s does not exist\n", rbd_dev->spec->pool_name, rbd_dev->spec->image_name); - goto out_header_name; + goto err_out_format; } } @@ -5373,7 +5285,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth) goto err_out_probe; dout("discovered format %u image, header name is %s\n", - rbd_dev->image_format, rbd_dev->header_name); + rbd_dev->image_format, rbd_dev->header_oid.name); return 0; err_out_probe: @@ -5381,9 +5293,6 @@ err_out_probe: err_out_watch: if (!depth) rbd_dev_header_unwatch_sync(rbd_dev); -out_header_name: - kfree(rbd_dev->header_name); - rbd_dev->header_name = NULL; err_out_format: rbd_dev->image_format = 0; kfree(rbd_dev->spec->image_id); diff --git a/drivers/clk/clk-pwm.c b/drivers/clk/clk-pwm.c index 883045814dac..1630a1f085f7 100644 --- a/drivers/clk/clk-pwm.c +++ b/drivers/clk/clk-pwm.c @@ -59,6 +59,7 @@ static int clk_pwm_probe(struct platform_device *pdev) struct clk_init_data init; struct clk_pwm *clk_pwm; struct pwm_device *pwm; + struct pwm_args pargs; const char *clk_name; struct clk *clk; int ret; @@ -71,22 +72,28 @@ static int clk_pwm_probe(struct platform_device *pdev) if (IS_ERR(pwm)) return PTR_ERR(pwm); - if (!pwm->period) { + pwm_get_args(pwm, &pargs); + if (!pargs.period) { dev_err(&pdev->dev, "invalid PWM period\n"); return -EINVAL; } if (of_property_read_u32(node, "clock-frequency", &clk_pwm->fixed_rate)) - clk_pwm->fixed_rate = NSEC_PER_SEC / pwm->period; + clk_pwm->fixed_rate = NSEC_PER_SEC / pargs.period; - if (pwm->period != NSEC_PER_SEC / clk_pwm->fixed_rate && - pwm->period != DIV_ROUND_UP(NSEC_PER_SEC, clk_pwm->fixed_rate)) { + if (pargs.period != NSEC_PER_SEC / clk_pwm->fixed_rate && + pargs.period != DIV_ROUND_UP(NSEC_PER_SEC, clk_pwm->fixed_rate)) { dev_err(&pdev->dev, "clock-frequency does not match PWM period\n"); return -EINVAL; } - ret = pwm_config(pwm, (pwm->period + 1) >> 1, pwm->period); + /* + * FIXME: pwm_apply_args() should be removed when switching to the + * atomic PWM API. 
+ */ + pwm_apply_args(pwm); + ret = pwm_config(pwm, (pargs.period + 1) >> 1, pargs.period); if (ret < 0) return ret; diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c index b8551813ec43..456cf586d2c2 100644 --- a/drivers/clk/tegra/clk-tegra210.c +++ b/drivers/clk/tegra/clk-tegra210.c @@ -1221,7 +1221,7 @@ static int tegra210_pll_fixed_mdiv_cfg(struct clk_hw *hw, p = rate >= params->vco_min ? 1 : -EINVAL; } - if (IS_ERR_VALUE(p)) + if (p < 0) return -EINVAL; cfg->m = tegra_pll_get_fixed_mdiv(hw, input_rate); diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 035513b012ee..9009295f5134 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -78,9 +78,14 @@ static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event); static unsigned int __cpufreq_get(struct cpufreq_policy *policy); static int cpufreq_start_governor(struct cpufreq_policy *policy); -static inline int cpufreq_exit_governor(struct cpufreq_policy *policy) +static inline void cpufreq_exit_governor(struct cpufreq_policy *policy) { - return cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); + (void)cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); +} + +static inline void cpufreq_stop_governor(struct cpufreq_policy *policy) +{ + (void)cpufreq_governor(policy, CPUFREQ_GOV_STOP); } /** @@ -1026,13 +1031,8 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp return 0; down_write(&policy->rwsem); - if (has_target()) { - ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP); - if (ret) { - pr_err("%s: Failed to stop governor\n", __func__); - goto unlock; - } - } + if (has_target()) + cpufreq_stop_governor(policy); cpumask_set_cpu(cpu, policy->cpus); @@ -1041,8 +1041,6 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp if (ret) pr_err("%s: Failed to start governor\n", __func__); } - -unlock: up_write(&policy->rwsem); return ret; } @@ -1354,11 +1352,8 @@ static void cpufreq_offline(unsigned int cpu) } down_write(&policy->rwsem); - if (has_target()) { - ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP); - if (ret) - pr_err("%s: Failed to stop governor\n", __func__); - } + if (has_target()) + cpufreq_stop_governor(policy); cpumask_clear_cpu(cpu, policy->cpus); @@ -1387,12 +1382,8 @@ static void cpufreq_offline(unsigned int cpu) if (cpufreq_driver->stop_cpu) cpufreq_driver->stop_cpu(policy); - /* If cpu is last user of policy, free policy */ - if (has_target()) { - ret = cpufreq_exit_governor(policy); - if (ret) - pr_err("%s: Failed to exit governor\n", __func__); - } + if (has_target()) + cpufreq_exit_governor(policy); /* * Perform the ->exit() even during light-weight tear-down, @@ -1626,7 +1617,6 @@ EXPORT_SYMBOL(cpufreq_generic_suspend); void cpufreq_suspend(void) { struct cpufreq_policy *policy; - int ret; if (!cpufreq_driver) return; @@ -1639,14 +1629,8 @@ void cpufreq_suspend(void) for_each_active_policy(policy) { if (has_target()) { down_write(&policy->rwsem); - ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP); + cpufreq_stop_governor(policy); up_write(&policy->rwsem); - - if (ret) { - pr_err("%s: Failed to stop governor for policy: %p\n", - __func__, policy); - continue; - } } if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy)) @@ -1848,7 +1832,7 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier); unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy, unsigned int target_freq) { - clamp_val(target_freq, policy->min, policy->max); + target_freq = 
clamp_val(target_freq, policy->min, policy->max); return cpufreq_driver->fast_switch(policy, target_freq); } @@ -2049,16 +2033,15 @@ static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event) ret = policy->governor->governor(policy, event); - if (!ret) { - if (event == CPUFREQ_GOV_POLICY_INIT) + if (event == CPUFREQ_GOV_POLICY_INIT) { + if (ret) + module_put(policy->governor->owner); + else policy->governor->initialized++; - else if (event == CPUFREQ_GOV_POLICY_EXIT) - policy->governor->initialized--; - } - - if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) || - ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret)) + } else if (event == CPUFREQ_GOV_POLICY_EXIT) { + policy->governor->initialized--; module_put(policy->governor->owner); + } return ret; } @@ -2221,20 +2204,8 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, old_gov = policy->governor; /* end old governor */ if (old_gov) { - ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP); - if (ret) { - /* This can happen due to race with other operations */ - pr_debug("%s: Failed to Stop Governor: %s (%d)\n", - __func__, old_gov->name, ret); - return ret; - } - - ret = cpufreq_exit_governor(policy); - if (ret) { - pr_err("%s: Failed to Exit Governor: %s (%d)\n", - __func__, old_gov->name, ret); - return ret; - } + cpufreq_stop_governor(policy); + cpufreq_exit_governor(policy); } /* start new governor */ @@ -2495,10 +2466,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) register_hotcpu_notifier(&cpufreq_cpu_notifier); pr_debug("driver %s up and running\n", driver_data->name); - -out: - put_online_cpus(); - return ret; + goto out; err_if_unreg: subsys_interface_unregister(&cpufreq_interface); @@ -2508,7 +2476,9 @@ err_null_driver: write_lock_irqsave(&cpufreq_driver_lock, flags); cpufreq_driver = NULL; write_unlock_irqrestore(&cpufreq_driver_lock, flags); - goto out; +out: + put_online_cpus(); + return ret; } EXPORT_SYMBOL_GPL(cpufreq_register_driver); diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index b76a98dd9988..0d159b513469 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -449,7 +449,7 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) cpu->acpi_perf_data.states[0].core_frequency = policy->cpuinfo.max_freq / 1000; cpu->valid_pss_table = true; - pr_info("_PPC limits will be enforced\n"); + pr_debug("_PPC limits will be enforced\n"); return; @@ -1461,12 +1461,11 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) intel_pstate_clear_update_util_hook(policy->cpu); cpu = all_cpu_data[0]; - if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate) { - if (policy->max < policy->cpuinfo.max_freq && - policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) { - pr_debug("policy->max > max non turbo frequency\n"); - policy->max = policy->cpuinfo.max_freq; - } + if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate && + policy->max < policy->cpuinfo.max_freq && + policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) { + pr_debug("policy->max > max non turbo frequency\n"); + policy->max = policy->cpuinfo.max_freq; } if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { diff --git a/drivers/cpufreq/mt8173-cpufreq.c b/drivers/cpufreq/mt8173-cpufreq.c index 6f602c7a71bd..643f43179df1 100644 --- a/drivers/cpufreq/mt8173-cpufreq.c +++ b/drivers/cpufreq/mt8173-cpufreq.c @@ -307,17 +307,24 @@ static int mtk_cpufreq_set_target(struct cpufreq_policy *policy, return 0; } +#define 
DYNAMIC_POWER "dynamic-power-coefficient" + static void mtk_cpufreq_ready(struct cpufreq_policy *policy) { struct mtk_cpu_dvfs_info *info = policy->driver_data; struct device_node *np = of_node_get(info->cpu_dev->of_node); + u32 capacitance = 0; if (WARN_ON(!np)) return; if (of_find_property(np, "#cooling-cells", NULL)) { - info->cdev = of_cpufreq_cooling_register(np, - policy->related_cpus); + of_property_read_u32(np, DYNAMIC_POWER, &capacitance); + + info->cdev = of_cpufreq_power_cooling_register(np, + policy->related_cpus, + capacitance, + NULL); if (IS_ERR(info->cdev)) { dev_err(info->cpu_dev, diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c index cead9bec4843..376e63ca94e8 100644 --- a/drivers/cpufreq/omap-cpufreq.c +++ b/drivers/cpufreq/omap-cpufreq.c @@ -54,7 +54,7 @@ static int omap_target(struct cpufreq_policy *policy, unsigned int index) freq = new_freq * 1000; ret = clk_round_rate(policy->clk, freq); - if (IS_ERR_VALUE(ret)) { + if (ret < 0) { dev_warn(mpu_dev, "CPUfreq: Cannot find matching frequency for %lu\n", freq); diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 2b8e6ce62e81..a4d0059e232c 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -214,7 +214,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, tick_broadcast_exit(); } - if (!cpuidle_state_is_coupled(drv, entered_state)) + if (!cpuidle_state_is_coupled(drv, index)) local_irq_enable(); /* diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 44d30b45f3cc..5ad5f3009ae0 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -402,7 +402,7 @@ int caam_get_era(void) ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop); of_node_put(caam_node); - return IS_ERR_VALUE(ret) ? -ENOTSUPP : prop; + return ret ? -ENOTSUPP : prop; } EXPORT_SYMBOL(caam_get_era); diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c index 52c7395cb8d8..0d0d4529ee36 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c @@ -122,6 +122,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req, struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm); struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); unsigned int unit; + u32 unit_size; int ret; if (!ctx->u.aes.key_len) @@ -133,11 +134,17 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req, if (!req->info) return -EINVAL; - for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) - if (!(req->nbytes & (unit_size_map[unit].size - 1))) - break; + unit_size = CCP_XTS_AES_UNIT_SIZE__LAST; + if (req->nbytes <= unit_size_map[0].size) { + for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) { + if (!(req->nbytes & (unit_size_map[unit].size - 1))) { + unit_size = unit_size_map[unit].value; + break; + } + } + } - if ((unit_size_map[unit].value == CCP_XTS_AES_UNIT_SIZE__LAST) || + if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) || (ctx->u.aes.key_len != AES_KEYSIZE_128)) { /* Use the fallback to process the request for any * unsupported unit sizes or key sizes @@ -158,7 +165,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req, rctx->cmd.engine = CCP_ENGINE_XTS_AES_128; rctx->cmd.u.xts.action = (encrypt) ? 
CCP_AES_ACTION_ENCRYPT : CCP_AES_ACTION_DECRYPT; - rctx->cmd.u.xts.unit_size = unit_size_map[unit].value; + rctx->cmd.u.xts.unit_size = unit_size; rctx->cmd.u.xts.key = &ctx->u.aes.key_sg; rctx->cmd.u.xts.key_len = ctx->u.aes.key_len; rctx->cmd.u.xts.iv = &rctx->iv_sg; diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 6eefaa2fe58f..63464e86f2b1 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -1986,7 +1986,7 @@ err_algs: &dd->pdata->algs_info[i].algs_list[j]); err_pm: pm_runtime_disable(dev); - if (dd->polling_mode) + if (!dd->polling_mode) dma_release_channel(dd->dma_lch); data_err: dev_err(dev, "initialization failed.\n"); diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 4a2c07ee6677..6355ab38d630 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -33,6 +33,7 @@ #include <linux/seq_file.h> #include <linux/poll.h> #include <linux/reservation.h> +#include <linux/mm.h> #include <uapi/linux/dma-buf.h> @@ -90,7 +91,7 @@ static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma) dmabuf = file->private_data; /* check for overflowing the buffer's size */ - if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) > + if (vma->vm_pgoff + vma_pages(vma) > dmabuf->size >> PAGE_SHIFT) return -EINVAL; @@ -723,11 +724,11 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma, return -EINVAL; /* check for offset overflow */ - if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff) + if (pgoff + vma_pages(vma) < pgoff) return -EOVERFLOW; /* check for overflowing the buffer's size */ - if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) > + if (pgoff + vma_pages(vma) > dmabuf->size >> PAGE_SHIFT) return -EINVAL; diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c index c0bd5722c997..9566a62ad8e3 100644 --- a/drivers/dma-buf/reservation.c +++ b/drivers/dma-buf/reservation.c @@ -35,6 +35,17 @@ #include <linux/reservation.h> #include <linux/export.h> +/** + * DOC: Reservation Object Overview + * + * The reservation object provides a mechanism to manage shared and + * exclusive fences associated with a buffer. A reservation object + * can have attached one exclusive fence (normally associated with + * write operations) or N shared fences (read operations). The RCU + * mechanism is used to protect read access to fences from locked + * write-side updates. + */ + DEFINE_WW_CLASS(reservation_ww_class); EXPORT_SYMBOL(reservation_ww_class); @@ -43,9 +54,17 @@ EXPORT_SYMBOL(reservation_seqcount_class); const char reservation_seqcount_string[] = "reservation_seqcount"; EXPORT_SYMBOL(reservation_seqcount_string); -/* - * Reserve space to add a shared fence to a reservation_object, - * must be called with obj->lock held. + +/** + * reservation_object_reserve_shared - Reserve space to add a shared + * fence to a reservation_object. + * @obj: reservation object + * + * Should be called before reservation_object_add_shared_fence(). Must + * be called with obj->lock held. + * + * RETURNS + * Zero for success, or -errno */ int reservation_object_reserve_shared(struct reservation_object *obj) { @@ -180,7 +199,11 @@ done: fence_put(old_fence); } -/* +/** + * reservation_object_add_shared_fence - Add a fence to a shared slot + * @obj: the reservation object + * @fence: the shared fence to add + * * Add a fence to a shared slot, obj->lock must be held, and * reservation_object_reserve_shared_fence has been called. 
*/ @@ -200,6 +223,13 @@ void reservation_object_add_shared_fence(struct reservation_object *obj, } EXPORT_SYMBOL(reservation_object_add_shared_fence); +/** + * reservation_object_add_excl_fence - Add an exclusive fence. + * @obj: the reservation object + * @fence: the shared fence to add + * + * Add a fence to the exclusive slot. The obj->lock must be held. + */ void reservation_object_add_excl_fence(struct reservation_object *obj, struct fence *fence) { @@ -233,6 +263,18 @@ void reservation_object_add_excl_fence(struct reservation_object *obj, } EXPORT_SYMBOL(reservation_object_add_excl_fence); +/** + * reservation_object_get_fences_rcu - Get an object's shared and exclusive + * fences without update side lock held + * @obj: the reservation object + * @pfence_excl: the returned exclusive fence (or NULL) + * @pshared_count: the number of shared fences returned + * @pshared: the array of shared fence ptrs returned (array is krealloc'd to + * the required size, and must be freed by caller) + * + * RETURNS + * Zero or -errno + */ int reservation_object_get_fences_rcu(struct reservation_object *obj, struct fence **pfence_excl, unsigned *pshared_count, @@ -319,6 +361,18 @@ unlock: } EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu); +/** + * reservation_object_wait_timeout_rcu - Wait on reservation's objects + * shared and/or exclusive fences. + * @obj: the reservation object + * @wait_all: if true, wait on all fences, else wait on just exclusive fence + * @intr: if true, do interruptible wait + * @timeout: timeout value in jiffies or zero to return immediately + * + * RETURNS + * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or + * greater than zer on success. + */ long reservation_object_wait_timeout_rcu(struct reservation_object *obj, bool wait_all, bool intr, unsigned long timeout) @@ -416,6 +470,16 @@ reservation_object_test_signaled_single(struct fence *passed_fence) return ret; } +/** + * reservation_object_test_signaled_rcu - Test if a reservation object's + * fences have been signaled. 
+ * @obj: the reservation object + * @test_all: if true, test all fences, otherwise only test the exclusive + * fence + * + * RETURNS + * true if all fences signaled, else false + */ bool reservation_object_test_signaled_rcu(struct reservation_object *obj, bool test_all) { diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c index e0df233dde92..57aa227bfadb 100644 --- a/drivers/dma/sun4i-dma.c +++ b/drivers/dma/sun4i-dma.c @@ -461,25 +461,25 @@ generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest, /* Source burst */ ret = convert_burst(sconfig->src_maxburst); - if (IS_ERR_VALUE(ret)) + if (ret < 0) goto fail; promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret); /* Destination burst */ ret = convert_burst(sconfig->dst_maxburst); - if (IS_ERR_VALUE(ret)) + if (ret < 0) goto fail; promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret); /* Source bus width */ ret = convert_buswidth(sconfig->src_addr_width); - if (IS_ERR_VALUE(ret)) + if (ret < 0) goto fail; promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret); /* Destination bus width */ ret = convert_buswidth(sconfig->dst_addr_width); - if (IS_ERR_VALUE(ret)) + if (ret < 0) goto fail; promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret); @@ -518,25 +518,25 @@ generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest, /* Source burst */ ret = convert_burst(sconfig->src_maxburst); - if (IS_ERR_VALUE(ret)) + if (ret < 0) goto fail; promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret); /* Destination burst */ ret = convert_burst(sconfig->dst_maxburst); - if (IS_ERR_VALUE(ret)) + if (ret < 0) goto fail; promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret); /* Source bus width */ ret = convert_buswidth(sconfig->src_addr_width); - if (IS_ERR_VALUE(ret)) + if (ret < 0) goto fail; promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret); /* Destination bus width */ ret = convert_buswidth(sconfig->dst_addr_width); - if (IS_ERR_VALUE(ret)) + if (ret < 0) goto fail; promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret); diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c index d39014daeef9..fc5f197906ac 100644 --- a/drivers/gpio/gpio-lpc32xx.c +++ b/drivers/gpio/gpio-lpc32xx.c @@ -29,7 +29,6 @@ #include <mach/hardware.h> #include <mach/platform.h> -#include <mach/irqs.h> #define LPC32XX_GPIO_P3_INP_STATE _GPREG(0x000) #define LPC32XX_GPIO_P3_OUTP_SET _GPREG(0x004) @@ -371,61 +370,16 @@ static int lpc32xx_gpio_request(struct gpio_chip *chip, unsigned pin) static int lpc32xx_gpio_to_irq_p01(struct gpio_chip *chip, unsigned offset) { - return IRQ_LPC32XX_P0_P1_IRQ; + return -ENXIO; } -static const char lpc32xx_gpio_to_irq_gpio_p3_table[] = { - IRQ_LPC32XX_GPIO_00, - IRQ_LPC32XX_GPIO_01, - IRQ_LPC32XX_GPIO_02, - IRQ_LPC32XX_GPIO_03, - IRQ_LPC32XX_GPIO_04, - IRQ_LPC32XX_GPIO_05, -}; - static int lpc32xx_gpio_to_irq_gpio_p3(struct gpio_chip *chip, unsigned offset) { - if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpio_p3_table)) - return lpc32xx_gpio_to_irq_gpio_p3_table[offset]; return -ENXIO; } -static const char lpc32xx_gpio_to_irq_gpi_p3_table[] = { - IRQ_LPC32XX_GPI_00, - IRQ_LPC32XX_GPI_01, - IRQ_LPC32XX_GPI_02, - IRQ_LPC32XX_GPI_03, - IRQ_LPC32XX_GPI_04, - IRQ_LPC32XX_GPI_05, - IRQ_LPC32XX_GPI_06, - IRQ_LPC32XX_GPI_07, - IRQ_LPC32XX_GPI_08, - IRQ_LPC32XX_GPI_09, - -ENXIO, /* 10 */ - -ENXIO, /* 11 */ - -ENXIO, /* 12 */ - -ENXIO, /* 13 */ - -ENXIO, /* 14 */ - -ENXIO, /* 15 */ - -ENXIO, /* 16 */ - -ENXIO, /* 17 */ - -ENXIO, /* 18 */ - IRQ_LPC32XX_GPI_19, - -ENXIO, /* 20 */ - -ENXIO, /* 21 */ - -ENXIO, /* 22 
*/ - -ENXIO, /* 23 */ - -ENXIO, /* 24 */ - -ENXIO, /* 25 */ - -ENXIO, /* 26 */ - -ENXIO, /* 27 */ - IRQ_LPC32XX_GPI_28, -}; - static int lpc32xx_gpio_to_irq_gpi_p3(struct gpio_chip *chip, unsigned offset) { - if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpi_p3_table)) - return lpc32xx_gpio_to_irq_gpi_p3_table[offset]; return -ENXIO; } diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c index 08897dc11915..1a33a19d95b9 100644 --- a/drivers/gpio/gpio-xlp.c +++ b/drivers/gpio/gpio-xlp.c @@ -393,7 +393,7 @@ static int xlp_gpio_probe(struct platform_device *pdev) irq_base = irq_alloc_descs(-1, 0, gc->ngpio, 0); else irq_base = irq_alloc_descs(-1, XLP_GPIO_IRQ_BASE, gc->ngpio, 0); - if (IS_ERR_VALUE(irq_base)) { + if (irq_base < 0) { dev_err(&pdev->dev, "Failed to allocate IRQ numbers\n"); return irq_base; } diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index d407f904a31c..24f60d28f0c0 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -20,6 +20,7 @@ #include <linux/cdev.h> #include <linux/fs.h> #include <linux/uaccess.h> +#include <linux/compat.h> #include <uapi/linux/gpio.h> #include "gpiolib.h" @@ -316,7 +317,7 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct gpio_device *gdev = filp->private_data; struct gpio_chip *chip = gdev->chip; - int __user *ip = (int __user *)arg; + void __user *ip = (void __user *)arg; /* We fail any subsequent ioctl():s when the chip is gone */ if (!chip) @@ -388,6 +389,14 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return -EINVAL; } +#ifdef CONFIG_COMPAT +static long gpio_ioctl_compat(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + return gpio_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); +} +#endif + /** * gpio_chrdev_open() - open the chardev for ioctl operations * @inode: inode for this chardev @@ -431,7 +440,9 @@ static const struct file_operations gpio_fileops = { .owner = THIS_MODULE, .llseek = noop_llseek, .unlocked_ioctl = gpio_ioctl, - .compat_ioctl = gpio_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = gpio_ioctl_compat, +#endif }; static void gpiodevice_release(struct device *dev) @@ -618,6 +629,8 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data) goto err_free_label; } + spin_unlock_irqrestore(&gpio_lock, flags); + for (i = 0; i < chip->ngpio; i++) { struct gpio_desc *desc = &gdev->descs[i]; @@ -649,8 +662,6 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data) } } - spin_unlock_irqrestore(&gpio_lock, flags); - #ifdef CONFIG_PINCTRL INIT_LIST_HEAD(&gdev->pin_ranges); #endif @@ -1356,10 +1367,13 @@ done: /* * This descriptor validation needs to be inserted verbatim into each * function taking a descriptor, so we need to use a preprocessor - * macro to avoid endless duplication. + * macro to avoid endless duplication. If the desc is NULL it is an + * optional GPIO and calls should just bail out. 
*/ #define VALIDATE_DESC(desc) do { \ - if (!desc || !desc->gdev) { \ + if (!desc) \ + return 0; \ + if (!desc->gdev) { \ pr_warn("%s: invalid GPIO\n", __func__); \ return -EINVAL; \ } \ @@ -1370,7 +1384,9 @@ done: } } while (0) #define VALIDATE_DESC_VOID(desc) do { \ - if (!desc || !desc->gdev) { \ + if (!desc) \ + return; \ + if (!desc->gdev) { \ pr_warn("%s: invalid GPIO\n", __func__); \ return; \ } \ @@ -2066,17 +2082,30 @@ EXPORT_SYMBOL_GPL(gpiod_to_irq); */ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset) { - if (offset >= chip->ngpio) - return -EINVAL; + struct gpio_desc *desc; + + desc = gpiochip_get_desc(chip, offset); + if (IS_ERR(desc)) + return PTR_ERR(desc); + + /* Flush direction if something changed behind our back */ + if (chip->get_direction) { + int dir = chip->get_direction(chip, offset); + + if (dir) + clear_bit(FLAG_IS_OUT, &desc->flags); + else + set_bit(FLAG_IS_OUT, &desc->flags); + } - if (test_bit(FLAG_IS_OUT, &chip->gpiodev->descs[offset].flags)) { + if (test_bit(FLAG_IS_OUT, &desc->flags)) { chip_err(chip, "%s: tried to flag a GPIO set as output for IRQ\n", __func__); return -EIO; } - set_bit(FLAG_USED_AS_IRQ, &chip->gpiodev->descs[offset].flags); + set_bit(FLAG_USED_AS_IRQ, &desc->flags); return 0; } EXPORT_SYMBOL_GPL(gpiochip_lock_as_irq); diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 2bd3e5aa43c6..be43afb08c69 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -23,7 +23,7 @@ drm-$(CONFIG_AGP) += drm_agpsupport.o drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \ drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \ - drm_kms_helper_common.o + drm_kms_helper_common.o drm_dp_dual_mode_helper.o drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig index ca77ec10147c..e503e3d6d920 100644 --- a/drivers/gpu/drm/amd/acp/Kconfig +++ b/drivers/gpu/drm/amd/acp/Kconfig @@ -2,6 +2,7 @@ menu "ACP (Audio CoProcessor) Configuration" config DRM_AMD_ACP bool "Enable AMD Audio CoProcessor IP support" + depends on DRM_AMDGPU select MFD_CORE select PM_GENERIC_DOMAINS if PM help diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 2a009c398dcb..01c36b8d6222 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -602,6 +602,8 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync); void amdgpu_sync_free(struct amdgpu_sync *sync); int amdgpu_sync_init(void); void amdgpu_sync_fini(void); +int amdgpu_fence_slab_init(void); +void amdgpu_fence_slab_fini(void); /* * GART structures, functions & helpers @@ -797,6 +799,7 @@ struct amdgpu_ring { unsigned cond_exe_offs; u64 cond_exe_gpu_addr; volatile u32 *cond_exe_cpu_addr; + int vmid; }; /* @@ -934,7 +937,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, unsigned vm_id, uint64_t pd_addr, uint32_t gds_base, uint32_t gds_size, uint32_t gws_base, uint32_t gws_size, - uint32_t oa_base, uint32_t oa_size); + uint32_t oa_base, uint32_t oa_size, + bool vmid_switch); void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id); uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr); int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 199f76baf22c..8943099eb135 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -696,6 +696,17 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type) return result; } +static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type) +{ + CGS_FUNC_ADEV; + if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) { + release_firmware(adev->pm.fw); + return 0; + } + /* cannot release other firmware because they are not created by cgs */ + return -EINVAL; +} + static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, enum cgs_ucode_id type, struct cgs_firmware_info *info) @@ -1125,6 +1136,7 @@ static const struct cgs_ops amdgpu_cgs_ops = { amdgpu_cgs_pm_query_clock_limits, amdgpu_cgs_set_camera_voltages, amdgpu_cgs_get_firmware_info, + amdgpu_cgs_rel_firmware, amdgpu_cgs_set_powergating_state, amdgpu_cgs_set_clockgating_state, amdgpu_cgs_get_active_displays_info, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 60a0c9ac11b2..cb07da41152b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -194,12 +194,12 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector) bpc = 8; DRM_DEBUG("%s: HDMI deep color 10 bpc exceeds max tmds clock. Using %d bpc.\n", connector->name, bpc); - } else if (bpc > 8) { - /* max_tmds_clock missing, but hdmi spec mandates it for deep color. */ - DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n", - connector->name); - bpc = 8; } + } else if (bpc > 8) { + /* max_tmds_clock missing, but hdmi spec mandates it for deep color. */ + DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n", + connector->name); + bpc = 8; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index bb8b149786d7..964f31404f17 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -827,8 +827,10 @@ static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg) */ static void amdgpu_atombios_fini(struct amdgpu_device *adev) { - if (adev->mode_info.atom_context) + if (adev->mode_info.atom_context) { kfree(adev->mode_info.atom_context->scratch); + kfree(adev->mode_info.atom_context->iio); + } kfree(adev->mode_info.atom_context); adev->mode_info.atom_context = NULL; kfree(adev->mode_info.atom_card_info); @@ -1325,6 +1327,11 @@ static int amdgpu_fini(struct amdgpu_device *adev) adev->ip_block_status[i].valid = false; } + for (i = adev->num_ip_blocks - 1; i >= 0; i--) { + if (adev->ip_blocks[i].funcs->late_fini) + adev->ip_blocks[i].funcs->late_fini((void *)adev); + } + return 0; } @@ -1513,8 +1520,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, amdgpu_atombios_has_gpu_virtualization_table(adev); /* Post card if necessary */ - if (!amdgpu_card_posted(adev) || - adev->virtualization.supports_sr_iov) { + if (!amdgpu_card_posted(adev)) { if (!adev->bios) { dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n"); return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 1dab5f2b725b..f888c015f76c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -50,9 +50,11 @@ * KMS wrapper. 
* - 3.0.0 - initial driver * - 3.1.0 - allow reading more status registers (GRBM, SRBM, SDMA, CP) + * - 3.2.0 - GFX8: Uses EOP_TC_WB_ACTION_EN, so UMDs don't have to do the same + * at the end of IBs. */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 1 +#define KMS_DRIVER_MINOR 2 #define KMS_DRIVER_PATCHLEVEL 0 int amdgpu_vram_limit = 0; @@ -279,14 +281,26 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x98E4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_STONEY|AMD_IS_APU}, /* Polaris11 */ {0x1002, 0x67E0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, - {0x1002, 0x67E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, + {0x1002, 0x67E3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, {0x1002, 0x67E8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, - {0x1002, 0x67E9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, {0x1002, 0x67EB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, + {0x1002, 0x67EF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, {0x1002, 0x67FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, + {0x1002, 0x67E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, + {0x1002, 0x67E7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, + {0x1002, 0x67E9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, /* Polaris10 */ {0x1002, 0x67C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, + {0x1002, 0x67C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, + {0x1002, 0x67C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, + {0x1002, 0x67C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, + {0x1002, 0x67C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, {0x1002, 0x67DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, + {0x1002, 0x67C8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, + {0x1002, 0x67C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, + {0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, + {0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, + {0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, {0, 0, 0} }; @@ -563,9 +577,12 @@ static struct pci_driver amdgpu_kms_pci_driver = { .driver.pm = &amdgpu_pm_ops, }; + + static int __init amdgpu_init(void) { amdgpu_sync_init(); + amdgpu_fence_slab_init(); if (vgacon_text_force()) { DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n"); return -EINVAL; @@ -576,7 +593,6 @@ static int __init amdgpu_init(void) driver->driver_features |= DRIVER_MODESET; driver->num_ioctls = amdgpu_max_kms_ioctl; amdgpu_register_atpx_handler(); - /* let modprobe override vga console setting */ return drm_pci_init(driver, pdriver); } @@ -587,6 +603,7 @@ static void __exit amdgpu_exit(void) drm_pci_exit(driver, pdriver); amdgpu_unregister_atpx_handler(); amdgpu_sync_fini(); + amdgpu_fence_slab_fini(); } module_init(amdgpu_init); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index ba9c04283d01..d1558768cfb7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -55,8 +55,21 @@ struct amdgpu_fence { }; static struct kmem_cache *amdgpu_fence_slab; -static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0); +int amdgpu_fence_slab_init(void) +{ + amdgpu_fence_slab = kmem_cache_create( + "amdgpu_fence", sizeof(struct amdgpu_fence), 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!amdgpu_fence_slab) + return -ENOMEM; + return 0; +} + +void amdgpu_fence_slab_fini(void) +{ + kmem_cache_destroy(amdgpu_fence_slab); +} /* * Cast helper */ @@ -396,13 +409,6 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, */ int amdgpu_fence_driver_init(struct 
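/*
 * Aside: the fence hunks in this file retire the atomic refcount that
 * guarded kmem_cache_create()/destroy() per device and tie the slab to
 * module load/unload instead (amdgpu_init/amdgpu_exit call the new
 * helpers exactly once). Sketch of that lifetime pattern with malloc
 * standing in for the slab allocator:
 */
#include <stdlib.h>

static struct fence_cache { int unused; } *fence_slab;

int fence_slab_init(void)            /* once, at module load */
{
        fence_slab = malloc(sizeof(*fence_slab));
        return fence_slab ? 0 : -1;  /* -1 stands in for -ENOMEM */
}

void fence_slab_fini(void)           /* once, at module unload */
{
        free(fence_slab);
        fence_slab = NULL;
}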
amdgpu_device *adev) { - if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) { - amdgpu_fence_slab = kmem_cache_create( - "amdgpu_fence", sizeof(struct amdgpu_fence), 0, - SLAB_HWCACHE_ALIGN, NULL); - if (!amdgpu_fence_slab) - return -ENOMEM; - } if (amdgpu_debugfs_fence_init(adev)) dev_err(adev->dev, "fence debugfs file creation failed\n"); @@ -437,13 +443,10 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev) amd_sched_fini(&ring->sched); del_timer_sync(&ring->fence_drv.fallback_timer); for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j) - fence_put(ring->fence_drv.fences[i]); + fence_put(ring->fence_drv.fences[j]); kfree(ring->fence_drv.fences); ring->fence_drv.initialized = false; } - - if (atomic_dec_and_test(&amdgpu_fence_slab_ref)) - kmem_cache_destroy(amdgpu_fence_slab); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 34e35423b78e..7a0b1e50f293 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -122,6 +122,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, bool skip_preamble, need_ctx_switch; unsigned patch_offset = ~0; struct amdgpu_vm *vm; + int vmid = 0, old_vmid = ring->vmid; struct fence *hwf; uint64_t ctx; @@ -135,9 +136,11 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, if (job) { vm = job->vm; ctx = job->ctx; + vmid = job->vm_id; } else { vm = NULL; ctx = 0; + vmid = 0; } if (!ring->ready) { @@ -163,7 +166,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr, job->gds_base, job->gds_size, job->gws_base, job->gws_size, - job->oa_base, job->oa_size); + job->oa_base, job->oa_size, + (ring->current_ctx == ctx) && (old_vmid != vmid)); if (r) { amdgpu_ring_undo(ring); return r; @@ -180,7 +184,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, need_ctx_switch = ring->current_ctx != ctx; for (i = 0; i < num_ibs; ++i) { ib = &ibs[i]; - /* drop preamble IBs if we don't have a context switch */ if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble) continue; @@ -188,6 +191,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, amdgpu_ring_emit_ib(ring, ib, job ? 
job->vm_id : 0, need_ctx_switch); need_ctx_switch = false; + ring->vmid = vmid; } if (ring->funcs->emit_hdp_invalidate) @@ -198,6 +202,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, dev_err(adev->dev, "failed to emit fence (%d)\n", r); if (job && job->vm_id) amdgpu_vm_reset_id(adev, job->vm_id); + ring->vmid = old_vmid; amdgpu_ring_undo(ring); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c index 6bd961fb43dc..82256558e0f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c @@ -183,13 +183,6 @@ static int amdgpu_pp_sw_fini(void *handle) if (ret) return ret; -#ifdef CONFIG_DRM_AMD_POWERPLAY - if (adev->pp_enabled) { - amdgpu_pm_sysfs_fini(adev); - amd_powerplay_fini(adev->powerplay.pp_handle); - } -#endif - return ret; } @@ -223,6 +216,22 @@ static int amdgpu_pp_hw_fini(void *handle) return ret; } +static void amdgpu_pp_late_fini(void *handle) +{ +#ifdef CONFIG_DRM_AMD_POWERPLAY + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->pp_enabled) { + amdgpu_pm_sysfs_fini(adev); + amd_powerplay_fini(adev->powerplay.pp_handle); + } + + if (adev->powerplay.ip_funcs->late_fini) + adev->powerplay.ip_funcs->late_fini( + adev->powerplay.pp_handle); +#endif +} + static int amdgpu_pp_suspend(void *handle) { int ret = 0; @@ -311,6 +320,7 @@ const struct amd_ip_funcs amdgpu_pp_ip_funcs = { .sw_fini = amdgpu_pp_sw_fini, .hw_init = amdgpu_pp_hw_init, .hw_fini = amdgpu_pp_hw_fini, + .late_fini = amdgpu_pp_late_fini, .suspend = amdgpu_pp_suspend, .resume = amdgpu_pp_resume, .is_idle = amdgpu_pp_is_idle, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 3b02272db678..870f9494252c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -343,6 +343,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) ring->ring = NULL; ring->ring_obj = NULL; + amdgpu_wb_free(ring->adev, ring->cond_exe_offs); amdgpu_wb_free(ring->adev, ring->fence_offs); amdgpu_wb_free(ring->adev, ring->rptr_offs); amdgpu_wb_free(ring->adev, ring->wptr_offs); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index 8bf84efafb04..48618ee324eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c @@ -115,6 +115,7 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev, return r; } r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr); + memset(sa_manager->cpu_ptr, 0, sa_manager->size); amdgpu_bo_unreserve(sa_manager->bo); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 01abfc21b4a2..e19520c4b4b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -253,19 +253,20 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) { int r; - if (adev->uvd.vcpu_bo == NULL) - return 0; + kfree(adev->uvd.saved_bo); amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity); - r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false); - if (!r) { - amdgpu_bo_kunmap(adev->uvd.vcpu_bo); - amdgpu_bo_unpin(adev->uvd.vcpu_bo); - amdgpu_bo_unreserve(adev->uvd.vcpu_bo); - } + if (adev->uvd.vcpu_bo) { + r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false); + if (!r) { + amdgpu_bo_kunmap(adev->uvd.vcpu_bo); + amdgpu_bo_unpin(adev->uvd.vcpu_bo); + amdgpu_bo_unreserve(adev->uvd.vcpu_bo); + } - 
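/*
 * Aside: the uvd_sw_fini rework in progress here drops the early return
 * on a missing vcpu_bo, so the saved firmware copy and the ring get
 * freed no matter how far init got. Sketch of that defensive teardown
 * (names illustrative; free() stands in for kfree/BO teardown):
 */
#include <stdlib.h>

struct uvd_state { void *saved_fw; void *vcpu_bo; };

static void uvd_teardown(struct uvd_state *uvd)
{
        free(uvd->saved_fw);             /* free(NULL) is a no-op */
        uvd->saved_fw = NULL;

        if (uvd->vcpu_bo) {              /* BO steps only when it exists */
                /* reserve/kunmap/unpin happen here in the driver */
                free(uvd->vcpu_bo);
                uvd->vcpu_bo = NULL;
        }
        /* ring teardown would follow unconditionally */
}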
amdgpu_bo_unref(&adev->uvd.vcpu_bo); + amdgpu_bo_unref(&adev->uvd.vcpu_bo); + } amdgpu_ring_fini(&adev->uvd.ring); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index ea708cb94862..62a4c127620f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -53,6 +53,18 @@ /* Special value that no flush is necessary */ #define AMDGPU_VM_NO_FLUSH (~0ll) +/* Local structure. Encapsulate some VM table update parameters to reduce + * the number of function parameters + */ +struct amdgpu_vm_update_params { + /* address where to copy page table entries from */ + uint64_t src; + /* DMA addresses to use for mapping */ + dma_addr_t *pages_addr; + /* indirect buffer to fill with commands */ + struct amdgpu_ib *ib; +}; + /** * amdgpu_vm_num_pde - return the number of page directory entries * @@ -286,7 +298,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, unsigned vm_id, uint64_t pd_addr, uint32_t gds_base, uint32_t gds_size, uint32_t gws_base, uint32_t gws_size, - uint32_t oa_base, uint32_t oa_size) + uint32_t oa_base, uint32_t oa_size, + bool vmid_switch) { struct amdgpu_device *adev = ring->adev; struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id]; @@ -300,8 +313,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, int r; if (ring->funcs->emit_pipeline_sync && ( - pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed || - ring->type == AMDGPU_RING_TYPE_COMPUTE)) + pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed || vmid_switch)) amdgpu_ring_emit_pipeline_sync(ring); if (ring->funcs->emit_vm_flush && @@ -389,9 +401,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, * amdgpu_vm_update_pages - helper to call the right asic function * * @adev: amdgpu_device pointer - * @src: address where to copy page table entries from - * @pages_addr: DMA addresses to use for mapping - * @ib: indirect buffer to fill with commands + * @vm_update_params: see amdgpu_vm_update_params definition * @pe: addr of the page entry * @addr: dst addr to write into pe * @count: number of page entries to update @@ -402,29 +412,29 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, * to setup the page table using the DMA. 
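/*
 * Aside: struct amdgpu_vm_update_params, introduced above, bundles the
 * three arguments (src, pages_addr, ib) that every page-table update
 * helper used to thread through separately. A compact sketch of the
 * resulting dispatch; the names and printouts are illustrative, but the
 * branch structure mirrors the hunk:
 */
#include <stdint.h>
#include <stdio.h>

struct update_params {
        uint64_t  src;          /* copy PTEs from here when nonzero */
        uint64_t *pages_addr;   /* scattered system pages when set */
        /* the real struct also carries the command buffer (ib) */
};

static void update_pages(const struct update_params *p,
                         uint64_t pe, uint64_t addr, unsigned count)
{
        if (p->src)                     /* copy path */
                printf("copy %u PTEs from %#llx\n", count,
                       (unsigned long long)(p->src + (addr >> 12) * 8));
        else if (p->pages_addr)         /* scattered write path */
                printf("write %u PTEs via DMA address table\n", count);
        else                            /* contiguous set path */
                printf("set %u PTEs at %#llx\n", count,
                       (unsigned long long)pe);
}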
*/ static void amdgpu_vm_update_pages(struct amdgpu_device *adev, - uint64_t src, - dma_addr_t *pages_addr, - struct amdgpu_ib *ib, + struct amdgpu_vm_update_params + *vm_update_params, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags) { trace_amdgpu_vm_set_page(pe, addr, count, incr, flags); - if (src) { - src += (addr >> 12) * 8; - amdgpu_vm_copy_pte(adev, ib, pe, src, count); + if (vm_update_params->src) { + amdgpu_vm_copy_pte(adev, vm_update_params->ib, + pe, (vm_update_params->src + (addr >> 12) * 8), count); - } else if (pages_addr) { - amdgpu_vm_write_pte(adev, ib, pages_addr, pe, addr, - count, incr, flags); + } else if (vm_update_params->pages_addr) { + amdgpu_vm_write_pte(adev, vm_update_params->ib, + vm_update_params->pages_addr, + pe, addr, count, incr, flags); } else if (count < 3) { - amdgpu_vm_write_pte(adev, ib, NULL, pe, addr, + amdgpu_vm_write_pte(adev, vm_update_params->ib, NULL, pe, addr, count, incr, flags); } else { - amdgpu_vm_set_pte_pde(adev, ib, pe, addr, + amdgpu_vm_set_pte_pde(adev, vm_update_params->ib, pe, addr, count, incr, flags); } } @@ -444,10 +454,12 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, struct amdgpu_ring *ring; struct fence *fence = NULL; struct amdgpu_job *job; + struct amdgpu_vm_update_params vm_update_params; unsigned entries; uint64_t addr; int r; + memset(&vm_update_params, 0, sizeof(vm_update_params)); ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); r = reservation_object_reserve_shared(bo->tbo.resv); @@ -465,7 +477,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, if (r) goto error; - amdgpu_vm_update_pages(adev, 0, NULL, &job->ibs[0], addr, 0, entries, + vm_update_params.ib = &job->ibs[0]; + amdgpu_vm_update_pages(adev, &vm_update_params, addr, 0, entries, 0, 0); amdgpu_ring_pad_ib(ring, &job->ibs[0]); @@ -538,11 +551,12 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, uint64_t last_pde = ~0, last_pt = ~0; unsigned count = 0, pt_idx, ndw; struct amdgpu_job *job; - struct amdgpu_ib *ib; + struct amdgpu_vm_update_params vm_update_params; struct fence *fence = NULL; int r; + memset(&vm_update_params, 0, sizeof(vm_update_params)); ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); /* padding, etc. 
*/ @@ -555,7 +569,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, if (r) return r; - ib = &job->ibs[0]; + vm_update_params.ib = &job->ibs[0]; /* walk over the address space and update the page directory */ for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { @@ -575,7 +589,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, ((last_pt + incr * count) != pt)) { if (count) { - amdgpu_vm_update_pages(adev, 0, NULL, ib, + amdgpu_vm_update_pages(adev, &vm_update_params, last_pde, last_pt, count, incr, AMDGPU_PTE_VALID); @@ -590,14 +604,15 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, } if (count) - amdgpu_vm_update_pages(adev, 0, NULL, ib, last_pde, last_pt, - count, incr, AMDGPU_PTE_VALID); + amdgpu_vm_update_pages(adev, &vm_update_params, + last_pde, last_pt, + count, incr, AMDGPU_PTE_VALID); - if (ib->length_dw != 0) { - amdgpu_ring_pad_ib(ring, ib); + if (vm_update_params.ib->length_dw != 0) { + amdgpu_ring_pad_ib(ring, vm_update_params.ib); amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM); - WARN_ON(ib->length_dw > ndw); + WARN_ON(vm_update_params.ib->length_dw > ndw); r = amdgpu_job_submit(job, ring, &vm->entity, AMDGPU_FENCE_OWNER_VM, &fence); if (r) @@ -623,18 +638,15 @@ error_free: * amdgpu_vm_frag_ptes - add fragment information to PTEs * * @adev: amdgpu_device pointer - * @src: address where to copy page table entries from - * @pages_addr: DMA addresses to use for mapping - * @ib: IB for the update + * @vm_update_params: see amdgpu_vm_update_params definition * @pe_start: first PTE to handle * @pe_end: last PTE to handle * @addr: addr those PTEs should point to * @flags: hw mapping flags */ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, - uint64_t src, - dma_addr_t *pages_addr, - struct amdgpu_ib *ib, + struct amdgpu_vm_update_params + *vm_update_params, uint64_t pe_start, uint64_t pe_end, uint64_t addr, uint32_t flags) { @@ -671,11 +683,11 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, return; /* system pages are non continuously */ - if (src || pages_addr || !(flags & AMDGPU_PTE_VALID) || - (frag_start >= frag_end)) { + if (vm_update_params->src || vm_update_params->pages_addr || + !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) { count = (pe_end - pe_start) / 8; - amdgpu_vm_update_pages(adev, src, pages_addr, ib, pe_start, + amdgpu_vm_update_pages(adev, vm_update_params, pe_start, addr, count, AMDGPU_GPU_PAGE_SIZE, flags); return; @@ -684,21 +696,21 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, /* handle the 4K area at the beginning */ if (pe_start != frag_start) { count = (frag_start - pe_start) / 8; - amdgpu_vm_update_pages(adev, 0, NULL, ib, pe_start, addr, + amdgpu_vm_update_pages(adev, vm_update_params, pe_start, addr, count, AMDGPU_GPU_PAGE_SIZE, flags); addr += AMDGPU_GPU_PAGE_SIZE * count; } /* handle the area in the middle */ count = (frag_end - frag_start) / 8; - amdgpu_vm_update_pages(adev, 0, NULL, ib, frag_start, addr, count, + amdgpu_vm_update_pages(adev, vm_update_params, frag_start, addr, count, AMDGPU_GPU_PAGE_SIZE, flags | frag_flags); /* handle the 4K area at the end */ if (frag_end != pe_end) { addr += AMDGPU_GPU_PAGE_SIZE * count; count = (pe_end - frag_end) / 8; - amdgpu_vm_update_pages(adev, 0, NULL, ib, frag_end, addr, + amdgpu_vm_update_pages(adev, vm_update_params, frag_end, addr, count, AMDGPU_GPU_PAGE_SIZE, flags); } } @@ -707,8 +719,7 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, * 
amdgpu_vm_update_ptes - make sure that page tables are valid * * @adev: amdgpu_device pointer - * @src: address where to copy page table entries from - * @pages_addr: DMA addresses to use for mapping + * @vm_update_params: see amdgpu_vm_update_params definition * @vm: requested vm * @start: start of GPU address range * @end: end of GPU address range @@ -718,10 +729,9 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, * Update the page tables in the range @start - @end. */ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, - uint64_t src, - dma_addr_t *pages_addr, + struct amdgpu_vm_update_params + *vm_update_params, struct amdgpu_vm *vm, - struct amdgpu_ib *ib, uint64_t start, uint64_t end, uint64_t dst, uint32_t flags) { @@ -747,7 +757,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, if (last_pe_end != pe_start) { - amdgpu_vm_frag_ptes(adev, src, pages_addr, ib, + amdgpu_vm_frag_ptes(adev, vm_update_params, last_pe_start, last_pe_end, last_dst, flags); @@ -762,7 +772,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, dst += nptes * AMDGPU_GPU_PAGE_SIZE; } - amdgpu_vm_frag_ptes(adev, src, pages_addr, ib, last_pe_start, + amdgpu_vm_frag_ptes(adev, vm_update_params, last_pe_start, last_pe_end, last_dst, flags); } @@ -794,11 +804,14 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, void *owner = AMDGPU_FENCE_OWNER_VM; unsigned nptes, ncmds, ndw; struct amdgpu_job *job; - struct amdgpu_ib *ib; + struct amdgpu_vm_update_params vm_update_params; struct fence *f = NULL; int r; ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); + memset(&vm_update_params, 0, sizeof(vm_update_params)); + vm_update_params.src = src; + vm_update_params.pages_addr = pages_addr; /* sync to everything on unmapping */ if (!(flags & AMDGPU_PTE_VALID)) @@ -815,11 +828,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, /* padding, etc. 
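/*
 * Aside: the sizing code continued just below reserves a worst-case
 * number of command dwords (ndw) per update path before building the
 * IB. Sketch of the visible part of that estimate; the constants are
 * ASIC packet-format specifics taken from the hunk, and the remaining
 * paths are elided there too:
 */
static unsigned estimate_ndw(int have_src, int have_pages_addr, unsigned ncmds)
{
        unsigned ndw = 64;             /* padding, etc. */

        if (have_src)
                ndw += ncmds * 7;      /* only copy commands needed */
        else if (have_pages_addr)
                ndw += ncmds * 4;      /* header for write-data commands */
        /* ... terms for the remaining paths are cut off in the hunk ... */
        return ndw;
}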
*/ ndw = 64; - if (src) { + if (vm_update_params.src) { /* only copy commands needed */ ndw += ncmds * 7; - } else if (pages_addr) { + } else if (vm_update_params.pages_addr) { /* header for write data commands */ ndw += ncmds * 4; @@ -838,7 +851,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, if (r) return r; - ib = &job->ibs[0]; + vm_update_params.ib = &job->ibs[0]; r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv, owner); @@ -849,11 +862,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, if (r) goto error_free; - amdgpu_vm_update_ptes(adev, src, pages_addr, vm, ib, start, + amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start, last + 1, addr, flags); - amdgpu_ring_pad_ib(ring, ib); - WARN_ON(ib->length_dw > ndw); + amdgpu_ring_pad_ib(ring, vm_update_params.ib); + WARN_ON(vm_update_params.ib->length_dw > ndw); r = amdgpu_job_submit(job, ring, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index ea407db1fbcf..5ec1f1e9c983 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -6221,6 +6221,9 @@ static int ci_dpm_sw_fini(void *handle) ci_dpm_fini(adev); mutex_unlock(&adev->pm.mutex); + release_firmware(adev->pm.fw); + adev->pm.fw = NULL; + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c index 845c21b1b2ee..be3d6f79a864 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c @@ -103,7 +103,6 @@ static void cik_ih_disable_interrupts(struct amdgpu_device *adev) */ static int cik_ih_irq_init(struct amdgpu_device *adev) { - int ret = 0; int rb_bufsz; u32 interrupt_cntl, ih_cntl, ih_rb_cntl; u64 wptr_off; @@ -156,7 +155,7 @@ static int cik_ih_irq_init(struct amdgpu_device *adev) /* enable irqs */ cik_ih_enable_interrupts(adev); - return ret; + return 0; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 518dca43b133..9dc4e24e31e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -66,6 +66,16 @@ MODULE_FIRMWARE("radeon/mullins_sdma1.bin"); u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev); + +static void cik_sdma_free_microcode(struct amdgpu_device *adev) +{ + int i; + for (i = 0; i < adev->sdma.num_instances; i++) { + release_firmware(adev->sdma.instance[i].fw); + adev->sdma.instance[i].fw = NULL; + } +} + /* * sDMA - System DMA * Starting with CIK, the GPU has new asynchronous @@ -419,6 +429,8 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev) /* Initialize the ring buffer's read and write pointers */ WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); + WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0); + WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0); /* set the wb address whether it's enabled or not */ WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], @@ -446,7 +458,12 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev) WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); ring->ready = true; + } + + cik_sdma_enable(adev, true); + for (i = 0; i < adev->sdma.num_instances; i++) { + ring = &adev->sdma.instance[i].ring; r = amdgpu_ring_test_ring(ring); if (r) { ring->ready = false; @@ -529,8 +546,8 @@ static int cik_sdma_start(struct amdgpu_device *adev) if (r) return r; - /* unhalt the MEs */ - cik_sdma_enable(adev, true); + /* 
halt the engine before programing */ + cik_sdma_enable(adev, false); /* start the gfx rings and rlc compute queues */ r = cik_sdma_gfx_resume(adev); @@ -998,6 +1015,7 @@ static int cik_sdma_sw_fini(void *handle) for (i = 0; i < adev->sdma.num_instances; i++) amdgpu_ring_fini(&adev->sdma.instance[i].ring); + cik_sdma_free_microcode(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c index fa4449e126e6..933e425a8154 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c @@ -1579,7 +1579,6 @@ static int cz_dpm_update_sclk_limit(struct amdgpu_device *adev) static int cz_dpm_set_deep_sleep_sclk_threshold(struct amdgpu_device *adev) { - int ret = 0; struct cz_power_info *pi = cz_get_pi(adev); if (pi->caps_sclk_ds) { @@ -1588,20 +1587,19 @@ static int cz_dpm_set_deep_sleep_sclk_threshold(struct amdgpu_device *adev) CZ_MIN_DEEP_SLEEP_SCLK); } - return ret; + return 0; } /* ?? without dal support, is this still needed in setpowerstate list*/ static int cz_dpm_set_watermark_threshold(struct amdgpu_device *adev) { - int ret = 0; struct cz_power_info *pi = cz_get_pi(adev); cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetWatermarkFrequency, pi->sclk_dpm.soft_max_clk); - return ret; + return 0; } static int cz_dpm_enable_nbdpm(struct amdgpu_device *adev) @@ -1636,7 +1634,6 @@ static void cz_dpm_nbdpm_lm_pstate_enable(struct amdgpu_device *adev, static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev) { - int ret = 0; struct cz_power_info *pi = cz_get_pi(adev); struct cz_ps *ps = &pi->requested_ps; @@ -1647,21 +1644,19 @@ static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev) cz_dpm_nbdpm_lm_pstate_enable(adev, true); } - return ret; + return 0; } /* with dpm enabled */ static int cz_dpm_set_power_state(struct amdgpu_device *adev) { - int ret = 0; - cz_dpm_update_sclk_limit(adev); cz_dpm_set_deep_sleep_sclk_threshold(adev); cz_dpm_set_watermark_threshold(adev); cz_dpm_enable_nbdpm(adev); cz_dpm_update_low_memory_pstate(adev); - return ret; + return 0; } static void cz_dpm_post_set_power_state(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c index 863cb16f6126..3d23a70b6432 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c @@ -103,7 +103,6 @@ static void cz_ih_disable_interrupts(struct amdgpu_device *adev) */ static int cz_ih_irq_init(struct amdgpu_device *adev) { - int ret = 0; int rb_bufsz; u32 interrupt_cntl, ih_cntl, ih_rb_cntl; u64 wptr_off; @@ -157,7 +156,7 @@ static int cz_ih_irq_init(struct amdgpu_device *adev) /* enable interrupts */ cz_ih_enable_interrupts(adev); - return ret; + return 0; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index c11b6007af80..af26ec0bc59d 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -137,7 +137,7 @@ static const u32 polaris11_golden_settings_a11[] = mmDCI_CLK_CNTL, 0x00000080, 0x00000000, mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070, mmFBC_DEBUG1, 0xffffffff, 0x00000008, - mmFBC_MISC, 0x9f313fff, 0x14300008, + mmFBC_MISC, 0x9f313fff, 0x14302008, mmHDMI_CONTROL, 0x313f031f, 0x00000011, }; @@ -145,7 +145,7 @@ static const u32 polaris10_golden_settings_a11[] = { mmDCI_CLK_CNTL, 0x00000080, 0x00000000, mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070, - mmFBC_MISC, 0x9f313fff, 0x14300008, + mmFBC_MISC, 0x9f313fff, 0x14302008, mmHDMI_CONTROL, 0x313f031f, 
0x00000011, }; diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c index 245cabf06575..ed03b75175d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c @@ -72,6 +72,11 @@ static int fiji_dpm_sw_init(void *handle) static int fiji_dpm_sw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + release_firmware(adev->pm.fw); + adev->pm.fw = NULL; + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 7f18a53ab53a..8c6ad1e72f02 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -991,6 +991,22 @@ out: return err; } +static void gfx_v7_0_free_microcode(struct amdgpu_device *adev) +{ + release_firmware(adev->gfx.pfp_fw); + adev->gfx.pfp_fw = NULL; + release_firmware(adev->gfx.me_fw); + adev->gfx.me_fw = NULL; + release_firmware(adev->gfx.ce_fw); + adev->gfx.ce_fw = NULL; + release_firmware(adev->gfx.mec_fw); + adev->gfx.mec_fw = NULL; + release_firmware(adev->gfx.mec2_fw); + adev->gfx.mec2_fw = NULL; + release_firmware(adev->gfx.rlc_fw); + adev->gfx.rlc_fw = NULL; +} + /** * gfx_v7_0_tiling_mode_table_init - init the hw tiling table * @@ -4489,6 +4505,7 @@ static int gfx_v7_0_sw_fini(void *handle) gfx_v7_0_cp_compute_fini(adev); gfx_v7_0_rlc_fini(adev); gfx_v7_0_mec_fini(adev); + gfx_v7_0_free_microcode(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 92647fbf5b8b..9f6f8669edc3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -267,10 +267,13 @@ static const u32 tonga_mgcg_cgcg_init[] = static const u32 golden_settings_polaris11_a11[] = { + mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208, mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040, mmDB_DEBUG2, 0xf00fffff, 0x00000400, mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000, + mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x16000012, + mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000, mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c, mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c, mmSQ_CONFIG, 0x07f80000, 0x07180000, @@ -284,8 +287,6 @@ static const u32 golden_settings_polaris11_a11[] = static const u32 polaris11_golden_common_all[] = { mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, - mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012, - mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000, mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002, mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, @@ -296,6 +297,7 @@ static const u32 polaris11_golden_common_all[] = static const u32 golden_settings_polaris10_a11[] = { mmATC_MISC_CG, 0x000c0fc0, 0x000c0200, + mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208, mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040, mmDB_DEBUG2, 0xf00fffff, 0x00000400, mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, @@ -834,6 +836,26 @@ err1: return r; } + +static void gfx_v8_0_free_microcode(struct amdgpu_device *adev) { + release_firmware(adev->gfx.pfp_fw); + adev->gfx.pfp_fw = NULL; + release_firmware(adev->gfx.me_fw); + adev->gfx.me_fw = NULL; + release_firmware(adev->gfx.ce_fw); + adev->gfx.ce_fw = NULL; + release_firmware(adev->gfx.rlc_fw); + adev->gfx.rlc_fw = NULL; + release_firmware(adev->gfx.mec_fw); + adev->gfx.mec_fw = NULL; + if ((adev->asic_type != CHIP_STONEY) && + (adev->asic_type != CHIP_TOPAZ)) + release_firmware(adev->gfx.mec2_fw); + adev->gfx.mec2_fw = NULL; + + 
kfree(adev->gfx.rlc.register_list_format); +} + static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) { const char *chip_name; @@ -1981,7 +2003,7 @@ static int gfx_v8_0_sw_fini(void *handle) gfx_v8_0_rlc_fini(adev); - kfree(adev->gfx.rlc.register_list_format); + gfx_v8_0_free_microcode(adev); return 0; } @@ -3972,11 +3994,15 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev) amdgpu_ring_write(ring, 0x3a00161a); amdgpu_ring_write(ring, 0x0000002e); break; - case CHIP_TOPAZ: case CHIP_CARRIZO: amdgpu_ring_write(ring, 0x00000002); amdgpu_ring_write(ring, 0x00000000); break; + case CHIP_TOPAZ: + amdgpu_ring_write(ring, adev->gfx.config.num_rbs == 1 ? + 0x00000000 : 0x00000002); + amdgpu_ring_write(ring, 0x00000000); + break; case CHIP_STONEY: amdgpu_ring_write(ring, 0x00000000); amdgpu_ring_write(ring, 0x00000000); @@ -5725,6 +5751,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN | EOP_TC_ACTION_EN | + EOP_TC_WB_ACTION_EN | EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5))); amdgpu_ring_write(ring, addr & 0xfffffffc); diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c index 460bc8ad37e6..825ccd63f2dc 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c @@ -72,6 +72,11 @@ static int iceland_dpm_sw_init(void *handle) static int iceland_dpm_sw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + release_firmware(adev->pm.fw); + adev->pm.fw = NULL; + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c index 39bfc52d0b42..3b8906ce3511 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c @@ -103,7 +103,6 @@ static void iceland_ih_disable_interrupts(struct amdgpu_device *adev) */ static int iceland_ih_irq_init(struct amdgpu_device *adev) { - int ret = 0; int rb_bufsz; u32 interrupt_cntl, ih_cntl, ih_rb_cntl; u64 wptr_off; @@ -157,7 +156,7 @@ static int iceland_ih_irq_init(struct amdgpu_device *adev) /* enable interrupts */ iceland_ih_enable_interrupts(adev); - return ret; + return 0; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index b45f54714574..a789a863d677 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -2252,7 +2252,7 @@ static void kv_apply_state_adjust_rules(struct amdgpu_device *adev, if (pi->caps_stable_p_state) { stable_p_state_sclk = (max_limits->sclk * 75) / 100; - for (i = table->count - 1; i >= 0; i++) { + for (i = table->count - 1; i >= 0; i--) { if (stable_p_state_sclk >= table->entries[i].clk) { stable_p_state_sclk = table->entries[i].clk; break; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index f4c3130d3fdb..b556bd0a8797 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -105,6 +105,15 @@ static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev) } } +static void sdma_v2_4_free_microcode(struct amdgpu_device *adev) +{ + int i; + for (i = 0; i < adev->sdma.num_instances; i++) { + release_firmware(adev->sdma.instance[i].fw); + adev->sdma.instance[i].fw = NULL; + } +} + /** * sdma_v2_4_init_microcode - load ucode images from disk * @@ -461,6 +470,8 @@ static int sdma_v2_4_gfx_resume(struct 
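/*
 * Aside: this patch adds the same sw_fini-time firmware release to many
 * blocks (ci_dpm, cik_sdma, gfx_v7/v8, the fiji/iceland/tonga dpm stubs,
 * sdma_v2_4/v3_0): release every ucode image and NULL the pointer so a
 * later teardown pass cannot double-free. Sketch of the idiom with a
 * stand-in for release_firmware(), which -- like free() -- is safe to
 * call on NULL:
 */
#include <stdlib.h>

struct fw { void *data; };

static void fw_release(struct fw **slot)      /* illustrative helper */
{
        if (*slot) {
                free((*slot)->data);
                free(*slot);
        }
        *slot = NULL;                         /* never leave it dangling */
}

static void free_all_microcode(struct fw **instance, int n)
{
        for (int i = 0; i < n; i++)           /* one slot per engine */
                fw_release(&instance[i]);
}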
amdgpu_device *adev) /* Initialize the ring buffer's read and write pointers */ WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); + WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0); + WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0); /* set the wb address whether it's enabled or not */ WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], @@ -489,7 +500,11 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev) WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); ring->ready = true; + } + sdma_v2_4_enable(adev, true); + for (i = 0; i < adev->sdma.num_instances; i++) { + ring = &adev->sdma.instance[i].ring; r = amdgpu_ring_test_ring(ring); if (r) { ring->ready = false; @@ -580,8 +595,8 @@ static int sdma_v2_4_start(struct amdgpu_device *adev) return -EINVAL; } - /* unhalt the MEs */ - sdma_v2_4_enable(adev, true); + /* halt the engine before programing */ + sdma_v2_4_enable(adev, false); /* start the gfx rings and rlc compute queues */ r = sdma_v2_4_gfx_resume(adev); @@ -1012,6 +1027,7 @@ static int sdma_v2_4_sw_fini(void *handle) for (i = 0; i < adev->sdma.num_instances; i++) amdgpu_ring_fini(&adev->sdma.instance[i].ring); + sdma_v2_4_free_microcode(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 063f08a9957a..532ea88da66a 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -109,10 +109,12 @@ static const u32 fiji_mgcg_cgcg_init[] = static const u32 golden_settings_polaris11_a11[] = { mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007, + mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000, mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100, mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100, mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007, + mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000, mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100, mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100, mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100, @@ -234,6 +236,15 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev) } } +static void sdma_v3_0_free_microcode(struct amdgpu_device *adev) +{ + int i; + for (i = 0; i < adev->sdma.num_instances; i++) { + release_firmware(adev->sdma.instance[i].fw); + adev->sdma.instance[i].fw = NULL; + } +} + /** * sdma_v3_0_init_microcode - load ucode images from disk * @@ -670,6 +681,8 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) /* Initialize the ring buffer's read and write pointers */ WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); + WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0); + WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0); /* set the wb address whether it's enabled or not */ WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], @@ -709,7 +722,15 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); ring->ready = true; + } + + /* unhalt the MEs */ + sdma_v3_0_enable(adev, true); + /* enable sdma ring preemption */ + sdma_v3_0_ctx_switch_enable(adev, true); + for (i = 0; i < adev->sdma.num_instances; i++) { + ring = &adev->sdma.instance[i].ring; r = amdgpu_ring_test_ring(ring); if (r) { ring->ready = false; @@ -802,10 +823,9 @@ static int sdma_v3_0_start(struct amdgpu_device *adev) } } - /* unhalt the MEs */ - sdma_v3_0_enable(adev, true); - /* enable sdma ring preemption */ - sdma_v3_0_ctx_switch_enable(adev, true); + /* disble 
sdma engine before programing it */ + sdma_v3_0_ctx_switch_enable(adev, false); + sdma_v3_0_enable(adev, false); /* start the gfx rings and rlc compute queues */ r = sdma_v3_0_gfx_resume(adev); @@ -1245,6 +1265,7 @@ static int sdma_v3_0_sw_fini(void *handle) for (i = 0; i < adev->sdma.num_instances; i++) amdgpu_ring_fini(&adev->sdma.instance[i].ring); + sdma_v3_0_free_microcode(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c index b7615cefcac4..f06f6f4dc3a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c @@ -71,6 +71,11 @@ static int tonga_dpm_sw_init(void *handle) static int tonga_dpm_sw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + release_firmware(adev->pm.fw); + adev->pm.fw = NULL; + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index f036af937fbc..c92055805a45 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c @@ -99,7 +99,6 @@ static void tonga_ih_disable_interrupts(struct amdgpu_device *adev) */ static int tonga_ih_irq_init(struct amdgpu_device *adev) { - int ret = 0; int rb_bufsz; u32 interrupt_cntl, ih_rb_cntl, ih_doorbell_rtpr; u64 wptr_off; @@ -165,7 +164,7 @@ static int tonga_ih_irq_init(struct amdgpu_device *adev) /* enable interrupts */ tonga_ih_enable_interrupts(adev); - return ret; + return 0; } /** diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index ac005796b71c..7708d90b9da9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -242,13 +242,19 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn, pqm_uninit(&p->pqm); /* Iterate over all process device data structure and check - * if we should reset all wavefronts */ - list_for_each_entry(pdd, &p->per_device_data, per_device_list) + * if we should delete debug managers and reset all wavefronts + */ + list_for_each_entry(pdd, &p->per_device_data, per_device_list) { + if ((pdd->dev->dbgmgr) && + (pdd->dev->dbgmgr->pasid == p->pasid)) + kfd_dbgmgr_destroy(pdd->dev->dbgmgr); + if (pdd->reset_wavefronts) { pr_warn("amdkfd: Resetting all wave fronts\n"); dbgdev_wave_reset_wavefronts(pdd->dev, p); pdd->reset_wavefronts = false; } + } mutex_unlock(&p->mutex); @@ -404,42 +410,52 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid) idx = srcu_read_lock(&kfd_processes_srcu); + /* + * Look for the process that matches the pasid. If there is no such + * process, we either released it in amdkfd's own notifier, or there + * is a bug. Unfortunately, there is no way to tell... 
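/*
 * Aside: the kfd_process.c rework around this point stops assuming the
 * pasid lookup always succeeds; the unbind work now happens inside the
 * match branch, the SRCU read side is dropped before the sleeping
 * per-process mutex is taken, and a miss falls through to a plain
 * unlock. Userspace sketch of that shape with a rwlock standing in for
 * SRCU -- note that in the kernel, SRCU grace periods plus the notifier
 * protocol keep *p alive after the unlock, which a faithful port would
 * need a refcount to reproduce:
 */
#include <pthread.h>

struct kproc {
        unsigned pasid;
        struct kproc *next;
        pthread_mutex_t lock;
};

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct kproc *table;

static void unbind_process(unsigned pasid)
{
        pthread_rwlock_rdlock(&table_lock);
        for (struct kproc *p = table; p; p = p->next) {
                if (p->pasid != pasid)
                        continue;
                pthread_rwlock_unlock(&table_lock);  /* drop read side first */
                pthread_mutex_lock(&p->lock);
                /* ... destroy debug manager, reset wavefronts, mark unbound ... */
                pthread_mutex_unlock(&p->lock);
                return;
        }
        pthread_rwlock_unlock(&table_lock);          /* no match: already gone */
}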
+ */ hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes) - if (p->pasid == pasid) - break; + if (p->pasid == pasid) { - srcu_read_unlock(&kfd_processes_srcu, idx); + srcu_read_unlock(&kfd_processes_srcu, idx); - BUG_ON(p->pasid != pasid); + pr_debug("Unbinding process %d from IOMMU\n", pasid); - mutex_lock(&p->mutex); + mutex_lock(&p->mutex); - if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid)) - kfd_dbgmgr_destroy(dev->dbgmgr); + if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid)) + kfd_dbgmgr_destroy(dev->dbgmgr); - pqm_uninit(&p->pqm); + pqm_uninit(&p->pqm); - pdd = kfd_get_process_device_data(dev, p); + pdd = kfd_get_process_device_data(dev, p); - if (!pdd) { - mutex_unlock(&p->mutex); - return; - } + if (!pdd) { + mutex_unlock(&p->mutex); + return; + } - if (pdd->reset_wavefronts) { - dbgdev_wave_reset_wavefronts(pdd->dev, p); - pdd->reset_wavefronts = false; - } + if (pdd->reset_wavefronts) { + dbgdev_wave_reset_wavefronts(pdd->dev, p); + pdd->reset_wavefronts = false; + } - /* - * Just mark pdd as unbound, because we still need it to call - * amd_iommu_unbind_pasid() in when the process exits. - * We don't call amd_iommu_unbind_pasid() here - * because the IOMMU called us. - */ - pdd->bound = false; + /* + * Just mark pdd as unbound, because we still need it + * to call amd_iommu_unbind_pasid() in when the + * process exits. + * We don't call amd_iommu_unbind_pasid() here + * because the IOMMU called us. + */ + pdd->bound = false; - mutex_unlock(&p->mutex); + mutex_unlock(&p->mutex); + + return; + } + + srcu_read_unlock(&kfd_processes_srcu, idx); } struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 74909e72a009..884c96f50c3d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -666,7 +666,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev->node_props.simd_count); if (dev->mem_bank_count < dev->node_props.mem_banks_count) { - pr_warn("kfd: mem_banks_count truncated from %d to %d\n", + pr_info_once("kfd: mem_banks_count truncated from %d to %d\n", dev->node_props.mem_banks_count, dev->mem_bank_count); sysfs_show_32bit_prop(buffer, "mem_banks_count", diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 6080951d539d..afce1edbe250 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -157,6 +157,7 @@ struct amd_ip_funcs { int (*hw_init)(void *handle); /* tears down the hw state */ int (*hw_fini)(void *handle); + void (*late_fini)(void *handle); /* handles IP specific hw/sw changes for suspend */ int (*suspend)(void *handle); /* handles IP specific hw/sw changes for resume */ diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h index a461e155a160..7464daf89ca1 100644 --- a/drivers/gpu/drm/amd/include/cgs_common.h +++ b/drivers/gpu/drm/amd/include/cgs_common.h @@ -581,6 +581,9 @@ typedef int (*cgs_get_firmware_info)(struct cgs_device *cgs_device, enum cgs_ucode_id type, struct cgs_firmware_info *info); +typedef int (*cgs_rel_firmware)(struct cgs_device *cgs_device, + enum cgs_ucode_id type); + typedef int(*cgs_set_powergating_state)(struct cgs_device *cgs_device, enum amd_ip_block_type block_type, enum amd_powergating_state state); @@ -645,6 +648,7 @@ struct cgs_ops { cgs_set_camera_voltages_t set_camera_voltages; /* 
Firmware Info */ cgs_get_firmware_info get_firmware_info; + cgs_rel_firmware rel_firmware; /* cg pg interface*/ cgs_set_powergating_state set_powergating_state; cgs_set_clockgating_state set_clockgating_state; @@ -738,6 +742,8 @@ struct cgs_device CGS_CALL(set_camera_voltages,dev,mask,voltages) #define cgs_get_firmware_info(dev, type, info) \ CGS_CALL(get_firmware_info, dev, type, info) +#define cgs_rel_firmware(dev, type) \ + CGS_CALL(rel_firmware, dev, type) #define cgs_set_powergating_state(dev, block_type, state) \ CGS_CALL(set_powergating_state, dev, block_type, state) #define cgs_set_clockgating_state(dev, block_type, state) \ diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 8e345bfddb69..e629f8a9fe93 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -73,11 +73,14 @@ static int pp_sw_init(void *handle) ret = hwmgr->hwmgr_func->backend_init(hwmgr); if (ret) - goto err; + goto err1; pr_info("amdgpu: powerplay initialized\n"); return 0; +err1: + if (hwmgr->pptable_func->pptable_fini) + hwmgr->pptable_func->pptable_fini(hwmgr); err: pr_err("amdgpu: powerplay initialization failed\n"); return ret; @@ -100,6 +103,9 @@ static int pp_sw_fini(void *handle) if (hwmgr->hwmgr_func->backend_fini != NULL) ret = hwmgr->hwmgr_func->backend_fini(hwmgr); + if (hwmgr->pptable_func->pptable_fini) + hwmgr->pptable_func->pptable_fini(hwmgr); + return ret; } diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c index 46410e3c7349..fb88e4e5d625 100644 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c @@ -58,9 +58,6 @@ static void pem_fini(struct pp_eventmgr *eventmgr) pem_unregister_interrupts(eventmgr); pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data); - - if (eventmgr != NULL) - kfree(eventmgr); } int eventmgr_init(struct pp_instance *handle) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c index c94f9faa220a..586f73276226 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c @@ -1830,7 +1830,7 @@ static uint16_t fiji_find_closest_vddci(struct pp_hwmgr *hwmgr, uint16_t vddci) PP_ASSERT_WITH_CODE(false, "VDDCI is larger than max VDDCI in VDDCI Voltage Table!", - return vddci_table->entries[i].value); + return vddci_table->entries[i-1].value); } static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, @@ -3573,46 +3573,11 @@ static int fiji_force_dpm_highest(struct pp_hwmgr *hwmgr) return 0; } -static void fiji_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr) -{ - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)hwmgr->pptable; - struct phm_clock_voltage_dependency_table *table = - table_info->vddc_dep_on_dal_pwrl; - struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table; - enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level; - uint32_t req_vddc = 0, req_volt, i; - - if (!table && !(dal_power_level >= PP_DAL_POWERLEVEL_ULTRALOW && - dal_power_level <= PP_DAL_POWERLEVEL_PERFORMANCE)) - return; - - for (i= 0; i < table->count; i++) { - if (dal_power_level == table->entries[i].clk) { - req_vddc = table->entries[i].v; - break; - } - } - - vddc_table = table_info->vdd_dep_on_sclk; - for (i= 0; i < vddc_table->count; i++) { - if (req_vddc <= 
vddc_table->entries[i].vddc) { - req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE) - << VDDC_SHIFT; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_VddC_Request, req_volt); - return; - } - } - printk(KERN_ERR "DAL requested level can not" - " found a available voltage in VDDC DPM Table \n"); -} - static int fiji_upload_dpmlevel_enable_mask(struct pp_hwmgr *hwmgr) { struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - fiji_apply_dal_min_voltage_request(hwmgr); + phm_apply_dal_min_voltage_request(hwmgr); if (!data->sclk_dpm_key_disabled) { if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) @@ -4349,7 +4314,7 @@ static int fiji_populate_and_upload_sclk_mclk_dpm_levels( if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) { - result = fiji_populate_all_memory_levels(hwmgr); + result = fiji_populate_all_graphic_levels(hwmgr); PP_ASSERT_WITH_CODE((0 == result), "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", return result); @@ -5109,11 +5074,11 @@ static int fiji_get_pp_table(struct pp_hwmgr *hwmgr, char **table) struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); if (!data->soft_pp_table) { - data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL); + data->soft_pp_table = kmemdup(hwmgr->soft_pp_table, + hwmgr->soft_pp_table_size, + GFP_KERNEL); if (!data->soft_pp_table) return -ENOMEM; - memcpy(data->soft_pp_table, hwmgr->soft_pp_table, - hwmgr->soft_pp_table_size); } *table = (char *)&data->soft_pp_table; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 7d69ed635bc2..20f20e075588 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -30,6 +30,9 @@ #include "pppcielanes.h" #include "pp_debug.h" #include "ppatomctrl.h" +#include "ppsmc.h" + +#define VOLTAGE_SCALE 4 extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr); extern int tonga_hwmgr_init(struct pp_hwmgr *hwmgr); @@ -90,6 +93,13 @@ int hwmgr_fini(struct pp_hwmgr *hwmgr) if (hwmgr == NULL || hwmgr->ps == NULL) return -EINVAL; + /* do hwmgr finish*/ + kfree(hwmgr->backend); + + kfree(hwmgr->start_thermal_controller.function_list); + + kfree(hwmgr->set_temperature_range.function_list); + kfree(hwmgr->ps); kfree(hwmgr); return 0; @@ -459,7 +469,7 @@ uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, u PP_ASSERT_WITH_CODE(false, "VDDCI is larger than max VDDCI in VDDCI Voltage Table!", - return vddci_table->entries[i].value); + return vddci_table->entries[i-1].value); } int phm_find_boot_level(void *table, @@ -566,3 +576,38 @@ uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask) return level; } + +void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr) +{ + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)hwmgr->pptable; + struct phm_clock_voltage_dependency_table *table = + table_info->vddc_dep_on_dal_pwrl; + struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table; + enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level; + uint32_t req_vddc = 0, req_volt, i; + + if (!table || table->count <= 0 + || dal_power_level < PP_DAL_POWERLEVEL_ULTRALOW + || dal_power_level > PP_DAL_POWERLEVEL_PERFORMANCE) + return; + + for (i = 0; i < table->count; i++) { + if (dal_power_level == table->entries[i].clk) { + req_vddc = table->entries[i].v; + break; + } + } + + vddc_table = table_info->vdd_dep_on_sclk; + for 
(i = 0; i < vddc_table->count; i++) { + if (req_vddc <= vddc_table->entries[i].vddc) { + req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE); + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_VddC_Request, req_volt); + return; + } + } + printk(KERN_ERR "DAL requested level can not" + " found a available voltage in VDDC DPM Table \n"); +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c index 93768fa1dcdc..aa6be033f21b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c @@ -189,41 +189,6 @@ int phm_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr) return decode_pcie_lane_width(link_width); } -void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr) -{ - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)hwmgr->pptable; - struct phm_clock_voltage_dependency_table *table = - table_info->vddc_dep_on_dal_pwrl; - struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table; - enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level; - uint32_t req_vddc = 0, req_volt, i; - - if (!table && !(dal_power_level >= PP_DAL_POWERLEVEL_ULTRALOW && - dal_power_level <= PP_DAL_POWERLEVEL_PERFORMANCE)) - return; - - for (i = 0; i < table->count; i++) { - if (dal_power_level == table->entries[i].clk) { - req_vddc = table->entries[i].v; - break; - } - } - - vddc_table = table_info->vdd_dep_on_sclk; - for (i = 0; i < vddc_table->count; i++) { - if (req_vddc <= vddc_table->entries[i].vddc) { - req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE) - << VDDC_SHIFT; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_VddC_Request, req_volt); - return; - } - } - printk(KERN_ERR "DAL requested level can not" - " found a available voltage in VDDC DPM Table \n"); -} - /** * Enable voltage control * @@ -2091,7 +2056,7 @@ static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr) "Failed to populate Clock Stretcher Data Table!", return result); } - + table->CurrSclkPllRange = 0xff; table->GraphicsVoltageChangeEnable = 1; table->GraphicsThermThrottleEnable = 1; table->GraphicsInterval = 1; @@ -2184,6 +2149,7 @@ static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr) CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1); CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2); CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); + CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange); CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); @@ -4760,11 +4726,11 @@ static int polaris10_get_pp_table(struct pp_hwmgr *hwmgr, char **table) struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); if (!data->soft_pp_table) { - data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL); + data->soft_pp_table = kmemdup(hwmgr->soft_pp_table, + hwmgr->soft_pp_table_size, + GFP_KERNEL); if (!data->soft_pp_table) return -ENOMEM; - memcpy(data->soft_pp_table, hwmgr->soft_pp_table, - hwmgr->soft_pp_table_size); } *table = (char *)&data->soft_pp_table; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c index 0b99ab3ba0c5..ae96f14b827c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c @@ -286,7 +286,7 @@ 
int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr) if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, (uint8_t *)&data->power_tune_table, - sizeof(struct SMU74_Discrete_PmFuses), data->sram_end)) + (sizeof(struct SMU74_Discrete_PmFuses) - 92), data->sram_end)) PP_ASSERT_WITH_CODE(false, "Attempt to download PmFuseTable Failed!", return -EINVAL); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c index 1faad92b50d3..d27e8c40602a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c @@ -2847,27 +2847,6 @@ static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) } } - /* Initialize Vddc DPM table based on allow Vddc values. And populate corresponding std values. */ - for (i = 0; i < allowed_vdd_sclk_table->count; i++) { - data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddc; - /* tonga_hwmgr->dpm_table.VddcTable.dpm_levels[i].param1 = stdVoltageTable->entries[i].Leakage; */ - /* param1 is for corresponding std voltage */ - data->dpm_table.vddc_table.dpm_levels[i].enabled = 1; - } - data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count; - - if (NULL != allowed_vdd_mclk_table) { - /* Initialize Vddci DPM table based on allow Mclk values */ - for (i = 0; i < allowed_vdd_mclk_table->count; i++) { - data->dpm_table.vdd_ci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddci; - data->dpm_table.vdd_ci_table.dpm_levels[i].enabled = 1; - data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].mvdd; - data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1; - } - data->dpm_table.vdd_ci_table.count = allowed_vdd_mclk_table->count; - data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count; - } - /* setup PCIE gen speed levels*/ tonga_setup_default_pcie_tables(hwmgr); @@ -5331,7 +5310,7 @@ static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { PP_ASSERT_WITH_CODE( - true == tonga_is_dpm_running(hwmgr), + 0 == tonga_is_dpm_running(hwmgr), "Trying to freeze SCLK DPM when DPM is disabled", ); PP_ASSERT_WITH_CODE( @@ -5344,7 +5323,7 @@ static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if ((0 == data->mclk_dpm_key_disabled) && (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { - PP_ASSERT_WITH_CODE(true == tonga_is_dpm_running(hwmgr), + PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr), "Trying to freeze MCLK DPM when DPM is disabled", ); PP_ASSERT_WITH_CODE( @@ -5445,7 +5424,7 @@ static int tonga_populate_and_upload_sclk_mclk_dpm_levels(struct pp_hwmgr *hwmgr } if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) { - result = tonga_populate_all_memory_levels(hwmgr); + result = tonga_populate_all_graphic_levels(hwmgr); PP_ASSERT_WITH_CODE((0 == result), "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", return result); @@ -5647,7 +5626,7 @@ static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { - PP_ASSERT_WITH_CODE(true == tonga_is_dpm_running(hwmgr), + PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr), "Trying to Unfreeze SCLK DPM when DPM is disabled", ); PP_ASSERT_WITH_CODE( @@ -5661,7 +5640,7 @@ static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) 
(data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { PP_ASSERT_WITH_CODE( - true == tonga_is_dpm_running(hwmgr), + 0 == tonga_is_dpm_running(hwmgr), "Trying to Unfreeze MCLK DPM when DPM is disabled", ); PP_ASSERT_WITH_CODE( @@ -6056,11 +6035,11 @@ static int tonga_get_pp_table(struct pp_hwmgr *hwmgr, char **table) struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); if (!data->soft_pp_table) { - data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL); + data->soft_pp_table = kmemdup(hwmgr->soft_pp_table, + hwmgr->soft_pp_table_size, + GFP_KERNEL); if (!data->soft_pp_table) return -ENOMEM; - memcpy(data->soft_pp_table, hwmgr->soft_pp_table, - hwmgr->soft_pp_table_size); } *table = (char *)&data->soft_pp_table; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c index 10e3630ee39d..296ec7ef6d45 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c @@ -1040,48 +1040,44 @@ int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr) struct phm_ppt_v1_information *pp_table_information = (struct phm_ppt_v1_information *)(hwmgr->pptable); - if (NULL != hwmgr->soft_pp_table) { - kfree(hwmgr->soft_pp_table); + if (NULL != hwmgr->soft_pp_table) hwmgr->soft_pp_table = NULL; - } - if (NULL != pp_table_information->vdd_dep_on_sclk) - pp_table_information->vdd_dep_on_sclk = NULL; + kfree(pp_table_information->vdd_dep_on_sclk); + pp_table_information->vdd_dep_on_sclk = NULL; - if (NULL != pp_table_information->vdd_dep_on_mclk) - pp_table_information->vdd_dep_on_mclk = NULL; + kfree(pp_table_information->vdd_dep_on_mclk); + pp_table_information->vdd_dep_on_mclk = NULL; - if (NULL != pp_table_information->valid_mclk_values) - pp_table_information->valid_mclk_values = NULL; + kfree(pp_table_information->valid_mclk_values); + pp_table_information->valid_mclk_values = NULL; - if (NULL != pp_table_information->valid_sclk_values) - pp_table_information->valid_sclk_values = NULL; + kfree(pp_table_information->valid_sclk_values); + pp_table_information->valid_sclk_values = NULL; - if (NULL != pp_table_information->vddc_lookup_table) - pp_table_information->vddc_lookup_table = NULL; + kfree(pp_table_information->vddc_lookup_table); + pp_table_information->vddc_lookup_table = NULL; - if (NULL != pp_table_information->vddgfx_lookup_table) - pp_table_information->vddgfx_lookup_table = NULL; + kfree(pp_table_information->vddgfx_lookup_table); + pp_table_information->vddgfx_lookup_table = NULL; - if (NULL != pp_table_information->mm_dep_table) - pp_table_information->mm_dep_table = NULL; + kfree(pp_table_information->mm_dep_table); + pp_table_information->mm_dep_table = NULL; - if (NULL != pp_table_information->cac_dtp_table) - pp_table_information->cac_dtp_table = NULL; + kfree(pp_table_information->cac_dtp_table); + pp_table_information->cac_dtp_table = NULL; - if (NULL != hwmgr->dyn_state.cac_dtp_table) - hwmgr->dyn_state.cac_dtp_table = NULL; + kfree(hwmgr->dyn_state.cac_dtp_table); + hwmgr->dyn_state.cac_dtp_table = NULL; - if (NULL != pp_table_information->ppm_parameter_table) - pp_table_information->ppm_parameter_table = NULL; + kfree(pp_table_information->ppm_parameter_table); + pp_table_information->ppm_parameter_table = NULL; - if (NULL != pp_table_information->pcie_table) - pp_table_information->pcie_table = NULL; + kfree(pp_table_information->pcie_table); + pp_table_information->pcie_table = NULL; - if (NULL 
!= hwmgr->pptable) { - kfree(hwmgr->pptable); - hwmgr->pptable = NULL; - } + kfree(hwmgr->pptable); + hwmgr->pptable = NULL; return result; } diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index fd4ce7aaeee9..28f571449495 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -673,7 +673,7 @@ extern int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, phm_ppt_v1_volta extern int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr); extern int phm_hwmgr_backend_fini(struct pp_hwmgr *hwmgr); extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask); - +extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr); #define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c index da18f44fd1c8..87c023e518ab 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c @@ -639,7 +639,7 @@ static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr) cz_smu->driver_buffer_length = 0; - for (i = 0; i < sizeof(firmware_list)/sizeof(*firmware_list); i++) { + for (i = 0; i < ARRAY_SIZE(firmware_list); i++) { firmware_type = cz_translate_firmware_enum_to_arg(smumgr, firmware_list[i]); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index 673a75c74e18..8e52a2e82db5 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -1006,10 +1006,16 @@ static int fiji_smu_init(struct pp_smumgr *smumgr) static int fiji_smu_fini(struct pp_smumgr *smumgr) { + struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); + + smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle); + if (smumgr->backend) { kfree(smumgr->backend); smumgr->backend = NULL; } + + cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index de618ead9db8..043b6ac09d5f 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -469,6 +469,7 @@ int polaris10_smu_fini(struct pp_smumgr *smumgr) kfree(smumgr->backend); smumgr->backend = NULL; } + cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index c483baf6b4fb..0728c1e3d97a 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c @@ -81,6 +81,7 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle) int smum_fini(struct pp_smumgr *smumgr) { + kfree(smumgr->device); kfree(smumgr); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c index 32820b680d88..b22722eabafc 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c @@ -328,10 +328,17 @@ int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr, static int tonga_smu_fini(struct pp_smumgr *smumgr) { + struct tonga_smumgr *priv = (struct tonga_smumgr *)(smumgr->backend); + + smu_free_memory(smumgr->device, (void 
*)priv->smu_buffer.handle); + smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle); + if (smumgr->backend != NULL) { kfree(smumgr->backend); smumgr->backend = NULL; } + + cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); return 0; } diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c index fef1b04c2aab..0813c2f06931 100644 --- a/drivers/gpu/drm/arm/hdlcd_crtc.c +++ b/drivers/gpu/drm/arm/hdlcd_crtc.c @@ -33,8 +33,17 @@ * */ +static void hdlcd_crtc_cleanup(struct drm_crtc *crtc) +{ + struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); + + /* stop the controller on cleanup */ + hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0); + drm_crtc_cleanup(crtc); +} + static const struct drm_crtc_funcs hdlcd_crtc_funcs = { - .destroy = drm_crtc_cleanup, + .destroy = hdlcd_crtc_cleanup, .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, .reset = drm_atomic_helper_crtc_reset, @@ -97,7 +106,7 @@ static void hdlcd_crtc_mode_set_nofb(struct drm_crtc *crtc) struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); struct drm_display_mode *m = &crtc->state->adjusted_mode; struct videomode vm; - unsigned int polarities, line_length, err; + unsigned int polarities, err; vm.vfront_porch = m->crtc_vsync_start - m->crtc_vdisplay; vm.vback_porch = m->crtc_vtotal - m->crtc_vsync_end; @@ -113,23 +122,18 @@ static void hdlcd_crtc_mode_set_nofb(struct drm_crtc *crtc) if (m->flags & DRM_MODE_FLAG_PVSYNC) polarities |= HDLCD_POLARITY_VSYNC; - line_length = crtc->primary->state->fb->pitches[0]; - /* Allow max number of outstanding requests and largest burst size */ hdlcd_write(hdlcd, HDLCD_REG_BUS_OPTIONS, HDLCD_BUS_MAX_OUTSTAND | HDLCD_BUS_BURST_16); - hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, line_length); - hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, line_length); - hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, m->crtc_vdisplay - 1); hdlcd_write(hdlcd, HDLCD_REG_V_DATA, m->crtc_vdisplay - 1); hdlcd_write(hdlcd, HDLCD_REG_V_BACK_PORCH, vm.vback_porch - 1); hdlcd_write(hdlcd, HDLCD_REG_V_FRONT_PORCH, vm.vfront_porch - 1); hdlcd_write(hdlcd, HDLCD_REG_V_SYNC, vm.vsync_len - 1); + hdlcd_write(hdlcd, HDLCD_REG_H_DATA, m->crtc_hdisplay - 1); hdlcd_write(hdlcd, HDLCD_REG_H_BACK_PORCH, vm.hback_porch - 1); hdlcd_write(hdlcd, HDLCD_REG_H_FRONT_PORCH, vm.hfront_porch - 1); hdlcd_write(hdlcd, HDLCD_REG_H_SYNC, vm.hsync_len - 1); - hdlcd_write(hdlcd, HDLCD_REG_H_DATA, m->crtc_hdisplay - 1); hdlcd_write(hdlcd, HDLCD_REG_POLARITIES, polarities); err = hdlcd_set_pxl_fmt(crtc); @@ -144,20 +148,19 @@ static void hdlcd_crtc_enable(struct drm_crtc *crtc) struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); clk_prepare_enable(hdlcd->clk); + hdlcd_crtc_mode_set_nofb(crtc); hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 1); - drm_crtc_vblank_on(crtc); } static void hdlcd_crtc_disable(struct drm_crtc *crtc) { struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); - if (!crtc->primary->fb) + if (!crtc->state->active) return; - clk_disable_unprepare(hdlcd->clk); hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0); - drm_crtc_vblank_off(crtc); + clk_disable_unprepare(hdlcd->clk); } static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc, @@ -179,20 +182,17 @@ static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc, static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_crtc_state *state) { - struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); - unsigned long flags; - - if (crtc->state->event) { - struct drm_pending_vblank_event *event = 
crtc->state->event; + struct drm_pending_vblank_event *event = crtc->state->event; + if (event) { crtc->state->event = NULL; - event->pipe = drm_crtc_index(crtc); - - WARN_ON(drm_crtc_vblank_get(crtc) != 0); - spin_lock_irqsave(&crtc->dev->event_lock, flags); - list_add_tail(&event->base.link, &hdlcd->event_list); - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + spin_lock_irq(&crtc->dev->event_lock); + if (drm_crtc_vblank_get(crtc) == 0) + drm_crtc_arm_vblank_event(crtc, event); + else + drm_crtc_send_vblank_event(crtc, event); + spin_unlock_irq(&crtc->dev->event_lock); } } @@ -225,6 +225,15 @@ static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = { static int hdlcd_plane_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) { + u32 src_w, src_h; + + src_w = state->src_w >> 16; + src_h = state->src_h >> 16; + + /* we can't do any scaling of the plane source */ + if ((src_w != state->crtc_w) || (src_h != state->crtc_h)) + return -EINVAL; + return 0; } @@ -233,20 +242,31 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane, { struct hdlcd_drm_private *hdlcd; struct drm_gem_cma_object *gem; + unsigned int depth, bpp; + u32 src_w, src_h, dest_w, dest_h; dma_addr_t scanout_start; - if (!plane->state->crtc || !plane->state->fb) + if (!plane->state->fb) return; - hdlcd = crtc_to_hdlcd_priv(plane->state->crtc); + drm_fb_get_bpp_depth(plane->state->fb->pixel_format, &depth, &bpp); + src_w = plane->state->src_w >> 16; + src_h = plane->state->src_h >> 16; + dest_w = plane->state->crtc_w; + dest_h = plane->state->crtc_h; gem = drm_fb_cma_get_gem_obj(plane->state->fb, 0); - scanout_start = gem->paddr; + scanout_start = gem->paddr + plane->state->fb->offsets[0] + + plane->state->crtc_y * plane->state->fb->pitches[0] + + plane->state->crtc_x * bpp / 8; + + hdlcd = plane->dev->dev_private; + hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, plane->state->fb->pitches[0]); + hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, plane->state->fb->pitches[0]); + hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, dest_h - 1); hdlcd_write(hdlcd, HDLCD_REG_FB_BASE, scanout_start); } static const struct drm_plane_helper_funcs hdlcd_plane_helper_funcs = { - .prepare_fb = NULL, - .cleanup_fb = NULL, .atomic_check = hdlcd_plane_atomic_check, .atomic_update = hdlcd_plane_atomic_update, }; @@ -294,16 +314,6 @@ static struct drm_plane *hdlcd_plane_init(struct drm_device *drm) return plane; } -void hdlcd_crtc_suspend(struct drm_crtc *crtc) -{ - hdlcd_crtc_disable(crtc); -} - -void hdlcd_crtc_resume(struct drm_crtc *crtc) -{ - hdlcd_crtc_enable(crtc); -} - int hdlcd_setup_crtc(struct drm_device *drm) { struct hdlcd_drm_private *hdlcd = drm->dev_private; diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c index b987c63ba8d6..a6ca36f0096f 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.c +++ b/drivers/gpu/drm/arm/hdlcd_drv.c @@ -49,8 +49,6 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags) atomic_set(&hdlcd->dma_end_count, 0); #endif - INIT_LIST_HEAD(&hdlcd->event_list); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); hdlcd->mmio = devm_ioremap_resource(drm->dev, res); if (IS_ERR(hdlcd->mmio)) { @@ -84,11 +82,7 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags) goto setup_fail; } - pm_runtime_enable(drm->dev); - - pm_runtime_get_sync(drm->dev); ret = drm_irq_install(drm, platform_get_irq(pdev, 0)); - pm_runtime_put_sync(drm->dev); if (ret < 0) { DRM_ERROR("failed to install IRQ handler\n"); goto irq_fail; @@ -164,24 +158,9 @@ 
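/*
 * hdlcd_plane_atomic_update() above now derives the scanout start address from
 * the framebuffer base, offset, pitch and the plane's crtc_x/crtc_y position
 * instead of always scanning out from paddr. The arithmetic in isolation, with
 * made-up example values; bpp is bits per pixel as drm_fb_get_bpp_depth()
 * reports it.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t scanout_addr(uint64_t paddr, uint32_t offset, uint32_t pitch,
                             uint32_t bpp, uint32_t crtc_x, uint32_t crtc_y)
{
        /* base + fb offset + full lines above the plane + pixels to its left */
        return paddr + offset + (uint64_t)crtc_y * pitch +
               (uint64_t)crtc_x * bpp / 8;
}

int main(void)
{
        /* 1920x1080 XRGB8888 fb: pitch = 1920 * 4 bytes, plane at (10, 2) */
        printf("0x%llx\n", (unsigned long long)
               scanout_addr(0x80000000ull, 0, 7680, 32, 10, 2));
        return 0;
}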
static irqreturn_t hdlcd_irq(int irq, void *arg) atomic_inc(&hdlcd->vsync_count); #endif - if (irq_status & HDLCD_INTERRUPT_VSYNC) { - bool events_sent = false; - unsigned long flags; - struct drm_pending_vblank_event *e, *t; - + if (irq_status & HDLCD_INTERRUPT_VSYNC) drm_crtc_handle_vblank(&hdlcd->crtc); - spin_lock_irqsave(&drm->event_lock, flags); - list_for_each_entry_safe(e, t, &hdlcd->event_list, base.link) { - list_del(&e->base.link); - drm_crtc_send_vblank_event(&hdlcd->crtc, e); - events_sent = true; - } - if (events_sent) - drm_crtc_vblank_put(&hdlcd->crtc); - spin_unlock_irqrestore(&drm->event_lock, flags); - } - /* acknowledge interrupt(s) */ hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, irq_status); @@ -275,6 +254,7 @@ static int hdlcd_show_pxlclock(struct seq_file *m, void *arg) static struct drm_info_list hdlcd_debugfs_list[] = { { "interrupt_count", hdlcd_show_underrun_count, 0 }, { "clocks", hdlcd_show_pxlclock, 0 }, + { "fb", drm_fb_cma_debugfs_show, 0 }, }; static int hdlcd_debugfs_init(struct drm_minor *minor) @@ -357,6 +337,8 @@ static int hdlcd_drm_bind(struct device *dev) return -ENOMEM; drm->dev_private = hdlcd; + dev_set_drvdata(dev, drm); + hdlcd_setup_mode_config(drm); ret = hdlcd_load(drm, 0); if (ret) @@ -366,14 +348,18 @@ static int hdlcd_drm_bind(struct device *dev) if (ret) goto err_unload; - dev_set_drvdata(dev, drm); - ret = component_bind_all(dev, drm); if (ret) { DRM_ERROR("Failed to bind all components\n"); goto err_unregister; } + ret = pm_runtime_set_active(dev); + if (ret) + goto err_pm_active; + + pm_runtime_enable(dev); + ret = drm_vblank_init(drm, drm->mode_config.num_crtc); if (ret < 0) { DRM_ERROR("failed to initialise vblank\n"); @@ -399,16 +385,16 @@ err_fbdev: drm_mode_config_cleanup(drm); drm_vblank_cleanup(drm); err_vblank: + pm_runtime_disable(drm->dev); +err_pm_active: component_unbind_all(dev, drm); err_unregister: drm_dev_unregister(drm); err_unload: - pm_runtime_get_sync(drm->dev); drm_irq_uninstall(drm); - pm_runtime_put_sync(drm->dev); - pm_runtime_disable(drm->dev); of_reserved_mem_device_release(drm->dev); err_free: + dev_set_drvdata(dev, NULL); drm_dev_unref(drm); return ret; @@ -495,30 +481,34 @@ MODULE_DEVICE_TABLE(of, hdlcd_of_match); static int __maybe_unused hdlcd_pm_suspend(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); - struct drm_crtc *crtc; + struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL; - if (pm_runtime_suspended(dev)) + if (!hdlcd) return 0; - drm_modeset_lock_all(drm); - list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) - hdlcd_crtc_suspend(crtc); - drm_modeset_unlock_all(drm); + drm_kms_helper_poll_disable(drm); + + hdlcd->state = drm_atomic_helper_suspend(drm); + if (IS_ERR(hdlcd->state)) { + drm_kms_helper_poll_enable(drm); + return PTR_ERR(hdlcd->state); + } + return 0; } static int __maybe_unused hdlcd_pm_resume(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); - struct drm_crtc *crtc; + struct hdlcd_drm_private *hdlcd = drm ? 
drm->dev_private : NULL; - if (!pm_runtime_suspended(dev)) + if (!hdlcd) return 0; - drm_modeset_lock_all(drm); - list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) - hdlcd_crtc_resume(crtc); - drm_modeset_unlock_all(drm); + drm_atomic_helper_resume(drm, hdlcd->state); + drm_kms_helper_poll_enable(drm); + pm_runtime_set_active(dev); + return 0; } diff --git a/drivers/gpu/drm/arm/hdlcd_drv.h b/drivers/gpu/drm/arm/hdlcd_drv.h index aa234784f053..e3950a071152 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.h +++ b/drivers/gpu/drm/arm/hdlcd_drv.h @@ -9,10 +9,9 @@ struct hdlcd_drm_private { void __iomem *mmio; struct clk *clk; struct drm_fbdev_cma *fbdev; - struct drm_framebuffer *fb; - struct list_head event_list; struct drm_crtc crtc; struct drm_plane *plane; + struct drm_atomic_state *state; #ifdef CONFIG_DEBUG_FS atomic_t buffer_underrun_count; atomic_t bus_error_count; @@ -36,7 +35,5 @@ static inline u32 hdlcd_read(struct hdlcd_drm_private *hdlcd, unsigned int reg) int hdlcd_setup_crtc(struct drm_device *dev); void hdlcd_set_scanout(struct hdlcd_drm_private *hdlcd); -void hdlcd_crtc_suspend(struct drm_crtc *crtc); -void hdlcd_crtc_resume(struct drm_crtc *crtc); #endif /* __HDLCD_DRV_H__ */ diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index cf23a755f777..bd12231ab0cd 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c @@ -391,12 +391,11 @@ void atmel_hlcdc_crtc_reset(struct drm_crtc *crtc) { struct atmel_hlcdc_crtc_state *state; - if (crtc->state && crtc->state->mode_blob) - drm_property_unreference_blob(crtc->state->mode_blob); - if (crtc->state) { + __drm_atomic_helper_crtc_destroy_state(crtc->state); state = drm_crtc_state_to_atmel_hlcdc_crtc_state(crtc->state); kfree(state); + crtc->state = NULL; } state = kzalloc(sizeof(*state), GFP_KERNEL); @@ -415,8 +414,9 @@ atmel_hlcdc_crtc_duplicate_state(struct drm_crtc *crtc) return NULL; state = kmalloc(sizeof(*state), GFP_KERNEL); - if (state) - __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base); + if (!state) + return NULL; + __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base); cur = drm_crtc_state_to_atmel_hlcdc_crtc_state(crtc->state); state->output_mode = cur->output_mode; diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 3ff1ed7b33db..c204ef32df16 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -351,6 +351,8 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state, drm_property_unreference_blob(state->mode_blob); state->mode_blob = NULL; + memset(&state->mode, 0, sizeof(state->mode)); + if (blob) { if (blob->length != sizeof(struct drm_mode_modeinfo) || drm_mode_convert_umode(&state->mode, @@ -363,7 +365,6 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state, DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n", state->mode.name, state); } else { - memset(&state->mode, 0, sizeof(state->mode)); state->enable = false; DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n", state); diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index d2a6d958ca76..0e3cc66aa8b7 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -2821,8 +2821,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, goto out; } - drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); - /* * Check whether the primary plane supports the fb pixel format. 
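/*
 * The drm_atomic.c hunk above moves the memset() ahead of the blob checks so a
 * failed decode can never leave stale mode data behind in the CRTC state. The
 * pattern, reduced to a sketch with hypothetical types:
 */
#include <string.h>

struct mode_info { int hdisplay, vdisplay; };

static int decode_mode(struct mode_info *out, const void *blob, size_t len)
{
        /* clear the destination first: every error path below leaves *out
         * zeroed instead of holding the previously set mode */
        memset(out, 0, sizeof(*out));

        if (!blob || len != sizeof(*out))
                return -1;      /* -EINVAL in the kernel */
        memcpy(out, blob, sizeof(*out));
        return 0;
}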
* Drivers not implementing the universal planes API use a @@ -4841,7 +4839,8 @@ bool drm_property_change_valid_get(struct drm_property *property, if (value == 0) return true; - return _object_find(property->dev, value, property->values[0]) != NULL; + *ref = _object_find(property->dev, value, property->values[0]); + return *ref != NULL; } for (i = 0; i < property->num_values; i++) diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index a6e42433ef0e..26feb2f8453f 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -528,11 +528,11 @@ drm_crtc_helper_disable(struct drm_crtc *crtc) int drm_crtc_helper_set_config(struct drm_mode_set *set) { struct drm_device *dev; - struct drm_crtc *new_crtc; - struct drm_encoder *save_encoders, *new_encoder, *encoder; + struct drm_crtc **save_encoder_crtcs, *new_crtc; + struct drm_encoder **save_connector_encoders, *new_encoder, *encoder; bool mode_changed = false; /* if true do a full mode set */ bool fb_changed = false; /* if true and !mode_changed just do a flip */ - struct drm_connector *save_connectors, *connector; + struct drm_connector *connector; int count = 0, ro, fail = 0; const struct drm_crtc_helper_funcs *crtc_funcs; struct drm_mode_set save_set; @@ -574,15 +574,15 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) * Allocate space for the backup of all (non-pointer) encoder and * connector data. */ - save_encoders = kzalloc(dev->mode_config.num_encoder * - sizeof(struct drm_encoder), GFP_KERNEL); - if (!save_encoders) + save_encoder_crtcs = kzalloc(dev->mode_config.num_encoder * + sizeof(struct drm_crtc *), GFP_KERNEL); + if (!save_encoder_crtcs) return -ENOMEM; - save_connectors = kzalloc(dev->mode_config.num_connector * - sizeof(struct drm_connector), GFP_KERNEL); - if (!save_connectors) { - kfree(save_encoders); + save_connector_encoders = kzalloc(dev->mode_config.num_connector * + sizeof(struct drm_encoder *), GFP_KERNEL); + if (!save_connector_encoders) { + kfree(save_encoder_crtcs); return -ENOMEM; } @@ -593,12 +593,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) */ count = 0; drm_for_each_encoder(encoder, dev) { - save_encoders[count++] = *encoder; + save_encoder_crtcs[count++] = encoder->crtc; } count = 0; drm_for_each_connector(connector, dev) { - save_connectors[count++] = *connector; + save_connector_encoders[count++] = connector->encoder; } save_set.crtc = set->crtc; @@ -631,8 +631,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) mode_changed = true; } - /* take a reference on all connectors in set */ + /* take a reference on all unbound connectors in set, reuse the + * already taken reference for bound connectors + */ for (ro = 0; ro < set->num_connectors; ro++) { + if (set->connectors[ro]->encoder) + continue; drm_connector_reference(set->connectors[ro]); } @@ -754,30 +758,28 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) } } - /* after fail drop reference on all connectors in save set */ - count = 0; - drm_for_each_connector(connector, dev) { - drm_connector_unreference(&save_connectors[count++]); - } - - kfree(save_connectors); - kfree(save_encoders); + kfree(save_connector_encoders); + kfree(save_encoder_crtcs); return 0; fail: /* Restore all previous data. 
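/*
 * The set_config rework above snapshots only the pointers it actually mutates
 * (encoder->crtc, connector->encoder) rather than memcpy'ing whole structs,
 * whose embedded refcounts and list heads must not be rolled back wholesale.
 * Sketch of pointer-only save/restore, types pared down to the essentials:
 */
#include <stdlib.h>

struct crtc;
struct encoder { struct crtc *crtc; };

static int try_reconfig(struct encoder **encoders, size_t n)
{
        struct crtc **saved = calloc(n, sizeof(*saved));
        size_t i;

        if (!saved)
                return -1;      /* -ENOMEM */
        for (i = 0; i < n; i++)
                saved[i] = encoders[i]->crtc;   /* the one mutable field */

        /* ... attempt the new configuration; on failure, roll back: */
        for (i = 0; i < n; i++)
                encoders[i]->crtc = saved[i];   /* touches nothing else */

        free(saved);
        return 0;
}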
*/ count = 0; drm_for_each_encoder(encoder, dev) { - *encoder = save_encoders[count++]; + encoder->crtc = save_encoder_crtcs[count++]; } count = 0; drm_for_each_connector(connector, dev) { - *connector = save_connectors[count++]; + connector->encoder = save_connector_encoders[count++]; } - /* after fail drop reference on all connectors in set */ + /* after fail drop reference on all unbound connectors in set, let + * bound connectors keep their reference + */ for (ro = 0; ro < set->num_connectors; ro++) { + if (set->connectors[ro]->encoder) + continue; drm_connector_unreference(set->connectors[ro]); } @@ -787,8 +789,8 @@ fail: save_set.y, save_set.fb)) DRM_ERROR("failed to restore config after modeset failure\n"); - kfree(save_connectors); - kfree(save_encoders); + kfree(save_connector_encoders); + kfree(save_encoder_crtcs); return ret; } EXPORT_SYMBOL(drm_crtc_helper_set_config); diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c new file mode 100644 index 000000000000..a7b2a751f6fe --- /dev/null +++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c @@ -0,0 +1,366 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include <linux/errno.h> +#include <linux/export.h> +#include <linux/i2c.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <drm/drm_dp_dual_mode_helper.h> +#include <drm/drmP.h> + +/** + * DOC: dp dual mode helpers + * + * Helper functions to deal with DP dual mode (aka. DP++) adaptors. + * + * Type 1: + * Adaptor registers (if any) and the sink DDC bus may be accessed via I2C. + * + * Type 2: + * Adaptor registers and sink DDC bus can be accessed either via I2C or + * I2C-over-AUX. Source devices may choose to implement either of these + * access methods. + */ + +#define DP_DUAL_MODE_SLAVE_ADDRESS 0x40 + +/** + * drm_dp_dual_mode_read - Read from the DP dual mode adaptor register(s) + * @adapter: I2C adapter for the DDC bus + * @offset: register offset + * @buffer: buffer for return data + * @size: size of the buffer + * + * Reads @size bytes from the DP dual mode adaptor registers + * starting at @offset.
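/*
 * The read helper documented above (its body follows) bundles a one-byte
 * offset write and the data read into a single i2c_transfer() so no other
 * traffic can land between them. The same combined transaction issued from
 * userspace through /dev/i2c-N; the fd comes from open("/dev/i2c-N", O_RDWR)
 * and the device path is illustrative.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

#define DP_DUAL_MODE_ADDR 0x40

static int dual_mode_read(int fd, uint8_t offset, void *buf, uint16_t len)
{
        struct i2c_msg msgs[2] = {
                { .addr = DP_DUAL_MODE_ADDR, .flags = 0,
                  .len = 1, .buf = &offset },
                { .addr = DP_DUAL_MODE_ADDR, .flags = I2C_M_RD,
                  .len = len, .buf = buf },
        };
        struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };

        /* both messages go out back to back under the bus lock */
        return ioctl(fd, I2C_RDWR, &xfer) < 0 ? -1 : 0;
}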
+ * + * Returns: + * 0 on success, negative error code on failure + */ +ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter, + u8 offset, void *buffer, size_t size) +{ + struct i2c_msg msgs[] = { + { + .addr = DP_DUAL_MODE_SLAVE_ADDRESS, + .flags = 0, + .len = 1, + .buf = &offset, + }, + { + .addr = DP_DUAL_MODE_SLAVE_ADDRESS, + .flags = I2C_M_RD, + .len = size, + .buf = buffer, + }, + }; + int ret; + + ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs)); + if (ret < 0) + return ret; + if (ret != ARRAY_SIZE(msgs)) + return -EPROTO; + + return 0; +} +EXPORT_SYMBOL(drm_dp_dual_mode_read); + +/** + * drm_dp_dual_mode_write - Write to the DP dual mode adaptor register(s) + * @adapter: I2C adapter for the DDC bus + * @offset: register offset + * @buffer: buffer for write data + * @size: size of the buffer + * + * Writes @size bytes to the DP dual mode adaptor registers + * starting at @offset. + * + * Returns: + * 0 on success, negative error code on failure + */ +ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter, + u8 offset, const void *buffer, size_t size) +{ + struct i2c_msg msg = { + .addr = DP_DUAL_MODE_SLAVE_ADDRESS, + .flags = 0, + .len = 1 + size, + .buf = NULL, + }; + void *data; + int ret; + + data = kmalloc(msg.len, GFP_TEMPORARY); + if (!data) + return -ENOMEM; + + msg.buf = data; + + memcpy(data, &offset, 1); + memcpy(data + 1, buffer, size); + + ret = i2c_transfer(adapter, &msg, 1); + + kfree(data); + + if (ret < 0) + return ret; + if (ret != 1) + return -EPROTO; + + return 0; +} +EXPORT_SYMBOL(drm_dp_dual_mode_write); + +static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN]) +{ + static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] = + "DP-HDMI ADAPTOR\x04"; + + return memcmp(hdmi_id, dp_dual_mode_hdmi_id, + sizeof(dp_dual_mode_hdmi_id)) == 0; +} + +static bool is_type2_adaptor(uint8_t adaptor_id) +{ + return adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 | + DP_DUAL_MODE_REV_TYPE2); +} + +/** + * drm_dp_dual_mode_detect - Identify the DP dual mode adaptor + * @adapter: I2C adapter for the DDC bus + * + * Attempt to identify the type of the DP dual mode adaptor used. + * + * Note that when the answer is @DRM_DP_DUAL_MODE_UNKNOWN it's not + * certain whether we're dealing with a native HDMI port or + * a type 1 DVI dual mode adaptor. The driver will have to use + * some other hardware/driver specific mechanism to make that + * distinction. + * + * Returns: + * The type of the DP dual mode adaptor used + */ +enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter) +{ + char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] = {}; + uint8_t adaptor_id = 0x00; + ssize_t ret; + + /* + * Let's see if the adaptor is there by reading the + * HDMI ID registers. + * + * Note that type 1 DVI adaptors are not required to implement + * any registers, and that presents a problem for detection. + * If the i2c transfer is nacked, we may or may not be dealing + * with a type 1 DVI adaptor. Some other mechanism of detecting + * the presence of the adaptor is required. One way would be + * to check the state of the CONFIG1 pin. Another method would + * simply require the driver to know whether the port is a DP++ + * port or a native HDMI port. Both of these methods are entirely + * hardware/driver specific so we can't deal with them here. + */ + ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_HDMI_ID, + hdmi_id, sizeof(hdmi_id)); + if (ret) + return DRM_DP_DUAL_MODE_UNKNOWN; + + /* + * Sigh. Some (maybe all?)
type 1 adaptors are broken and ack + the offset but ignore it, and instead they just always return + data from the start of the HDMI ID buffer. So for a broken + type 1 HDMI adaptor a single byte read will always give us + 0x44, and for a type 1 DVI adaptor it should give 0x00 + (assuming it implements any registers). Fortunately neither + of those values will match the type 2 signature of the + DP_DUAL_MODE_ADAPTOR_ID register so we can proceed with + the type 2 adaptor detection safely even in the presence + of broken type 1 adaptors. + */ + ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID, + &adaptor_id, sizeof(adaptor_id)); + if (ret == 0) { + if (is_type2_adaptor(adaptor_id)) { + if (is_hdmi_adaptor(hdmi_id)) + return DRM_DP_DUAL_MODE_TYPE2_HDMI; + else + return DRM_DP_DUAL_MODE_TYPE2_DVI; + } + } + + if (is_hdmi_adaptor(hdmi_id)) + return DRM_DP_DUAL_MODE_TYPE1_HDMI; + else + return DRM_DP_DUAL_MODE_TYPE1_DVI; +} +EXPORT_SYMBOL(drm_dp_dual_mode_detect); + +/** + * drm_dp_dual_mode_max_tmds_clock - Max TMDS clock for DP dual mode adaptor + * @type: DP dual mode adaptor type + * @adapter: I2C adapter for the DDC bus + * + * Determine the max TMDS clock the adaptor supports based on the + * type of the dual mode adaptor and the DP_DUAL_MODE_MAX_TMDS_CLOCK + * register (on type2 adaptors). As some type 1 adaptors have + * problems with registers (see comments in drm_dp_dual_mode_detect()) + * we don't read the register on those, instead we simply assume + * a 165 MHz limit based on the specification. + * + * Returns: + * Maximum supported TMDS clock rate for the DP dual mode adaptor in kHz. + */ +int drm_dp_dual_mode_max_tmds_clock(enum drm_dp_dual_mode_type type, + struct i2c_adapter *adapter) +{ + uint8_t max_tmds_clock; + ssize_t ret; + + /* native HDMI so no limit */ + if (type == DRM_DP_DUAL_MODE_NONE) + return 0; + + /* + * Type 1 adaptors are limited to 165MHz + * Type 2 adaptors can tell us their limit + */ + if (type < DRM_DP_DUAL_MODE_TYPE2_DVI) + return 165000; + + ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_MAX_TMDS_CLOCK, + &max_tmds_clock, sizeof(max_tmds_clock)); + if (ret || max_tmds_clock == 0x00 || max_tmds_clock == 0xff) { + DRM_DEBUG_KMS("Failed to query max TMDS clock\n"); + return 165000; + } + + return max_tmds_clock * 5000 / 2; +} +EXPORT_SYMBOL(drm_dp_dual_mode_max_tmds_clock); + +/** + * drm_dp_dual_mode_get_tmds_output - Get the state of the TMDS output buffers in the DP dual mode adaptor + * @type: DP dual mode adaptor type + * @adapter: I2C adapter for the DDC bus + * @enabled: current state of the TMDS output buffers + * + * Get the state of the TMDS output buffers in the adaptor. For + * type2 adaptors this is queried from the DP_DUAL_MODE_TMDS_OEN + * register. As some type 1 adaptors have problems with registers + * (see comments in drm_dp_dual_mode_detect()) we don't read the + * register on those, instead we simply assume that the buffers + * are always enabled.
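/*
 * drm_dp_dual_mode_max_tmds_clock() above converts the
 * DP_DUAL_MODE_MAX_TMDS_CLOCK register, which counts in 2.5 MHz units, into
 * kHz, treating 0x00/0xff as "register not implemented". The conversion on
 * its own:
 */
static int max_tmds_clock_khz(unsigned int reg)
{
        if (reg == 0x00 || reg == 0xff)
                return 165000;          /* fall back to the type 1 limit */
        return reg * 5000 / 2;          /* 2.5 MHz units -> kHz */
}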
+ * + * Returns: + * 0 on success, negative error code on failure + */ +int drm_dp_dual_mode_get_tmds_output(enum drm_dp_dual_mode_type type, + struct i2c_adapter *adapter, + bool *enabled) +{ + uint8_t tmds_oen; + ssize_t ret; + + if (type < DRM_DP_DUAL_MODE_TYPE2_DVI) { + *enabled = true; + return 0; + } + + ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN, + &tmds_oen, sizeof(tmds_oen)); + if (ret) { + DRM_DEBUG_KMS("Failed to query state of TMDS output buffers\n"); + return ret; + } + + *enabled = !(tmds_oen & DP_DUAL_MODE_TMDS_DISABLE); + + return 0; +} +EXPORT_SYMBOL(drm_dp_dual_mode_get_tmds_output); + +/** + * drm_dp_dual_mode_set_tmds_output - Enable/disable TMDS output buffers in the DP dual mode adaptor + * @type: DP dual mode adaptor type + * @adapter: I2C adapter for the DDC bus + * @enable: enable (as opposed to disable) the TMDS output buffers + * + * Set the state of the TMDS output buffers in the adaptor. For + * type2 this is set via the DP_DUAL_MODE_TMDS_OEN register. As + * some type 1 adaptors have problems with registers (see comments + * in drm_dp_dual_mode_detect()) we avoid touching the register, + * making this function a no-op on type 1 adaptors. + * + * Returns: + * 0 on success, negative error code on failure + */ +int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type, + struct i2c_adapter *adapter, bool enable) +{ + uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE; + ssize_t ret; + + if (type < DRM_DP_DUAL_MODE_TYPE2_DVI) + return 0; + + ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN, + &tmds_oen, sizeof(tmds_oen)); + if (ret) { + DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n", + enable ? "enable" : "disable"); + return ret; + } + + return 0; +} +EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output); + +/** + * drm_dp_get_dual_mode_type_name - Get the name of the DP dual mode adaptor type as a string + * @type: DP dual mode adaptor type + * + * Returns: + * String representation of the DP dual mode adaptor type + */ +const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type) +{ + switch (type) { + case DRM_DP_DUAL_MODE_NONE: + return "none"; + case DRM_DP_DUAL_MODE_TYPE1_DVI: + return "type 1 DVI"; + case DRM_DP_DUAL_MODE_TYPE1_HDMI: + return "type 1 HDMI"; + case DRM_DP_DUAL_MODE_TYPE2_DVI: + return "type 2 DVI"; + case DRM_DP_DUAL_MODE_TYPE2_HDMI: + return "type 2 HDMI"; + default: + WARN_ON(type != DRM_DP_DUAL_MODE_UNKNOWN); + return "unknown"; + } +} +EXPORT_SYMBOL(drm_dp_get_dual_mode_type_name); diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index a13edf5de2d6..6537908050d7 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -2927,11 +2927,9 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) drm_dp_port_teardown_pdt(port, port->pdt); if (!port->input && port->vcpi.vcpi > 0) { - if (mgr->mst_state) { - drm_dp_mst_reset_vcpi_slots(mgr, port); - drm_dp_update_payload_part1(mgr); - drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); - } + drm_dp_mst_reset_vcpi_slots(mgr, port); + drm_dp_update_payload_part1(mgr); + drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); } kref_put(&port->kref, drm_dp_free_mst_port); diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c index 172cafe11c71..5075fae3c4e2 100644 --- a/drivers/gpu/drm/drm_fb_cma_helper.c +++ b/drivers/gpu/drm/drm_fb_cma_helper.c @@ -445,7 +445,7 @@ err_cma_destroy: err_fb_info_destroy: 
drm_fb_helper_release_fbi(helper); err_gem_free_object: - dev->driver->gem_free_object(&obj->base); + drm_gem_object_unreference_unlocked(&obj->base); return ret; } EXPORT_SYMBOL(drm_fbdev_cma_create_with_funcs); diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index e1ab008b3f08..1d6c335584ec 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -121,7 +121,7 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, return cma_obj; error: - drm->driver->gem_free_object(&cma_obj->base); + drm_gem_object_unreference_unlocked(&cma_obj->base); return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(drm_gem_cma_create); @@ -162,18 +162,12 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv, * and handle has the id what user can see. */ ret = drm_gem_handle_create(file_priv, gem_obj, handle); - if (ret) - goto err_handle_create; - /* drop reference from allocate - handle holds it now. */ drm_gem_object_unreference_unlocked(gem_obj); + if (ret) + return ERR_PTR(ret); return cma_obj; - -err_handle_create: - drm->driver->gem_free_object(gem_obj); - - return ERR_PTR(ret); } /** diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 7def3d58da18..e5e6f504d8cc 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -1518,6 +1518,8 @@ int drm_mode_convert_umode(struct drm_display_mode *out, if (out->status != MODE_OK) goto out; + drm_mode_set_crtcinfo(out, CRTC_INTERLACE_HALVE_V); + ret = 0; out: diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index 0ec1ad961e0d..dc723f7ead7d 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c @@ -42,9 +42,10 @@ static const struct regmap_config fsl_dcu_regmap_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, - .cache_type = REGCACHE_RBTREE, + .cache_type = REGCACHE_FLAT, .volatile_reg = fsl_dcu_drm_is_volatile_reg, + .max_register = 0x11fc, }; static int fsl_dcu_drm_irq_init(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 15615fb9bde6..b3198fcd0536 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1183,6 +1183,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) if (ret) return ret; + ret = i915_ggtt_enable_hw(dev); + if (ret) { + DRM_ERROR("failed to enable GGTT\n"); + goto out_ggtt; + } + /* WARNING: Apparently we must kick fbdev drivers before vgacon, * otherwise the vga fbdev driver falls over. 
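/*
 * The CMA helper hunks above stop calling the driver's gem_free_object()
 * directly on error paths and drop a reference instead, so an object whose
 * handle was already created is not freed behind the handle's back. A tiny
 * refcount sketch of why the put is safe where the direct free was not:
 */
#include <stdlib.h>

struct gem_obj { int refcount; };

static void gem_unref(struct gem_obj *obj)
{
        /* only the final put frees; any handle still holding a reference
         * keeps the object alive */
        if (--obj->refcount == 0)
                free(obj);      /* the real code runs the GEM destructor */
}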
*/ ret = i915_kick_out_firmware_fb(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index d37c0a671eed..f313b4d8344f 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -734,9 +734,14 @@ int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state) static int i915_drm_resume(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + int ret; disable_rpm_wakeref_asserts(dev_priv); + ret = i915_ggtt_enable_hw(dev); + if (ret) + DRM_ERROR("failed to re-enable GGTT\n"); + intel_csr_ucode_resume(dev_priv); mutex_lock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b87ca4fae20a..7c334e902266 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -3481,7 +3481,9 @@ int intel_bios_init(struct drm_i915_private *dev_priv); bool intel_bios_is_valid_vbt(const void *buf, size_t size); bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); +bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port); bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port); +bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port); bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port); bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv, enum port port); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 9b99490e8367..aad26851cee3 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1456,7 +1456,10 @@ i915_wait_request(struct drm_i915_gem_request *req) if (ret) return ret; - __i915_gem_request_retire__upto(req); + /* If the GPU hung, we want to keep the requests to find the guilty. */ + if (req->reset_counter == i915_reset_counter(&dev_priv->gpu_error)) + __i915_gem_request_retire__upto(req); + return 0; } @@ -1513,7 +1516,8 @@ i915_gem_object_retire_request(struct drm_i915_gem_object *obj, else if (obj->last_write_req == req) i915_gem_object_retire__write(obj); - __i915_gem_request_retire__upto(req); + if (req->reset_counter == i915_reset_counter(&req->i915->gpu_error)) + __i915_gem_request_retire__upto(req); } /* A nonblocking variant of the above wait. 
This is a highly dangerous routine @@ -4860,9 +4864,6 @@ i915_gem_init_hw(struct drm_device *dev) struct intel_engine_cs *engine; int ret, j; - if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) - return -EIO; - /* Double layer security blanket, see i915_gem_init() */ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 0d666b3f7e9b..92acdff9dad3 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -3236,6 +3236,14 @@ out_gtt_cleanup: return ret; } +int i915_ggtt_enable_hw(struct drm_device *dev) +{ + if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) + return -EIO; + + return 0; +} + void i915_gem_restore_gtt_mappings(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index d7dd3d8a8758..0008543d55f6 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -514,6 +514,7 @@ i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n) } int i915_ggtt_init_hw(struct drm_device *dev); +int i915_ggtt_enable_hw(struct drm_device *dev); void i915_gem_init_ggtt(struct drm_device *dev); void i915_ggtt_cleanup_hw(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index e72dd9a8d6bf..b9022fa053d6 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -139,6 +139,11 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode, else panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC; + panel_fixed_mode->width_mm = (dvo_timing->himage_hi << 8) | + dvo_timing->himage_lo; + panel_fixed_mode->height_mm = (dvo_timing->vimage_hi << 8) | + dvo_timing->vimage_lo; + /* Some VBTs have bogus h/vtotal values */ if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1; @@ -1187,7 +1192,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv, } if (bdb->version < 106) { expected_size = 22; - } else if (bdb->version < 109) { + } else if (bdb->version < 111) { expected_size = 27; } else if (bdb->version < 195) { BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33); @@ -1546,6 +1551,45 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin) } /** + * intel_bios_is_port_present - is the specified digital port present + * @dev_priv: i915 device instance + * @port: port to check + * + * Return true if the device in %port is present. + */ +bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port) +{ + static const struct { + u16 dp, hdmi; + } port_mapping[] = { + [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, }, + [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, }, + [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, }, + [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, }, + }; + int i; + + /* FIXME maybe deal with port A as well? 
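/*
 * intel_bios_is_port_present() above indexes a small lookup table by enum
 * port using designated array initializers, leaving unlisted ports
 * zero-filled. The idiom as a runnable example; the DVO ids here are made up.
 */
#include <stdbool.h>
#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D };

static const struct { unsigned short dp, hdmi; } port_map[] = {
        /* no [PORT_A] entry: it stays zeroed and never matches */
        [PORT_B] = { 0x07, 0x01 },
        [PORT_C] = { 0x08, 0x02 },
        [PORT_D] = { 0x09, 0x03 },
};

static bool port_mapped(enum port p)
{
        if ((size_t)p >= sizeof(port_map) / sizeof(port_map[0]))
                return false;
        return port_map[p].dp || port_map[p].hdmi;
}

int main(void)
{
        printf("PORT_A: %d, PORT_C: %d\n",
               port_mapped(PORT_A), port_mapped(PORT_C));
        return 0;
}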
*/ + if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping)) + return false; + + if (!dev_priv->vbt.child_dev_num) + return false; + + for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { + const union child_device_config *p_child = + &dev_priv->vbt.child_dev[i]; + if ((p_child->common.dvo_port == port_mapping[port].dp || + p_child->common.dvo_port == port_mapping[port].hdmi) && + (p_child->common.device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING | + DEVICE_TYPE_DISPLAYPORT_OUTPUT))) + return true; + } + + return false; +} + +/** * intel_bios_is_port_edp - is the device in given port eDP * @dev_priv: i915 device instance * @port: port to check @@ -1578,6 +1622,42 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port) return false; } +bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port) +{ + static const struct { + u16 dp, hdmi; + } port_mapping[] = { + /* + * Buggy VBTs may declare DP ports as having + * HDMI type dvo_port :( So let's check both. + */ + [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, }, + [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, }, + [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, }, + [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, }, + }; + int i; + + if (port == PORT_A || port >= ARRAY_SIZE(port_mapping)) + return false; + + if (!dev_priv->vbt.child_dev_num) + return false; + + for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { + const union child_device_config *p_child = + &dev_priv->vbt.child_dev[i]; + + if ((p_child->common.dvo_port == port_mapping[port].dp || + p_child->common.dvo_port == port_mapping[port].hdmi) && + (p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) == + (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS)) + return true; + } + + return false; +} + /** * intel_bios_is_dsi_present - is DSI present in VBT * @dev_priv: i915 device instance diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 3fac04602a25..01e523df363b 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1601,6 +1601,12 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) enum port port = intel_ddi_get_encoder_port(intel_encoder); int type = intel_encoder->type; + if (type == INTEL_OUTPUT_HDMI) { + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + + intel_dp_dual_mode_set_tmds_output(intel_hdmi, true); + } + intel_prepare_ddi_buffer(intel_encoder); if (type == INTEL_OUTPUT_EDP) { @@ -1667,6 +1673,12 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder) DPLL_CTRL2_DDI_CLK_OFF(port))); else if (INTEL_INFO(dev)->gen < 9) I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE); + + if (type == INTEL_OUTPUT_HDMI) { + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + + intel_dp_dual_mode_set_tmds_output(intel_hdmi, false); + } } static void intel_enable_ddi(struct intel_encoder *intel_encoder) @@ -2180,8 +2192,10 @@ void intel_ddi_get_config(struct intel_encoder *encoder, if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config)) pipe_config->has_infoframe = true; - break; + /* fall through */ case TRANS_DDI_MODE_SELECT_DVI: + pipe_config->lane_count = 4; + break; case TRANS_DDI_MODE_SELECT_FDI: break; case TRANS_DDI_MODE_SELECT_DP_SST: diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 46f9be3ad5a2..56a1637c864f 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -8275,12 +8275,14 @@ static void 
ironlake_init_pch_refclk(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_encoder *encoder; + int i; u32 val, final; bool has_lvds = false; bool has_cpu_edp = false; bool has_panel = false; bool has_ck505 = false; bool can_ssc = false; + bool using_ssc_source = false; /* We need to take the global config into account */ for_each_intel_encoder(dev, encoder) { @@ -8307,8 +8309,22 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) can_ssc = true; } - DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n", - has_panel, has_lvds, has_ck505); + /* Check if any DPLLs are using the SSC source */ + for (i = 0; i < dev_priv->num_shared_dpll; i++) { + u32 temp = I915_READ(PCH_DPLL(i)); + + if (!(temp & DPLL_VCO_ENABLE)) + continue; + + if ((temp & PLL_REF_INPUT_MASK) == + PLLB_REF_INPUT_SPREADSPECTRUMIN) { + using_ssc_source = true; + break; + } + } + + DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n", + has_panel, has_lvds, has_ck505, using_ssc_source); /* Ironlake: try to setup display ref clock before DPLL * enabling. This is only under driver's control after @@ -8345,9 +8361,9 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; } else final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; - } else { - final |= DREF_SSC_SOURCE_DISABLE; - final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; + } else if (using_ssc_source) { + final |= DREF_SSC_SOURCE_ENABLE; + final |= DREF_SSC1_ENABLE; } if (final == val) @@ -8393,7 +8409,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) POSTING_READ(PCH_DREF_CONTROL); udelay(200); } else { - DRM_DEBUG_KMS("Disabling SSC entirely\n"); + DRM_DEBUG_KMS("Disabling CPU source output\n"); val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; @@ -8404,16 +8420,20 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) POSTING_READ(PCH_DREF_CONTROL); udelay(200); - /* Turn off the SSC source */ - val &= ~DREF_SSC_SOURCE_MASK; - val |= DREF_SSC_SOURCE_DISABLE; + if (!using_ssc_source) { + DRM_DEBUG_KMS("Disabling SSC source\n"); - /* Turn off SSC1 */ - val &= ~DREF_SSC1_ENABLE; + /* Turn off the SSC source */ + val &= ~DREF_SSC_SOURCE_MASK; + val |= DREF_SSC_SOURCE_DISABLE; - I915_WRITE(PCH_DREF_CONTROL, val); - POSTING_READ(PCH_DREF_CONTROL); - udelay(200); + /* Turn off SSC1 */ + val &= ~DREF_SSC1_ENABLE; + + I915_WRITE(PCH_DREF_CONTROL, val); + POSTING_READ(PCH_DREF_CONTROL); + udelay(200); + } } BUG_ON(val != final); @@ -12005,6 +12025,9 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n"); return ret; } + } else if (dev_priv->display.compute_intermediate_wm) { + if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9) + pipe_config->wm.intermediate = pipe_config->wm.optimal.ilk; } if (INTEL_INFO(dev)->gen >= 9) { @@ -14551,6 +14574,8 @@ static void intel_setup_outputs(struct drm_device *dev) if (I915_READ(PCH_DP_D) & DP_DETECTED) intel_dp_init(dev, PCH_DP_D, PORT_D); } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { + bool has_edp, has_port; + /* * The DP_DETECTED bit is the latched state of the DDC * SDA pin at boot. However since eDP doesn't require DDC @@ -14559,27 +14584,37 @@ static void intel_setup_outputs(struct drm_device *dev) * Thus we can't rely on the DP_DETECTED bit alone to detect * eDP ports. Consult the VBT as well as DP_DETECTED to * detect eDP ports. + * + * Sadly the straps seem to be missing sometimes even for HDMI + * ports (eg. 
on Voyo V3 - CHT x7-Z8700), so check both strap + * and VBT for the presence of the port. Additionally we can't + * trust the port type the VBT declares as we've seen at least + * HDMI ports that the VBT claim are DP or eDP. */ - if (I915_READ(VLV_HDMIB) & SDVO_DETECTED && - !intel_dp_is_edp(dev, PORT_B)) + has_edp = intel_dp_is_edp(dev, PORT_B); + has_port = intel_bios_is_port_present(dev_priv, PORT_B); + if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port) + has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B); + if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) intel_hdmi_init(dev, VLV_HDMIB, PORT_B); - if (I915_READ(VLV_DP_B) & DP_DETECTED || - intel_dp_is_edp(dev, PORT_B)) - intel_dp_init(dev, VLV_DP_B, PORT_B); - if (I915_READ(VLV_HDMIC) & SDVO_DETECTED && - !intel_dp_is_edp(dev, PORT_C)) + has_edp = intel_dp_is_edp(dev, PORT_C); + has_port = intel_bios_is_port_present(dev_priv, PORT_C); + if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port) + has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C); + if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) intel_hdmi_init(dev, VLV_HDMIC, PORT_C); - if (I915_READ(VLV_DP_C) & DP_DETECTED || - intel_dp_is_edp(dev, PORT_C)) - intel_dp_init(dev, VLV_DP_C, PORT_C); if (IS_CHERRYVIEW(dev)) { - /* eDP not supported on port D, so don't check VBT */ - if (I915_READ(CHV_HDMID) & SDVO_DETECTED) - intel_hdmi_init(dev, CHV_HDMID, PORT_D); - if (I915_READ(CHV_DP_D) & DP_DETECTED) + /* + * eDP not supported on port D, + * so no need to worry about it + */ + has_port = intel_bios_is_port_present(dev_priv, PORT_D); + if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port) intel_dp_init(dev, CHV_DP_D, PORT_D); + if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port) + intel_hdmi_init(dev, CHV_HDMID, PORT_D); } intel_dsi_init(dev); @@ -15990,6 +16025,9 @@ retry: state->acquire_ctx = &ctx; + /* ignore any reset values/BIOS leftovers in the WM registers */ + to_intel_atomic_state(state)->skip_intermediate_wm = true; + for_each_crtc_in_state(state, crtc, crtc_state, i) { /* * Force recalculation even if we restore diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index f192f58708c2..ffe5f8430957 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -5725,8 +5725,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) { fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode); - if (fixed_mode) + if (fixed_mode) { fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; + connector->display_info.width_mm = fixed_mode->width_mm; + connector->display_info.height_mm = fixed_mode->height_mm; + } } mutex_unlock(&dev->mode_config.mutex); @@ -5923,9 +5926,9 @@ fail: return false; } -void -intel_dp_init(struct drm_device *dev, - i915_reg_t output_reg, enum port port) +bool intel_dp_init(struct drm_device *dev, + i915_reg_t output_reg, + enum port port) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_digital_port *intel_dig_port; @@ -5935,7 +5938,7 @@ intel_dp_init(struct drm_device *dev, intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL); if (!intel_dig_port) - return; + return false; intel_connector = intel_connector_alloc(); if (!intel_connector) @@ -5992,7 +5995,7 @@ intel_dp_init(struct drm_device *dev, if (!intel_dp_init_connector(intel_dig_port, intel_connector)) goto err_init_connector; - return; + return true; err_init_connector: drm_encoder_cleanup(encoder); @@ -6000,8 +6003,7 @@ 
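/*
 * intel_dp_init() now reports success, which lets the VLV/CHV output setup
 * above register HDMI on a port only when DP/eDP probing did not claim it
 * (the has_edp &= intel_dp_init(...) dance). The control flow, abstracted
 * with illustrative stubs:
 */
#include <stdbool.h>

static bool dp_init(int port) { return port != 2; }     /* stub probe */
static void hdmi_init(int port) { (void)port; }

static void setup_port(int port, bool strap, bool vbt_has_port, bool vbt_edp)
{
        bool has_edp = vbt_edp;

        if (strap || vbt_has_port)
                has_edp &= dp_init(port);       /* false if DP init failed */
        if ((strap || vbt_has_port) && !has_edp)
                hdmi_init(port);                /* fall back to HDMI */
}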
err_encoder_init: kfree(intel_connector); err_connector_alloc: kfree(intel_dig_port); - - return; + return false; } void intel_dp_mst_suspend(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index 639bf0209c15..baf6f5584cbd 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c @@ -366,6 +366,9 @@ ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, DPLL_ID_PCH_PLL_B); } + if (!pll) + return NULL; + /* reference the pll */ intel_reference_shared_dpll(pll, crtc_state); @@ -1702,9 +1705,9 @@ static const struct intel_dpll_mgr hsw_pll_mgr = { static const struct dpll_info skl_plls[] = { { "DPLL 0", DPLL_ID_SKL_DPLL0, &skl_ddi_dpll0_funcs, INTEL_DPLL_ALWAYS_ON }, - { "DPPL 1", DPLL_ID_SKL_DPLL1, &skl_ddi_pll_funcs, 0 }, - { "DPPL 2", DPLL_ID_SKL_DPLL2, &skl_ddi_pll_funcs, 0 }, - { "DPPL 3", DPLL_ID_SKL_DPLL3, &skl_ddi_pll_funcs, 0 }, + { "DPLL 1", DPLL_ID_SKL_DPLL1, &skl_ddi_pll_funcs, 0 }, + { "DPLL 2", DPLL_ID_SKL_DPLL2, &skl_ddi_pll_funcs, 0 }, + { "DPLL 3", DPLL_ID_SKL_DPLL3, &skl_ddi_pll_funcs, 0 }, { NULL, -1, NULL, }, }; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 5da29a02b9e3..4a24b0067a3a 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -33,6 +33,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_fb_helper.h> +#include <drm/drm_dp_dual_mode_helper.h> #include <drm/drm_dp_mst_helper.h> #include <drm/drm_rect.h> #include <drm/drm_atomic.h> @@ -753,6 +754,10 @@ struct cxsr_latency { struct intel_hdmi { i915_reg_t hdmi_reg; int ddc_bus; + struct { + enum drm_dp_dual_mode_type type; + int max_tmds_clock; + } dp_dual_mode; bool limited_color_range; bool color_range_auto; bool has_hdmi_sink; @@ -1279,7 +1284,7 @@ void intel_csr_ucode_suspend(struct drm_i915_private *); void intel_csr_ucode_resume(struct drm_i915_private *); /* intel_dp.c */ -void intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port); +bool intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port); bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port, struct intel_connector *intel_connector); void intel_dp_set_link_params(struct intel_dp *intel_dp, @@ -1401,6 +1406,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); bool intel_hdmi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config); +void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable); /* intel_lvds.c */ diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index 2b22bb9bb86f..4756ef639648 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c @@ -46,6 +46,22 @@ static const struct { }, }; +/* return pixels in terms of txbyteclkhs */ +static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count, + u16 burst_mode_ratio) +{ + return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio, + 8 * 100), lane_count); +} + +/* return pixels equivalent to txbyteclkhs */ +static u16 pixels_from_txbyteclkhs(u16 clk_hs, int bpp, int lane_count, + u16 burst_mode_ratio) +{ + return DIV_ROUND_UP((clk_hs * lane_count * 8 * 100), + (bpp * burst_mode_ratio)); +} + enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt) { /* It just so happens the VBT matches register contents.
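/*
 * The two helpers above convert between pixels and txbyteclkhs with
 * DIV_ROUND_UP; burst_mode_ratio is pre-scaled by 100. A runnable version of
 * both directions, plus the round-trip comparison used below to snap a
 * hardware readback onto the software state when the two only differ by
 * rounding (the dual-link halving/doubling is omitted for brevity):
 */
#include <stdint.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static uint16_t txbyteclkhs(uint16_t pixels, int bpp, int lanes,
                            uint16_t ratio)
{
        return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * ratio, 8 * 100),
                            lanes);
}

static uint16_t pixels_from_txbyteclkhs(uint16_t clk, int bpp, int lanes,
                                        uint16_t ratio)
{
        return DIV_ROUND_UP(clk * lanes * 8 * 100, bpp * ratio);
}

/* what a software timing value becomes after being programmed to the port
 * and read back; if the hardware readback matches, report the software
 * value so the rounding error cancels out */
static uint16_t snap_to_sw(uint16_t hw, uint16_t sw, int bpp, int lanes,
                           uint16_t ratio)
{
        uint16_t expected = pixels_from_txbyteclkhs(
                        txbyteclkhs(sw, bpp, lanes, ratio),
                        bpp, lanes, ratio);

        return hw == expected ? sw : hw;
}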
*/ @@ -780,10 +796,19 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = dev->dev_private; struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; + struct drm_display_mode *adjusted_mode_sw; + struct intel_crtc *intel_crtc; struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + unsigned int lane_count = intel_dsi->lane_count; unsigned int bpp, fmt; enum port port; - u16 vfp, vsync, vbp; + u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp; + u16 hfp_sw, hsync_sw, hbp_sw; + u16 crtc_htotal_sw, crtc_hsync_start_sw, crtc_hsync_end_sw, + crtc_hblank_start_sw, crtc_hblank_end_sw; + + intel_crtc = to_intel_crtc(encoder->base.crtc); + adjusted_mode_sw = &intel_crtc->config->base.adjusted_mode; /* * At least one port is active, as encoder->get_config is called only if @@ -808,26 +833,118 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, adjusted_mode->crtc_vtotal = I915_READ(BXT_MIPI_TRANS_VTOTAL(port)); + hactive = adjusted_mode->crtc_hdisplay; + hfp = I915_READ(MIPI_HFP_COUNT(port)); + /* - * TODO: Retrieve hfp, hsync and hbp. Adjust them for dual link and - * calculate hsync_start, hsync_end, htotal and hblank_end + * Meaningful for video mode non-burst sync pulse mode only, + * can be zero for non-burst sync events and burst modes */ + hsync = I915_READ(MIPI_HSYNC_PADDING_COUNT(port)); + hbp = I915_READ(MIPI_HBP_COUNT(port)); + + /* horizontal values are in terms of high speed byte clock */ + hfp = pixels_from_txbyteclkhs(hfp, bpp, lane_count, + intel_dsi->burst_mode_ratio); + hsync = pixels_from_txbyteclkhs(hsync, bpp, lane_count, + intel_dsi->burst_mode_ratio); + hbp = pixels_from_txbyteclkhs(hbp, bpp, lane_count, + intel_dsi->burst_mode_ratio); + + if (intel_dsi->dual_link) { + hfp *= 2; + hsync *= 2; + hbp *= 2; + } /* vertical values are in terms of lines */ vfp = I915_READ(MIPI_VFP_COUNT(port)); vsync = I915_READ(MIPI_VSYNC_PADDING_COUNT(port)); vbp = I915_READ(MIPI_VBP_COUNT(port)); + adjusted_mode->crtc_htotal = hactive + hfp + hsync + hbp; + adjusted_mode->crtc_hsync_start = hfp + adjusted_mode->crtc_hdisplay; + adjusted_mode->crtc_hsync_end = hsync + adjusted_mode->crtc_hsync_start; adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hdisplay; + adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_htotal; - adjusted_mode->crtc_vsync_start = - vfp + adjusted_mode->crtc_vdisplay; - adjusted_mode->crtc_vsync_end = - vsync + adjusted_mode->crtc_vsync_start; + adjusted_mode->crtc_vsync_start = vfp + adjusted_mode->crtc_vdisplay; + adjusted_mode->crtc_vsync_end = vsync + adjusted_mode->crtc_vsync_start; adjusted_mode->crtc_vblank_start = adjusted_mode->crtc_vdisplay; adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vtotal; -} + /* + * In BXT DSI, a few horizontal timings are programmed not in pixels + * but in txbyteclkhs, so the retrieval process adds ROUND_UP errors + * in the PIXELS<==>txbyteclkhs conversions. Here, for the given + * adjusted_mode, we calculate the value programmed to the port and + * then convert it back to the horizontal timing param in pixels. This + * is the expected value, including the roundup errors, and if it is + * the same as the value retrieved from the port, then the (HW state) + * adjusted_mode's horizontal timings are corrected to + * match with the SW state to nullify the errors.
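Both helpers round up, so a value pushed through pixels -> txbyteclkhs -> pixels can grow; the comparison described in the comment above detects exactly that. A simplified restatement for the front porch alone, with hfp_hw and hfp_rt as illustrative names and hfp_sw being the SW-state front porch in pixels (hsync and hbp follow the same pattern):

	/* HW readout, already converted back to pixels */
	u16 hfp_hw = hfp;

	/* SW-state value pushed through the same lossy round trip */
	u16 hfp_rt = pixels_from_txbyteclkhs(txbyteclkhs(hfp_sw, bpp, lane_count,
							 intel_dsi->burst_mode_ratio),
					     bpp, lane_count,
					     intel_dsi->burst_mode_ratio);

	/* if they agree, the difference is pure rounding: trust SW state */
	if (hfp_hw == hfp_rt)
		adjusted_mode->crtc_hsync_start = adjusted_mode_sw->crtc_hsync_start;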
+ */ + /* Calculating the value programmed to the Port register */ + hfp_sw = adjusted_mode_sw->crtc_hsync_start - + adjusted_mode_sw->crtc_hdisplay; + hsync_sw = adjusted_mode_sw->crtc_hsync_end - + adjusted_mode_sw->crtc_hsync_start; + hbp_sw = adjusted_mode_sw->crtc_htotal - + adjusted_mode_sw->crtc_hsync_end; + + if (intel_dsi->dual_link) { + hfp_sw /= 2; + hsync_sw /= 2; + hbp_sw /= 2; + } + + hfp_sw = txbyteclkhs(hfp_sw, bpp, lane_count, + intel_dsi->burst_mode_ratio); + hsync_sw = txbyteclkhs(hsync_sw, bpp, lane_count, + intel_dsi->burst_mode_ratio); + hbp_sw = txbyteclkhs(hbp_sw, bpp, lane_count, + intel_dsi->burst_mode_ratio); + + /* Reverse calculating the adjusted mode parameters from port reg vals*/ + hfp_sw = pixels_from_txbyteclkhs(hfp_sw, bpp, lane_count, + intel_dsi->burst_mode_ratio); + hsync_sw = pixels_from_txbyteclkhs(hsync_sw, bpp, lane_count, + intel_dsi->burst_mode_ratio); + hbp_sw = pixels_from_txbyteclkhs(hbp_sw, bpp, lane_count, + intel_dsi->burst_mode_ratio); + + if (intel_dsi->dual_link) { + hfp_sw *= 2; + hsync_sw *= 2; + hbp_sw *= 2; + } + + crtc_htotal_sw = adjusted_mode_sw->crtc_hdisplay + hfp_sw + + hsync_sw + hbp_sw; + crtc_hsync_start_sw = hfp_sw + adjusted_mode_sw->crtc_hdisplay; + crtc_hsync_end_sw = hsync_sw + crtc_hsync_start_sw; + crtc_hblank_start_sw = adjusted_mode_sw->crtc_hdisplay; + crtc_hblank_end_sw = crtc_htotal_sw; + + if (adjusted_mode->crtc_htotal == crtc_htotal_sw) + adjusted_mode->crtc_htotal = adjusted_mode_sw->crtc_htotal; + + if (adjusted_mode->crtc_hsync_start == crtc_hsync_start_sw) + adjusted_mode->crtc_hsync_start = + adjusted_mode_sw->crtc_hsync_start; + + if (adjusted_mode->crtc_hsync_end == crtc_hsync_end_sw) + adjusted_mode->crtc_hsync_end = + adjusted_mode_sw->crtc_hsync_end; + + if (adjusted_mode->crtc_hblank_start == crtc_hblank_start_sw) + adjusted_mode->crtc_hblank_start = + adjusted_mode_sw->crtc_hblank_start; + + if (adjusted_mode->crtc_hblank_end == crtc_hblank_end_sw) + adjusted_mode->crtc_hblank_end = + adjusted_mode_sw->crtc_hblank_end; +} static void intel_dsi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) @@ -891,14 +1008,6 @@ static u16 txclkesc(u32 divider, unsigned int us) } } -/* return pixels in terms of txbyteclkhs */ -static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count, - u16 burst_mode_ratio) -{ - return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio, - 8 * 100), lane_count); -} - static void set_dsi_timings(struct drm_encoder *encoder, const struct drm_display_mode *adjusted_mode) { @@ -1436,6 +1545,9 @@ void intel_dsi_init(struct drm_device *dev) goto err; } + connector->display_info.width_mm = fixed_mode->width_mm; + connector->display_info.height_mm = fixed_mode->height_mm; + intel_panel_init(&intel_connector->panel, fixed_mode, NULL); intel_dsi_add_properties(intel_connector); diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 2cdab73046f8..a8844702d11b 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -836,6 +836,22 @@ static void hsw_set_infoframes(struct drm_encoder *encoder, intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); } +void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable) +{ + struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi)); + struct i2c_adapter *adapter = + intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus); + + if (hdmi->dp_dual_mode.type < DRM_DP_DUAL_MODE_TYPE2_DVI) + return; + + DRM_DEBUG_KMS("%s DP dual mode 
adaptor TMDS output\n", + enable ? "Enabling" : "Disabling"); + + drm_dp_dual_mode_set_tmds_output(hdmi->dp_dual_mode.type, + adapter, enable); +} + static void intel_hdmi_prepare(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; @@ -845,6 +861,8 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder) const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; u32 hdmi_val; + intel_dp_dual_mode_set_tmds_output(intel_hdmi, true); + hdmi_val = SDVO_ENCODING_HDMI; if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range) hdmi_val |= HDMI_COLOR_RANGE_16_235; @@ -953,6 +971,8 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder, dotclock /= pipe_config->pixel_multiplier; pipe_config->base.adjusted_mode.crtc_clock = dotclock; + + pipe_config->lane_count = 4; } static void intel_enable_hdmi_audio(struct intel_encoder *encoder) @@ -1140,6 +1160,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder) } intel_hdmi->set_infoframes(&encoder->base, false, NULL); + + intel_dp_dual_mode_set_tmds_output(intel_hdmi, false); } static void g4x_disable_hdmi(struct intel_encoder *encoder) @@ -1165,27 +1187,42 @@ static void pch_post_disable_hdmi(struct intel_encoder *encoder) intel_disable_hdmi(encoder); } -static int hdmi_port_clock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit) +static int intel_hdmi_source_max_tmds_clock(struct drm_i915_private *dev_priv) { - struct drm_device *dev = intel_hdmi_to_dev(hdmi); - - if ((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev)) + if (IS_G4X(dev_priv)) return 165000; - else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) + else if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8) return 300000; else return 225000; } +static int hdmi_port_clock_limit(struct intel_hdmi *hdmi, + bool respect_downstream_limits) +{ + struct drm_device *dev = intel_hdmi_to_dev(hdmi); + int max_tmds_clock = intel_hdmi_source_max_tmds_clock(to_i915(dev)); + + if (respect_downstream_limits) { + if (hdmi->dp_dual_mode.max_tmds_clock) + max_tmds_clock = min(max_tmds_clock, + hdmi->dp_dual_mode.max_tmds_clock); + if (!hdmi->has_hdmi_sink) + max_tmds_clock = min(max_tmds_clock, 165000); + } + + return max_tmds_clock; +} + static enum drm_mode_status hdmi_port_clock_valid(struct intel_hdmi *hdmi, - int clock, bool respect_dvi_limit) + int clock, bool respect_downstream_limits) { struct drm_device *dev = intel_hdmi_to_dev(hdmi); if (clock < 25000) return MODE_CLOCK_LOW; - if (clock > hdmi_port_clock_limit(hdmi, respect_dvi_limit)) + if (clock > hdmi_port_clock_limit(hdmi, respect_downstream_limits)) return MODE_CLOCK_HIGH; /* BXT DPLL can't generate 223-240 MHz */ @@ -1309,7 +1346,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, * within limits. 
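The new intel_dp_dual_mode_set_tmds_output() only does anything for type 2 adaptors; type 1 adaptors expose no control registers, hence the early return on dp_dual_mode.type. Condensed to the calls alone, the pairing added in this file looks like this (a sketch, not the literal hunks):

	/* intel_hdmi_prepare(): wake the adaptor's TMDS buffer first */
	intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
	/* ...program and enable the HDMI port... */

	/* intel_disable_hdmi(): power the buffer back down last */
	intel_hdmi->set_infoframes(&encoder->base, false, NULL);
	intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);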
*/ if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink && - hdmi_port_clock_valid(intel_hdmi, clock_12bpc, false) == MODE_OK && + hdmi_port_clock_valid(intel_hdmi, clock_12bpc, true) == MODE_OK && hdmi_12bpc_possible(pipe_config)) { DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); desired_bpp = 12*3; @@ -1337,6 +1374,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, /* Set user selected PAR to incoming mode's member */ adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio; + pipe_config->lane_count = 4; + return true; } @@ -1349,10 +1388,57 @@ intel_hdmi_unset_edid(struct drm_connector *connector) intel_hdmi->has_audio = false; intel_hdmi->rgb_quant_range_selectable = false; + intel_hdmi->dp_dual_mode.type = DRM_DP_DUAL_MODE_NONE; + intel_hdmi->dp_dual_mode.max_tmds_clock = 0; + kfree(to_intel_connector(connector)->detect_edid); to_intel_connector(connector)->detect_edid = NULL; } +static void +intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid) +{ + struct drm_i915_private *dev_priv = to_i915(connector->dev); + struct intel_hdmi *hdmi = intel_attached_hdmi(connector); + enum port port = hdmi_to_dig_port(hdmi)->port; + struct i2c_adapter *adapter = + intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus); + enum drm_dp_dual_mode_type type = drm_dp_dual_mode_detect(adapter); + + /* + * Type 1 DVI adaptors are not required to implement any + * registers, so we can't always detect their presence. + * Ideally we should be able to check the state of the + * CONFIG1 pin, but no such luck on our hardware. + * + * The only method left to us is to check the VBT to see + * if the port is a dual mode capable DP port. But let's + * only do that when we successfully read the EDID, to avoid + * confusing log messages about DP dual mode adaptors when + * there's nothing connected to the port.
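Passing true for respect_downstream_limits in the 12bpc check above means the doubled clock is now validated against the adaptor and sink limits as well. A worked example against the new hdmi_port_clock_limit() (values in kHz; the adaptor figure is hypothetical):

	/*
	 * Haswell source limit: 300000. Type 2 adaptor advertising 225000:
	 *   min(300000, 225000) -> 225000
	 * DVI-only sink (!has_hdmi_sink): the single-link ceiling applies:
	 *   min(225000, 165000) -> 165000
	 */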
+ */ + if (type == DRM_DP_DUAL_MODE_UNKNOWN) { + if (has_edid && + intel_bios_is_port_dp_dual_mode(dev_priv, port)) { + DRM_DEBUG_KMS("Assuming DP dual mode adaptor presence based on VBT\n"); + type = DRM_DP_DUAL_MODE_TYPE1_DVI; + } else { + type = DRM_DP_DUAL_MODE_NONE; + } + } + + if (type == DRM_DP_DUAL_MODE_NONE) + return; + + hdmi->dp_dual_mode.type = type; + hdmi->dp_dual_mode.max_tmds_clock = + drm_dp_dual_mode_max_tmds_clock(type, adapter); + + DRM_DEBUG_KMS("DP dual mode adaptor (%s) detected (max TMDS clock: %d kHz)\n", + drm_dp_get_dual_mode_type_name(type), + hdmi->dp_dual_mode.max_tmds_clock); +} + static bool intel_hdmi_set_edid(struct drm_connector *connector, bool force) { @@ -1368,6 +1454,8 @@ intel_hdmi_set_edid(struct drm_connector *connector, bool force) intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus)); + intel_hdmi_dp_dual_mode_detect(connector, edid != NULL); + intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); } @@ -2054,6 +2142,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, enum port port = intel_dig_port->port; uint8_t alternate_ddc_pin; + DRM_DEBUG_KMS("Adding HDMI connector on port %c\n", + port_name(port)); + if (WARN(intel_dig_port->max_lanes < 4, "Not enough lanes (%d) for HDMI on port %c\n", intel_dig_port->max_lanes, port_name(port))) diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 6179b591ee84..42eac37de047 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -721,48 +721,6 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request return ret; } -static int logical_ring_wait_for_space(struct drm_i915_gem_request *req, - int bytes) -{ - struct intel_ringbuffer *ringbuf = req->ringbuf; - struct intel_engine_cs *engine = req->engine; - struct drm_i915_gem_request *target; - unsigned space; - int ret; - - if (intel_ring_space(ringbuf) >= bytes) - return 0; - - /* The whole point of reserving space is to not wait! */ - WARN_ON(ringbuf->reserved_in_use); - - list_for_each_entry(target, &engine->request_list, list) { - /* - * The request queue is per-engine, so can contain requests - * from multiple ringbuffers. Here, we must ignore any that - * aren't from the ringbuffer we're considering. - */ - if (target->ringbuf != ringbuf) - continue; - - /* Would completion of this request free enough space? */ - space = __intel_ring_space(target->postfix, ringbuf->tail, - ringbuf->size); - if (space >= bytes) - break; - } - - if (WARN_ON(&target->list == &engine->request_list)) - return -ENOSPC; - - ret = i915_wait_request(target); - if (ret) - return ret; - - ringbuf->space = space; - return 0; -} - /* * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload * @request: Request to advance the logical ringbuffer of. 
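The execlists-private wait loop deleted in this hunk is not lost: an equivalent wait_for_space() appears in the intel_ringbuffer.c hunks further below, so both submission paths now funnel through intel_ring_begin(). Its core search, in essence (names as in the new helper):

	list_for_each_entry(target, &engine->request_list, list) {
		if (target->ringbuf != ringbuf)
			continue;	/* another context's ringbuffer */
		if (__intel_ring_space(target->postfix, ringbuf->tail,
				       ringbuf->size) >= bytes)
			break;		/* retiring this one frees enough */
	}

	return i915_wait_request(target);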
@@ -814,92 +772,6 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request) return 0; } -static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf) -{ - uint32_t __iomem *virt; - int rem = ringbuf->size - ringbuf->tail; - - virt = ringbuf->virtual_start + ringbuf->tail; - rem /= 4; - while (rem--) - iowrite32(MI_NOOP, virt++); - - ringbuf->tail = 0; - intel_ring_update_space(ringbuf); -} - -static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes) -{ - struct intel_ringbuffer *ringbuf = req->ringbuf; - int remain_usable = ringbuf->effective_size - ringbuf->tail; - int remain_actual = ringbuf->size - ringbuf->tail; - int ret, total_bytes, wait_bytes = 0; - bool need_wrap = false; - - if (ringbuf->reserved_in_use) - total_bytes = bytes; - else - total_bytes = bytes + ringbuf->reserved_size; - - if (unlikely(bytes > remain_usable)) { - /* - * Not enough space for the basic request. So need to flush - * out the remainder and then wait for base + reserved. - */ - wait_bytes = remain_actual + total_bytes; - need_wrap = true; - } else { - if (unlikely(total_bytes > remain_usable)) { - /* - * The base request will fit but the reserved space - * falls off the end. So don't need an immediate wrap - * and only need to effectively wait for the reserved - * size space from the start of ringbuffer. - */ - wait_bytes = remain_actual + ringbuf->reserved_size; - } else if (total_bytes > ringbuf->space) { - /* No wrapping required, just waiting. */ - wait_bytes = total_bytes; - } - } - - if (wait_bytes) { - ret = logical_ring_wait_for_space(req, wait_bytes); - if (unlikely(ret)) - return ret; - - if (need_wrap) - __wrap_ring_buffer(ringbuf); - } - - return 0; -} - -/** - * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands - * - * @req: The request to start some new work for - * @num_dwords: number of DWORDs that we plan to write to the ringbuffer. - * - * The ringbuffer might not be ready to accept the commands right away (maybe it needs to - * be wrapped, or wait a bit for the tail to be updated). This function takes care of that - * and also preallocates a request (every workload submission is still mediated through - * requests, same as it did with legacy ringbuffer submission). - * - * Return: non-zero if the ringbuffer is not ready to be written to. 
- */ -int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords) -{ - int ret; - - ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t)); - if (ret) - return ret; - - req->ringbuf->space -= num_dwords * sizeof(uint32_t); - return 0; -} - int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request) { /* @@ -912,7 +784,7 @@ int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request) */ intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST); - return intel_logical_ring_begin(request, 0); + return intel_ring_begin(request, 0); } /** @@ -982,7 +854,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params, if (engine == &dev_priv->engine[RCS] && instp_mode != dev_priv->relative_constants_mode) { - ret = intel_logical_ring_begin(params->request, 4); + ret = intel_ring_begin(params->request, 4); if (ret) return ret; @@ -1178,7 +1050,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) if (ret) return ret; - ret = intel_logical_ring_begin(req, w->count * 2 + 2); + ret = intel_ring_begin(req, w->count * 2 + 2); if (ret) return ret; @@ -1669,7 +1541,7 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) const int num_lri_cmds = GEN8_LEGACY_PDPES * 2; int i, ret; - ret = intel_logical_ring_begin(req, num_lri_cmds * 2 + 2); + ret = intel_ring_begin(req, num_lri_cmds * 2 + 2); if (ret) return ret; @@ -1716,7 +1588,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req, req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine); } - ret = intel_logical_ring_begin(req, 4); + ret = intel_ring_begin(req, 4); if (ret) return ret; @@ -1778,7 +1650,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, uint32_t cmd; int ret; - ret = intel_logical_ring_begin(request, 4); + ret = intel_ring_begin(request, 4); if (ret) return ret; @@ -1846,7 +1718,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, vf_flush_wa = true; } - ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6); + ret = intel_ring_begin(request, vf_flush_wa ? 
12 : 6); if (ret) return ret; @@ -1920,7 +1792,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request) struct intel_ringbuffer *ringbuf = request->ringbuf; int ret; - ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS); + ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS); if (ret) return ret; @@ -1944,7 +1816,7 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request) struct intel_ringbuffer *ringbuf = request->ringbuf; int ret; - ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS); + ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 461f1ef9b5c1..60a7385bc531 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -63,7 +63,6 @@ int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request); void intel_logical_ring_stop(struct intel_engine_cs *engine); void intel_logical_ring_cleanup(struct intel_engine_cs *engine); int intel_logical_rings_init(struct drm_device *dev); -int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords); int logical_ring_flush_all_caches(struct drm_i915_gem_request *req); /** diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index bc53c0dd34d0..96281e628d2a 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -1082,6 +1082,8 @@ void intel_lvds_init(struct drm_device *dev) fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode); if (fixed_mode) { fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; + connector->display_info.width_mm = fixed_mode->width_mm; + connector->display_info.height_mm = fixed_mode->height_mm; goto out; } } diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c index 23b8545ad6b0..6ba4bf7f2a89 100644 --- a/drivers/gpu/drm/i915/intel_mocs.c +++ b/drivers/gpu/drm/i915/intel_mocs.c @@ -239,11 +239,9 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req, if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES)) return -ENODEV; - ret = intel_logical_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES); - if (ret) { - DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret); + ret = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES); + if (ret) return ret; - } intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES)); @@ -305,11 +303,9 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req, if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES)) return -ENODEV; - ret = intel_logical_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES); - if (ret) { - DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret); + ret = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES); + if (ret) return ret; - } intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2)); diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index a0788763757b..8357d571553a 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -1638,6 +1638,12 @@ static int pwm_setup_backlight(struct intel_connector *connector, return -ENODEV; } + /* + * FIXME: pwm_apply_args() should be removed when switching to + * the atomic PWM API. 
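Context for the FIXME being added here: under the legacy PWM API, the period and polarity supplied by the platform (pwm_args) take effect only once the driver pushes them, which is what pwm_apply_args() does before the explicit configuration. Condensed, the sequence in pwm_setup_backlight() becomes:

	pwm_apply_args(panel->backlight.pwm);	/* adopt platform period/polarity */
	pwm_config(panel->backlight.pwm,
		   CRC_PMIC_PWM_PERIOD_NS, CRC_PMIC_PWM_PERIOD_NS);

With the atomic pwm_apply_state() API this two-step dance collapses into a single call, hence the FIXME.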
+ */ + pwm_apply_args(panel->backlight.pwm); + retval = pwm_config(panel->backlight.pwm, CRC_PMIC_PWM_PERIOD_NS, CRC_PMIC_PWM_PERIOD_NS); if (retval < 0) { diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 4b60005cda37..a7ef45da0a9e 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3904,6 +3904,8 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) if (IS_HASWELL(dev) || IS_BROADWELL(dev)) hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe)); + memset(active, 0, sizeof(*active)); + active->pipe_enabled = intel_crtc->active; if (active->pipe_enabled) { diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index c3abae4bc596..a788d1e9589b 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -280,7 +280,10 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp) * with the 5 or 6 idle patterns. */ uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames); - uint32_t val = 0x0; + uint32_t val = EDP_PSR_ENABLE; + + val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT; + val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT; if (IS_HASWELL(dev)) val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; @@ -288,14 +291,50 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp) if (dev_priv->psr.link_standby) val |= EDP_PSR_LINK_STANDBY; - I915_WRITE(EDP_PSR_CTL, val | - max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | - idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | - EDP_PSR_ENABLE); + if (dev_priv->vbt.psr.tp1_wakeup_time > 5) + val |= EDP_PSR_TP1_TIME_2500us; + else if (dev_priv->vbt.psr.tp1_wakeup_time > 1) + val |= EDP_PSR_TP1_TIME_500us; + else if (dev_priv->vbt.psr.tp1_wakeup_time > 0) + val |= EDP_PSR_TP1_TIME_100us; + else + val |= EDP_PSR_TP1_TIME_0us; + + if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5) + val |= EDP_PSR_TP2_TP3_TIME_2500us; + else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1) + val |= EDP_PSR_TP2_TP3_TIME_500us; + else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0) + val |= EDP_PSR_TP2_TP3_TIME_100us; + else + val |= EDP_PSR_TP2_TP3_TIME_0us; + + if (intel_dp_source_supports_hbr2(intel_dp) && + drm_dp_tps3_supported(intel_dp->dpcd)) + val |= EDP_PSR_TP1_TP3_SEL; + else + val |= EDP_PSR_TP1_TP2_SEL; + + I915_WRITE(EDP_PSR_CTL, val); + + if (!dev_priv->psr.psr2_support) + return; + + /* FIXME: selective update is probably totally broken because it doesn't + * mesh at all with our frontbuffer tracking. And the hw alone isn't + * good enough. 
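The TP1 and TP2/TP3 wakeup times taken from the VBT are bucketed into the few discrete values the EDP_PSR_CTL fields can encode, rounding up to the next value the hardware supports. A hypothetical helper equivalent to the TP1 branch above (TP2/TP3 uses the same thresholds with its own field values):

	static u32 psr_tp1_field(int wakeup_time)
	{
		if (wakeup_time > 5)
			return EDP_PSR_TP1_TIME_2500us;
		else if (wakeup_time > 1)
			return EDP_PSR_TP1_TIME_500us;
		else if (wakeup_time > 0)
			return EDP_PSR_TP1_TIME_100us;
		return EDP_PSR_TP1_TIME_0us;
	}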
*/ + val = EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE; + + if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5) + val |= EDP_PSR2_TP2_TIME_2500; + else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1) + val |= EDP_PSR2_TP2_TIME_500; + else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0) + val |= EDP_PSR2_TP2_TIME_100; + else + val |= EDP_PSR2_TP2_TIME_50; - if (dev_priv->psr.psr2_support) - I915_WRITE(EDP_PSR2_CTL, EDP_PSR2_ENABLE | - EDP_SU_TRACK_ENABLE | EDP_PSR2_TP2_TIME_100); + I915_WRITE(EDP_PSR2_CTL, val); } static bool intel_psr_match_conditions(struct intel_dp *intel_dp) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 245386e20c52..04402bb9d26b 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -53,12 +53,6 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf) ringbuf->tail, ringbuf->size); } -int intel_ring_space(struct intel_ringbuffer *ringbuf) -{ - intel_ring_update_space(ringbuf); - return ringbuf->space; -} - bool intel_engine_stopped(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->dev->dev_private; @@ -1309,7 +1303,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req, intel_ring_emit(signaller, seqno); intel_ring_emit(signaller, 0); intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | - MI_SEMAPHORE_TARGET(waiter->id)); + MI_SEMAPHORE_TARGET(waiter->hw_id)); intel_ring_emit(signaller, 0); } @@ -1349,7 +1343,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req, intel_ring_emit(signaller, upper_32_bits(gtt_offset)); intel_ring_emit(signaller, seqno); intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | - MI_SEMAPHORE_TARGET(waiter->id)); + MI_SEMAPHORE_TARGET(waiter->hw_id)); intel_ring_emit(signaller, 0); } @@ -1573,6 +1567,8 @@ pc_render_add_request(struct drm_i915_gem_request *req) static void gen6_seqno_barrier(struct intel_engine_cs *engine) { + struct drm_i915_private *dev_priv = engine->dev->dev_private; + /* Workaround to force correct ordering between irq and seqno writes on * ivb (and maybe also on snb) by reading from a CS register (like * ACTHD) before reading the status page. @@ -1584,9 +1580,13 @@ gen6_seqno_barrier(struct intel_engine_cs *engine) * the write time to land, but that would incur a delay after every * batch i.e. much more frequent than a delay when waiting for the * interrupt (with the same net latency). + * + * Also note that to prevent whole machine hangs on gen7, we have to + * take the spinlock to guard against concurrent cacheline access. */ - struct drm_i915_private *dev_priv = engine->dev->dev_private; + spin_lock_irq(&dev_priv->uncore.lock); POSTING_READ_FW(RING_ACTHD(engine->mmio_base)); + spin_unlock_irq(&dev_priv->uncore.lock); } static u32 @@ -2312,51 +2312,6 @@ void intel_cleanup_engine(struct intel_engine_cs *engine) engine->dev = NULL; } -static int ring_wait_for_space(struct intel_engine_cs *engine, int n) -{ - struct intel_ringbuffer *ringbuf = engine->buffer; - struct drm_i915_gem_request *request; - unsigned space; - int ret; - - if (intel_ring_space(ringbuf) >= n) - return 0; - - /* The whole point of reserving space is to not wait! 
*/ - WARN_ON(ringbuf->reserved_in_use); - - list_for_each_entry(request, &engine->request_list, list) { - space = __intel_ring_space(request->postfix, ringbuf->tail, - ringbuf->size); - if (space >= n) - break; - } - - if (WARN_ON(&request->list == &engine->request_list)) - return -ENOSPC; - - ret = i915_wait_request(request); - if (ret) - return ret; - - ringbuf->space = space; - return 0; -} - -static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf) -{ - uint32_t __iomem *virt; - int rem = ringbuf->size - ringbuf->tail; - - virt = ringbuf->virtual_start + ringbuf->tail; - rem /= 4; - while (rem--) - iowrite32(MI_NOOP, virt++); - - ringbuf->tail = 0; - intel_ring_update_space(ringbuf); -} - int intel_engine_idle(struct intel_engine_cs *engine) { struct drm_i915_gem_request *req; @@ -2398,63 +2353,82 @@ int intel_ring_reserve_space(struct drm_i915_gem_request *request) void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size) { - WARN_ON(ringbuf->reserved_size); - WARN_ON(ringbuf->reserved_in_use); - + GEM_BUG_ON(ringbuf->reserved_size); ringbuf->reserved_size = size; } void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf) { - WARN_ON(ringbuf->reserved_in_use); - + GEM_BUG_ON(!ringbuf->reserved_size); ringbuf->reserved_size = 0; - ringbuf->reserved_in_use = false; } void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf) { - WARN_ON(ringbuf->reserved_in_use); - - ringbuf->reserved_in_use = true; - ringbuf->reserved_tail = ringbuf->tail; + GEM_BUG_ON(!ringbuf->reserved_size); + ringbuf->reserved_size = 0; } void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf) { - WARN_ON(!ringbuf->reserved_in_use); - if (ringbuf->tail > ringbuf->reserved_tail) { - WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size, - "request reserved size too small: %d vs %d!\n", - ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size); - } else { + GEM_BUG_ON(ringbuf->reserved_size); +} + +static int wait_for_space(struct drm_i915_gem_request *req, int bytes) +{ + struct intel_ringbuffer *ringbuf = req->ringbuf; + struct intel_engine_cs *engine = req->engine; + struct drm_i915_gem_request *target; + + intel_ring_update_space(ringbuf); + if (ringbuf->space >= bytes) + return 0; + + /* + * Space is reserved in the ringbuffer for finalising the request, + * as that cannot be allowed to fail. During request finalisation, + * reserved_space is set to 0 to stop the overallocation and the + * assumption is that then we never need to wait (which has the + * risk of failing with EINTR). + * + * See also i915_gem_request_alloc() and i915_add_request(). + */ + GEM_BUG_ON(!ringbuf->reserved_size); + + list_for_each_entry(target, &engine->request_list, list) { + unsigned space; + /* - * The ring was wrapped while the reserved space was in use. - * That means that some unknown amount of the ring tail was - * no-op filled and skipped. Thus simply adding the ring size - * to the tail and doing the above space check will not work. - * Rather than attempt to track how much tail was skipped, - * it is much simpler to say that also skipping the sanity - * check every once in a while is not a big issue. + * The request queue is per-engine, so can contain requests + * from multiple ringbuffers. Here, we must ignore any that + * aren't from the ringbuffer we're considering. */ + if (target->ringbuf != ringbuf) + continue; + + /* Would completion of this request free enough space? 
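The reserved-space bookkeeping above shrinks from a WARN_ON state machine (reserved_in_use, reserved_tail) to bare GEM_BUG_ON assertions. The expected lifecycle, annotated (a sketch of the calling convention, not new code):

	/* at request allocation */
	intel_ring_reserved_space_reserve(ringbuf, MIN_SPACE_FOR_ADD_REQUEST);

	/* ...normal emission; reserved_size is held back from ->space... */

	/* at request finalisation: the reservation is released for use */
	intel_ring_reserved_space_use(ringbuf);

	/* after finalisation: asserts the reservation really reached zero */
	intel_ring_reserved_space_end(ringbuf);

intel_ring_reserved_space_cancel() covers the abandoned-request path.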
*/ + space = __intel_ring_space(target->postfix, ringbuf->tail, + ringbuf->size); + if (space >= bytes) + break; } - ringbuf->reserved_size = 0; - ringbuf->reserved_in_use = false; + if (WARN_ON(&target->list == &engine->request_list)) + return -ENOSPC; + + return i915_wait_request(target); } -static int __intel_ring_prepare(struct intel_engine_cs *engine, int bytes) +int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) { - struct intel_ringbuffer *ringbuf = engine->buffer; - int remain_usable = ringbuf->effective_size - ringbuf->tail; + struct intel_ringbuffer *ringbuf = req->ringbuf; int remain_actual = ringbuf->size - ringbuf->tail; - int ret, total_bytes, wait_bytes = 0; + int remain_usable = ringbuf->effective_size - ringbuf->tail; + int bytes = num_dwords * sizeof(u32); + int total_bytes, wait_bytes; bool need_wrap = false; - if (ringbuf->reserved_in_use) - total_bytes = bytes; - else - total_bytes = bytes + ringbuf->reserved_size; + total_bytes = bytes + ringbuf->reserved_size; if (unlikely(bytes > remain_usable)) { /* @@ -2463,44 +2437,42 @@ static int __intel_ring_prepare(struct intel_engine_cs *engine, int bytes) */ wait_bytes = remain_actual + total_bytes; need_wrap = true; + } else if (unlikely(total_bytes > remain_usable)) { + /* + * The base request will fit but the reserved space + * falls off the end. So we don't need an immediate wrap + * and only need to effectively wait for the reserved + * size space from the start of ringbuffer. + */ + wait_bytes = remain_actual + ringbuf->reserved_size; } else { - if (unlikely(total_bytes > remain_usable)) { - /* - * The base request will fit but the reserved space - * falls off the end. So don't need an immediate wrap - * and only need to effectively wait for the reserved - * size space from the start of ringbuffer. - */ - wait_bytes = remain_actual + ringbuf->reserved_size; - } else if (total_bytes > ringbuf->space) { - /* No wrapping required, just waiting. */ - wait_bytes = total_bytes; - } + /* No wrapping required, just waiting. 
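	/*
	 * Worked example for the three wait_bytes branches (illustrative
	 * numbers, in bytes): size = 4096, effective_size = 4032,
	 * tail = 3900, reserved_size = 64, hence remain_usable = 132 and
	 * remain_actual = 196.
	 *
	 *   bytes = 160: bytes > remain_usable        -> wrap needed,
	 *                wait_bytes = 196 + (160 + 64) = 420
	 *   bytes = 100: total 164 > remain_usable    -> no wrap,
	 *                wait_bytes = 196 + 64 = 260
	 *   bytes = 32:  total 96 fits                -> wait_bytes = 96
	 */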
*/ + wait_bytes = total_bytes; } - if (wait_bytes) { - ret = ring_wait_for_space(engine, wait_bytes); + if (wait_bytes > ringbuf->space) { + int ret = wait_for_space(req, wait_bytes); if (unlikely(ret)) return ret; - if (need_wrap) - __wrap_ring_buffer(ringbuf); + intel_ring_update_space(ringbuf); + if (unlikely(ringbuf->space < wait_bytes)) + return -EAGAIN; } - return 0; -} + if (unlikely(need_wrap)) { + GEM_BUG_ON(remain_actual > ringbuf->space); + GEM_BUG_ON(ringbuf->tail + remain_actual > ringbuf->size); -int intel_ring_begin(struct drm_i915_gem_request *req, - int num_dwords) -{ - struct intel_engine_cs *engine = req->engine; - int ret; - - ret = __intel_ring_prepare(engine, num_dwords * sizeof(uint32_t)); - if (ret) - return ret; + /* Fill the tail with MI_NOOP */ + memset(ringbuf->virtual_start + ringbuf->tail, + 0, remain_actual); + ringbuf->tail = 0; + ringbuf->space -= remain_actual; + } - engine->buffer->space -= num_dwords * sizeof(uint32_t); + ringbuf->space -= bytes; + GEM_BUG_ON(ringbuf->space < 0); return 0; } @@ -2772,6 +2744,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) engine->name = "render ring"; engine->id = RCS; engine->exec_id = I915_EXEC_RENDER; + engine->hw_id = 0; engine->mmio_base = RENDER_RING_BASE; if (INTEL_INFO(dev)->gen >= 8) { @@ -2923,6 +2896,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) engine->name = "bsd ring"; engine->id = VCS; engine->exec_id = I915_EXEC_BSD; + engine->hw_id = 1; engine->write_tail = ring_write_tail; if (INTEL_INFO(dev)->gen >= 6) { @@ -3001,6 +2975,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev) engine->name = "bsd2 ring"; engine->id = VCS2; engine->exec_id = I915_EXEC_BSD; + engine->hw_id = 4; engine->write_tail = ring_write_tail; engine->mmio_base = GEN8_BSD2_RING_BASE; @@ -3033,6 +3008,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev) engine->name = "blitter ring"; engine->id = BCS; engine->exec_id = I915_EXEC_BLT; + engine->hw_id = 2; engine->mmio_base = BLT_RING_BASE; engine->write_tail = ring_write_tail; @@ -3092,6 +3068,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev) engine->name = "video enhancement ring"; engine->id = VECS; engine->exec_id = I915_EXEC_VEBOX; + engine->hw_id = 3; engine->mmio_base = VEBOX_RING_BASE; engine->write_tail = ring_write_tail; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 2ade194bbea9..ff126485d398 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -108,8 +108,6 @@ struct intel_ringbuffer { int size; int effective_size; int reserved_size; - int reserved_tail; - bool reserved_in_use; /** We track the position of the requests in the ring buffer, and * when each is retired we increment last_retired_head as the GPU @@ -156,7 +154,8 @@ struct intel_engine_cs { #define I915_NUM_ENGINES 5 #define _VCS(n) (VCS + (n)) unsigned int exec_id; - unsigned int guc_id; + unsigned int hw_id; + unsigned int guc_id; /* XXX same as hw_id? 
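For reference, the hw_id values threaded through the engine init functions above, now used as the MI_SEMAPHORE_TARGET index in gen8 semaphore emission:

	RCS  (render ring)             hw_id 0
	VCS  (bsd ring)                hw_id 1
	BCS  (blitter ring)            hw_id 2
	VECS (video enhancement ring)  hw_id 3
	VCS2 (bsd2 ring)               hw_id 4

exec_id keeps its ABI-visible I915_EXEC_* numbering, which is why the two can no longer share one field.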
*/ u32 mmio_base; struct drm_device *dev; struct intel_ringbuffer *buffer; @@ -459,7 +458,6 @@ static inline void intel_ring_advance(struct intel_engine_cs *engine) } int __intel_ring_space(int head, int tail, int size); void intel_ring_update_space(struct intel_ringbuffer *ringbuf); -int intel_ring_space(struct intel_ringbuffer *ringbuf); bool intel_engine_stopped(struct intel_engine_cs *engine); int __must_check intel_engine_idle(struct intel_engine_cs *engine); diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h index 9ff1e960d617..44fb0b35eed3 100644 --- a/drivers/gpu/drm/i915/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/intel_vbt_defs.h @@ -403,9 +403,10 @@ struct lvds_dvo_timing { u8 vsync_off:4; u8 rsvd0:6; u8 hsync_off_hi:2; - u8 h_image; - u8 v_image; - u8 max_hv; + u8 himage_lo; + u8 vimage_lo; + u8 vimage_hi:4; + u8 himage_hi:4; u8 h_border; u8 v_border; u8 rsvd1:3; @@ -740,6 +741,7 @@ struct bdb_psr { #define DEVICE_TYPE_INT_TV 0x1009 #define DEVICE_TYPE_HDMI 0x60D2 #define DEVICE_TYPE_DP 0x68C6 +#define DEVICE_TYPE_DP_DUAL_MODE 0x60D6 #define DEVICE_TYPE_eDP 0x78C6 #define DEVICE_TYPE_CLASS_EXTENSION (1 << 15) @@ -774,6 +776,17 @@ struct bdb_psr { DEVICE_TYPE_DISPLAYPORT_OUTPUT | \ DEVICE_TYPE_ANALOG_OUTPUT) +#define DEVICE_TYPE_DP_DUAL_MODE_BITS \ + (DEVICE_TYPE_INTERNAL_CONNECTOR | \ + DEVICE_TYPE_MIPI_OUTPUT | \ + DEVICE_TYPE_COMPOSITE_OUTPUT | \ + DEVICE_TYPE_LVDS_SINGALING | \ + DEVICE_TYPE_TMDS_DVI_SIGNALING | \ + DEVICE_TYPE_VIDEO_SIGNALING | \ + DEVICE_TYPE_DISPLAYPORT_OUTPUT | \ + DEVICE_TYPE_DIGITAL_OUTPUT | \ + DEVICE_TYPE_ANALOG_OUTPUT) + /* define the DVO port for HDMI output type */ #define DVO_B 1 #define DVO_C 2 diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 1080019e7b17..82656654fb21 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -25,6 +25,7 @@ #include <drm/drm_fb_cma_helper.h> #include <drm/drm_plane_helper.h> #include <drm/drm_of.h> +#include <video/imx-ipu-v3.h> #include "imx-drm.h" @@ -96,8 +97,8 @@ static struct imx_drm_crtc *imx_drm_find_crtc(struct drm_crtc *crtc) return NULL; } -int imx_drm_set_bus_format_pins(struct drm_encoder *encoder, u32 bus_format, - int hsync_pin, int vsync_pin) +int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format, + int hsync_pin, int vsync_pin, u32 bus_flags) { struct imx_drm_crtc_helper_funcs *helper; struct imx_drm_crtc *imx_crtc; @@ -109,14 +110,17 @@ int imx_drm_set_bus_format_pins(struct drm_encoder *encoder, u32 bus_format, helper = &imx_crtc->imx_drm_helper_funcs; if (helper->set_interface_pix_fmt) return helper->set_interface_pix_fmt(encoder->crtc, - bus_format, hsync_pin, vsync_pin); + bus_format, hsync_pin, vsync_pin, + bus_flags); return 0; } -EXPORT_SYMBOL_GPL(imx_drm_set_bus_format_pins); +EXPORT_SYMBOL_GPL(imx_drm_set_bus_config); int imx_drm_set_bus_format(struct drm_encoder *encoder, u32 bus_format) { - return imx_drm_set_bus_format_pins(encoder, bus_format, 2, 3); + return imx_drm_set_bus_config(encoder, bus_format, 2, 3, + DRM_BUS_FLAG_DE_HIGH | + DRM_BUS_FLAG_PIXDATA_NEGEDGE); } EXPORT_SYMBOL_GPL(imx_drm_set_bus_format); @@ -437,6 +441,13 @@ static int compare_of(struct device *dev, void *data) { struct device_node *np = data; + /* Special case for DI, dev->of_node may not be set yet */ + if (strcmp(dev->driver->name, "imx-ipuv3-crtc") == 0) { + struct ipu_client_platformdata *pdata = dev->platform_data; + + return pdata->of_node == np; + } + /* Special case for 
LDB, one device for two channels */ if (of_node_cmp(np->name, "lvds-channel") == 0) { np = of_get_parent(np); diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h index b0241b9d1334..74320a1723b7 100644 --- a/drivers/gpu/drm/imx/imx-drm.h +++ b/drivers/gpu/drm/imx/imx-drm.h @@ -19,7 +19,8 @@ struct imx_drm_crtc_helper_funcs { int (*enable_vblank)(struct drm_crtc *crtc); void (*disable_vblank)(struct drm_crtc *crtc); int (*set_interface_pix_fmt)(struct drm_crtc *crtc, - u32 bus_format, int hsync_pin, int vsync_pin); + u32 bus_format, int hsync_pin, int vsync_pin, + u32 bus_flags); const struct drm_crtc_helper_funcs *crtc_helper_funcs; const struct drm_crtc_funcs *crtc_funcs; }; @@ -41,8 +42,8 @@ void imx_drm_mode_config_init(struct drm_device *drm); struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb); -int imx_drm_set_bus_format_pins(struct drm_encoder *encoder, - u32 bus_format, int hsync_pin, int vsync_pin); +int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format, + int hsync_pin, int vsync_pin, u32 bus_flags); int imx_drm_set_bus_format(struct drm_encoder *encoder, u32 bus_format); diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index a58eee59550a..beff793bb717 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -25,6 +25,7 @@ #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> #include <linux/of_device.h> #include <linux/of_graph.h> +#include <video/of_display_timing.h> #include <video/of_videomode.h> #include <linux/regmap.h> #include <linux/videodev2.h> @@ -59,6 +60,7 @@ struct imx_ldb_channel { struct drm_encoder encoder; struct drm_panel *panel; struct device_node *child; + struct i2c_adapter *ddc; int chno; void *edid; int edid_len; @@ -107,6 +109,9 @@ static int imx_ldb_connector_get_modes(struct drm_connector *connector) return num_modes; } + if (!imx_ldb_ch->edid && imx_ldb_ch->ddc) + imx_ldb_ch->edid = drm_get_edid(connector, imx_ldb_ch->ddc); + if (imx_ldb_ch->edid) { drm_mode_connector_update_edid_property(connector, imx_ldb_ch->edid); @@ -553,7 +558,8 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) for_each_child_of_node(np, child) { struct imx_ldb_channel *channel; - struct device_node *port; + struct device_node *ddc_node; + struct device_node *ep; ret = of_property_read_u32(child, "reg", &i); if (ret || i < 0 || i > 1) @@ -576,33 +582,54 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) * The output port is port@4 with an external 4-port mux or * port@2 with the internal 2-port mux. */ - port = of_graph_get_port_by_id(child, imx_ldb->lvds_mux ? 4 : 2); - if (port) { - struct device_node *endpoint, *remote; - - endpoint = of_get_child_by_name(port, "endpoint"); - if (endpoint) { - remote = of_graph_get_remote_port_parent(endpoint); - if (remote) - channel->panel = of_drm_find_panel(remote); - else - return -EPROBE_DEFER; - if (!channel->panel) { - dev_err(dev, "panel not found: %s\n", - remote->full_name); - return -EPROBE_DEFER; - } + ep = of_graph_get_endpoint_by_regs(child, + imx_ldb->lvds_mux ? 
4 : 2, + -1); + if (ep) { + struct device_node *remote; + + remote = of_graph_get_remote_port_parent(ep); + of_node_put(ep); + if (remote) + channel->panel = of_drm_find_panel(remote); + else + return -EPROBE_DEFER; + of_node_put(remote); + if (!channel->panel) { + dev_err(dev, "panel not found: %s\n", + remote->full_name); + return -EPROBE_DEFER; } } - edidp = of_get_property(child, "edid", &channel->edid_len); - if (edidp) { - channel->edid = kmemdup(edidp, channel->edid_len, - GFP_KERNEL); - } else if (!channel->panel) { - ret = of_get_drm_display_mode(child, &channel->mode, 0); - if (!ret) - channel->mode_valid = 1; + ddc_node = of_parse_phandle(child, "ddc-i2c-bus", 0); + if (ddc_node) { + channel->ddc = of_find_i2c_adapter_by_node(ddc_node); + of_node_put(ddc_node); + if (!channel->ddc) { + dev_warn(dev, "failed to get ddc i2c adapter\n"); + return -EPROBE_DEFER; + } + } + + if (!channel->ddc) { + /* if no DDC available, fallback to hardcoded EDID */ + dev_dbg(dev, "no ddc available\n"); + + edidp = of_get_property(child, "edid", + &channel->edid_len); + if (edidp) { + channel->edid = kmemdup(edidp, + channel->edid_len, + GFP_KERNEL); + } else if (!channel->panel) { + /* fallback to display-timings node */ + ret = of_get_drm_display_mode(child, + &channel->mode, + OF_USE_NATIVE_MODE); + if (!ret) + channel->mode_valid = 1; + } } channel->bus_format = of_get_bus_format(dev, child); @@ -647,6 +674,7 @@ static void imx_ldb_unbind(struct device *dev, struct device *master, channel->encoder.funcs->destroy(&channel->encoder); kfree(channel->edid); + i2c_put_adapter(channel->ddc); } } diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c index ae7a9fb3b8a2..baf788121287 100644 --- a/drivers/gpu/drm/imx/imx-tve.c +++ b/drivers/gpu/drm/imx/imx-tve.c @@ -294,8 +294,10 @@ static void imx_tve_encoder_prepare(struct drm_encoder *encoder) switch (tve->mode) { case TVE_MODE_VGA: - imx_drm_set_bus_format_pins(encoder, MEDIA_BUS_FMT_GBR888_1X24, - tve->hsync_pin, tve->vsync_pin); + imx_drm_set_bus_config(encoder, MEDIA_BUS_FMT_GBR888_1X24, + tve->hsync_pin, tve->vsync_pin, + DRM_BUS_FLAG_DE_HIGH | + DRM_BUS_FLAG_PIXDATA_NEGEDGE); break; case TVE_MODE_TVOUT: imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_YUV8_1X24); diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index dee8e8b3523b..fc040417e1e8 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -66,6 +66,7 @@ struct ipu_crtc { struct ipu_flip_work *flip_work; int irq; u32 bus_format; + u32 bus_flags; int di_hsync_pin; int di_vsync_pin; }; @@ -271,8 +272,10 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc, else sig_cfg.clkflags = 0; - sig_cfg.enable_pol = 1; - sig_cfg.clk_pol = 0; + sig_cfg.enable_pol = !(ipu_crtc->bus_flags & DRM_BUS_FLAG_DE_LOW); + /* Default to driving pixel data on negative clock edges */ + sig_cfg.clk_pol = !!(ipu_crtc->bus_flags & + DRM_BUS_FLAG_PIXDATA_POSEDGE); sig_cfg.bus_format = ipu_crtc->bus_format; sig_cfg.v_to_h_sync = 0; sig_cfg.hsync_pin = ipu_crtc->di_hsync_pin; @@ -396,11 +399,12 @@ static void ipu_disable_vblank(struct drm_crtc *crtc) } static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc, - u32 bus_format, int hsync_pin, int vsync_pin) + u32 bus_format, int hsync_pin, int vsync_pin, u32 bus_flags) { struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); ipu_crtc->bus_format = bus_format; + ipu_crtc->bus_flags = bus_flags; ipu_crtc->di_hsync_pin = hsync_pin; ipu_crtc->di_vsync_pin = vsync_pin; @@ -473,7 +477,7 @@ static int 
ipu_crtc_init(struct ipu_crtc *ipu_crtc, ret = imx_drm_add_crtc(drm, &ipu_crtc->base, &ipu_crtc->imx_crtc, &ipu_crtc->plane[0]->base, &ipu_crtc_helper_funcs, - ipu_crtc->dev->of_node); + pdata->of_node); if (ret) { dev_err(ipu_crtc->dev, "adding crtc failed with %d.\n", ret); goto err_put_resources; diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index 681ec6eb77d9..a4bb44118d33 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c @@ -38,6 +38,8 @@ static const uint32_t ipu_plane_formats[] = { DRM_FORMAT_RGBX8888, DRM_FORMAT_BGRA8888, DRM_FORMAT_BGRA8888, + DRM_FORMAT_UYVY, + DRM_FORMAT_VYUY, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, DRM_FORMAT_YUV420, @@ -428,7 +430,6 @@ static int ipu_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, if (crtc != plane->crtc) dev_dbg(plane->dev->dev, "crtc change: %p -> %p\n", plane->crtc, crtc); - plane->crtc = crtc; if (!ipu_plane->enabled) ipu_plane_enable(ipu_plane); @@ -461,7 +462,7 @@ static void ipu_plane_destroy(struct drm_plane *plane) kfree(ipu_plane); } -static struct drm_plane_funcs ipu_plane_funcs = { +static const struct drm_plane_funcs ipu_plane_funcs = { .update_plane = ipu_update_plane, .disable_plane = ipu_disable_plane, .destroy = ipu_plane_destroy, diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index 363e2c7741e2..2d1fd02cd3d6 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -35,7 +35,6 @@ struct imx_parallel_display { void *edid; int edid_len; u32 bus_format; - int mode_valid; struct drm_display_mode mode; struct drm_panel *panel; }; @@ -68,17 +67,6 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector) num_modes = drm_add_edid_modes(connector, imxpd->edid); } - if (imxpd->mode_valid) { - struct drm_display_mode *mode = drm_mode_create(connector->dev); - - if (!mode) - return -EINVAL; - drm_mode_copy(mode, &imxpd->mode); - mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, - drm_mode_probed_add(connector, mode); - num_modes++; - } - if (np) { struct drm_display_mode *mode = drm_mode_create(connector->dev); @@ -115,8 +103,8 @@ static void imx_pd_encoder_dpms(struct drm_encoder *encoder, int mode) static void imx_pd_encoder_prepare(struct drm_encoder *encoder) { struct imx_parallel_display *imxpd = enc_to_imxpd(encoder); - - imx_drm_set_bus_format(encoder, imxpd->bus_format); + imx_drm_set_bus_config(encoder, imxpd->bus_format, 2, 3, + imxpd->connector.display_info.bus_flags); } static void imx_pd_encoder_commit(struct drm_encoder *encoder) @@ -203,7 +191,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) { struct drm_device *drm = data; struct device_node *np = dev->of_node; - struct device_node *port; + struct device_node *ep; const u8 *edidp; struct imx_parallel_display *imxpd; int ret; @@ -230,18 +218,18 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) } /* port@1 is the output port */ - port = of_graph_get_port_by_id(np, 1); - if (port) { - struct device_node *endpoint, *remote; - - endpoint = of_get_child_by_name(port, "endpoint"); - if (endpoint) { - remote = of_graph_get_remote_port_parent(endpoint); - if (remote) - imxpd->panel = of_drm_find_panel(remote); - if (!imxpd->panel) - return -EPROBE_DEFER; + ep = of_graph_get_endpoint_by_regs(np, 1, -1); + if (ep) { + struct device_node *remote; + + remote = of_graph_get_remote_port_parent(ep); + of_node_put(ep); + if 
(remote) { + imxpd->panel = of_drm_find_panel(remote); + of_node_put(remote); } + if (!imxpd->panel) + return -EPROBE_DEFER; } imxpd->dev = dev; diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index d05ca7901315..0186e500d2a5 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -432,11 +432,6 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi, unsigned long pll_rate; unsigned int factor; - if (!dpi) { - dev_err(dpi->dev, "invalid argument\n"); - return -EINVAL; - } - pix_rate = 1000UL * mode->clock; if (mode->clock <= 74000) factor = 8 * 3; diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 2d808e59fefd..769559124562 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -695,10 +695,8 @@ static void mtk_dsi_destroy_conn_enc(struct mtk_dsi *dsi) { drm_encoder_cleanup(&dsi->encoder); /* Skip connector cleanup if creation was delegated to the bridge */ - if (dsi->conn.dev) { - drm_connector_unregister(&dsi->conn); + if (dsi->conn.dev) drm_connector_cleanup(&dsi->conn); - } } static void mtk_dsi_ddp_start(struct mtk_ddp_comp *comp) diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index 14e64e08909e..d347dca17267 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -182,7 +182,7 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock) } } - fvv = pllreffreq * testn / testm; + fvv = pllreffreq * (n + 1) / (m + 1); fvv = (fvv - 800000) / 50000; if (fvv > 15) @@ -202,6 +202,14 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock) WREG_DAC(MGA1064_PIX_PLLC_M, m); WREG_DAC(MGA1064_PIX_PLLC_N, n); WREG_DAC(MGA1064_PIX_PLLC_P, p); + + if (mdev->unique_rev_id >= 0x04) { + WREG_DAC(0x1a, 0x09); + msleep(20); + WREG_DAC(0x1a, 0x01); + + } + return 0; } diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index fbe304ee6c80..2aec27dbb5bb 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -408,7 +408,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, } adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo); - if (!adreno_gpu->memptrs) { + if (IS_ERR(adreno_gpu->memptrs)) { dev_err(drm->dev, "could not vmap memptrs\n"); return -ENOMEM; } diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index d9759bf3482e..c6cf837c5193 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -159,6 +159,10 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, dev->mode_config.fb_base = paddr; fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo); + if (IS_ERR(fbi->screen_base)) { + ret = PTR_ERR(fbi->screen_base); + goto fail_unlock; + } fbi->screen_size = fbdev->bo->size; fbi->fix.smem_start = paddr; fbi->fix.smem_len = fbdev->bo->size; diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 7daf4054dd2b..69836f5685b1 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -398,6 +398,8 @@ void *msm_gem_vaddr_locked(struct drm_gem_object *obj) return ERR_CAST(pages); msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, VM_MAP, pgprot_writecombine(PAGE_KERNEL)); + if (msm_obj->vaddr == NULL) + return ERR_PTR(-ENOMEM); } return msm_obj->vaddr; } diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c 
b/drivers/gpu/drm/msm/msm_gem_submit.c index b89ca5174863..eb4bb8b2f3a5 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -40,12 +40,14 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, submit->dev = dev; submit->gpu = gpu; + submit->fence = NULL; submit->pid = get_pid(task_pid(current)); /* initially, until copy_from_user() and bo lookup succeeds: */ submit->nr_bos = 0; submit->nr_cmds = 0; + INIT_LIST_HEAD(&submit->node); INIT_LIST_HEAD(&submit->bo_list); ww_acquire_init(&submit->ticket, &reservation_ww_class); @@ -75,6 +77,11 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, void __user *userptr = u64_to_user_ptr(args->bos + (i * sizeof(submit_bo))); + /* make sure we don't have garbage flags, in case we hit + * error path before flags is initialized: + */ + submit->bos[i].flags = 0; + ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo)); if (ret) { ret = -EFAULT; diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index b48f73ac6389..0857710c2ff2 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c @@ -312,6 +312,9 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit) struct msm_gem_object *obj = submit->bos[idx].obj; const char *buf = msm_gem_vaddr_locked(&obj->base); + if (IS_ERR(buf)) + continue; + buf += iova - submit->bos[idx].iova; rd_write_section(rd, RD_GPUADDR, diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index 1f14b908b221..42f5359cf988 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -40,6 +40,10 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size) } ring->start = msm_gem_vaddr_locked(ring->bo); + if (IS_ERR(ring->start)) { + ret = PTR_ERR(ring->start); + goto fail; + } ring->end = ring->start + (size / 4); ring->cur = ring->start; diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig index 73241c4eb7aa..336ad4de9981 100644 --- a/drivers/gpu/drm/omapdrm/Kconfig +++ b/drivers/gpu/drm/omapdrm/Kconfig @@ -2,6 +2,7 @@ config DRM_OMAP tristate "OMAP DRM" depends on DRM depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM + select OMAP2_DSS select DRM_KMS_HELPER select DRM_KMS_FB_HELPER select FB_SYS_FILLRECT diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index 225fd8d6ab31..667ca4a24ece 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -9,6 +9,7 @@ * the Free Software Foundation. */ +#include <linux/gpio/consumer.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/platform_device.h> diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c index 8c246c213e06..9594ff7a2b0c 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c @@ -14,7 +14,7 @@ * the Free Software Foundation. */ -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c index 2fd5602880a7..671806ca7d6a 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c @@ -9,7 +9,7 @@ * the Free Software Foundation. 
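All of the msm hunks above follow from one change: msm_gem_vaddr_locked() (and, by extension, msm_gem_vaddr()) now returns ERR_PTR(-ENOMEM) when the vmap fails, instead of NULL. Callers therefore switch to the canonical pattern:

	void *vaddr = msm_gem_vaddr_locked(&obj->base);

	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

msm_rd_dump_submit() instead just skips the buffer, since dumping is best-effort.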
*/ -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c index e780fd4f8b46..7c2331be8d15 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c @@ -9,7 +9,7 @@ * the Free Software Foundation. */ -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c index 36485c2137ce..2b118071b5a1 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c @@ -14,7 +14,7 @@ #include <linux/backlight.h> #include <linux/delay.h> #include <linux/fb.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/jiffies.h> #include <linux/module.h> diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c index 458f77bc473d..ac680e1de603 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c @@ -15,6 +15,7 @@ #include <linux/spi/spi.h> #include <linux/mutex.h> #include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <video/omapdss.h> #include <video/omap-panel-data.h> diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c index 780cb263a318..38d2920a95e6 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c @@ -15,7 +15,7 @@ #include <linux/delay.h> #include <linux/spi/spi.h> #include <linux/fb.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/of_gpio.h> #include <video/omapdss.h> diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c index 529a017602e4..4363fffc87e3 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c @@ -10,7 +10,7 @@ */ #include <linux/delay.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_gpio.h> diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c index 31efcca801bd..deb416736aad 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c @@ -29,7 +29,7 @@ #include <linux/sched.h> #include <linux/backlight.h> #include <linux/fb.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/of.h> #include <linux/of_gpio.h> diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c index 03e2beb7b4f0..d93175b03a12 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c @@ -14,7 +14,7 @@ #include <linux/delay.h> #include <linux/spi/spi.h> #include <linux/regulator/consumer.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/err.h> #include 
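
The string of omapdrm panel/encoder hunks here and below is a mechanical migration from the legacy integer API in <linux/gpio.h> to the descriptor-based consumer API. For reference, the consumer-side idiom the new include enables looks roughly like this (the "enable" connection name is illustrative):

    #include <linux/err.h>
    #include <linux/gpio/consumer.h>

    static int demo_panel_power(struct device *dev, bool on)
    {
        struct gpio_desc *gd;

        /* polarity comes from DT flags; the driver stops caring about it */
        gd = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
        if (IS_ERR(gd))
            return PTR_ERR(gd);
        if (gd)
            gpiod_set_value_cansleep(gd, on);
        return 0;
    }
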
<linux/slab.h> #include <linux/of_gpio.h> diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 8730646a0cbb..56c43f355ce3 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -1167,7 +1167,6 @@ static int dsi_regulator_init(struct platform_device *dsidev) { struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); struct regulator *vdds_dsi; - int r; if (dsi->vdds_dsi_reg != NULL) return 0; @@ -1180,15 +1179,6 @@ static int dsi_regulator_init(struct platform_device *dsidev) return PTR_ERR(vdds_dsi); } - if (regulator_can_change_voltage(vdds_dsi)) { - r = regulator_set_voltage(vdds_dsi, 1800000, 1800000); - if (r) { - devm_regulator_put(vdds_dsi); - DSSERR("can't set the DSI regulator voltage\n"); - return r; - } - } - dsi->vdds_dsi_reg = vdds_dsi; return 0; diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index f95ff319e68e..3303cfad4838 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -30,6 +30,7 @@ #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/clk.h> +#include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/gfp.h> diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index f892ae157ff3..4d46cdf7a037 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -33,6 +33,7 @@ #include <linux/gpio.h> #include <linux/regulator/consumer.h> #include <linux/component.h> +#include <linux/of.h> #include <video/omapdss.h> #include <sound/omap-hdmi-audio.h> @@ -100,7 +101,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data) static int hdmi_init_regulator(void) { - int r; struct regulator *reg; if (hdmi.vdda_reg != NULL) @@ -114,15 +114,6 @@ static int hdmi_init_regulator(void) return PTR_ERR(reg); } - if (regulator_can_change_voltage(reg)) { - r = regulator_set_voltage(reg, 1800000, 1800000); - if (r) { - devm_regulator_put(reg); - DSSWARN("can't set the regulator voltage\n"); - return r; - } - } - hdmi.vdda_reg = reg; return 0; diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c index fa72e735dad2..ef3afe99e487 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c @@ -211,7 +211,7 @@ static void hdmi_core_init(struct hdmi_core_video_config *video_cfg) static void hdmi_core_powerdown_disable(struct hdmi_core_data *core) { DSSDBG("Enter hdmi_core_powerdown_disable\n"); - REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x0, 0, 0); + REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x1, 0, 0); } static void hdmi_core_swreset_release(struct hdmi_core_data *core) diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index a43f7b10e113..9255c0e1e4a7 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -38,6 +38,7 @@ #include <linux/gpio.h> #include <linux/regulator/consumer.h> #include <linux/component.h> +#include <linux/of.h> #include <video/omapdss.h> #include <sound/omap-hdmi-audio.h> @@ -119,7 +120,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data) static int hdmi_init_regulator(void) { - int r; struct regulator *reg; if (hdmi.vdda_reg != NULL) @@ -131,15 +131,6 @@ static int hdmi_init_regulator(void) return PTR_ERR(reg); } - if (regulator_can_change_voltage(reg)) { - r = regulator_set_voltage(reg, 1800000, 1800000); - if (r) { - 
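
The dsi.c and hdmi4.c hunks (and hdmi5.c continuing below) delete the regulator_can_change_voltage()/regulator_set_voltage() dance entirely; regulator_can_change_voltage() was being removed from the regulator API at the time, and supply voltages belong to machine-level constraints rather than consumers. The surviving pattern is just the lookup ("vdda" is illustrative):

    #include <linux/device.h>
    #include <linux/regulator/consumer.h>

    static int demo_get_supply(struct device *dev, struct regulator **out)
    {
        struct regulator *reg = devm_regulator_get(dev, "vdda");

        if (IS_ERR(reg)) {
            if (PTR_ERR(reg) != -EPROBE_DEFER)
                dev_err(dev, "can't get vdda supply\n");
            return PTR_ERR(reg);
        }
        /* no regulator_set_voltage(): constraints pin the voltage */
        *out = reg;
        return 0;
    }
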
devm_regulator_put(reg); - DSSWARN("can't set the regulator voltage\n"); - return r; - } - } - hdmi.vdda_reg = reg; return 0; diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c index 6a397520cae5..8ab2093daa12 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c @@ -51,8 +51,8 @@ static void hdmi_core_ddc_init(struct hdmi_core_data *core) { void __iomem *base = core->base; const unsigned long long iclk = 266000000; /* DSS L3 ICLK */ - const unsigned ss_scl_high = 4000; /* ns */ - const unsigned ss_scl_low = 4700; /* ns */ + const unsigned ss_scl_high = 4600; /* ns */ + const unsigned ss_scl_low = 5400; /* ns */ const unsigned fs_scl_high = 600; /* ns */ const unsigned fs_scl_low = 1300; /* ns */ const unsigned sda_hold = 1000; /* ns */ @@ -458,7 +458,7 @@ static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core, c = (ptr[1] >> 6) & 0x3; m = (ptr[1] >> 4) & 0x3; - r = (ptr[1] >> 0) & 0x3; + r = (ptr[1] >> 0) & 0xf; itc = (ptr[2] >> 7) & 0x1; ec = (ptr[2] >> 4) & 0x7; diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c index 1f5d19c119ce..f98b750fc499 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c @@ -13,6 +13,7 @@ #include <linux/io.h> #include <linux/platform_device.h> #include <linux/slab.h> +#include <linux/seq_file.h> #include <video/omapdss.h> #include "dss.h" diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c index 06e23a7c432c..f1015e8b8267 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c @@ -16,6 +16,7 @@ #include <linux/io.h> #include <linux/platform_device.h> #include <linux/clk.h> +#include <linux/seq_file.h> #include <video/omapdss.h> diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c index 13442b9052d1..055f62fca5dc 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c @@ -14,6 +14,7 @@ #include <linux/err.h> #include <linux/io.h> #include <linux/platform_device.h> +#include <linux/seq_file.h> #include <video/omapdss.h> #include "dss.h" diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c index 6f5fc14fc015..479bf24050f8 100644 --- a/drivers/gpu/drm/omapdrm/omap_debugfs.c +++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c @@ -17,6 +17,8 @@ * this program. If not, see <http://www.gnu.org/licenses/>. */ +#include <linux/seq_file.h> + #include <drm/drm_crtc.h> #include <drm/drm_fb_helper.h> diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index de275a5be1db..4ceed7a9762f 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c @@ -27,6 +27,7 @@ #include <linux/module.h> #include <linux/platform_device.h> /* platform_device() */ #include <linux/sched.h> +#include <linux/seq_file.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/vmalloc.h> diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c index 94ec06d3d737..f84570d1636c 100644 --- a/drivers/gpu/drm/omapdrm/omap_fb.c +++ b/drivers/gpu/drm/omapdrm/omap_fb.c @@ -17,6 +17,8 @@ * this program. If not, see <http://www.gnu.org/licenses/>. 
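
Two small hdmi5_core.c fixes ride along here: the standard-speed DDC SCL timings are raised to 4600/5400 ns, and the AVI infoframe mask is corrected, since the R0..R3 active-format-aspect field is four bits wide, so masking ptr[1] with 0x3 dropped the top two bits. The nanosecond tables feed a ns-to-clock-tick conversion of this shape (a sketch; rounding up keeps each phase from undershooting the spec minimum):

    #include <linux/kernel.h>
    #include <linux/time64.h>
    #include <linux/types.h>

    static u32 demo_ns_to_ticks(u64 clk_hz, u32 ns)
    {
        return DIV_ROUND_UP_ULL(clk_hz * ns, NSEC_PER_SEC);
    }

At the 266 MHz DSS L3 ICLK used above, ss_scl_low = 5400 ns works out to DIV_ROUND_UP_ULL(266000000 * 5400, 1000000000) = 1437 ticks.
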
*/ +#include <linux/seq_file.h> + #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index b97afc281778..03698b6c806c 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -17,6 +17,7 @@ * this program. If not, see <http://www.gnu.org/licenses/>. */ +#include <linux/seq_file.h> #include <linux/shmem_fs.h> #include <linux/spinlock.h> #include <linux/pfn_t.h> diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index d0240743a17c..a7e978677937 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c @@ -2164,7 +2164,7 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev, if (pi->caps_stable_p_state) { stable_p_state_sclk = (max_limits->sclk * 75) / 100; - for (i = table->count - 1; i >= 0; i++) { + for (i = table->count - 1; i >= 0; i--) { if (stable_p_state_sclk >= table->entries[i].clk) { stable_p_state_sclk = table->entries[i].clk; break; diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c index 505620c7c2c8..e04deedabd4a 100644 --- a/drivers/gpu/drm/sti/sti_crtc.c +++ b/drivers/gpu/drm/sti/sti_crtc.c @@ -51,15 +51,6 @@ static void sti_crtc_disabling(struct drm_crtc *crtc) mixer->status = STI_MIXER_DISABLING; } -static bool sti_crtc_mode_fixup(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - /* accept the provided drm_display_mode, do not fix it up */ - drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); - return true; -} - static int sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode) { @@ -230,7 +221,6 @@ static void sti_crtc_atomic_flush(struct drm_crtc *crtc, static const struct drm_crtc_helper_funcs sti_crtc_helper_funcs = { .enable = sti_crtc_enable, .disable = sti_crtc_disabling, - .mode_fixup = sti_crtc_mode_fixup, .mode_set = drm_helper_crtc_mode_set, .mode_set_nofb = sti_crtc_mode_set_nofb, .mode_set_base = drm_helper_crtc_mode_set_base, diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c index 32c7986b63ab..6bf4ce466d20 100644 --- a/drivers/gpu/drm/sti/sti_vtg.c +++ b/drivers/gpu/drm/sti/sti_vtg.c @@ -437,7 +437,7 @@ static int vtg_probe(struct platform_device *pdev) return -EPROBE_DEFER; } else { vtg->irq = platform_get_irq(pdev, 0); - if (IS_ERR_VALUE(vtg->irq)) { + if (vtg->irq < 0) { DRM_ERROR("Failed to get VTG interrupt\n"); return vtg->irq; } @@ -447,7 +447,7 @@ static int vtg_probe(struct platform_device *pdev) ret = devm_request_threaded_irq(dev, vtg->irq, vtg_irq, vtg_irq_thread, IRQF_ONESHOT, dev_name(dev), vtg); - if (IS_ERR_VALUE(ret)) { + if (ret < 0) { DRM_ERROR("Failed to register VTG interrupt\n"); return ret; } diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c index 7716f42f8aab..6b8c5b3bf588 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c @@ -342,7 +342,7 @@ static int tfp410_probe(struct platform_device *pdev) tfp410_mod->gpio = of_get_named_gpio_flags(node, "powerdn-gpio", 0, NULL); - if (IS_ERR_VALUE(tfp410_mod->gpio)) { + if (tfp410_mod->gpio < 0) { dev_warn(&pdev->dev, "No power down GPIO\n"); } else { ret = gpio_request(tfp410_mod->gpio, "DVI_PDn"); diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 904d0754ad78..0f18b76c7906 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ 
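
The kv_dpm.c one-character fix deserves a callout: the loop intends to scan the clock table from the highest entry down, but incrementing i meant that whenever the first test failed, the index marched past table->count and read off the end of entries[]. Distilled (names hypothetical, table assumed sorted ascending by clock):

    #include <linux/types.h>

    static u32 demo_clamp_sclk(const u32 *entries, int count, u32 limit)
    {
        int i;

        for (i = count - 1; i >= 0; i--)    /* was i++: ran off the array */
            if (limit >= entries[i])
                return entries[i];          /* highest level under limit */
        return entries[0];
    }
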
b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -456,14 +456,6 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc, WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size); - HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), - vc4_state->mm.start); - - if (debug_dump_regs) { - DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc)); - vc4_hvs_dump_state(dev); - } - if (crtc->state->event) { unsigned long flags; @@ -473,8 +465,20 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc, spin_lock_irqsave(&dev->event_lock, flags); vc4_crtc->event = crtc->state->event; - spin_unlock_irqrestore(&dev->event_lock, flags); crtc->state->event = NULL; + + HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), + vc4_state->mm.start); + + spin_unlock_irqrestore(&dev->event_lock, flags); + } else { + HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), + vc4_state->mm.start); + } + + if (debug_dump_regs) { + DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc)); + vc4_hvs_dump_state(dev); } } @@ -500,12 +504,17 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc) { struct drm_crtc *crtc = &vc4_crtc->base; struct drm_device *dev = crtc->dev; + struct vc4_dev *vc4 = to_vc4_dev(dev); + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); + u32 chan = vc4_crtc->channel; unsigned long flags; spin_lock_irqsave(&dev->event_lock, flags); - if (vc4_crtc->event) { + if (vc4_crtc->event && + (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)))) { drm_crtc_send_vblank_event(crtc, vc4_crtc->event); vc4_crtc->event = NULL; + drm_crtc_vblank_put(crtc); } spin_unlock_irqrestore(&dev->event_lock, flags); } @@ -556,6 +565,7 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb) spin_unlock_irqrestore(&dev->event_lock, flags); } + drm_crtc_vblank_put(crtc); drm_framebuffer_unreference(flip_state->fb); kfree(flip_state); @@ -598,6 +608,8 @@ static int vc4_async_page_flip(struct drm_crtc *crtc, return ret; } + WARN_ON(drm_crtc_vblank_get(crtc) != 0); + /* Immediately update the plane's legacy fb pointer, so that later * modeset prep sees the state that will be present when the semaphore * is released. 
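
The vc4_crtc.c rework above closes a flip-completion race: the new display-list address is written to the HVS while event_lock is held, each queued event now pins a vblank reference (drm_crtc_vblank_get() at submit, drm_crtc_vblank_put() on completion), and the handler only fires the event once SCALER_DISPLACTX confirms the hardware is actually scanning out the new list. The publish-under-lock half, reduced to a sketch (demo_crtc and the register write are stand-ins):

    #include <linux/io.h>
    #include <linux/spinlock.h>

    struct drm_pending_vblank_event;

    struct demo_crtc {
        spinlock_t event_lock;              /* assumed initialized at probe */
        struct drm_pending_vblank_event *event;
        void __iomem *dlist_reg;
    };

    static void demo_flush(struct demo_crtc *c,
                           struct drm_pending_vblank_event *ev, u32 new_start)
    {
        unsigned long flags;

        spin_lock_irqsave(&c->event_lock, flags);
        c->event = ev;                      /* arm the event ... */
        writel(new_start, c->dlist_reg);    /* ... and publish atomically */
        spin_unlock_irqrestore(&c->event_lock, flags);
    }

Because both stores happen under the same lock, the vblank handler can never observe the armed event without also seeing the address it must compare against.
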
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 3446ece21b4a..250ed7e3754c 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -66,12 +66,12 @@ static const struct file_operations vc4_drm_fops = { }; static const struct drm_ioctl_desc vc4_drm_ioctls[] = { - DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, 0), - DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, 0), - DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, 0), - DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, 0), - DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, 0), - DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, 0), + DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl, DRM_ROOT_ONLY), }; @@ -91,7 +91,7 @@ static struct drm_driver vc4_drm_driver = { .enable_vblank = vc4_enable_vblank, .disable_vblank = vc4_disable_vblank, - .get_vblank_counter = drm_vblank_count, + .get_vblank_counter = drm_vblank_no_hw_counter, #if defined(CONFIG_DEBUG_FS) .debugfs_init = vc4_debugfs_init, diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index cb37751bc99f..861a623bc185 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -117,10 +117,18 @@ static int vc4_atomic_commit(struct drm_device *dev, return -ENOMEM; /* Make sure that any outstanding modesets have finished. 
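
Flipping the vc4 ioctl flags from 0 to DRM_RENDER_ALLOW opens the BO/submit paths to the unprivileged render node while VC4_GET_HANG_STATE stays DRM_ROOT_ONLY; the driver also moves to the stock drm_vblank_no_hw_counter helper for hardware without a vblank counter register. Simplified, the gate the core applies per ioctl flag looks like this (a sketch of the policy only, not the actual drm_ioctl() logic, which also handles DRM_AUTH and master checks):

    #include <drm/drmP.h>

    static bool demo_ioctl_permitted(unsigned int flags, bool on_render_node,
                                     bool is_root)
    {
        /* render nodes may only call explicitly whitelisted ioctls */
        if (on_render_node && !(flags & DRM_RENDER_ALLOW))
            return false;
        if ((flags & DRM_ROOT_ONLY) && !is_root)
            return false;
        return true;
    }
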
*/ - ret = down_interruptible(&vc4->async_modeset); - if (ret) { - kfree(c); - return ret; + if (nonblock) { + ret = down_trylock(&vc4->async_modeset); + if (ret) { + kfree(c); + return -EBUSY; + } + } else { + ret = down_interruptible(&vc4->async_modeset); + if (ret) { + kfree(c); + return ret; + } } ret = drm_atomic_helper_prepare_planes(dev, state); diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h index 6163b95c5411..f99eece4cc97 100644 --- a/drivers/gpu/drm/vc4/vc4_regs.h +++ b/drivers/gpu/drm/vc4/vc4_regs.h @@ -341,6 +341,10 @@ #define SCALER_DISPLACT0 0x00000030 #define SCALER_DISPLACT1 0x00000034 #define SCALER_DISPLACT2 0x00000038 +#define SCALER_DISPLACTX(x) (SCALER_DISPLACT0 + \ + (x) * (SCALER_DISPLACT1 - \ + SCALER_DISPLACT0)) + #define SCALER_DISPCTRL0 0x00000040 # define SCALER_DISPCTRLX_ENABLE BIT(31) # define SCALER_DISPCTRLX_RESET BIT(30) diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c index 498b37e39058..e1e31e9e67cd 100644 --- a/drivers/gpu/host1x/hw/intr_hw.c +++ b/drivers/gpu/host1x/hw/intr_hw.c @@ -85,7 +85,7 @@ static int _host1x_intr_init_host_sync(struct host1x *host, u32 cpm, err = devm_request_irq(host->dev, host->intr_syncpt_irq, syncpt_thresh_isr, IRQF_SHARED, "host1x_syncpt", host); - if (IS_ERR_VALUE(err)) { + if (err < 0) { WARN_ON(1); return err; } diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index abb98c77bad2..99dcacf05b99 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c @@ -997,7 +997,7 @@ struct ipu_platform_reg { }; /* These must be in the order of the corresponding device tree port nodes */ -static const struct ipu_platform_reg client_reg[] = { +static struct ipu_platform_reg client_reg[] = { { .pdata = { .csi = 0, @@ -1048,7 +1048,7 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base) mutex_unlock(&ipu_client_id_mutex); for (i = 0; i < ARRAY_SIZE(client_reg); i++) { - const struct ipu_platform_reg *reg = &client_reg[i]; + struct ipu_platform_reg *reg = &client_reg[i]; struct platform_device *pdev; struct device_node *of_node; @@ -1070,6 +1070,7 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base) pdev->dev.parent = dev; + reg->pdata.of_node = of_node; ret = platform_device_add_data(pdev, ®->pdata, sizeof(reg->pdata)); if (!ret) diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c index 952fe692d764..24e395c5907d 100644 --- a/drivers/hwmon/emc2103.c +++ b/drivers/hwmon/emc2103.c @@ -58,7 +58,7 @@ static const u8 REG_TEMP_MAX[4] = { 0x34, 0x30, 0x31, 0x32 }; */ static int apd = -1; module_param(apd, bint, 0); -MODULE_PARM_DESC(init, "Set to zero to disable anti-parallel diode mode"); +MODULE_PARM_DESC(apd, "Set to zero to disable anti-parallel diode mode"); struct temperature { s8 degrees; diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c index 0addc84ba948..69166ab3151d 100644 --- a/drivers/hwmon/lm75.c +++ b/drivers/hwmon/lm75.c @@ -77,7 +77,6 @@ static const u8 LM75_REG_TEMP[3] = { struct lm75_data { struct i2c_client *client; struct device *hwmon_dev; - struct thermal_zone_device *tz; struct mutex update_lock; u8 orig_conf; u8 resolution; /* In bits, between 9 and 12 */ @@ -306,11 +305,9 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id) if (IS_ERR(data->hwmon_dev)) return PTR_ERR(data->hwmon_dev); - data->tz = thermal_zone_of_sensor_register(data->hwmon_dev, 0, - data->hwmon_dev, - &lm75_of_thermal_ops); - if (IS_ERR(data->tz)) 
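
vc4_kms.c grows the standard nonblocking-commit split: a nonblocking atomic commit must not sleep waiting for earlier commits, so it uses down_trylock() and reports -EBUSY instead of blocking in down_interruptible(). In general form:

    #include <linux/semaphore.h>

    static int demo_acquire(struct semaphore *sem, bool nonblock)
    {
        if (nonblock)
            /* down_trylock() returns nonzero when the count is taken */
            return down_trylock(sem) ? -EBUSY : 0;
        return down_interruptible(sem);     /* 0, or -EINTR on signal */
    }
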
- data->tz = NULL; + devm_thermal_zone_of_sensor_register(data->hwmon_dev, 0, + data->hwmon_dev, + &lm75_of_thermal_ops); dev_info(dev, "%s: sensor '%s'\n", dev_name(data->hwmon_dev), client->name); @@ -322,7 +319,6 @@ static int lm75_remove(struct i2c_client *client) { struct lm75_data *data = i2c_get_clientdata(client); - thermal_zone_of_sensor_unregister(data->hwmon_dev, data->tz); hwmon_device_unregister(data->hwmon_dev); lm75_write_value(client, LM75_REG_CONF, data->orig_conf); return 0; diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c index faa6e8dfbaaf..8ef7b713cb1a 100644 --- a/drivers/hwmon/ntc_thermistor.c +++ b/drivers/hwmon/ntc_thermistor.c @@ -259,7 +259,6 @@ struct ntc_data { struct device *dev; int n_comp; char name[PLATFORM_NAME_SIZE]; - struct thermal_zone_device *tz; }; #if defined(CONFIG_OF) && IS_ENABLED(CONFIG_IIO) @@ -579,6 +578,7 @@ static const struct thermal_zone_of_device_ops ntc_of_thermal_ops = { static int ntc_thermistor_probe(struct platform_device *pdev) { + struct thermal_zone_device *tz; const struct of_device_id *of_id = of_match_device(of_match_ptr(ntc_match), &pdev->dev); const struct platform_device_id *pdev_id; @@ -677,12 +677,10 @@ static int ntc_thermistor_probe(struct platform_device *pdev) dev_info(&pdev->dev, "Thermistor type: %s successfully probed.\n", pdev_id->name); - data->tz = thermal_zone_of_sensor_register(data->dev, 0, data->dev, - &ntc_of_thermal_ops); - if (IS_ERR(data->tz)) { + tz = devm_thermal_zone_of_sensor_register(data->dev, 0, data->dev, + &ntc_of_thermal_ops); + if (IS_ERR(tz)) dev_dbg(&pdev->dev, "Failed to register to thermal fw.\n"); - data->tz = NULL; - } return 0; err_after_sysfs: @@ -700,8 +698,6 @@ static int ntc_thermistor_remove(struct platform_device *pdev) sysfs_remove_group(&data->dev->kobj, &ntc_attr_group); ntc_iio_channel_release(pdata); - thermal_zone_of_sensor_unregister(data->dev, data->tz); - return 0; } diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c index 3e23003f78b0..f9af3935b427 100644 --- a/drivers/hwmon/pwm-fan.c +++ b/drivers/hwmon/pwm-fan.c @@ -40,15 +40,18 @@ struct pwm_fan_ctx { static int __set_pwm(struct pwm_fan_ctx *ctx, unsigned long pwm) { + struct pwm_args pargs; unsigned long duty; int ret = 0; + pwm_get_args(ctx->pwm, &pargs); + mutex_lock(&ctx->lock); if (ctx->pwm_value == pwm) goto exit_set_pwm_err; - duty = DIV_ROUND_UP(pwm * (ctx->pwm->period - 1), MAX_PWM); - ret = pwm_config(ctx->pwm, duty, ctx->pwm->period); + duty = DIV_ROUND_UP(pwm * (pargs.period - 1), MAX_PWM); + ret = pwm_config(ctx->pwm, duty, pargs.period); if (ret) goto exit_set_pwm_err; @@ -215,6 +218,7 @@ static int pwm_fan_probe(struct platform_device *pdev) { struct thermal_cooling_device *cdev; struct pwm_fan_ctx *ctx; + struct pwm_args pargs; struct device *hwmon; int duty_cycle; int ret; @@ -233,11 +237,19 @@ static int pwm_fan_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ctx); + /* + * FIXME: pwm_apply_args() should be removed when switching to the + * atomic PWM API. 
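
lm75.c and ntc_thermistor.c above, plus tmp102.c and scpi-hwmon.c below, all get the same conversion: devm_thermal_zone_of_sensor_register() ties the zone to the device's lifetime, so the drivers can drop both the cached struct thermal_zone_device pointer and the unregister call in remove(). A sketch of the reduced pattern, for the common case where the zone is optional and failure only merits a debug message:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/thermal.h>

    static void demo_register_tz(struct device *dev, void *sensor_data,
                                 const struct thermal_zone_of_device_ops *ops)
    {
        struct thermal_zone_device *tzd;

        tzd = devm_thermal_zone_of_sensor_register(dev, 0, sensor_data, ops);
        if (IS_ERR(tzd))
            dev_dbg(dev, "no thermal zone bound (%ld)\n", PTR_ERR(tzd));
        /* no unregister needed: devres drops the zone on driver detach */
    }
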
+ */ + pwm_apply_args(ctx->pwm); + /* Set duty cycle to maximum allowed */ - duty_cycle = ctx->pwm->period - 1; + pwm_get_args(ctx->pwm, &pargs); + + duty_cycle = pargs.period - 1; ctx->pwm_value = MAX_PWM; - ret = pwm_config(ctx->pwm, duty_cycle, ctx->pwm->period); + ret = pwm_config(ctx->pwm, duty_cycle, pargs.period); if (ret) { dev_err(&pdev->dev, "Failed to configure PWM\n"); return ret; @@ -303,14 +315,16 @@ static int pwm_fan_suspend(struct device *dev) static int pwm_fan_resume(struct device *dev) { struct pwm_fan_ctx *ctx = dev_get_drvdata(dev); + struct pwm_args pargs; unsigned long duty; int ret; if (ctx->pwm_value == 0) return 0; - duty = DIV_ROUND_UP(ctx->pwm_value * (ctx->pwm->period - 1), MAX_PWM); - ret = pwm_config(ctx->pwm, duty, ctx->pwm->period); + pwm_get_args(ctx->pwm, &pargs); + duty = DIV_ROUND_UP(ctx->pwm_value * (pargs.period - 1), MAX_PWM); + ret = pwm_config(ctx->pwm, duty, pargs.period); if (ret) return ret; return pwm_enable(ctx->pwm); diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c index 912b449c8303..25b44e68926d 100644 --- a/drivers/hwmon/scpi-hwmon.c +++ b/drivers/hwmon/scpi-hwmon.c @@ -31,10 +31,8 @@ struct sensor_data { }; struct scpi_thermal_zone { - struct list_head list; int sensor_id; struct scpi_sensors *scpi_sensors; - struct thermal_zone_device *tzd; }; struct scpi_sensors { @@ -92,20 +90,6 @@ scpi_show_label(struct device *dev, struct device_attribute *attr, char *buf) return sprintf(buf, "%s\n", sensor->info.name); } -static void -unregister_thermal_zones(struct platform_device *pdev, - struct scpi_sensors *scpi_sensors) -{ - struct list_head *pos; - - list_for_each(pos, &scpi_sensors->thermal_zones) { - struct scpi_thermal_zone *zone; - - zone = list_entry(pos, struct scpi_thermal_zone, list); - thermal_zone_of_sensor_unregister(&pdev->dev, zone->tzd); - } -} - static struct thermal_zone_of_device_ops scpi_sensor_ops = { .get_temp = scpi_read_temp, }; @@ -118,7 +102,7 @@ static int scpi_hwmon_probe(struct platform_device *pdev) struct scpi_ops *scpi_ops; struct device *hwdev, *dev = &pdev->dev; struct scpi_sensors *scpi_sensors; - int ret, idx; + int idx, ret; scpi_ops = get_scpi_ops(); if (!scpi_ops) @@ -232,48 +216,35 @@ static int scpi_hwmon_probe(struct platform_device *pdev) INIT_LIST_HEAD(&scpi_sensors->thermal_zones); for (i = 0; i < nr_sensors; i++) { struct sensor_data *sensor = &scpi_sensors->data[i]; + struct thermal_zone_device *z; struct scpi_thermal_zone *zone; if (sensor->info.class != TEMPERATURE) continue; zone = devm_kzalloc(dev, sizeof(*zone), GFP_KERNEL); - if (!zone) { - ret = -ENOMEM; - goto unregister_tzd; - } + if (!zone) + return -ENOMEM; zone->sensor_id = i; zone->scpi_sensors = scpi_sensors; - zone->tzd = thermal_zone_of_sensor_register(dev, - sensor->info.sensor_id, zone, &scpi_sensor_ops); + z = devm_thermal_zone_of_sensor_register(dev, + sensor->info.sensor_id, + zone, + &scpi_sensor_ops); /* * The call to thermal_zone_of_sensor_register returns * an error for sensors that are not associated with * any thermal zones or if the thermal subsystem is * not configured. 
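
pwm-fan now derives the period from pwm_get_args() rather than peeking at ctx->pwm->period directly, with pwm_apply_args() (flagged FIXME above) carrying the legacy polarity/period setup until drivers move to the atomic PWM API. The duty-cycle math, in isolation:

    #include <linux/kernel.h>
    #include <linux/pwm.h>

    /* scale value in [0, max] onto the reference period from DT/board args */
    static int demo_set_duty(struct pwm_device *pwm, unsigned long value,
                             unsigned long max)
    {
        struct pwm_args pargs;
        unsigned long duty;

        pwm_get_args(pwm, &pargs);
        duty = DIV_ROUND_UP(value * (pargs.period - 1), max);
        return pwm_config(pwm, duty, pargs.period);
    }
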
*/ - if (IS_ERR(zone->tzd)) { + if (IS_ERR(z)) { devm_kfree(dev, zone); continue; } - list_add(&zone->list, &scpi_sensors->thermal_zones); } return 0; - -unregister_tzd: - unregister_thermal_zones(pdev, scpi_sensors); - return ret; -} - -static int scpi_hwmon_remove(struct platform_device *pdev) -{ - struct scpi_sensors *scpi_sensors = platform_get_drvdata(pdev); - - unregister_thermal_zones(pdev, scpi_sensors); - - return 0; } static const struct of_device_id scpi_of_match[] = { @@ -288,7 +259,6 @@ static struct platform_driver scpi_hwmon_platdrv = { .of_match_table = scpi_of_match, }, .probe = scpi_hwmon_probe, - .remove = scpi_hwmon_remove, }; module_platform_driver(scpi_hwmon_platdrv); diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c index 5289aa0980a8..f1e96fd7f445 100644 --- a/drivers/hwmon/tmp102.c +++ b/drivers/hwmon/tmp102.c @@ -53,7 +53,6 @@ struct tmp102 { struct i2c_client *client; struct device *hwmon_dev; - struct thermal_zone_device *tz; struct mutex lock; u16 config_orig; unsigned long last_update; @@ -232,10 +231,8 @@ static int tmp102_probe(struct i2c_client *client, goto fail_restore_config; } tmp102->hwmon_dev = hwmon_dev; - tmp102->tz = thermal_zone_of_sensor_register(hwmon_dev, 0, hwmon_dev, - &tmp102_of_thermal_ops); - if (IS_ERR(tmp102->tz)) - tmp102->tz = NULL; + devm_thermal_zone_of_sensor_register(hwmon_dev, 0, hwmon_dev, + &tmp102_of_thermal_ops); dev_info(dev, "initialized\n"); @@ -251,7 +248,6 @@ static int tmp102_remove(struct i2c_client *client) { struct tmp102 *tmp102 = i2c_get_clientdata(client); - thermal_zone_of_sensor_unregister(tmp102->hwmon_dev, tmp102->tz); hwmon_device_unregister(tmp102->hwmon_dev); /* Stop monitoring if device was stopped originally */ diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 2dd40ddf04de..f167021b8c21 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -965,7 +965,7 @@ config I2C_XILINX config I2C_XLR tristate "Netlogic XLR and Sigma Designs I2C support" - depends on CPU_XLR || ARCH_TANGOX + depends on CPU_XLR || ARCH_TANGO help This driver enables support for the on-chip I2C interface of the Netlogic XLR/XLS MIPS processors and Sigma Designs SOCs. @@ -985,6 +985,7 @@ config I2C_XLP9XX config I2C_RCAR tristate "Renesas R-Car I2C Controller" + depends on HAS_DMA depends on ARCH_RENESAS || COMPILE_TEST select I2C_SLAVE help diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c index 921d32bfcda8..f23372669f77 100644 --- a/drivers/i2c/busses/i2c-at91.c +++ b/drivers/i2c/busses/i2c-at91.c @@ -1013,7 +1013,7 @@ static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr) error: if (ret != -EPROBE_DEFER) - dev_info(dev->dev, "can't use DMA, error %d\n", ret); + dev_info(dev->dev, "can't get DMA channel, continue without DMA support\n"); if (dma->chan_rx) dma_release_channel(dma->chan_rx); if (dma->chan_tx) diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index 9aca1b4e2d8d..52407f3c9e1c 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -623,7 +623,7 @@ static struct dma_chan *rcar_i2c_request_dma_chan(struct device *dev, char *chan_name = dir == DMA_MEM_TO_DEV ? 
"tx" : "rx"; int ret; - chan = dma_request_slave_channel_reason(dev, chan_name); + chan = dma_request_chan(dev, chan_name); if (IS_ERR(chan)) { ret = PTR_ERR(chan); dev_dbg(dev, "request_channel failed for %s (%d)\n", diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c index 0b1108d3c2f3..6ecfd76270f2 100644 --- a/drivers/i2c/i2c-dev.c +++ b/drivers/i2c/i2c-dev.c @@ -22,6 +22,7 @@ /* The I2C_RDWR ioctl code is written by Kolja Waschk <waschk@telos.de> */ +#include <linux/cdev.h> #include <linux/device.h> #include <linux/fs.h> #include <linux/i2c-dev.h> @@ -47,9 +48,10 @@ struct i2c_dev { struct list_head list; struct i2c_adapter *adap; struct device *dev; + struct cdev cdev; }; -#define I2C_MINORS 256 +#define I2C_MINORS MINORMASK static LIST_HEAD(i2c_dev_list); static DEFINE_SPINLOCK(i2c_dev_list_lock); @@ -89,7 +91,7 @@ static struct i2c_dev *get_free_i2c_dev(struct i2c_adapter *adap) return i2c_dev; } -static void return_i2c_dev(struct i2c_dev *i2c_dev) +static void put_i2c_dev(struct i2c_dev *i2c_dev) { spin_lock(&i2c_dev_list_lock); list_del(&i2c_dev->list); @@ -552,6 +554,12 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy) if (IS_ERR(i2c_dev)) return PTR_ERR(i2c_dev); + cdev_init(&i2c_dev->cdev, &i2cdev_fops); + i2c_dev->cdev.owner = THIS_MODULE; + res = cdev_add(&i2c_dev->cdev, MKDEV(I2C_MAJOR, adap->nr), 1); + if (res) + goto error_cdev; + /* register this i2c device with the driver core */ i2c_dev->dev = device_create(i2c_dev_class, &adap->dev, MKDEV(I2C_MAJOR, adap->nr), NULL, @@ -565,7 +573,9 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy) adap->name, adap->nr); return 0; error: - return_i2c_dev(i2c_dev); + cdev_del(&i2c_dev->cdev); +error_cdev: + put_i2c_dev(i2c_dev); return res; } @@ -582,7 +592,8 @@ static int i2cdev_detach_adapter(struct device *dev, void *dummy) if (!i2c_dev) /* attach_adapter must have failed */ return 0; - return_i2c_dev(i2c_dev); + cdev_del(&i2c_dev->cdev); + put_i2c_dev(i2c_dev); device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr)); pr_debug("i2c-dev: adapter [%s] unregistered\n", adap->name); @@ -620,7 +631,7 @@ static int __init i2c_dev_init(void) printk(KERN_INFO "i2c /dev entries driver\n"); - res = register_chrdev(I2C_MAJOR, "i2c", &i2cdev_fops); + res = register_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS, "i2c"); if (res) goto out; @@ -644,7 +655,7 @@ static int __init i2c_dev_init(void) out_unreg_class: class_destroy(i2c_dev_class); out_unreg_chrdev: - unregister_chrdev(I2C_MAJOR, "i2c"); + unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS); out: printk(KERN_ERR "%s: Driver Initialisation failed\n", __FILE__); return res; @@ -655,7 +666,7 @@ static void __exit i2c_dev_exit(void) bus_unregister_notifier(&i2c_bus_type, &i2cdev_notifier); i2c_for_each_dev(NULL, i2cdev_detach_adapter); class_destroy(i2c_dev_class); - unregister_chrdev(I2C_MAJOR, "i2c"); + unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS); } MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and " diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig index 6425c0e5d18a..2137adfbd8c3 100644 --- a/drivers/infiniband/Kconfig +++ b/drivers/infiniband/Kconfig @@ -85,4 +85,6 @@ source "drivers/infiniband/ulp/isert/Kconfig" source "drivers/infiniband/sw/rdmavt/Kconfig" +source "drivers/infiniband/hw/hfi1/Kconfig" + endif # INFINIBAND diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile index 26987d9d7e1c..edaae9f9853c 100644 --- a/drivers/infiniband/core/Makefile +++ 
b/drivers/infiniband/core/Makefile @@ -1,8 +1,7 @@ infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_cm.o user_access-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_ucm.o -obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \ - ib_cm.o iw_cm.o ib_addr.o \ +obj-$(CONFIG_INFINIBAND) += ib_core.o ib_cm.o iw_cm.o \ $(infiniband-y) obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \ @@ -10,14 +9,11 @@ obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \ ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \ device.o fmr_pool.o cache.o netlink.o \ - roce_gid_mgmt.o mr_pool.o + roce_gid_mgmt.o mr_pool.o addr.o sa_query.o \ + multicast.o mad.o smi.o agent.o mad_rmpp.o ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o umem_rbtree.o -ib_mad-y := mad.o smi.o agent.o mad_rmpp.o - -ib_sa-y := sa_query.o multicast.o - ib_cm-y := cm.o iw_cm-y := iwcm.o iwpm_util.o iwpm_msg.o @@ -28,8 +24,6 @@ rdma_cm-$(CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS) += cma_configfs.o rdma_ucm-y := ucma.o -ib_addr-y := addr.o - ib_umad-y := user_mad.o ib_ucm-y := ucm.o diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 337353d86cfa..1374541a4528 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -46,10 +46,10 @@ #include <net/ip6_route.h> #include <rdma/ib_addr.h> #include <rdma/ib.h> +#include <rdma/rdma_netlink.h> +#include <net/netlink.h> -MODULE_AUTHOR("Sean Hefty"); -MODULE_DESCRIPTION("IB Address Translation"); -MODULE_LICENSE("Dual BSD/GPL"); +#include "core_priv.h" struct addr_req { struct list_head list; @@ -62,8 +62,11 @@ struct addr_req { struct rdma_dev_addr *addr, void *context); unsigned long timeout; int status; + u32 seq; }; +static atomic_t ib_nl_addr_request_seq = ATOMIC_INIT(0); + static void process_req(struct work_struct *work); static DEFINE_MUTEX(lock); @@ -71,6 +74,126 @@ static LIST_HEAD(req_list); static DECLARE_DELAYED_WORK(work, process_req); static struct workqueue_struct *addr_wq; +static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = { + [LS_NLA_TYPE_DGID] = {.type = NLA_BINARY, + .len = sizeof(struct rdma_nla_ls_gid)}, +}; + +static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh) +{ + struct nlattr *tb[LS_NLA_TYPE_MAX] = {}; + int ret; + + if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR) + return false; + + ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh), + nlmsg_len(nlh), ib_nl_addr_policy); + if (ret) + return false; + + return true; +} + +static void ib_nl_process_good_ip_rsep(const struct nlmsghdr *nlh) +{ + const struct nlattr *head, *curr; + union ib_gid gid; + struct addr_req *req; + int len, rem; + int found = 0; + + head = (const struct nlattr *)nlmsg_data(nlh); + len = nlmsg_len(nlh); + + nla_for_each_attr(curr, head, len, rem) { + if (curr->nla_type == LS_NLA_TYPE_DGID) + memcpy(&gid, nla_data(curr), nla_len(curr)); + } + + mutex_lock(&lock); + list_for_each_entry(req, &req_list, list) { + if (nlh->nlmsg_seq != req->seq) + continue; + /* We set the DGID part, the rest was set earlier */ + rdma_addr_set_dgid(req->addr, &gid); + req->status = 0; + found = 1; + break; + } + mutex_unlock(&lock); + + if (!found) + pr_info("Couldn't find request waiting for DGID: %pI6\n", + &gid); +} + +int ib_nl_handle_ip_res_resp(struct sk_buff *skb, + struct netlink_callback *cb) +{ + const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh; + + if ((nlh->nlmsg_flags & NLM_F_REQUEST) || + 
!(NETLINK_CB(skb).sk) || + !netlink_capable(skb, CAP_NET_ADMIN)) + return -EPERM; + + if (ib_nl_is_good_ip_resp(nlh)) + ib_nl_process_good_ip_rsep(nlh); + + return skb->len; +} + +static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr, + const void *daddr, + u32 seq, u16 family) +{ + struct sk_buff *skb = NULL; + struct nlmsghdr *nlh; + struct rdma_ls_ip_resolve_header *header; + void *data; + size_t size; + int attrtype; + int len; + + if (family == AF_INET) { + size = sizeof(struct in_addr); + attrtype = RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_IPV4; + } else { + size = sizeof(struct in6_addr); + attrtype = RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_IPV6; + } + + len = nla_total_size(sizeof(size)); + len += NLMSG_ALIGN(sizeof(*header)); + + skb = nlmsg_new(len, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + data = ibnl_put_msg(skb, &nlh, seq, 0, RDMA_NL_LS, + RDMA_NL_LS_OP_IP_RESOLVE, NLM_F_REQUEST); + if (!data) { + nlmsg_free(skb); + return -ENODATA; + } + + /* Construct the family header first */ + header = (struct rdma_ls_ip_resolve_header *) + skb_put(skb, NLMSG_ALIGN(sizeof(*header))); + header->ifindex = dev_addr->bound_dev_if; + nla_put(skb, attrtype, size, daddr); + + /* Repair the nlmsg header length */ + nlmsg_end(skb, nlh); + ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, GFP_KERNEL); + + /* Make the request retry, so when we get the response from userspace + * we will have something. + */ + return -ENODATA; +} + int rdma_addr_size(struct sockaddr *addr) { switch (addr->sa_family) { @@ -199,6 +322,17 @@ static void queue_req(struct addr_req *req) mutex_unlock(&lock); } +static int ib_nl_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr, + const void *daddr, u32 seq, u16 family) +{ + if (ibnl_chk_listeners(RDMA_NL_GROUP_LS)) + return -EADDRNOTAVAIL; + + /* We fill in what we can, the response will fill the rest */ + rdma_copy_addr(dev_addr, dst->dev, NULL); + return ib_nl_ip_send_msg(dev_addr, daddr, seq, family); +} + static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr, const void *daddr) { @@ -223,6 +357,39 @@ static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr, return ret; } +static bool has_gateway(struct dst_entry *dst, sa_family_t family) +{ + struct rtable *rt; + struct rt6_info *rt6; + + if (family == AF_INET) { + rt = container_of(dst, struct rtable, dst); + return rt->rt_uses_gateway; + } + + rt6 = container_of(dst, struct rt6_info, dst); + return rt6->rt6i_flags & RTF_GATEWAY; +} + +static int fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr, + const struct sockaddr *dst_in, u32 seq) +{ + const struct sockaddr_in *dst_in4 = + (const struct sockaddr_in *)dst_in; + const struct sockaddr_in6 *dst_in6 = + (const struct sockaddr_in6 *)dst_in; + const void *daddr = (dst_in->sa_family == AF_INET) ? 
+ (const void *)&dst_in4->sin_addr.s_addr : + (const void *)&dst_in6->sin6_addr; + sa_family_t family = dst_in->sa_family; + + /* Gateway + ARPHRD_INFINIBAND -> IB router */ + if (has_gateway(dst, family) && dst->dev->type == ARPHRD_INFINIBAND) + return ib_nl_fetch_ha(dst, dev_addr, daddr, seq, family); + else + return dst_fetch_ha(dst, dev_addr, daddr); +} + static int addr4_resolve(struct sockaddr_in *src_in, const struct sockaddr_in *dst_in, struct rdma_dev_addr *addr, @@ -246,10 +413,11 @@ static int addr4_resolve(struct sockaddr_in *src_in, src_in->sin_family = AF_INET; src_in->sin_addr.s_addr = fl4.saddr; - /* If there's a gateway, we're definitely in RoCE v2 (as RoCE v1 isn't - * routable) and we could set the network type accordingly. + /* If there's a gateway and type of device not ARPHRD_INFINIBAND, we're + * definitely in RoCE v2 (as RoCE v1 isn't routable) set the network + * type accordingly. */ - if (rt->rt_uses_gateway) + if (rt->rt_uses_gateway && rt->dst.dev->type != ARPHRD_INFINIBAND) addr->network = RDMA_NETWORK_IPV4; addr->hoplimit = ip4_dst_hoplimit(&rt->dst); @@ -291,10 +459,12 @@ static int addr6_resolve(struct sockaddr_in6 *src_in, src_in->sin6_addr = fl6.saddr; } - /* If there's a gateway, we're definitely in RoCE v2 (as RoCE v1 isn't - * routable) and we could set the network type accordingly. + /* If there's a gateway and type of device not ARPHRD_INFINIBAND, we're + * definitely in RoCE v2 (as RoCE v1 isn't routable) set the network + * type accordingly. */ - if (rt->rt6i_flags & RTF_GATEWAY) + if (rt->rt6i_flags & RTF_GATEWAY && + ip6_dst_idev(dst)->dev->type != ARPHRD_INFINIBAND) addr->network = RDMA_NETWORK_IPV6; addr->hoplimit = ip6_dst_hoplimit(dst); @@ -317,7 +487,8 @@ static int addr6_resolve(struct sockaddr_in6 *src_in, static int addr_resolve_neigh(struct dst_entry *dst, const struct sockaddr *dst_in, - struct rdma_dev_addr *addr) + struct rdma_dev_addr *addr, + u32 seq) { if (dst->dev->flags & IFF_LOOPBACK) { int ret; @@ -331,17 +502,8 @@ static int addr_resolve_neigh(struct dst_entry *dst, } /* If the device doesn't do ARP internally */ - if (!(dst->dev->flags & IFF_NOARP)) { - const struct sockaddr_in *dst_in4 = - (const struct sockaddr_in *)dst_in; - const struct sockaddr_in6 *dst_in6 = - (const struct sockaddr_in6 *)dst_in; - - return dst_fetch_ha(dst, addr, - dst_in->sa_family == AF_INET ? 
- (const void *)&dst_in4->sin_addr.s_addr : - (const void *)&dst_in6->sin6_addr); - } + if (!(dst->dev->flags & IFF_NOARP)) + return fetch_ha(dst, addr, dst_in, seq); return rdma_copy_addr(addr, dst->dev, NULL); } @@ -349,7 +511,8 @@ static int addr_resolve_neigh(struct dst_entry *dst, static int addr_resolve(struct sockaddr *src_in, const struct sockaddr *dst_in, struct rdma_dev_addr *addr, - bool resolve_neigh) + bool resolve_neigh, + u32 seq) { struct net_device *ndev; struct dst_entry *dst; @@ -366,7 +529,7 @@ static int addr_resolve(struct sockaddr *src_in, return ret; if (resolve_neigh) - ret = addr_resolve_neigh(&rt->dst, dst_in, addr); + ret = addr_resolve_neigh(&rt->dst, dst_in, addr, seq); ndev = rt->dst.dev; dev_hold(ndev); @@ -383,7 +546,7 @@ static int addr_resolve(struct sockaddr *src_in, return ret; if (resolve_neigh) - ret = addr_resolve_neigh(dst, dst_in, addr); + ret = addr_resolve_neigh(dst, dst_in, addr, seq); ndev = dst->dev; dev_hold(ndev); @@ -412,7 +575,7 @@ static void process_req(struct work_struct *work) src_in = (struct sockaddr *) &req->src_addr; dst_in = (struct sockaddr *) &req->dst_addr; req->status = addr_resolve(src_in, dst_in, req->addr, - true); + true, req->seq); if (req->status && time_after_eq(jiffies, req->timeout)) req->status = -ETIMEDOUT; else if (req->status == -ENODATA) @@ -471,8 +634,9 @@ int rdma_resolve_ip(struct rdma_addr_client *client, req->context = context; req->client = client; atomic_inc(&client->refcount); + req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq); - req->status = addr_resolve(src_in, dst_in, addr, true); + req->status = addr_resolve(src_in, dst_in, addr, true, req->seq); switch (req->status) { case 0: req->timeout = jiffies; @@ -510,7 +674,7 @@ int rdma_resolve_ip_route(struct sockaddr *src_addr, src_in->sa_family = dst_addr->sa_family; } - return addr_resolve(src_in, dst_addr, addr, false); + return addr_resolve(src_in, dst_addr, addr, false, 0); } EXPORT_SYMBOL(rdma_resolve_ip_route); @@ -634,7 +798,7 @@ static struct notifier_block nb = { .notifier_call = netevent_callback }; -static int __init addr_init(void) +int addr_init(void) { addr_wq = create_singlethread_workqueue("ib_addr"); if (!addr_wq) @@ -642,15 +806,13 @@ static int __init addr_init(void) register_netevent_notifier(&nb); rdma_addr_register_client(&self); + return 0; } -static void __exit addr_cleanup(void) +void addr_cleanup(void) { rdma_addr_unregister_client(&self); unregister_netevent_notifier(&nb); destroy_workqueue(addr_wq); } - -module_init(addr_init); -module_exit(addr_cleanup); diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h index eab32215756b..19d499dcab76 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h @@ -137,4 +137,20 @@ static inline bool rdma_is_upper_dev_rcu(struct net_device *dev, return _upper == upper; } +int addr_init(void); +void addr_cleanup(void); + +int ib_mad_init(void); +void ib_mad_cleanup(void); + +int ib_sa_init(void); +void ib_sa_cleanup(void); + +int ib_nl_handle_resolve_resp(struct sk_buff *skb, + struct netlink_callback *cb); +int ib_nl_handle_set_timeout(struct sk_buff *skb, + struct netlink_callback *cb); +int ib_nl_handle_ip_res_resp(struct sk_buff *skb, + struct netlink_callback *cb); + #endif /* _CORE_PRIV_H */ diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 10979844026a..5516fb070344 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -955,6 +955,29 @@ 
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, } EXPORT_SYMBOL(ib_get_net_dev_by_params); +static struct ibnl_client_cbs ibnl_ls_cb_table[] = { + [RDMA_NL_LS_OP_RESOLVE] = { + .dump = ib_nl_handle_resolve_resp, + .module = THIS_MODULE }, + [RDMA_NL_LS_OP_SET_TIMEOUT] = { + .dump = ib_nl_handle_set_timeout, + .module = THIS_MODULE }, + [RDMA_NL_LS_OP_IP_RESOLVE] = { + .dump = ib_nl_handle_ip_res_resp, + .module = THIS_MODULE }, +}; + +static int ib_add_ibnl_clients(void) +{ + return ibnl_add_client(RDMA_NL_LS, ARRAY_SIZE(ibnl_ls_cb_table), + ibnl_ls_cb_table); +} + +static void ib_remove_ibnl_clients(void) +{ + ibnl_remove_client(RDMA_NL_LS); +} + static int __init ib_core_init(void) { int ret; @@ -983,10 +1006,41 @@ static int __init ib_core_init(void) goto err_sysfs; } + ret = addr_init(); + if (ret) { + pr_warn("Could't init IB address resolution\n"); + goto err_ibnl; + } + + ret = ib_mad_init(); + if (ret) { + pr_warn("Couldn't init IB MAD\n"); + goto err_addr; + } + + ret = ib_sa_init(); + if (ret) { + pr_warn("Couldn't init SA\n"); + goto err_mad; + } + + if (ib_add_ibnl_clients()) { + pr_warn("Couldn't register ibnl clients\n"); + goto err_sa; + } + ib_cache_setup(); return 0; +err_sa: + ib_sa_cleanup(); +err_mad: + ib_mad_cleanup(); +err_addr: + addr_cleanup(); +err_ibnl: + ibnl_cleanup(); err_sysfs: class_unregister(&ib_class); err_comp: @@ -999,6 +1053,10 @@ err: static void __exit ib_core_cleanup(void) { ib_cache_cleanup(); + ib_remove_ibnl_clients(); + ib_sa_cleanup(); + ib_mad_cleanup(); + addr_cleanup(); ibnl_cleanup(); class_unregister(&ib_class); destroy_workqueue(ib_comp_wq); diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 9fa5bf33f5a3..82fb511112da 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -47,11 +47,7 @@ #include "smi.h" #include "opa_smi.h" #include "agent.h" - -MODULE_LICENSE("Dual BSD/GPL"); -MODULE_DESCRIPTION("kernel IB MAD API"); -MODULE_AUTHOR("Hal Rosenstock"); -MODULE_AUTHOR("Sean Hefty"); +#include "core_priv.h" static int mad_sendq_size = IB_MAD_QP_SEND_SIZE; static int mad_recvq_size = IB_MAD_QP_RECV_SIZE; @@ -3316,7 +3312,7 @@ static struct ib_client mad_client = { .remove = ib_mad_remove_device }; -static int __init ib_mad_init_module(void) +int ib_mad_init(void) { mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE); mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE); @@ -3334,10 +3330,7 @@ static int __init ib_mad_init_module(void) return 0; } -static void __exit ib_mad_cleanup_module(void) +void ib_mad_cleanup(void) { ib_unregister_client(&mad_client); } - -module_init(ib_mad_init_module); -module_exit(ib_mad_cleanup_module); diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c index 250937cb9a1a..a83ec28a147b 100644 --- a/drivers/infiniband/core/multicast.c +++ b/drivers/infiniband/core/multicast.c @@ -93,6 +93,18 @@ enum { struct mcast_member; +/* +* There are 4 types of join states: +* FullMember, NonMember, SendOnlyNonMember, SendOnlyFullMember. 
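
This is the heart of the InfiniBand series: addr.c, mad.c and sa_query.c stop being standalone modules (their MODULE_* boilerplate and module_init/module_exit calls go away) and become plain init/cleanup functions driven from ib_core_init(), with all RDMA_NL_LS netlink ops registered in a single table in device.c. The resulting unwind follows the usual goto-ladder shape, where each error label undoes exactly what succeeded before it, in reverse order; a self-contained sketch with hypothetical subsystems:

    int demo_subsys_a_init(void);
    void demo_subsys_a_cleanup(void);
    int demo_subsys_b_init(void);
    void demo_subsys_b_cleanup(void);

    static int demo_core_init(void)
    {
        int ret;

        ret = demo_subsys_a_init();
        if (ret)
            return ret;         /* nothing to undo yet */

        ret = demo_subsys_b_init();
        if (ret)
            goto err_a;         /* only A needs undoing */

        return 0;

    err_a:
        demo_subsys_a_cleanup();
        return ret;
    }

Note that the matching ib_core_cleanup() tears things down strictly in reverse of init, the same discipline expressed at module-exit time.
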
+*/ +enum { + FULLMEMBER_JOIN, + NONMEMBER_JOIN, + SENDONLY_NONMEBER_JOIN, + SENDONLY_FULLMEMBER_JOIN, + NUM_JOIN_MEMBERSHIP_TYPES, +}; + struct mcast_group { struct ib_sa_mcmember_rec rec; struct rb_node node; @@ -102,7 +114,7 @@ struct mcast_group { struct list_head pending_list; struct list_head active_list; struct mcast_member *last_join; - int members[3]; + int members[NUM_JOIN_MEMBERSHIP_TYPES]; atomic_t refcount; enum mcast_group_state state; struct ib_sa_query *query; @@ -220,8 +232,9 @@ static void queue_join(struct mcast_member *member) } /* - * A multicast group has three types of members: full member, non member, and - * send only member. We need to keep track of the number of members of each + * A multicast group has four types of members: full member, non member, + * sendonly non member and sendonly full member. + * We need to keep track of the number of members of each * type based on their join state. Adjust the number of members the belong to * the specified join states. */ @@ -229,7 +242,7 @@ static void adjust_membership(struct mcast_group *group, u8 join_state, int inc) { int i; - for (i = 0; i < 3; i++, join_state >>= 1) + for (i = 0; i < NUM_JOIN_MEMBERSHIP_TYPES; i++, join_state >>= 1) if (join_state & 0x1) group->members[i] += inc; } @@ -245,7 +258,7 @@ static u8 get_leave_state(struct mcast_group *group) u8 leave_state = 0; int i; - for (i = 0; i < 3; i++) + for (i = 0; i < NUM_JOIN_MEMBERSHIP_TYPES; i++) if (!group->members[i]) leave_state |= (0x1 << i); diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 3ebd108bcc5f..e95538650dc6 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -53,10 +53,6 @@ #include "sa.h" #include "core_priv.h" -MODULE_AUTHOR("Roland Dreier"); -MODULE_DESCRIPTION("InfiniBand subnet administration query support"); -MODULE_LICENSE("Dual BSD/GPL"); - #define IB_SA_LOCAL_SVC_TIMEOUT_MIN 100 #define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT 2000 #define IB_SA_LOCAL_SVC_TIMEOUT_MAX 200000 @@ -119,6 +115,12 @@ struct ib_sa_guidinfo_query { struct ib_sa_query sa_query; }; +struct ib_sa_classport_info_query { + void (*callback)(int, struct ib_class_port_info *, void *); + void *context; + struct ib_sa_query sa_query; +}; + struct ib_sa_mcmember_query { void (*callback)(int, struct ib_sa_mcmember_rec *, void *); void *context; @@ -392,6 +394,82 @@ static const struct ib_field service_rec_table[] = { .size_bits = 2*64 }, }; +#define CLASSPORTINFO_REC_FIELD(field) \ + .struct_offset_bytes = offsetof(struct ib_class_port_info, field), \ + .struct_size_bytes = sizeof((struct ib_class_port_info *)0)->field, \ + .field_name = "ib_class_port_info:" #field + +static const struct ib_field classport_info_rec_table[] = { + { CLASSPORTINFO_REC_FIELD(base_version), + .offset_words = 0, + .offset_bits = 0, + .size_bits = 8 }, + { CLASSPORTINFO_REC_FIELD(class_version), + .offset_words = 0, + .offset_bits = 8, + .size_bits = 8 }, + { CLASSPORTINFO_REC_FIELD(capability_mask), + .offset_words = 0, + .offset_bits = 16, + .size_bits = 16 }, + { CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time), + .offset_words = 1, + .offset_bits = 0, + .size_bits = 32 }, + { CLASSPORTINFO_REC_FIELD(redirect_gid), + .offset_words = 2, + .offset_bits = 0, + .size_bits = 128 }, + { CLASSPORTINFO_REC_FIELD(redirect_tcslfl), + .offset_words = 6, + .offset_bits = 0, + .size_bits = 32 }, + { CLASSPORTINFO_REC_FIELD(redirect_lid), + .offset_words = 7, + .offset_bits = 0, + .size_bits = 16 }, + { 
CLASSPORTINFO_REC_FIELD(redirect_pkey), + .offset_words = 7, + .offset_bits = 16, + .size_bits = 16 }, + + { CLASSPORTINFO_REC_FIELD(redirect_qp), + .offset_words = 8, + .offset_bits = 0, + .size_bits = 32 }, + { CLASSPORTINFO_REC_FIELD(redirect_qkey), + .offset_words = 9, + .offset_bits = 0, + .size_bits = 32 }, + + { CLASSPORTINFO_REC_FIELD(trap_gid), + .offset_words = 10, + .offset_bits = 0, + .size_bits = 128 }, + { CLASSPORTINFO_REC_FIELD(trap_tcslfl), + .offset_words = 14, + .offset_bits = 0, + .size_bits = 32 }, + + { CLASSPORTINFO_REC_FIELD(trap_lid), + .offset_words = 15, + .offset_bits = 0, + .size_bits = 16 }, + { CLASSPORTINFO_REC_FIELD(trap_pkey), + .offset_words = 15, + .offset_bits = 16, + .size_bits = 16 }, + + { CLASSPORTINFO_REC_FIELD(trap_hlqp), + .offset_words = 16, + .offset_bits = 0, + .size_bits = 32 }, + { CLASSPORTINFO_REC_FIELD(trap_qkey), + .offset_words = 17, + .offset_bits = 0, + .size_bits = 32 }, +}; + #define GUIDINFO_REC_FIELD(field) \ .struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field), \ .struct_size_bytes = sizeof((struct ib_sa_guidinfo_rec *) 0)->field, \ @@ -705,8 +783,8 @@ static void ib_nl_request_timeout(struct work_struct *work) spin_unlock_irqrestore(&ib_nl_request_lock, flags); } -static int ib_nl_handle_set_timeout(struct sk_buff *skb, - struct netlink_callback *cb) +int ib_nl_handle_set_timeout(struct sk_buff *skb, + struct netlink_callback *cb) { const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh; int timeout, delta, abs_delta; @@ -782,8 +860,8 @@ static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh) return 1; } -static int ib_nl_handle_resolve_resp(struct sk_buff *skb, - struct netlink_callback *cb) +int ib_nl_handle_resolve_resp(struct sk_buff *skb, + struct netlink_callback *cb) { const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh; unsigned long flags; @@ -838,15 +916,6 @@ resp_out: return skb->len; } -static struct ibnl_client_cbs ib_sa_cb_table[] = { - [RDMA_NL_LS_OP_RESOLVE] = { - .dump = ib_nl_handle_resolve_resp, - .module = THIS_MODULE }, - [RDMA_NL_LS_OP_SET_TIMEOUT] = { - .dump = ib_nl_handle_set_timeout, - .module = THIS_MODULE }, -}; - static void free_sm_ah(struct kref *kref) { struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref); @@ -1645,6 +1714,97 @@ err1: } EXPORT_SYMBOL(ib_sa_guid_info_rec_query); +/* Support get SA ClassPortInfo */ +static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query, + int status, + struct ib_sa_mad *mad) +{ + struct ib_sa_classport_info_query *query = + container_of(sa_query, struct ib_sa_classport_info_query, sa_query); + + if (mad) { + struct ib_class_port_info rec; + + ib_unpack(classport_info_rec_table, + ARRAY_SIZE(classport_info_rec_table), + mad->data, &rec); + query->callback(status, &rec, query->context); + } else { + query->callback(status, NULL, query->context); + } +} + +static void ib_sa_portclass_info_rec_release(struct ib_sa_query *sa_query) +{ + kfree(container_of(sa_query, struct ib_sa_classport_info_query, + sa_query)); +} + +int ib_sa_classport_info_rec_query(struct ib_sa_client *client, + struct ib_device *device, u8 port_num, + int timeout_ms, gfp_t gfp_mask, + void (*callback)(int status, + struct ib_class_port_info *resp, + void *context), + void *context, + struct ib_sa_query **sa_query) +{ + struct ib_sa_classport_info_query *query; + struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); + struct ib_sa_port *port; + struct ib_mad_agent *agent; + struct ib_sa_mad *mad; + int ret; + 
+ if (!sa_dev) + return -ENODEV; + + port = &sa_dev->port[port_num - sa_dev->start_port]; + agent = port->agent; + + query = kzalloc(sizeof(*query), gfp_mask); + if (!query) + return -ENOMEM; + + query->sa_query.port = port; + ret = alloc_mad(&query->sa_query, gfp_mask); + if (ret) + goto err1; + + ib_sa_client_get(client); + query->sa_query.client = client; + query->callback = callback; + query->context = context; + + mad = query->sa_query.mad_buf->mad; + init_mad(mad, agent); + + query->sa_query.callback = callback ? ib_sa_classport_info_rec_callback : NULL; + + query->sa_query.release = ib_sa_portclass_info_rec_release; + /* support GET only */ + mad->mad_hdr.method = IB_MGMT_METHOD_GET; + mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO); + mad->sa_hdr.comp_mask = 0; + *sa_query = &query->sa_query; + + ret = send_mad(&query->sa_query, timeout_ms, gfp_mask); + if (ret < 0) + goto err2; + + return ret; + +err2: + *sa_query = NULL; + ib_sa_client_put(query->sa_query.client); + free_mad(&query->sa_query); + +err1: + kfree(query); + return ret; +} +EXPORT_SYMBOL(ib_sa_classport_info_rec_query); + static void send_handler(struct ib_mad_agent *agent, struct ib_mad_send_wc *mad_send_wc) { @@ -1794,7 +1954,7 @@ static void ib_sa_remove_one(struct ib_device *device, void *client_data) kfree(sa_dev); } -static int __init ib_sa_init(void) +int ib_sa_init(void) { int ret; @@ -1820,17 +1980,10 @@ static int __init ib_sa_init(void) goto err3; } - if (ibnl_add_client(RDMA_NL_LS, ARRAY_SIZE(ib_sa_cb_table), - ib_sa_cb_table)) { - pr_err("Failed to add netlink callback\n"); - ret = -EINVAL; - goto err4; - } INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout); return 0; -err4: - destroy_workqueue(ib_nl_wq); + err3: mcast_cleanup(); err2: @@ -1839,9 +1992,8 @@ err1: return ret; } -static void __exit ib_sa_cleanup(void) +void ib_sa_cleanup(void) { - ibnl_remove_client(RDMA_NL_LS); cancel_delayed_work(&ib_nl_timed_work); flush_workqueue(ib_nl_wq); destroy_workqueue(ib_nl_wq); @@ -1849,6 +2001,3 @@ static void __exit ib_sa_cleanup(void) ib_unregister_client(&sa_client); idr_destroy(&query_idr); } - -module_init(ib_sa_init); -module_exit(ib_sa_cleanup); diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index 14606afbfaa8..5e573bb18660 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -56,8 +56,10 @@ struct ib_port { struct gid_attr_group *gid_attr_group; struct attribute_group gid_group; struct attribute_group pkey_group; - u8 port_num; struct attribute_group *pma_table; + struct attribute_group *hw_stats_ag; + struct rdma_hw_stats *hw_stats; + u8 port_num; }; struct port_attribute { @@ -80,6 +82,18 @@ struct port_table_attribute { __be16 attr_id; }; +struct hw_stats_attribute { + struct attribute attr; + ssize_t (*show)(struct kobject *kobj, + struct attribute *attr, char *buf); + ssize_t (*store)(struct kobject *kobj, + struct attribute *attr, + const char *buf, + size_t count); + int index; + u8 port_num; +}; + static ssize_t port_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { @@ -733,6 +747,212 @@ static struct attribute_group *get_counter_table(struct ib_device *dev, return &pma_group; } +static int update_hw_stats(struct ib_device *dev, struct rdma_hw_stats *stats, + u8 port_num, int index) +{ + int ret; + + if (time_is_after_eq_jiffies(stats->timestamp + stats->lifespan)) + return 0; + ret = dev->get_hw_stats(dev, stats, port_num, index); + if (ret < 0) + return ret; + if (ret == 
stats->num_counters) + stats->timestamp = jiffies; + + return 0; +} + +static ssize_t print_hw_stat(struct rdma_hw_stats *stats, int index, char *buf) +{ + return sprintf(buf, "%llu\n", stats->value[index]); +} + +static ssize_t show_hw_stats(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + struct ib_device *dev; + struct ib_port *port; + struct hw_stats_attribute *hsa; + struct rdma_hw_stats *stats; + int ret; + + hsa = container_of(attr, struct hw_stats_attribute, attr); + if (!hsa->port_num) { + dev = container_of((struct device *)kobj, + struct ib_device, dev); + stats = dev->hw_stats; + } else { + port = container_of(kobj, struct ib_port, kobj); + dev = port->ibdev; + stats = port->hw_stats; + } + ret = update_hw_stats(dev, stats, hsa->port_num, hsa->index); + if (ret) + return ret; + return print_hw_stat(stats, hsa->index, buf); +} + +static ssize_t show_stats_lifespan(struct kobject *kobj, + struct attribute *attr, + char *buf) +{ + struct hw_stats_attribute *hsa; + int msecs; + + hsa = container_of(attr, struct hw_stats_attribute, attr); + if (!hsa->port_num) { + struct ib_device *dev = container_of((struct device *)kobj, + struct ib_device, dev); + msecs = jiffies_to_msecs(dev->hw_stats->lifespan); + } else { + struct ib_port *p = container_of(kobj, struct ib_port, kobj); + msecs = jiffies_to_msecs(p->hw_stats->lifespan); + } + return sprintf(buf, "%d\n", msecs); +} + +static ssize_t set_stats_lifespan(struct kobject *kobj, + struct attribute *attr, + const char *buf, size_t count) +{ + struct hw_stats_attribute *hsa; + int msecs; + int jiffies; + int ret; + + ret = kstrtoint(buf, 10, &msecs); + if (ret) + return ret; + if (msecs < 0 || msecs > 10000) + return -EINVAL; + jiffies = msecs_to_jiffies(msecs); + hsa = container_of(attr, struct hw_stats_attribute, attr); + if (!hsa->port_num) { + struct ib_device *dev = container_of((struct device *)kobj, + struct ib_device, dev); + dev->hw_stats->lifespan = jiffies; + } else { + struct ib_port *p = container_of(kobj, struct ib_port, kobj); + p->hw_stats->lifespan = jiffies; + } + return count; +} + +static void free_hsag(struct kobject *kobj, struct attribute_group *attr_group) +{ + struct attribute **attr; + + sysfs_remove_group(kobj, attr_group); + + for (attr = attr_group->attrs; *attr; attr++) + kfree(*attr); + kfree(attr_group); +} + +static struct attribute *alloc_hsa(int index, u8 port_num, const char *name) +{ + struct hw_stats_attribute *hsa; + + hsa = kmalloc(sizeof(*hsa), GFP_KERNEL); + if (!hsa) + return NULL; + + hsa->attr.name = (char *)name; + hsa->attr.mode = S_IRUGO; + hsa->show = show_hw_stats; + hsa->store = NULL; + hsa->index = index; + hsa->port_num = port_num; + + return &hsa->attr; +} + +static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num) +{ + struct hw_stats_attribute *hsa; + + hsa = kmalloc(sizeof(*hsa), GFP_KERNEL); + if (!hsa) + return NULL; + + hsa->attr.name = name; + hsa->attr.mode = S_IWUSR | S_IRUGO; + hsa->show = show_stats_lifespan; + hsa->store = set_stats_lifespan; + hsa->index = 0; + hsa->port_num = port_num; + + return &hsa->attr; +} + +static void setup_hw_stats(struct ib_device *device, struct ib_port *port, + u8 port_num) +{ + struct attribute_group *hsag = NULL; + struct rdma_hw_stats *stats; + int i = 0, ret; + + stats = device->alloc_hw_stats(device, port_num); + + if (!stats) + return; + + if (!stats->names || stats->num_counters <= 0) + goto err_free_stats; + + hsag = kzalloc(sizeof(*hsag) + + // 1 extra for the lifespan config entry + sizeof(void *) * 
(stats->num_counters + 1), + GFP_KERNEL); + if (!hsag) + goto err_free_stats; + + ret = device->get_hw_stats(device, stats, port_num, + stats->num_counters); + if (ret != stats->num_counters) + goto err; + + stats->timestamp = jiffies; + + hsag->name = "hw_counters"; + hsag->attrs = (void *)hsag + sizeof(*hsag); + + for (i = 0; i < stats->num_counters; i++) { + hsag->attrs[i] = alloc_hsa(i, port_num, stats->names[i]); + if (!hsag->attrs[i]) + goto err; + } + + /* treat an error here as non-fatal */ + hsag->attrs[i] = alloc_hsa_lifespan("lifespan", port_num); + + if (port) { + struct kobject *kobj = &port->kobj; + ret = sysfs_create_group(kobj, hsag); + if (ret) + goto err; + port->hw_stats_ag = hsag; + port->hw_stats = stats; + } else { + struct kobject *kobj = &device->dev.kobj; + ret = sysfs_create_group(kobj, hsag); + if (ret) + goto err; + device->hw_stats_ag = hsag; + device->hw_stats = stats; + } + + return; + +err: + for (; i >= 0; i--) + kfree(hsag->attrs[i]); + kfree(hsag); +err_free_stats: + kfree(stats); + return; +} + static int add_port(struct ib_device *device, int port_num, int (*port_callback)(struct ib_device *, u8, struct kobject *)) @@ -835,6 +1055,14 @@ static int add_port(struct ib_device *device, int port_num, goto err_remove_pkey; } + /* + * If port == 0, it means we have only one port and the parent + * device, not this port device, should be the holder of the + * hw_counters + */ + if (device->alloc_hw_stats && port_num) + setup_hw_stats(device, p, port_num); + list_add_tail(&p->kobj.entry, &device->port_list); kobject_uevent(&p->kobj, KOBJ_ADD); @@ -972,120 +1200,6 @@ static struct device_attribute *ib_class_attributes[] = { &dev_attr_node_desc }; -/* Show a given an attribute in the statistics group */ -static ssize_t show_protocol_stat(const struct device *device, - struct device_attribute *attr, char *buf, - unsigned offset) -{ - struct ib_device *dev = container_of(device, struct ib_device, dev); - union rdma_protocol_stats stats; - ssize_t ret; - - ret = dev->get_protocol_stats(dev, &stats); - if (ret) - return ret; - - return sprintf(buf, "%llu\n", - (unsigned long long) ((u64 *) &stats)[offset]); -} - -/* generate a read-only iwarp statistics attribute */ -#define IW_STATS_ENTRY(name) \ -static ssize_t show_##name(struct device *device, \ - struct device_attribute *attr, char *buf) \ -{ \ - return show_protocol_stat(device, attr, buf, \ - offsetof(struct iw_protocol_stats, name) / \ - sizeof (u64)); \ -} \ -static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) - -IW_STATS_ENTRY(ipInReceives); -IW_STATS_ENTRY(ipInHdrErrors); -IW_STATS_ENTRY(ipInTooBigErrors); -IW_STATS_ENTRY(ipInNoRoutes); -IW_STATS_ENTRY(ipInAddrErrors); -IW_STATS_ENTRY(ipInUnknownProtos); -IW_STATS_ENTRY(ipInTruncatedPkts); -IW_STATS_ENTRY(ipInDiscards); -IW_STATS_ENTRY(ipInDelivers); -IW_STATS_ENTRY(ipOutForwDatagrams); -IW_STATS_ENTRY(ipOutRequests); -IW_STATS_ENTRY(ipOutDiscards); -IW_STATS_ENTRY(ipOutNoRoutes); -IW_STATS_ENTRY(ipReasmTimeout); -IW_STATS_ENTRY(ipReasmReqds); -IW_STATS_ENTRY(ipReasmOKs); -IW_STATS_ENTRY(ipReasmFails); -IW_STATS_ENTRY(ipFragOKs); -IW_STATS_ENTRY(ipFragFails); -IW_STATS_ENTRY(ipFragCreates); -IW_STATS_ENTRY(ipInMcastPkts); -IW_STATS_ENTRY(ipOutMcastPkts); -IW_STATS_ENTRY(ipInBcastPkts); -IW_STATS_ENTRY(ipOutBcastPkts); -IW_STATS_ENTRY(tcpRtoAlgorithm); -IW_STATS_ENTRY(tcpRtoMin); -IW_STATS_ENTRY(tcpRtoMax); -IW_STATS_ENTRY(tcpMaxConn); -IW_STATS_ENTRY(tcpActiveOpens); -IW_STATS_ENTRY(tcpPassiveOpens); -IW_STATS_ENTRY(tcpAttemptFails); -IW_STATS_ENTRY(tcpEstabResets); 
-IW_STATS_ENTRY(tcpCurrEstab); -IW_STATS_ENTRY(tcpInSegs); -IW_STATS_ENTRY(tcpOutSegs); -IW_STATS_ENTRY(tcpRetransSegs); -IW_STATS_ENTRY(tcpInErrs); -IW_STATS_ENTRY(tcpOutRsts); - -static struct attribute *iw_proto_stats_attrs[] = { - &dev_attr_ipInReceives.attr, - &dev_attr_ipInHdrErrors.attr, - &dev_attr_ipInTooBigErrors.attr, - &dev_attr_ipInNoRoutes.attr, - &dev_attr_ipInAddrErrors.attr, - &dev_attr_ipInUnknownProtos.attr, - &dev_attr_ipInTruncatedPkts.attr, - &dev_attr_ipInDiscards.attr, - &dev_attr_ipInDelivers.attr, - &dev_attr_ipOutForwDatagrams.attr, - &dev_attr_ipOutRequests.attr, - &dev_attr_ipOutDiscards.attr, - &dev_attr_ipOutNoRoutes.attr, - &dev_attr_ipReasmTimeout.attr, - &dev_attr_ipReasmReqds.attr, - &dev_attr_ipReasmOKs.attr, - &dev_attr_ipReasmFails.attr, - &dev_attr_ipFragOKs.attr, - &dev_attr_ipFragFails.attr, - &dev_attr_ipFragCreates.attr, - &dev_attr_ipInMcastPkts.attr, - &dev_attr_ipOutMcastPkts.attr, - &dev_attr_ipInBcastPkts.attr, - &dev_attr_ipOutBcastPkts.attr, - &dev_attr_tcpRtoAlgorithm.attr, - &dev_attr_tcpRtoMin.attr, - &dev_attr_tcpRtoMax.attr, - &dev_attr_tcpMaxConn.attr, - &dev_attr_tcpActiveOpens.attr, - &dev_attr_tcpPassiveOpens.attr, - &dev_attr_tcpAttemptFails.attr, - &dev_attr_tcpEstabResets.attr, - &dev_attr_tcpCurrEstab.attr, - &dev_attr_tcpInSegs.attr, - &dev_attr_tcpOutSegs.attr, - &dev_attr_tcpRetransSegs.attr, - &dev_attr_tcpInErrs.attr, - &dev_attr_tcpOutRsts.attr, - NULL -}; - -static struct attribute_group iw_stats_group = { - .name = "proto_stats", - .attrs = iw_proto_stats_attrs, -}; - static void free_port_list_attributes(struct ib_device *device) { struct kobject *p, *t; @@ -1093,6 +1207,10 @@ static void free_port_list_attributes(struct ib_device *device) list_for_each_entry_safe(p, t, &device->port_list, entry) { struct ib_port *port = container_of(p, struct ib_port, kobj); list_del(&p->entry); + if (port->hw_stats) { + kfree(port->hw_stats); + free_hsag(&port->kobj, port->hw_stats_ag); + } sysfs_remove_group(p, port->pma_table); sysfs_remove_group(p, &port->pkey_group); sysfs_remove_group(p, &port->gid_group); @@ -1149,11 +1267,8 @@ int ib_device_register_sysfs(struct ib_device *device, } } - if (device->node_type == RDMA_NODE_RNIC && device->get_protocol_stats) { - ret = sysfs_create_group(&class_dev->kobj, &iw_stats_group); - if (ret) - goto err_put; - } + if (device->alloc_hw_stats) + setup_hw_stats(device, NULL, 0); return 0; @@ -1169,15 +1284,18 @@ err: void ib_device_unregister_sysfs(struct ib_device *device) { - /* Hold kobject until ib_dealloc_device() */ - struct kobject *kobj_dev = kobject_get(&device->dev.kobj); int i; - if (device->node_type == RDMA_NODE_RNIC && device->get_protocol_stats) - sysfs_remove_group(kobj_dev, &iw_stats_group); + /* Hold kobject until ib_dealloc_device() */ + kobject_get(&device->dev.kobj); free_port_list_attributes(device); + if (device->hw_stats) { + kfree(device->hw_stats); + free_hsag(&device->dev.kobj, device->hw_stats_ag); + } + for (i = 0; i < ARRAY_SIZE(ib_class_attributes); ++i) device_remove_file(&device->dev, ib_class_attributes[i]); diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile index c7ad0a4c8b15..c0c7cf8af3f4 100644 --- a/drivers/infiniband/hw/Makefile +++ b/drivers/infiniband/hw/Makefile @@ -8,3 +8,4 @@ obj-$(CONFIG_MLX5_INFINIBAND) += mlx5/ obj-$(CONFIG_INFINIBAND_NES) += nes/ obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma/ obj-$(CONFIG_INFINIBAND_USNIC) += usnic/ +obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/ diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c 
b/drivers/infiniband/hw/cxgb3/cxio_hal.c index de1c61b417d6..ada2e5009c86 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_hal.c +++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c @@ -327,7 +327,7 @@ int cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq) kfree(cq->sw_queue); dma_free_coherent(&(rdev_p->rnic_info.pdev->dev), (1UL << (cq->size_log2)) - * sizeof(struct t3_cqe), cq->queue, + * sizeof(struct t3_cqe) + 1, cq->queue, dma_unmap_addr(cq, mapping)); cxio_hal_put_cqid(rdev_p->rscp, cq->cqid); return err; diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index 47cb927a0dd6..bb1a839d4d6d 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -1218,59 +1218,119 @@ static ssize_t show_board(struct device *dev, struct device_attribute *attr, iwch_dev->rdev.rnic_info.pdev->device); } -static int iwch_get_mib(struct ib_device *ibdev, - union rdma_protocol_stats *stats) +enum counters { + IPINRECEIVES, + IPINHDRERRORS, + IPINADDRERRORS, + IPINUNKNOWNPROTOS, + IPINDISCARDS, + IPINDELIVERS, + IPOUTREQUESTS, + IPOUTDISCARDS, + IPOUTNOROUTES, + IPREASMTIMEOUT, + IPREASMREQDS, + IPREASMOKS, + IPREASMFAILS, + TCPACTIVEOPENS, + TCPPASSIVEOPENS, + TCPATTEMPTFAILS, + TCPESTABRESETS, + TCPCURRESTAB, + TCPINSEGS, + TCPOUTSEGS, + TCPRETRANSSEGS, + TCPINERRS, + TCPOUTRSTS, + TCPRTOMIN, + TCPRTOMAX, + NR_COUNTERS +}; + +static const char * const names[] = { + [IPINRECEIVES] = "ipInReceives", + [IPINHDRERRORS] = "ipInHdrErrors", + [IPINADDRERRORS] = "ipInAddrErrors", + [IPINUNKNOWNPROTOS] = "ipInUnknownProtos", + [IPINDISCARDS] = "ipInDiscards", + [IPINDELIVERS] = "ipInDelivers", + [IPOUTREQUESTS] = "ipOutRequests", + [IPOUTDISCARDS] = "ipOutDiscards", + [IPOUTNOROUTES] = "ipOutNoRoutes", + [IPREASMTIMEOUT] = "ipReasmTimeout", + [IPREASMREQDS] = "ipReasmReqds", + [IPREASMOKS] = "ipReasmOKs", + [IPREASMFAILS] = "ipReasmFails", + [TCPACTIVEOPENS] = "tcpActiveOpens", + [TCPPASSIVEOPENS] = "tcpPassiveOpens", + [TCPATTEMPTFAILS] = "tcpAttemptFails", + [TCPESTABRESETS] = "tcpEstabResets", + [TCPCURRESTAB] = "tcpCurrEstab", + [TCPINSEGS] = "tcpInSegs", + [TCPOUTSEGS] = "tcpOutSegs", + [TCPRETRANSSEGS] = "tcpRetransSegs", + [TCPINERRS] = "tcpInErrs", + [TCPOUTRSTS] = "tcpOutRsts", + [TCPRTOMIN] = "tcpRtoMin", + [TCPRTOMAX] = "tcpRtoMax", +}; + +static struct rdma_hw_stats *iwch_alloc_stats(struct ib_device *ibdev, + u8 port_num) +{ + BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS); + + /* Our driver only supports device level stats */ + if (port_num != 0) + return NULL; + + return rdma_alloc_hw_stats_struct(names, NR_COUNTERS, + RDMA_HW_STATS_DEFAULT_LIFESPAN); +} + +static int iwch_get_mib(struct ib_device *ibdev, struct rdma_hw_stats *stats, + u8 port, int index) { struct iwch_dev *dev; struct tp_mib_stats m; int ret; + if (port != 0 || !stats) + return -ENOSYS; + PDBG("%s ibdev %p\n", __func__, ibdev); dev = to_iwch_dev(ibdev); ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m); if (ret) return -ENOSYS; - memset(stats, 0, sizeof *stats); - stats->iw.ipInReceives = ((u64) m.ipInReceive_hi << 32) + - m.ipInReceive_lo; - stats->iw.ipInHdrErrors = ((u64) m.ipInHdrErrors_hi << 32) + - m.ipInHdrErrors_lo; - stats->iw.ipInAddrErrors = ((u64) m.ipInAddrErrors_hi << 32) + - m.ipInAddrErrors_lo; - stats->iw.ipInUnknownProtos = ((u64) m.ipInUnknownProtos_hi << 32) + - m.ipInUnknownProtos_lo; - stats->iw.ipInDiscards = ((u64) m.ipInDiscards_hi << 32) + - m.ipInDiscards_lo; - stats->iw.ipInDelivers = 
((u64) m.ipInDelivers_hi << 32) + - m.ipInDelivers_lo; - stats->iw.ipOutRequests = ((u64) m.ipOutRequests_hi << 32) + - m.ipOutRequests_lo; - stats->iw.ipOutDiscards = ((u64) m.ipOutDiscards_hi << 32) + - m.ipOutDiscards_lo; - stats->iw.ipOutNoRoutes = ((u64) m.ipOutNoRoutes_hi << 32) + - m.ipOutNoRoutes_lo; - stats->iw.ipReasmTimeout = (u64) m.ipReasmTimeout; - stats->iw.ipReasmReqds = (u64) m.ipReasmReqds; - stats->iw.ipReasmOKs = (u64) m.ipReasmOKs; - stats->iw.ipReasmFails = (u64) m.ipReasmFails; - stats->iw.tcpActiveOpens = (u64) m.tcpActiveOpens; - stats->iw.tcpPassiveOpens = (u64) m.tcpPassiveOpens; - stats->iw.tcpAttemptFails = (u64) m.tcpAttemptFails; - stats->iw.tcpEstabResets = (u64) m.tcpEstabResets; - stats->iw.tcpOutRsts = (u64) m.tcpOutRsts; - stats->iw.tcpCurrEstab = (u64) m.tcpCurrEstab; - stats->iw.tcpInSegs = ((u64) m.tcpInSegs_hi << 32) + - m.tcpInSegs_lo; - stats->iw.tcpOutSegs = ((u64) m.tcpOutSegs_hi << 32) + - m.tcpOutSegs_lo; - stats->iw.tcpRetransSegs = ((u64) m.tcpRetransSeg_hi << 32) + - m.tcpRetransSeg_lo; - stats->iw.tcpInErrs = ((u64) m.tcpInErrs_hi << 32) + - m.tcpInErrs_lo; - stats->iw.tcpRtoMin = (u64) m.tcpRtoMin; - stats->iw.tcpRtoMax = (u64) m.tcpRtoMax; - return 0; + stats->value[IPINRECEIVES] = ((u64)m.ipInReceive_hi << 32) + m.ipInReceive_lo; + stats->value[IPINHDRERRORS] = ((u64)m.ipInHdrErrors_hi << 32) + m.ipInHdrErrors_lo; + stats->value[IPINADDRERRORS] = ((u64)m.ipInAddrErrors_hi << 32) + m.ipInAddrErrors_lo; + stats->value[IPINUNKNOWNPROTOS] = ((u64)m.ipInUnknownProtos_hi << 32) + m.ipInUnknownProtos_lo; + stats->value[IPINDISCARDS] = ((u64)m.ipInDiscards_hi << 32) + m.ipInDiscards_lo; + stats->value[IPINDELIVERS] = ((u64)m.ipInDelivers_hi << 32) + m.ipInDelivers_lo; + stats->value[IPOUTREQUESTS] = ((u64)m.ipOutRequests_hi << 32) + m.ipOutRequests_lo; + stats->value[IPOUTDISCARDS] = ((u64)m.ipOutDiscards_hi << 32) + m.ipOutDiscards_lo; + stats->value[IPOUTNOROUTES] = ((u64)m.ipOutNoRoutes_hi << 32) + m.ipOutNoRoutes_lo; + stats->value[IPREASMTIMEOUT] = m.ipReasmTimeout; + stats->value[IPREASMREQDS] = m.ipReasmReqds; + stats->value[IPREASMOKS] = m.ipReasmOKs; + stats->value[IPREASMFAILS] = m.ipReasmFails; + stats->value[TCPACTIVEOPENS] = m.tcpActiveOpens; + stats->value[TCPPASSIVEOPENS] = m.tcpPassiveOpens; + stats->value[TCPATTEMPTFAILS] = m.tcpAttemptFails; + stats->value[TCPESTABRESETS] = m.tcpEstabResets; + stats->value[TCPCURRESTAB] = m.tcpCurrEstab; + stats->value[TCPINSEGS] = ((u64)m.tcpInSegs_hi << 32) + m.tcpInSegs_lo; + stats->value[TCPOUTSEGS] = ((u64)m.tcpOutSegs_hi << 32) + m.tcpOutSegs_lo; + stats->value[TCPRETRANSSEGS] = ((u64)m.tcpRetransSeg_hi << 32) + m.tcpRetransSeg_lo; + stats->value[TCPINERRS] = ((u64)m.tcpInErrs_hi << 32) + m.tcpInErrs_lo; + stats->value[TCPOUTRSTS] = m.tcpOutRsts; + stats->value[TCPRTOMIN] = m.tcpRtoMin; + stats->value[TCPRTOMAX] = m.tcpRtoMax; + + return stats->num_counters; } static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); @@ -1373,7 +1433,8 @@ int iwch_register_device(struct iwch_dev *dev) dev->ibdev.req_notify_cq = iwch_arm_cq; dev->ibdev.post_send = iwch_post_send; dev->ibdev.post_recv = iwch_post_receive; - dev->ibdev.get_protocol_stats = iwch_get_mib; + dev->ibdev.alloc_hw_stats = iwch_alloc_stats; + dev->ibdev.get_hw_stats = iwch_get_mib; dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION; dev->ibdev.get_port_immutable = iwch_port_immutable; diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index 7574f394fdac..dd8a86b726d2 100644 --- 
a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -446,20 +446,59 @@ static ssize_t show_board(struct device *dev, struct device_attribute *attr, c4iw_dev->rdev.lldi.pdev->device); } +enum counters { + IP4INSEGS, + IP4OUTSEGS, + IP4RETRANSSEGS, + IP4OUTRSTS, + IP6INSEGS, + IP6OUTSEGS, + IP6RETRANSSEGS, + IP6OUTRSTS, + NR_COUNTERS +}; + +static const char * const names[] = { + [IP4INSEGS] = "ip4InSegs", + [IP4OUTSEGS] = "ip4OutSegs", + [IP4RETRANSSEGS] = "ip4RetransSegs", + [IP4OUTRSTS] = "ip4OutRsts", + [IP6INSEGS] = "ip6InSegs", + [IP6OUTSEGS] = "ip6OutSegs", + [IP6RETRANSSEGS] = "ip6RetransSegs", + [IP6OUTRSTS] = "ip6OutRsts" +}; + +static struct rdma_hw_stats *c4iw_alloc_stats(struct ib_device *ibdev, + u8 port_num) +{ + BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS); + + if (port_num != 0) + return NULL; + + return rdma_alloc_hw_stats_struct(names, NR_COUNTERS, + RDMA_HW_STATS_DEFAULT_LIFESPAN); +} + static int c4iw_get_mib(struct ib_device *ibdev, - union rdma_protocol_stats *stats) + struct rdma_hw_stats *stats, + u8 port, int index) { struct tp_tcp_stats v4, v6; struct c4iw_dev *c4iw_dev = to_c4iw_dev(ibdev); cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6); - memset(stats, 0, sizeof *stats); - stats->iw.tcpInSegs = v4.tcp_in_segs + v6.tcp_in_segs; - stats->iw.tcpOutSegs = v4.tcp_out_segs + v6.tcp_out_segs; - stats->iw.tcpRetransSegs = v4.tcp_retrans_segs + v6.tcp_retrans_segs; - stats->iw.tcpOutRsts = v4.tcp_out_rsts + v6.tcp_out_rsts; - - return 0; + stats->value[IP4INSEGS] = v4.tcp_in_segs; + stats->value[IP4OUTSEGS] = v4.tcp_out_segs; + stats->value[IP4RETRANSSEGS] = v4.tcp_retrans_segs; + stats->value[IP4OUTRSTS] = v4.tcp_out_rsts; + stats->value[IP6INSEGS] = v6.tcp_in_segs; + stats->value[IP6OUTSEGS] = v6.tcp_out_segs; + stats->value[IP6RETRANSSEGS] = v6.tcp_retrans_segs; + stats->value[IP6OUTRSTS] = v6.tcp_out_rsts; + + return stats->num_counters; } static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); @@ -562,7 +601,8 @@ int c4iw_register_device(struct c4iw_dev *dev) dev->ibdev.req_notify_cq = c4iw_arm_cq; dev->ibdev.post_send = c4iw_post_send; dev->ibdev.post_recv = c4iw_post_receive; - dev->ibdev.get_protocol_stats = c4iw_get_mib; + dev->ibdev.alloc_hw_stats = c4iw_alloc_stats; + dev->ibdev.get_hw_stats = c4iw_get_mib; dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION; dev->ibdev.get_port_immutable = c4iw_port_immutable; dev->ibdev.drain_sq = c4iw_drain_sq; diff --git a/drivers/staging/rdma/hfi1/Kconfig b/drivers/infiniband/hw/hfi1/Kconfig index a925fb0db706..a925fb0db706 100644 --- a/drivers/staging/rdma/hfi1/Kconfig +++ b/drivers/infiniband/hw/hfi1/Kconfig diff --git a/drivers/staging/rdma/hfi1/Makefile b/drivers/infiniband/hw/hfi1/Makefile index 8dc59382ee96..9b5382c94b0c 100644 --- a/drivers/staging/rdma/hfi1/Makefile +++ b/drivers/infiniband/hw/hfi1/Makefile @@ -7,7 +7,7 @@ # obj-$(CONFIG_INFINIBAND_HFI1) += hfi1.o -hfi1-y := affinity.o chip.o device.o diag.o driver.o efivar.o \ +hfi1-y := affinity.o chip.o device.o driver.o efivar.o \ eprom.o file_ops.o firmware.o \ init.o intr.o mad.o mmu_rb.o pcie.o pio.o pio_copy.o platform.o \ qp.o qsfp.o rc.o ruc.o sdma.o sysfs.o trace.o twsi.o \ diff --git a/drivers/staging/rdma/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c index 6e7050ab9e16..6e7050ab9e16 100644 --- a/drivers/staging/rdma/hfi1/affinity.c +++ b/drivers/infiniband/hw/hfi1/affinity.c diff --git a/drivers/staging/rdma/hfi1/affinity.h b/drivers/infiniband/hw/hfi1/affinity.h index 20f52fe74091..20f52fe74091 
100644 --- a/drivers/staging/rdma/hfi1/affinity.h +++ b/drivers/infiniband/hw/hfi1/affinity.h diff --git a/drivers/staging/rdma/hfi1/aspm.h b/drivers/infiniband/hw/hfi1/aspm.h index 0d58fe3b49b5..0d58fe3b49b5 100644 --- a/drivers/staging/rdma/hfi1/aspm.h +++ b/drivers/infiniband/hw/hfi1/aspm.h diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index dcae8e723f98..3b876da745a1 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -1037,6 +1037,7 @@ static void dc_shutdown(struct hfi1_devdata *); static void dc_start(struct hfi1_devdata *); static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp, unsigned int *np); +static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd); /* * Error interrupt table entry. This is used as input to the interrupt @@ -6105,7 +6106,7 @@ int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok) } /* this access is valid only when the link is up */ - if ((ppd->host_link_state & HLS_UP) == 0) { + if (ppd->host_link_state & HLS_DOWN) { dd_dev_info(dd, "%s: link state %s not up\n", __func__, link_state_name(ppd->host_link_state)); ret = -EBUSY; @@ -6961,6 +6962,8 @@ void handle_link_down(struct work_struct *work) } reset_neighbor_info(ppd); + if (ppd->mgmt_allowed) + remove_full_mgmt_pkey(ppd); /* disable the port */ clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); @@ -7069,6 +7072,12 @@ static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd) (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); } +static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd) +{ + ppd->pkeys[2] = 0; + (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); +} + /* * Convert the given link width to the OPA link width bitmask. */ @@ -7429,7 +7438,7 @@ void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths) retry: mutex_lock(&ppd->hls_lock); /* only apply if the link is up */ - if (!(ppd->host_link_state & HLS_UP)) { + if (ppd->host_link_state & HLS_DOWN) { /* still going up..wait and retry */ if (ppd->host_link_state & HLS_GOING_UP) { if (++tries < 1000) { @@ -9212,9 +9221,6 @@ void reset_qsfp(struct hfi1_pportdata *ppd) /* Reset the QSFP */ mask = (u64)QSFP_HFI0_RESET_N; - qsfp_mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE); - qsfp_mask |= mask; - write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE, qsfp_mask); qsfp_mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT); @@ -9252,6 +9258,12 @@ static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd, dd_dev_info(dd, "%s: QSFP cable temperature too low\n", __func__); + /* + * The remaining alarms/warnings don't matter if the link is down. + */ + if (ppd->host_link_state & HLS_DOWN) + return 0; + if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) || (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING)) dd_dev_info(dd, "%s: QSFP supply voltage too high\n", @@ -9346,9 +9358,8 @@ void qsfp_event(struct work_struct *work) return; /* - * Turn DC back on after cables has been - * re-inserted. Up until now, the DC has been in - * reset to save power. + * Turn DC back on after cable has been re-inserted. Up until + * now, the DC has been in reset to save power. 
*/ dc_start(dd); @@ -9480,7 +9491,15 @@ int bringup_serdes(struct hfi1_pportdata *ppd) return ret; } - /* tune the SERDES to a ballpark setting for + get_port_type(ppd); + if (ppd->port_type == PORT_TYPE_QSFP) { + set_qsfp_int_n(ppd, 0); + wait_for_qsfp_init(ppd); + set_qsfp_int_n(ppd, 1); + } + + /* + * Tune the SerDes to a ballpark setting for * optimal signal and bit error rate * Needs to be done before starting the link */ @@ -10074,7 +10093,7 @@ u32 driver_physical_state(struct hfi1_pportdata *ppd) */ u32 driver_logical_state(struct hfi1_pportdata *ppd) { - if (ppd->host_link_state && !(ppd->host_link_state & HLS_UP)) + if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN)) return IB_PORT_DOWN; switch (ppd->host_link_state & HLS_UP) { @@ -14578,7 +14597,7 @@ u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl, (reason), (ret)) /* - * Initialize the Avago Thermal sensor. + * Initialize the thermal sensor. * * After initialization, enable polling of thermal sensor through * SBus interface. In order for this to work, the SBus Master diff --git a/drivers/staging/rdma/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h index 1948706fff1a..66a327978739 100644 --- a/drivers/staging/rdma/hfi1/chip.h +++ b/drivers/infiniband/hw/hfi1/chip.h @@ -398,6 +398,12 @@ /* Lane ID for general configuration registers */ #define GENERAL_CONFIG 4 +/* LINK_TUNING_PARAMETERS fields */ +#define TUNING_METHOD_SHIFT 24 + +/* LINK_OPTIMIZATION_SETTINGS fields */ +#define ENABLE_EXT_DEV_CONFIG_SHIFT 24 + /* LOAD_DATA 8051 command shifts and fields */ #define LOAD_DATA_FIELD_ID_SHIFT 40 #define LOAD_DATA_FIELD_ID_MASK 0xfull diff --git a/drivers/staging/rdma/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h index 8744de6667c2..8744de6667c2 100644 --- a/drivers/staging/rdma/hfi1/chip_registers.h +++ b/drivers/infiniband/hw/hfi1/chip_registers.h diff --git a/drivers/staging/rdma/hfi1/common.h b/drivers/infiniband/hw/hfi1/common.h index e9b6bb322025..fcc9c217a97a 100644 --- a/drivers/staging/rdma/hfi1/common.h +++ b/drivers/infiniband/hw/hfi1/common.h @@ -178,7 +178,8 @@ HFI1_CAP_PKEY_CHECK | \ HFI1_CAP_NO_INTEGRITY) -#define HFI1_USER_SWVERSION ((HFI1_USER_SWMAJOR << 16) | HFI1_USER_SWMINOR) +#define HFI1_USER_SWVERSION ((HFI1_USER_SWMAJOR << HFI1_SWMAJOR_SHIFT) | \ + HFI1_USER_SWMINOR) #ifndef HFI1_KERN_TYPE #define HFI1_KERN_TYPE 0 @@ -349,6 +350,8 @@ struct hfi1_message_header { #define HFI1_BECN_MASK 1 #define HFI1_BECN_SMASK BIT(HFI1_BECN_SHIFT) +#define HFI1_PSM_IOC_BASE_SEQ 0x0 + static inline __u64 rhf_to_cpu(const __le32 *rbuf) { return __le64_to_cpu(*((__le64 *)rbuf)); diff --git a/drivers/staging/rdma/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c index dbab9d9cc288..dbab9d9cc288 100644 --- a/drivers/staging/rdma/hfi1/debugfs.c +++ b/drivers/infiniband/hw/hfi1/debugfs.c diff --git a/drivers/staging/rdma/hfi1/debugfs.h b/drivers/infiniband/hw/hfi1/debugfs.h index b6fb6814f1b8..b6fb6814f1b8 100644 --- a/drivers/staging/rdma/hfi1/debugfs.h +++ b/drivers/infiniband/hw/hfi1/debugfs.h diff --git a/drivers/staging/rdma/hfi1/device.c b/drivers/infiniband/hw/hfi1/device.c index c05c39da83b1..bf64b5a7bfd7 100644 --- a/drivers/staging/rdma/hfi1/device.c +++ b/drivers/infiniband/hw/hfi1/device.c @@ -60,7 +60,8 @@ static dev_t hfi1_dev; int hfi1_cdev_init(int minor, const char *name, const struct file_operations *fops, struct cdev *cdev, struct device **devp, - bool user_accessible) + bool user_accessible, + struct kobject *parent) { const dev_t dev = 
MKDEV(MAJOR(hfi1_dev), minor); struct device *device = NULL; @@ -68,6 +69,7 @@ int hfi1_cdev_init(int minor, const char *name, cdev_init(cdev, fops); cdev->owner = THIS_MODULE; + cdev->kobj.parent = parent; kobject_set_name(&cdev->kobj, name); ret = cdev_add(cdev, dev, 1); @@ -82,13 +84,13 @@ int hfi1_cdev_init(int minor, const char *name, else device = device_create(class, NULL, dev, NULL, "%s", name); - if (!IS_ERR(device)) - goto done; - ret = PTR_ERR(device); - device = NULL; - pr_err("Could not create device for minor %d, %s (err %d)\n", - minor, name, -ret); - cdev_del(cdev); + if (IS_ERR(device)) { + ret = PTR_ERR(device); + device = NULL; + pr_err("Could not create device for minor %d, %s (err %d)\n", + minor, name, -ret); + cdev_del(cdev); + } done: *devp = device; return ret; diff --git a/drivers/staging/rdma/hfi1/device.h b/drivers/infiniband/hw/hfi1/device.h index 5bb3e83cf2da..c3ec19cb0ac9 100644 --- a/drivers/staging/rdma/hfi1/device.h +++ b/drivers/infiniband/hw/hfi1/device.h @@ -50,7 +50,8 @@ int hfi1_cdev_init(int minor, const char *name, const struct file_operations *fops, struct cdev *cdev, struct device **devp, - bool user_accessible); + bool user_accessible, + struct kobject *parent); void hfi1_cdev_cleanup(struct cdev *cdev, struct device **devp); const char *class_name(void); int __init dev_init(void); diff --git a/drivers/staging/rdma/hfi1/dma.c b/drivers/infiniband/hw/hfi1/dma.c index 7e8dab892848..7e8dab892848 100644 --- a/drivers/staging/rdma/hfi1/dma.c +++ b/drivers/infiniband/hw/hfi1/dma.c diff --git a/drivers/staging/rdma/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c index 700c6fa3a633..c75b0ae688f8 100644 --- a/drivers/staging/rdma/hfi1/driver.c +++ b/drivers/infiniband/hw/hfi1/driver.c @@ -1161,7 +1161,7 @@ int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc) ppd->lmc = lmc; hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LIDLMC, 0); - dd_dev_info(dd, "IB%u:%u got a lid: 0x%x\n", dd->unit, ppd->port, lid); + dd_dev_info(dd, "port %u: got a lid: 0x%x\n", ppd->port, lid); return 0; } diff --git a/drivers/staging/rdma/hfi1/efivar.c b/drivers/infiniband/hw/hfi1/efivar.c index 106349fc1fb9..106349fc1fb9 100644 --- a/drivers/staging/rdma/hfi1/efivar.c +++ b/drivers/infiniband/hw/hfi1/efivar.c diff --git a/drivers/staging/rdma/hfi1/efivar.h b/drivers/infiniband/hw/hfi1/efivar.h index 94e9e70de568..94e9e70de568 100644 --- a/drivers/staging/rdma/hfi1/efivar.h +++ b/drivers/infiniband/hw/hfi1/efivar.h diff --git a/drivers/infiniband/hw/hfi1/eprom.c b/drivers/infiniband/hw/hfi1/eprom.c new file mode 100644 index 000000000000..36b77943cbfd --- /dev/null +++ b/drivers/infiniband/hw/hfi1/eprom.c @@ -0,0 +1,102 @@ +/* + * Copyright(c) 2015, 2016 Intel Corporation. + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * - Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#include <linux/delay.h> +#include "hfi.h" +#include "common.h" +#include "eprom.h" + +#define CMD_SHIFT 24 +#define CMD_RELEASE_POWERDOWN_NOID ((0xab << CMD_SHIFT)) + +/* controller interface speeds */ +#define EP_SPEED_FULL 0x2 /* full speed */ + +/* + * How long to wait for the EPROM to become available, in ms. + * The spec 32 Mb EPROM takes around 40s to erase then write. + * Double it for safety. + */ +#define EPROM_TIMEOUT 80000 /* ms */ +/* + * Initialize the EPROM handler. + */ +int eprom_init(struct hfi1_devdata *dd) +{ + int ret = 0; + + /* only the discrete chip has an EPROM */ + if (dd->pcidev->device != PCI_DEVICE_ID_INTEL0) + return 0; + + /* + * It is OK if both HFIs reset the EPROM as long as they don't + * do it at the same time. 
+ */ + ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT); + if (ret) { + dd_dev_err(dd, + "%s: unable to acquire EPROM resource, no EPROM support\n", + __func__); + goto done_asic; + } + + /* reset EPROM to be sure it is in a good state */ + + /* set reset */ + write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_EP_RESET_SMASK); + /* clear reset, set speed */ + write_csr(dd, ASIC_EEP_CTL_STAT, + EP_SPEED_FULL << ASIC_EEP_CTL_STAT_RATE_SPI_SHIFT); + + /* wake the device with command "release powerdown NoID" */ + write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_RELEASE_POWERDOWN_NOID); + + dd->eprom_available = true; + release_chip_resource(dd, CR_EPROM); +done_asic: + return ret; +} diff --git a/drivers/staging/rdma/hfi1/eprom.h b/drivers/infiniband/hw/hfi1/eprom.h index d41f0b1afb15..d41f0b1afb15 100644 --- a/drivers/staging/rdma/hfi1/eprom.h +++ b/drivers/infiniband/hw/hfi1/eprom.h diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index c1c5bf82addb..7a5b0e676cc7 100644 --- a/drivers/staging/rdma/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c @@ -72,8 +72,6 @@ */ static int hfi1_file_open(struct inode *, struct file *); static int hfi1_file_close(struct inode *, struct file *); -static ssize_t hfi1_file_write(struct file *, const char __user *, - size_t, loff_t *); static ssize_t hfi1_write_iter(struct kiocb *, struct iov_iter *); static unsigned int hfi1_poll(struct file *, struct poll_table_struct *); static int hfi1_file_mmap(struct file *, struct vm_area_struct *); @@ -86,8 +84,7 @@ static int get_ctxt_info(struct file *, void __user *, __u32); static int get_base_info(struct file *, void __user *, __u32); static int setup_ctxt(struct file *); static int setup_subctxt(struct hfi1_ctxtdata *); -static int get_user_context(struct file *, struct hfi1_user_info *, - int, unsigned); +static int get_user_context(struct file *, struct hfi1_user_info *, int); static int find_shared_ctxt(struct file *, const struct hfi1_user_info *); static int allocate_ctxt(struct file *, struct hfi1_devdata *, struct hfi1_user_info *); @@ -97,13 +94,15 @@ static int user_event_ack(struct hfi1_ctxtdata *, int, unsigned long); static int set_ctxt_pkey(struct hfi1_ctxtdata *, unsigned, u16); static int manage_rcvq(struct hfi1_ctxtdata *, unsigned, int); static int vma_fault(struct vm_area_struct *, struct vm_fault *); +static long hfi1_file_ioctl(struct file *fp, unsigned int cmd, + unsigned long arg); static const struct file_operations hfi1_file_ops = { .owner = THIS_MODULE, - .write = hfi1_file_write, .write_iter = hfi1_write_iter, .open = hfi1_file_open, .release = hfi1_file_close, + .unlocked_ioctl = hfi1_file_ioctl, .poll = hfi1_poll, .mmap = hfi1_file_mmap, .llseek = noop_llseek, @@ -169,6 +168,13 @@ static inline int is_valid_mmap(u64 token) static int hfi1_file_open(struct inode *inode, struct file *fp) { + struct hfi1_devdata *dd = container_of(inode->i_cdev, + struct hfi1_devdata, + user_cdev); + + /* Just take a ref now. Not all opens result in a context assign */ + kobject_get(&dd->kobj); + /* The real work is performed later in assign_ctxt() */ fp->private_data = kzalloc(sizeof(struct hfi1_filedata), GFP_KERNEL); if (fp->private_data) /* no cpu affinity by default */ @@ -176,127 +182,59 @@ static int hfi1_file_open(struct inode *inode, struct file *fp) return fp->private_data ? 
0 : -ENOMEM; } -static ssize_t hfi1_file_write(struct file *fp, const char __user *data, - size_t count, loff_t *offset) +static long hfi1_file_ioctl(struct file *fp, unsigned int cmd, + unsigned long arg) { - const struct hfi1_cmd __user *ucmd; struct hfi1_filedata *fd = fp->private_data; struct hfi1_ctxtdata *uctxt = fd->uctxt; - struct hfi1_cmd cmd; struct hfi1_user_info uinfo; struct hfi1_tid_info tinfo; + int ret = 0; unsigned long addr; - ssize_t consumed = 0, copy = 0, ret = 0; - void *dest = NULL; - __u64 user_val = 0; - int uctxt_required = 1; - int must_be_root = 0; - - /* FIXME: This interface cannot continue out of staging */ - if (WARN_ON_ONCE(!ib_safe_file_access(fp))) - return -EACCES; - - if (count < sizeof(cmd)) { - ret = -EINVAL; - goto bail; - } - - ucmd = (const struct hfi1_cmd __user *)data; - if (copy_from_user(&cmd, ucmd, sizeof(cmd))) { - ret = -EFAULT; - goto bail; - } - - consumed = sizeof(cmd); - - switch (cmd.type) { - case HFI1_CMD_ASSIGN_CTXT: - uctxt_required = 0; /* assigned user context not required */ - copy = sizeof(uinfo); - dest = &uinfo; - break; - case HFI1_CMD_SDMA_STATUS_UPD: - case HFI1_CMD_CREDIT_UPD: - copy = 0; - break; - case HFI1_CMD_TID_UPDATE: - case HFI1_CMD_TID_FREE: - case HFI1_CMD_TID_INVAL_READ: - copy = sizeof(tinfo); - dest = &tinfo; - break; - case HFI1_CMD_USER_INFO: - case HFI1_CMD_RECV_CTRL: - case HFI1_CMD_POLL_TYPE: - case HFI1_CMD_ACK_EVENT: - case HFI1_CMD_CTXT_INFO: - case HFI1_CMD_SET_PKEY: - case HFI1_CMD_CTXT_RESET: - copy = 0; - user_val = cmd.addr; - break; - case HFI1_CMD_EP_INFO: - case HFI1_CMD_EP_ERASE_CHIP: - case HFI1_CMD_EP_ERASE_RANGE: - case HFI1_CMD_EP_READ_RANGE: - case HFI1_CMD_EP_WRITE_RANGE: - uctxt_required = 0; /* assigned user context not required */ - must_be_root = 1; /* validate user */ - copy = 0; - break; - default: - ret = -EINVAL; - goto bail; - } - - /* If the command comes with user data, copy it. */ - if (copy) { - if (copy_from_user(dest, (void __user *)cmd.addr, copy)) { - ret = -EFAULT; - goto bail; - } - consumed += copy; - } - - /* - * Make sure there is a uctxt when needed. 
- */ - if (uctxt_required && !uctxt) { - ret = -EINVAL; - goto bail; - } + int uval = 0; + unsigned long ul_uval = 0; + u16 uval16 = 0; + + hfi1_cdbg(IOCTL, "IOCTL recv: 0x%x", cmd); + if (cmd != HFI1_IOCTL_ASSIGN_CTXT && + cmd != HFI1_IOCTL_GET_VERS && + !uctxt) + return -EINVAL; - /* only root can do these operations */ - if (must_be_root && !capable(CAP_SYS_ADMIN)) { - ret = -EPERM; - goto bail; - } + switch (cmd) { + case HFI1_IOCTL_ASSIGN_CTXT: + if (copy_from_user(&uinfo, + (struct hfi1_user_info __user *)arg, + sizeof(uinfo))) + return -EFAULT; - switch (cmd.type) { - case HFI1_CMD_ASSIGN_CTXT: ret = assign_ctxt(fp, &uinfo); if (ret < 0) - goto bail; - ret = setup_ctxt(fp); + return ret; + ret = setup_ctxt(fp); if (ret) - goto bail; + return ret; ret = user_init(fp); break; - case HFI1_CMD_CTXT_INFO: - ret = get_ctxt_info(fp, (void __user *)(unsigned long) - user_val, cmd.len); - break; - case HFI1_CMD_USER_INFO: - ret = get_base_info(fp, (void __user *)(unsigned long) - user_val, cmd.len); + case HFI1_IOCTL_CTXT_INFO: + ret = get_ctxt_info(fp, (void __user *)(unsigned long)arg, + sizeof(struct hfi1_ctxt_info)); break; - case HFI1_CMD_SDMA_STATUS_UPD: + case HFI1_IOCTL_USER_INFO: + ret = get_base_info(fp, (void __user *)(unsigned long)arg, + sizeof(struct hfi1_base_info)); break; - case HFI1_CMD_CREDIT_UPD: + case HFI1_IOCTL_CREDIT_UPD: if (uctxt && uctxt->sc) sc_return_credits(uctxt->sc); break; - case HFI1_CMD_TID_UPDATE: + + case HFI1_IOCTL_TID_UPDATE: + if (copy_from_user(&tinfo, + (struct hfi1_tid_info __user *)arg, + sizeof(tinfo))) + return -EFAULT; + + ret = hfi1_user_exp_rcv_setup(fp, &tinfo); if (!ret) { /* @@ -305,57 +243,82 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data, * These fields are adjacent in the structure so * we can copy them at the same time. 
*/ - addr = (unsigned long)cmd.addr + - offsetof(struct hfi1_tid_info, tidcnt); + addr = arg + offsetof(struct hfi1_tid_info, tidcnt); if (copy_to_user((void __user *)addr, &tinfo.tidcnt, sizeof(tinfo.tidcnt) + sizeof(tinfo.length))) ret = -EFAULT; } break; - case HFI1_CMD_TID_INVAL_READ: - ret = hfi1_user_exp_rcv_invalid(fp, &tinfo); + + case HFI1_IOCTL_TID_FREE: + if (copy_from_user(&tinfo, + (struct hfi1_tid_info __user *)arg, + sizeof(tinfo))) + return -EFAULT; + + ret = hfi1_user_exp_rcv_clear(fp, &tinfo); if (ret) break; - addr = (unsigned long)cmd.addr + - offsetof(struct hfi1_tid_info, tidcnt); + addr = arg + offsetof(struct hfi1_tid_info, tidcnt); if (copy_to_user((void __user *)addr, &tinfo.tidcnt, sizeof(tinfo.tidcnt))) ret = -EFAULT; break; - case HFI1_CMD_TID_FREE: - ret = hfi1_user_exp_rcv_clear(fp, &tinfo); + + case HFI1_IOCTL_TID_INVAL_READ: + if (copy_from_user(&tinfo, + (struct hfi1_tid_info __user *)arg, + sizeof(tinfo))) + return -EFAULT; + + ret = hfi1_user_exp_rcv_invalid(fp, &tinfo); if (ret) break; - addr = (unsigned long)cmd.addr + - offsetof(struct hfi1_tid_info, tidcnt); + addr = arg + offsetof(struct hfi1_tid_info, tidcnt); if (copy_to_user((void __user *)addr, &tinfo.tidcnt, sizeof(tinfo.tidcnt))) ret = -EFAULT; break; - case HFI1_CMD_RECV_CTRL: - ret = manage_rcvq(uctxt, fd->subctxt, (int)user_val); + + case HFI1_IOCTL_RECV_CTRL: + ret = get_user(uval, (int __user *)arg); + if (ret != 0) + return -EFAULT; + ret = manage_rcvq(uctxt, fd->subctxt, uval); break; - case HFI1_CMD_POLL_TYPE: - uctxt->poll_type = (typeof(uctxt->poll_type))user_val; + + case HFI1_IOCTL_POLL_TYPE: + ret = get_user(uval, (int __user *)arg); + if (ret != 0) + return -EFAULT; + uctxt->poll_type = (typeof(uctxt->poll_type))uval; break; - case HFI1_CMD_ACK_EVENT: - ret = user_event_ack(uctxt, fd->subctxt, user_val); + + case HFI1_IOCTL_ACK_EVENT: + ret = get_user(ul_uval, (unsigned long __user *)arg); + if (ret != 0) + return -EFAULT; + ret = user_event_ack(uctxt, fd->subctxt, ul_uval); break; - case HFI1_CMD_SET_PKEY: + + case HFI1_IOCTL_SET_PKEY: + ret = get_user(uval16, (u16 __user *)arg); + if (ret != 0) + return -EFAULT; if (HFI1_CAP_IS_USET(PKEY_CHECK)) - ret = set_ctxt_pkey(uctxt, fd->subctxt, user_val); + ret = set_ctxt_pkey(uctxt, fd->subctxt, uval16); else - ret = -EPERM; + return -EPERM; break; - case HFI1_CMD_CTXT_RESET: { + + case HFI1_IOCTL_CTXT_RESET: { struct send_context *sc; struct hfi1_devdata *dd; - if (!uctxt || !uctxt->dd || !uctxt->sc) { - ret = -EINVAL; - break; - } + if (!uctxt || !uctxt->dd || !uctxt->sc) + return -EINVAL; + /* * There is no protection here. 
User level has to * guarantee that no one will be writing to the send @@ -373,10 +336,9 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data, wait_event_interruptible_timeout( sc->halt_wait, (sc->flags & SCF_HALTED), msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT)); - if (!(sc->flags & SCF_HALTED)) { - ret = -ENOLCK; - break; - } + if (!(sc->flags & SCF_HALTED)) + return -ENOLCK; + /* * If the send context was halted due to a Freeze, * wait until the device has been "unfrozen" before @@ -387,18 +349,16 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data, dd->event_queue, !(ACCESS_ONCE(dd->flags) & HFI1_FROZEN), msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT)); - if (dd->flags & HFI1_FROZEN) { - ret = -ENOLCK; - break; - } - if (dd->flags & HFI1_FORCED_FREEZE) { + if (dd->flags & HFI1_FROZEN) + return -ENOLCK; + + if (dd->flags & HFI1_FORCED_FREEZE) /* * Don't allow context reset if we are into * forced freeze */ - ret = -ENODEV; - break; - } + return -ENODEV; + sc_disable(sc); ret = sc_enable(sc); hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, @@ -410,18 +370,17 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data, sc_return_credits(sc); break; } - case HFI1_CMD_EP_INFO: - case HFI1_CMD_EP_ERASE_CHIP: - case HFI1_CMD_EP_ERASE_RANGE: - case HFI1_CMD_EP_READ_RANGE: - case HFI1_CMD_EP_WRITE_RANGE: - ret = handle_eprom_command(fp, &cmd); + + case HFI1_IOCTL_GET_VERS: + uval = HFI1_USER_SWVERSION; + if (put_user(uval, (int __user *)arg)) + return -EFAULT; break; + + default: + return -EINVAL; } - if (ret >= 0) - ret = consumed; -bail: return ret; } @@ -738,7 +697,9 @@ static int hfi1_file_close(struct inode *inode, struct file *fp) { struct hfi1_filedata *fdata = fp->private_data; struct hfi1_ctxtdata *uctxt = fdata->uctxt; - struct hfi1_devdata *dd; + struct hfi1_devdata *dd = container_of(inode->i_cdev, + struct hfi1_devdata, + user_cdev); unsigned long flags, *ev; fp->private_data = NULL; @@ -747,7 +708,6 @@ static int hfi1_file_close(struct inode *inode, struct file *fp) goto done; hfi1_cdbg(PROC, "freeing ctxt %u:%u", uctxt->ctxt, fdata->subctxt); - dd = uctxt->dd; mutex_lock(&hfi1_mutex); flush_wc(); @@ -813,6 +773,7 @@ static int hfi1_file_close(struct inode *inode, struct file *fp) mutex_unlock(&hfi1_mutex); hfi1_free_ctxtdata(dd, uctxt); done: + kobject_put(&dd->kobj); kfree(fdata); return 0; } @@ -836,7 +797,7 @@ static u64 kvirt_to_phys(void *addr) static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo) { int i_minor, ret = 0; - unsigned swmajor, swminor, alg = HFI1_ALG_ACROSS; + unsigned int swmajor, swminor; swmajor = uinfo->userversion >> 16; if (swmajor != HFI1_USER_SWMAJOR) { @@ -846,9 +807,6 @@ static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo) swminor = uinfo->userversion & 0xffff; - if (uinfo->hfi1_alg < HFI1_ALG_COUNT) - alg = uinfo->hfi1_alg; - mutex_lock(&hfi1_mutex); /* First, lets check if we need to setup a shared context? 
*/ if (uinfo->subctxt_cnt) { @@ -868,7 +826,7 @@ static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo) */ if (!ret) { i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE; - ret = get_user_context(fp, uinfo, i_minor - 1, alg); + ret = get_user_context(fp, uinfo, i_minor); } done_unlock: mutex_unlock(&hfi1_mutex); @@ -876,71 +834,26 @@ done: return ret; } -/* return true if the device available for general use */ -static int usable_device(struct hfi1_devdata *dd) -{ - struct hfi1_pportdata *ppd = dd->pport; - - return driver_lstate(ppd) == IB_PORT_ACTIVE; -} - static int get_user_context(struct file *fp, struct hfi1_user_info *uinfo, - int devno, unsigned alg) + int devno) { struct hfi1_devdata *dd = NULL; - int ret = 0, devmax, npresent, nup, dev; + int devmax, npresent, nup; devmax = hfi1_count_units(&npresent, &nup); - if (!npresent) { - ret = -ENXIO; - goto done; - } - if (!nup) { - ret = -ENETDOWN; - goto done; - } - if (devno >= 0) { - dd = hfi1_lookup(devno); - if (!dd) - ret = -ENODEV; - else if (!dd->freectxts) - ret = -EBUSY; - } else { - struct hfi1_devdata *pdd; - - if (alg == HFI1_ALG_ACROSS) { - unsigned free = 0U; - - for (dev = 0; dev < devmax; dev++) { - pdd = hfi1_lookup(dev); - if (!pdd) - continue; - if (!usable_device(pdd)) - continue; - if (pdd->freectxts && - pdd->freectxts > free) { - dd = pdd; - free = pdd->freectxts; - } - } - } else { - for (dev = 0; dev < devmax; dev++) { - pdd = hfi1_lookup(dev); - if (!pdd) - continue; - if (!usable_device(pdd)) - continue; - if (pdd->freectxts) { - dd = pdd; - break; - } - } - } - if (!dd) - ret = -EBUSY; - } -done: - return ret ? ret : allocate_ctxt(fp, dd, uinfo); + if (!npresent) + return -ENXIO; + + if (!nup) + return -ENETDOWN; + + dd = hfi1_lookup(devno); + if (!dd) + return -ENODEV; + else if (!dd->freectxts) + return -EBUSY; + + return allocate_ctxt(fp, dd, uinfo); } static int find_shared_ctxt(struct file *fp, @@ -1546,170 +1459,10 @@ done: return ret; } -static int ui_open(struct inode *inode, struct file *filp) -{ - struct hfi1_devdata *dd; - - dd = container_of(inode->i_cdev, struct hfi1_devdata, ui_cdev); - filp->private_data = dd; /* for other methods */ - return 0; -} - -static int ui_release(struct inode *inode, struct file *filp) -{ - /* nothing to do */ - return 0; -} - -static loff_t ui_lseek(struct file *filp, loff_t offset, int whence) -{ - struct hfi1_devdata *dd = filp->private_data; - - return fixed_size_llseek(filp, offset, whence, - (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE); -} - -/* NOTE: assumes unsigned long is 8 bytes */ -static ssize_t ui_read(struct file *filp, char __user *buf, size_t count, - loff_t *f_pos) -{ - struct hfi1_devdata *dd = filp->private_data; - void __iomem *base = dd->kregbase; - unsigned long total, csr_off, - barlen = (dd->kregend - dd->kregbase); - u64 data; - - /* only read 8 byte quantities */ - if ((count % 8) != 0) - return -EINVAL; - /* offset must be 8-byte aligned */ - if ((*f_pos % 8) != 0) - return -EINVAL; - /* destination buffer must be 8-byte aligned */ - if ((unsigned long)buf % 8 != 0) - return -EINVAL; - /* must be in range */ - if (*f_pos + count > (barlen + DC8051_DATA_MEM_SIZE)) - return -EINVAL; - /* only set the base if we are not starting past the BAR */ - if (*f_pos < barlen) - base += *f_pos; - csr_off = *f_pos; - for (total = 0; total < count; total += 8, csr_off += 8) { - /* accessing LCB CSRs requires more checks */ - if (is_lcb_offset(csr_off)) { - if (read_lcb_csr(dd, csr_off, (u64 *)&data)) - break; /* failed */ - } 
- /* - * Cannot read ASIC GPIO/QSFP* clear and force CSRs without a - * false parity error. Avoid the whole issue by not reading - * them. These registers are defined as having a read value - * of 0. - */ - else if (csr_off == ASIC_GPIO_CLEAR || - csr_off == ASIC_GPIO_FORCE || - csr_off == ASIC_QSFP1_CLEAR || - csr_off == ASIC_QSFP1_FORCE || - csr_off == ASIC_QSFP2_CLEAR || - csr_off == ASIC_QSFP2_FORCE) - data = 0; - else if (csr_off >= barlen) { - /* - * read_8051_data can read more than just 8 bytes at - * a time. However, folding this into the loop and - * handling the reads in 8 byte increments allows us - * to smoothly transition from chip memory to 8051 - * memory. - */ - if (read_8051_data(dd, - (u32)(csr_off - barlen), - sizeof(data), &data)) - break; /* failed */ - } else - data = readq(base + total); - if (put_user(data, (unsigned long __user *)(buf + total))) - break; - } - *f_pos += total; - return total; -} - -/* NOTE: assumes unsigned long is 8 bytes */ -static ssize_t ui_write(struct file *filp, const char __user *buf, - size_t count, loff_t *f_pos) -{ - struct hfi1_devdata *dd = filp->private_data; - void __iomem *base; - unsigned long total, data, csr_off; - int in_lcb; - - /* only write 8 byte quantities */ - if ((count % 8) != 0) - return -EINVAL; - /* offset must be 8-byte aligned */ - if ((*f_pos % 8) != 0) - return -EINVAL; - /* source buffer must be 8-byte aligned */ - if ((unsigned long)buf % 8 != 0) - return -EINVAL; - /* must be in range */ - if (*f_pos + count > dd->kregend - dd->kregbase) - return -EINVAL; - - base = (void __iomem *)dd->kregbase + *f_pos; - csr_off = *f_pos; - in_lcb = 0; - for (total = 0; total < count; total += 8, csr_off += 8) { - if (get_user(data, (unsigned long __user *)(buf + total))) - break; - /* accessing LCB CSRs requires a special procedure */ - if (is_lcb_offset(csr_off)) { - if (!in_lcb) { - int ret = acquire_lcb_access(dd, 1); - - if (ret) - break; - in_lcb = 1; - } - } else { - if (in_lcb) { - release_lcb_access(dd, 1); - in_lcb = 0; - } - } - writeq(data, base + total); - } - if (in_lcb) - release_lcb_access(dd, 1); - *f_pos += total; - return total; -} - -static const struct file_operations ui_file_ops = { - .owner = THIS_MODULE, - .llseek = ui_lseek, - .read = ui_read, - .write = ui_write, - .open = ui_open, - .release = ui_release, -}; - -#define UI_OFFSET 192 /* device minor offset for UI devices */ -static int create_ui = 1; - -static struct cdev wildcard_cdev; -static struct device *wildcard_device; - -static atomic_t user_count = ATOMIC_INIT(0); - static void user_remove(struct hfi1_devdata *dd) { - if (atomic_dec_return(&user_count) == 0) - hfi1_cdev_cleanup(&wildcard_cdev, &wildcard_device); hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device); - hfi1_cdev_cleanup(&dd->ui_cdev, &dd->ui_device); } static int user_add(struct hfi1_devdata *dd) @@ -1717,34 +1470,13 @@ static int user_add(struct hfi1_devdata *dd) char name[10]; int ret; - if (atomic_inc_return(&user_count) == 1) { - ret = hfi1_cdev_init(0, class_name(), &hfi1_file_ops, - &wildcard_cdev, &wildcard_device, - true); - if (ret) - goto done; - } - snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit); - ret = hfi1_cdev_init(dd->unit + 1, name, &hfi1_file_ops, + ret = hfi1_cdev_init(dd->unit, name, &hfi1_file_ops, &dd->user_cdev, &dd->user_device, - true); + true, &dd->kobj); if (ret) - goto done; + user_remove(dd); - if (create_ui) { - snprintf(name, sizeof(name), - "%s_ui%d", class_name(), dd->unit); - ret = hfi1_cdev_init(dd->unit + UI_OFFSET, name, 
&ui_file_ops, - &dd->ui_cdev, &dd->ui_device, - false); - if (ret) - goto done; - } - - return 0; -done: - user_remove(dd); return ret; } @@ -1753,13 +1485,7 @@ done: */ int hfi1_device_create(struct hfi1_devdata *dd) { - int r, ret; - - r = user_add(dd); - ret = hfi1_diag_add(dd); - if (r && !ret) - ret = r; - return ret; + return user_add(dd); } /* @@ -1769,5 +1495,4 @@ int hfi1_device_create(struct hfi1_devdata *dd) void hfi1_device_remove(struct hfi1_devdata *dd) { user_remove(dd); - hfi1_diag_remove(dd); } diff --git a/drivers/staging/rdma/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c index ed680fda611d..ed680fda611d 100644 --- a/drivers/staging/rdma/hfi1/firmware.c +++ b/drivers/infiniband/hw/hfi1/firmware.c diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index 7b78d56de7f5..4417a0fd3ef9 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h @@ -453,6 +453,7 @@ struct rvt_sge_state; #define HLS_LINK_COOLDOWN BIT(__HLS_LINK_COOLDOWN_BP) #define HLS_UP (HLS_UP_INIT | HLS_UP_ARMED | HLS_UP_ACTIVE) +#define HLS_DOWN ~(HLS_UP) /* use this MTU size if none other is given */ #define HFI1_DEFAULT_ACTIVE_MTU 10240 @@ -1168,6 +1169,7 @@ struct hfi1_devdata { atomic_t aspm_disabled_cnt; struct hfi1_affinity *affinity; + struct kobject kobj; }; /* 8051 firmware version helper */ @@ -1882,9 +1884,8 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd) get_unit_name((dd)->unit), ##__VA_ARGS__) #define hfi1_dev_porterr(dd, port, fmt, ...) \ - dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \ - get_unit_name((dd)->unit), (dd)->unit, (port), \ - ##__VA_ARGS__) + dev_err(&(dd)->pcidev->dev, "%s: port %u: " fmt, \ + get_unit_name((dd)->unit), (port), ##__VA_ARGS__) /* * this is used for formatting hw error messages... diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index 502b7cf4647d..5cc492e5776d 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c @@ -732,12 +732,12 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit) lastfail = hfi1_create_rcvhdrq(dd, rcd); if (!lastfail) lastfail = hfi1_setup_eagerbufs(rcd); - if (lastfail) + if (lastfail) { dd_dev_err(dd, "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n"); + ret = lastfail; + } } - if (lastfail) - ret = lastfail; /* Allocate enough memory for user event notification. */ len = PAGE_ALIGN(dd->chip_rcv_contexts * HFI1_MAX_SHARED_CTXTS * @@ -989,8 +989,10 @@ static void release_asic_data(struct hfi1_devdata *dd) dd->asic_data = NULL; } -void hfi1_free_devdata(struct hfi1_devdata *dd) +static void __hfi1_free_devdata(struct kobject *kobj) { + struct hfi1_devdata *dd = + container_of(kobj, struct hfi1_devdata, kobj); unsigned long flags; spin_lock_irqsave(&hfi1_devs_lock, flags); @@ -1007,6 +1009,15 @@ void hfi1_free_devdata(struct hfi1_devdata *dd) rvt_dealloc_device(&dd->verbs_dev.rdi); } +static struct kobj_type hfi1_devdata_type = { + .release = __hfi1_free_devdata, +}; + +void hfi1_free_devdata(struct hfi1_devdata *dd) +{ + kobject_put(&dd->kobj); +} + /* * Allocate our primary per-unit data structure. 
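/*
 * The init.c hunk above converts hfi1_devdata freeing to the standard
 * kobject release pattern: kobject_init() takes the initial reference,
 * additional users pair kobject_get() with kobject_put(), and the ktype's
 * release callback runs only when the last reference drops. A generic
 * sketch of the pattern with hypothetical names (struct foo, foo_release):
 */
#include <linux/kobject.h>
#include <linux/slab.h>

struct foo {
	struct kobject kobj;
	/* ... device state ... */
};

static void foo_release(struct kobject *kobj)
{
	struct foo *f = container_of(kobj, struct foo, kobj);

	kfree(f);	/* safe: no references remain at this point */
}

static struct kobj_type foo_ktype = {
	.release = foo_release,
};

/* usage: kobject_init(&f->kobj, &foo_ktype); ... kobject_put(&f->kobj); */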
Must be done via verbs * allocator, because the verbs cleanup process both does cleanup and @@ -1102,6 +1113,7 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra) &pdev->dev, "Could not alloc cpulist info, cpu affinity might be wrong\n"); } + kobject_init(&dd->kobj, &hfi1_devdata_type); return dd; bail: @@ -1300,7 +1312,7 @@ static void cleanup_device_data(struct hfi1_devdata *dd) spin_lock(&ppd->cc_state_lock); cc_state = get_cc_state(ppd); - rcu_assign_pointer(ppd->cc_state, NULL); + RCU_INIT_POINTER(ppd->cc_state, NULL); spin_unlock(&ppd->cc_state_lock); if (cc_state) diff --git a/drivers/staging/rdma/hfi1/intr.c b/drivers/infiniband/hw/hfi1/intr.c index 65348d16ab2f..65348d16ab2f 100644 --- a/drivers/staging/rdma/hfi1/intr.c +++ b/drivers/infiniband/hw/hfi1/intr.c diff --git a/drivers/staging/rdma/hfi1/iowait.h b/drivers/infiniband/hw/hfi1/iowait.h index 2ec6ef38d389..2ec6ef38d389 100644 --- a/drivers/staging/rdma/hfi1/iowait.h +++ b/drivers/infiniband/hw/hfi1/iowait.h diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c index ed58cf21e790..219029576ba0 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/infiniband/hw/hfi1/mad.c @@ -1403,6 +1403,12 @@ static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys) if (key == okey) continue; /* + * Don't update pkeys[2], if an HFI port without MgmtAllowed + * by neighbor is a switch. + */ + if (i == 2 && !ppd->mgmt_allowed && ppd->neighbor_type == 1) + continue; + /* * The SM gives us the complete PKey table. We have * to ensure that we put the PKeys in the matching * slots. @@ -3363,6 +3369,50 @@ static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am, return reply((struct ib_mad_hdr *)smp); } +/* + * Apply congestion control information stored in the ppd to the + * active structure. + */ +static void apply_cc_state(struct hfi1_pportdata *ppd) +{ + struct cc_state *old_cc_state, *new_cc_state; + + new_cc_state = kzalloc(sizeof(*new_cc_state), GFP_KERNEL); + if (!new_cc_state) + return; + + /* + * Hold the lock for updating *and* to prevent ppd information + * from changing during the update. + */ + spin_lock(&ppd->cc_state_lock); + + old_cc_state = get_cc_state(ppd); + if (!old_cc_state) { + /* never active, or shutting down */ + spin_unlock(&ppd->cc_state_lock); + kfree(new_cc_state); + return; + } + + *new_cc_state = *old_cc_state; + + new_cc_state->cct.ccti_limit = ppd->total_cct_entry - 1; + memcpy(new_cc_state->cct.entries, ppd->ccti_entries, + ppd->total_cct_entry * sizeof(struct ib_cc_table_entry)); + + new_cc_state->cong_setting.port_control = IB_CC_CCS_PC_SL_BASED; + new_cc_state->cong_setting.control_map = ppd->cc_sl_control_map; + memcpy(new_cc_state->cong_setting.entries, ppd->congestion_entries, + OPA_MAX_SLS * sizeof(struct opa_congestion_setting_entry)); + + rcu_assign_pointer(ppd->cc_state, new_cc_state); + + spin_unlock(&ppd->cc_state_lock); + + call_rcu(&old_cc_state->rcu, cc_state_reclaim); +} + static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u8 port, u32 *resp_len) @@ -3374,6 +3424,11 @@ static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data, struct opa_congestion_setting_entry_shadow *entries; int i; + /* + * Save details from packet into the ppd. Hold the cc_state_lock so + * our information is consistent with anyone trying to apply the state. 
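/*
 * apply_cc_state() above follows the usual RCU publish idiom: build a
 * full copy of the state, swap it in with rcu_assign_pointer() under the
 * writer lock, and defer freeing the old copy past a grace period. A
 * generic sketch, with hypothetical struct and function names:
 */
struct state {
	struct rcu_head rcu;
	/* ... payload ... */
};

struct holder {
	spinlock_t lock;		/* serializes writers only */
	struct state __rcu *cur;	/* readers use rcu_dereference() */
};

static void state_reclaim(struct rcu_head *head)
{
	kfree(container_of(head, struct state, rcu));
}

static void publish_state(struct holder *h, struct state *new_s)
{
	struct state *old_s;

	spin_lock(&h->lock);
	old_s = rcu_dereference_protected(h->cur, lockdep_is_held(&h->lock));
	rcu_assign_pointer(h->cur, new_s);	/* readers see old or new, never a mix */
	spin_unlock(&h->lock);
	if (old_s)
		call_rcu(&old_s->rcu, state_reclaim);	/* free after all readers finish */
}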
+ */ + spin_lock(&ppd->cc_state_lock); ppd->cc_sl_control_map = be32_to_cpu(p->control_map); entries = ppd->congestion_entries; @@ -3384,6 +3439,10 @@ static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data, p->entries[i].trigger_threshold; entries[i].ccti_min = p->entries[i].ccti_min; } + spin_unlock(&ppd->cc_state_lock); + + /* now apply the information */ + apply_cc_state(ppd); return __subn_get_opa_cong_setting(smp, am, data, ibdev, port, resp_len); @@ -3526,7 +3585,6 @@ static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data, int i, j; u32 sentry, eentry; u16 ccti_limit; - struct cc_state *old_cc_state, *new_cc_state; /* sanity check n_blocks, start_block */ if (n_blocks == 0 || @@ -3546,45 +3604,20 @@ static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data, return reply((struct ib_mad_hdr *)smp); } - new_cc_state = kzalloc(sizeof(*new_cc_state), GFP_KERNEL); - if (!new_cc_state) - goto getit; - + /* + * Save details from packet into the ppd. Hold the cc_state_lock so + * our information is consistent with anyone trying to apply the state. + */ spin_lock(&ppd->cc_state_lock); - - old_cc_state = get_cc_state(ppd); - - if (!old_cc_state) { - spin_unlock(&ppd->cc_state_lock); - kfree(new_cc_state); - return reply((struct ib_mad_hdr *)smp); - } - - *new_cc_state = *old_cc_state; - - new_cc_state->cct.ccti_limit = ccti_limit; - - entries = ppd->ccti_entries; ppd->total_cct_entry = ccti_limit + 1; - + entries = ppd->ccti_entries; for (j = 0, i = sentry; i < eentry; j++, i++) entries[i].entry = be16_to_cpu(p->ccti_entries[j].entry); - - memcpy(new_cc_state->cct.entries, entries, - eentry * sizeof(struct ib_cc_table_entry)); - - new_cc_state->cong_setting.port_control = IB_CC_CCS_PC_SL_BASED; - new_cc_state->cong_setting.control_map = ppd->cc_sl_control_map; - memcpy(new_cc_state->cong_setting.entries, ppd->congestion_entries, - OPA_MAX_SLS * sizeof(struct opa_congestion_setting_entry)); - - rcu_assign_pointer(ppd->cc_state, new_cc_state); - spin_unlock(&ppd->cc_state_lock); - call_rcu(&old_cc_state->rcu, cc_state_reclaim); + /* now apply the information */ + apply_cc_state(ppd); -getit: return __subn_get_opa_cc_table(smp, am, data, ibdev, port, resp_len); } diff --git a/drivers/staging/rdma/hfi1/mad.h b/drivers/infiniband/hw/hfi1/mad.h index 55ee08675333..55ee08675333 100644 --- a/drivers/staging/rdma/hfi1/mad.h +++ b/drivers/infiniband/hw/hfi1/mad.h diff --git a/drivers/staging/rdma/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c index 2b0e91d3093d..b7a80aa1ae30 100644 --- a/drivers/staging/rdma/hfi1/mmu_rb.c +++ b/drivers/infiniband/hw/hfi1/mmu_rb.c @@ -45,6 +45,7 @@ * */ #include <linux/list.h> +#include <linux/rculist.h> #include <linux/mmu_notifier.h> #include <linux/interval_tree_generic.h> @@ -97,7 +98,6 @@ static unsigned long mmu_node_last(struct mmu_rb_node *node) int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops) { struct mmu_rb_handler *handlr; - unsigned long flags; if (!ops->invalidate) return -EINVAL; @@ -111,9 +111,9 @@ int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops) INIT_HLIST_NODE(&handlr->mn.hlist); spin_lock_init(&handlr->lock); handlr->mn.ops = &mn_opts; - spin_lock_irqsave(&mmu_rb_lock, flags); - list_add_tail(&handlr->list, &mmu_rb_handlers); - spin_unlock_irqrestore(&mmu_rb_lock, flags); + spin_lock(&mmu_rb_lock); + list_add_tail_rcu(&handlr->list, &mmu_rb_handlers); + spin_unlock(&mmu_rb_lock); return mmu_notifier_register(&handlr->mn, current->mm); } @@ -130,9 
+130,10 @@ void hfi1_mmu_rb_unregister(struct rb_root *root) if (current->mm) mmu_notifier_unregister(&handler->mn, current->mm); - spin_lock_irqsave(&mmu_rb_lock, flags); - list_del(&handler->list); - spin_unlock_irqrestore(&mmu_rb_lock, flags); + spin_lock(&mmu_rb_lock); + list_del_rcu(&handler->list); + spin_unlock(&mmu_rb_lock); + synchronize_rcu(); spin_lock_irqsave(&handler->lock, flags); if (!RB_EMPTY_ROOT(root)) { @@ -271,16 +272,15 @@ void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node) static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root) { struct mmu_rb_handler *handler; - unsigned long flags; - spin_lock_irqsave(&mmu_rb_lock, flags); - list_for_each_entry(handler, &mmu_rb_handlers, list) { + rcu_read_lock(); + list_for_each_entry_rcu(handler, &mmu_rb_handlers, list) { if (handler->root == root) goto unlock; } handler = NULL; unlock: - spin_unlock_irqrestore(&mmu_rb_lock, flags); + rcu_read_unlock(); return handler; } diff --git a/drivers/staging/rdma/hfi1/mmu_rb.h b/drivers/infiniband/hw/hfi1/mmu_rb.h index 7a57b9c49d27..7a57b9c49d27 100644 --- a/drivers/staging/rdma/hfi1/mmu_rb.h +++ b/drivers/infiniband/hw/hfi1/mmu_rb.h diff --git a/drivers/staging/rdma/hfi1/opa_compat.h b/drivers/infiniband/hw/hfi1/opa_compat.h index 6ef3c1cbdcd7..6ef3c1cbdcd7 100644 --- a/drivers/staging/rdma/hfi1/opa_compat.h +++ b/drivers/infiniband/hw/hfi1/opa_compat.h diff --git a/drivers/staging/rdma/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index 0bac21e6a658..0bac21e6a658 100644 --- a/drivers/staging/rdma/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c diff --git a/drivers/staging/rdma/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c index c67b9ad3fcf4..d5edb1afbb8f 100644 --- a/drivers/staging/rdma/hfi1/pio.c +++ b/drivers/infiniband/hw/hfi1/pio.c @@ -1835,8 +1835,7 @@ int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts) struct pio_vl_map *oldmap, *newmap; if (!vl_scontexts) { - /* send context 0 reserved for VL15 */ - for (i = 1; i < dd->num_send_contexts; i++) + for (i = 0; i < dd->num_send_contexts; i++) if (dd->send_contexts[i].type == SC_KERNEL) num_kernel_send_contexts++; /* truncate divide */ diff --git a/drivers/staging/rdma/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h index 53a08edb7f64..464cbd27b975 100644 --- a/drivers/staging/rdma/hfi1/pio.h +++ b/drivers/infiniband/hw/hfi1/pio.h @@ -49,10 +49,10 @@ /* send context types */ #define SC_KERNEL 0 -#define SC_ACK 1 -#define SC_USER 2 -#define SC_VL15 3 -#define SC_MAX 4 +#define SC_VL15 1 +#define SC_ACK 2 +#define SC_USER 3 /* must be the last one: it may take all left */ +#define SC_MAX 4 /* count of send context types */ /* invalid send context index */ #define INVALID_SCI 0xff diff --git a/drivers/staging/rdma/hfi1/pio_copy.c b/drivers/infiniband/hw/hfi1/pio_copy.c index 8c25e1b58849..8c25e1b58849 100644 --- a/drivers/staging/rdma/hfi1/pio_copy.c +++ b/drivers/infiniband/hw/hfi1/pio_copy.c diff --git a/drivers/staging/rdma/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c index 8fe8a205b5bb..03df9322f862 100644 --- a/drivers/staging/rdma/hfi1/platform.c +++ b/drivers/infiniband/hw/hfi1/platform.c @@ -87,6 +87,17 @@ void free_platform_config(struct hfi1_devdata *dd) */ } +void get_port_type(struct hfi1_pportdata *ppd) +{ + int ret; + + ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0, + PORT_TABLE_PORT_TYPE, &ppd->port_type, + 4); + if (ret) + ppd->port_type = PORT_TYPE_UNKNOWN; +} + int set_qsfp_tx(struct hfi1_pportdata *ppd, int on) { u8 
tx_ctrl_byte = on ? 0x0 : 0xF; @@ -529,7 +540,8 @@ static void apply_tunings( /* Enable external device config if channel is limiting active */ read_8051_config(ppd->dd, LINK_OPTIMIZATION_SETTINGS, GENERAL_CONFIG, &config_data); - config_data |= limiting_active; + config_data &= ~(0xff << ENABLE_EXT_DEV_CONFIG_SHIFT); + config_data |= ((u32)limiting_active << ENABLE_EXT_DEV_CONFIG_SHIFT); ret = load_8051_config(ppd->dd, LINK_OPTIMIZATION_SETTINGS, GENERAL_CONFIG, config_data); if (ret != HCMD_SUCCESS) @@ -542,7 +554,8 @@ static void apply_tunings( /* Pass tuning method to 8051 */ read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG, &config_data); - config_data |= tuning_method; + config_data &= ~(0xff << TUNING_METHOD_SHIFT); + config_data |= ((u32)tuning_method << TUNING_METHOD_SHIFT); ret = load_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG, config_data); if (ret != HCMD_SUCCESS) @@ -564,8 +577,8 @@ static void apply_tunings( ret = read_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS, GENERAL_CONFIG, &config_data); /* Clear, then set the external device config field */ - config_data &= ~(0xFF << 24); - config_data |= (external_device_config << 24); + config_data &= ~(u32)0xFF; + config_data |= external_device_config; ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS, GENERAL_CONFIG, config_data); if (ret != HCMD_SUCCESS) @@ -784,12 +797,6 @@ void tune_serdes(struct hfi1_pportdata *ppd) return; } - ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0, - PORT_TABLE_PORT_TYPE, &ppd->port_type, - 4); - if (ret) - ppd->port_type = PORT_TYPE_UNKNOWN; - switch (ppd->port_type) { case PORT_TYPE_DISCONNECTED: ppd->offline_disabled_reason = diff --git a/drivers/staging/rdma/hfi1/platform.h b/drivers/infiniband/hw/hfi1/platform.h index 19620cf546d5..e2c21613c326 100644 --- a/drivers/staging/rdma/hfi1/platform.h +++ b/drivers/infiniband/hw/hfi1/platform.h @@ -298,6 +298,7 @@ enum link_tuning_encoding { /* platform.c */ void get_platform_config(struct hfi1_devdata *dd); void free_platform_config(struct hfi1_devdata *dd); +void get_port_type(struct hfi1_pportdata *ppd); int set_qsfp_tx(struct hfi1_pportdata *ppd, int on); void tune_serdes(struct hfi1_pportdata *ppd); diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c index 91eb42316df9..1a942ffba4cb 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/infiniband/hw/hfi1/qp.c @@ -49,7 +49,6 @@ #include <linux/vmalloc.h> #include <linux/hash.h> #include <linux/module.h> -#include <linux/random.h> #include <linux/seq_file.h> #include <rdma/rdma_vt.h> #include <rdma/rdmavt_qp.h> @@ -161,9 +160,6 @@ static inline int opa_mtu_enum_to_int(int mtu) * This function is what we would push to the core layer if we wanted to be a * "first class citizen". Instead we hide this here and rely on Verbs ULPs * to blindly pass the MTU enum value from the PathRecord to us. - * - * The actual flag used to determine "8k MTU" will change and is currently - * unknown. */ static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu) { @@ -516,6 +512,7 @@ static void iowait_wakeup(struct iowait *wait, int reason) static void iowait_sdma_drained(struct iowait *wait) { struct rvt_qp *qp = iowait_to_qp(wait); + unsigned long flags; /* * This happens when the send engine notes @@ -523,12 +520,12 @@ static void iowait_sdma_drained(struct iowait *wait) * do the flush work until that QP's * sdma work has finished. 
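/*
 * The qp.c hunk below switches iowait_sdma_drained() from spin_lock()
 * to spin_lock_irqsave(): the rule of thumb is that a lock which can
 * also be taken from interrupt context must be acquired with IRQs
 * disabled in process context, otherwise an interrupt arriving on the
 * same CPU can try to re-acquire the held lock and deadlock. Generic
 * sketch (obj and its lock are illustrative):
 */
unsigned long flags;

spin_lock_irqsave(&obj->lock, flags);	/* IRQs off on this CPU while held */
/* ... touch state shared with the interrupt path ... */
spin_unlock_irqrestore(&obj->lock, flags);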
*/ - spin_lock(&qp->s_lock); + spin_lock_irqsave(&qp->s_lock, flags); if (qp->s_flags & RVT_S_WAIT_DMA) { qp->s_flags &= ~RVT_S_WAIT_DMA; hfi1_schedule_send(qp); } - spin_unlock(&qp->s_lock); + spin_unlock_irqrestore(&qp->s_lock, flags); } /** diff --git a/drivers/staging/rdma/hfi1/qp.h b/drivers/infiniband/hw/hfi1/qp.h index e7bc8d6cf681..e7bc8d6cf681 100644 --- a/drivers/staging/rdma/hfi1/qp.h +++ b/drivers/infiniband/hw/hfi1/qp.h diff --git a/drivers/staging/rdma/hfi1/qsfp.c b/drivers/infiniband/hw/hfi1/qsfp.c index 2441669f0817..2441669f0817 100644 --- a/drivers/staging/rdma/hfi1/qsfp.c +++ b/drivers/infiniband/hw/hfi1/qsfp.c diff --git a/drivers/staging/rdma/hfi1/qsfp.h b/drivers/infiniband/hw/hfi1/qsfp.h index dadc66c442b9..dadc66c442b9 100644 --- a/drivers/staging/rdma/hfi1/qsfp.h +++ b/drivers/infiniband/hw/hfi1/qsfp.h diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c index 792f15eb8efe..792f15eb8efe 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/infiniband/hw/hfi1/rc.c diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c index a659aec3c3c6..a659aec3c3c6 100644 --- a/drivers/staging/rdma/hfi1/ruc.c +++ b/drivers/infiniband/hw/hfi1/ruc.c diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c index abb8ebc1fcac..f9befc05b349 100644 --- a/drivers/staging/rdma/hfi1/sdma.c +++ b/drivers/infiniband/hw/hfi1/sdma.c @@ -134,6 +134,7 @@ static const char * const sdma_state_names[] = { [sdma_state_s99_running] = "s99_Running", }; +#ifdef CONFIG_SDMA_VERBOSITY static const char * const sdma_event_names[] = { [sdma_event_e00_go_hw_down] = "e00_GoHwDown", [sdma_event_e10_go_hw_start] = "e10_GoHwStart", @@ -150,6 +151,7 @@ static const char * const sdma_event_names[] = { [sdma_event_e85_link_down] = "e85_LinkDown", [sdma_event_e90_sw_halted] = "e90_SwHalted", }; +#endif static const struct sdma_set_state_action sdma_action_table[] = { [sdma_state_s00_hw_down] = { @@ -376,7 +378,7 @@ static inline void complete_tx(struct sdma_engine *sde, sdma_txclean(sde->dd, tx); if (complete) (*complete)(tx, res); - if (iowait_sdma_dec(wait) && wait) + if (wait && iowait_sdma_dec(wait)) iowait_drain_wakeup(wait); } diff --git a/drivers/staging/rdma/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h index 8f50c99fe711..8f50c99fe711 100644 --- a/drivers/staging/rdma/hfi1/sdma.h +++ b/drivers/infiniband/hw/hfi1/sdma.h diff --git a/drivers/staging/rdma/hfi1/sdma_txreq.h b/drivers/infiniband/hw/hfi1/sdma_txreq.h index bf7d777d756e..bf7d777d756e 100644 --- a/drivers/staging/rdma/hfi1/sdma_txreq.h +++ b/drivers/infiniband/hw/hfi1/sdma_txreq.h diff --git a/drivers/staging/rdma/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c index 8cd6df8634ad..91fc2aed6aed 100644 --- a/drivers/staging/rdma/hfi1/sysfs.c +++ b/drivers/infiniband/hw/hfi1/sysfs.c @@ -721,8 +721,8 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num, } dd_dev_info(dd, - "IB%u: Congestion Control Agent enabled for port %d\n", - dd->unit, port_num); + "Congestion Control Agent enabled for port %d\n", + port_num); return 0; diff --git a/drivers/staging/rdma/hfi1/trace.c b/drivers/infiniband/hw/hfi1/trace.c index 8b62fefcf903..79b2952c0dfb 100644 --- a/drivers/staging/rdma/hfi1/trace.c +++ b/drivers/infiniband/hw/hfi1/trace.c @@ -66,6 +66,7 @@ u8 ibhdr_exhdr_len(struct hfi1_ib_header *hdr) #define RETH_PRN "reth vaddr 0x%.16llx rkey 0x%.8x dlen 0x%.8x" #define AETH_PRN "aeth syn 0x%.2x %s msn 0x%.8x" #define DETH_PRN "deth qkey 0x%.8x sqpn 0x%.6x" +#define 
IETH_PRN "ieth rkey 0x%.8x" #define ATOMICACKETH_PRN "origdata %lld" #define ATOMICETH_PRN "vaddr 0x%llx rkey 0x%.8x sdata %lld cdata %lld" @@ -166,6 +167,12 @@ const char *parse_everbs_hdrs( be32_to_cpu(eh->ud.deth[0]), be32_to_cpu(eh->ud.deth[1]) & RVT_QPN_MASK); break; + /* ieth */ + case OP(RC, SEND_LAST_WITH_INVALIDATE): + case OP(RC, SEND_ONLY_WITH_INVALIDATE): + trace_seq_printf(p, IETH_PRN, + be32_to_cpu(eh->ieth)); + break; } trace_seq_putc(p, 0); return ret; @@ -233,3 +240,4 @@ __hfi1_trace_fn(FIRMWARE); __hfi1_trace_fn(RCVCTRL); __hfi1_trace_fn(TID); __hfi1_trace_fn(MMU); +__hfi1_trace_fn(IOCTL); diff --git a/drivers/staging/rdma/hfi1/trace.h b/drivers/infiniband/hw/hfi1/trace.h index 963dc948c38a..28c1d0832886 100644 --- a/drivers/staging/rdma/hfi1/trace.h +++ b/drivers/infiniband/hw/hfi1/trace.h @@ -74,8 +74,8 @@ __print_symbolic(etype, \ TRACE_EVENT(hfi1_rcvhdr, TP_PROTO(struct hfi1_devdata *dd, - u64 eflags, u32 ctxt, + u64 eflags, u32 etype, u32 hlen, u32 tlen, @@ -392,6 +392,8 @@ __print_symbolic(opcode, \ ib_opcode_name(RC_ATOMIC_ACKNOWLEDGE), \ ib_opcode_name(RC_COMPARE_SWAP), \ ib_opcode_name(RC_FETCH_ADD), \ + ib_opcode_name(RC_SEND_LAST_WITH_INVALIDATE), \ + ib_opcode_name(RC_SEND_ONLY_WITH_INVALIDATE), \ ib_opcode_name(UC_SEND_FIRST), \ ib_opcode_name(UC_SEND_MIDDLE), \ ib_opcode_name(UC_SEND_LAST), \ @@ -1341,6 +1343,7 @@ __hfi1_trace_def(FIRMWARE); __hfi1_trace_def(RCVCTRL); __hfi1_trace_def(TID); __hfi1_trace_def(MMU); +__hfi1_trace_def(IOCTL); #define hfi1_cdbg(which, fmt, ...) \ __hfi1_trace_##which(__func__, fmt, ##__VA_ARGS__) diff --git a/drivers/staging/rdma/hfi1/twsi.c b/drivers/infiniband/hw/hfi1/twsi.c index e82e52a63d35..e82e52a63d35 100644 --- a/drivers/staging/rdma/hfi1/twsi.c +++ b/drivers/infiniband/hw/hfi1/twsi.c diff --git a/drivers/staging/rdma/hfi1/twsi.h b/drivers/infiniband/hw/hfi1/twsi.h index 5b8a5b5e7eae..5b8a5b5e7eae 100644 --- a/drivers/staging/rdma/hfi1/twsi.h +++ b/drivers/infiniband/hw/hfi1/twsi.h diff --git a/drivers/staging/rdma/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c index df773d433297..df773d433297 100644 --- a/drivers/staging/rdma/hfi1/uc.c +++ b/drivers/infiniband/hw/hfi1/uc.c diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c index 1e503ad0bebb..1e503ad0bebb 100644 --- a/drivers/staging/rdma/hfi1/ud.c +++ b/drivers/infiniband/hw/hfi1/ud.c diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c index 1b640a35b3fe..1b640a35b3fe 100644 --- a/drivers/staging/rdma/hfi1/user_exp_rcv.c +++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.h b/drivers/infiniband/hw/hfi1/user_exp_rcv.h index 9bc8d9fba87e..9bc8d9fba87e 100644 --- a/drivers/staging/rdma/hfi1/user_exp_rcv.h +++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.h diff --git a/drivers/staging/rdma/hfi1/user_pages.c b/drivers/infiniband/hw/hfi1/user_pages.c index 88e10b5f55f1..88e10b5f55f1 100644 --- a/drivers/staging/rdma/hfi1/user_pages.c +++ b/drivers/infiniband/hw/hfi1/user_pages.c diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index 0014c9c0e967..29f4795f866c 100644 --- a/drivers/staging/rdma/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c @@ -166,6 +166,8 @@ static unsigned initial_pkt_count = 8; #define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */ +struct sdma_mmu_node; + struct user_sdma_iovec { struct list_head list; struct iovec iov; @@ -178,6 +180,7 @@ struct user_sdma_iovec { * which we last left 
off. */ u64 offset; + struct sdma_mmu_node *node; }; #define SDMA_CACHE_NODE_EVICT BIT(0) @@ -507,6 +510,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec, struct sdma_req_info info; struct user_sdma_request *req; u8 opcode, sc, vl; + int req_queued = 0; if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) { hfi1_cdbg( @@ -703,6 +707,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec, set_comp_state(pq, cq, info.comp_idx, QUEUED, 0); atomic_inc(&pq->n_reqs); + req_queued = 1; /* Send the first N packets in the request to buy us some time */ ret = user_sdma_send_pkts(req, pcount); if (unlikely(ret < 0 && ret != -EBUSY)) { @@ -747,7 +752,8 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec, return 0; free_req: user_sdma_free_request(req, true); - pq_update(pq); + if (req_queued) + pq_update(pq); set_comp_state(pq, cq, info.comp_idx, ERROR, req->status); return ret; } @@ -1153,6 +1159,7 @@ retry: } iovec->pages = node->pages; iovec->npages = npages; + iovec->node = node; ret = hfi1_mmu_rb_insert(&req->pq->sdma_rb_root, &node->rb); if (ret) { @@ -1519,18 +1526,13 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin) } if (req->data_iovs) { struct sdma_mmu_node *node; - struct mmu_rb_node *mnode; int i; for (i = 0; i < req->data_iovs; i++) { - mnode = hfi1_mmu_rb_search( - &req->pq->sdma_rb_root, - (unsigned long)req->iovs[i].iov.iov_base, - req->iovs[i].iov.iov_len); - if (!mnode || IS_ERR(mnode)) + node = req->iovs[i].node; + if (!node) continue; - node = container_of(mnode, struct sdma_mmu_node, rb); if (unpin) hfi1_mmu_rb_remove(&req->pq->sdma_rb_root, &node->rb); diff --git a/drivers/staging/rdma/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h index b9240e351161..b9240e351161 100644 --- a/drivers/staging/rdma/hfi1/user_sdma.h +++ b/drivers/infiniband/hw/hfi1/user_sdma.h diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index 9cdc85fa366f..849c4b9399d4 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c @@ -52,7 +52,6 @@ #include <linux/utsname.h> #include <linux/rculist.h> #include <linux/mm.h> -#include <linux/random.h> #include <linux/vmalloc.h> #include "hfi.h" @@ -336,6 +335,8 @@ const u8 hdr_len_by_opcode[256] = { [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = 12 + 8 + 4, [IB_OPCODE_RC_COMPARE_SWAP] = 12 + 8 + 28, [IB_OPCODE_RC_FETCH_ADD] = 12 + 8 + 28, + [IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = 12 + 8 + 4, + [IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = 12 + 8 + 4, /* UC */ [IB_OPCODE_UC_SEND_FIRST] = 12 + 8, [IB_OPCODE_UC_SEND_MIDDLE] = 12 + 8, @@ -946,7 +947,6 @@ static int pio_wait(struct rvt_qp *qp, dev->n_piowait += !!(flag & RVT_S_WAIT_PIO); dev->n_piodrain += !!(flag & RVT_S_WAIT_PIO_DRAIN); - dev->n_piowait++; qp->s_flags |= flag; was_empty = list_empty(&sc->piowait); list_add_tail(&priv->s_iowait.list, &sc->piowait); diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h index 3ee223983b20..488356775627 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/infiniband/hw/hfi1/verbs.h @@ -152,6 +152,7 @@ union ib_ehdrs { } at; __be32 imm_data; __be32 aeth; + __be32 ieth; struct ib_atomic_eth atomic_eth; } __packed; diff --git a/drivers/staging/rdma/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c index bc95c4112c61..bc95c4112c61 100644 --- a/drivers/staging/rdma/hfi1/verbs_txreq.c +++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c diff --git 
a/drivers/staging/rdma/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h index 1cf69b2fe4a5..1cf69b2fe4a5 100644 --- a/drivers/staging/rdma/hfi1/verbs_txreq.h +++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 4a740f7a0519..02a735b64208 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -2361,58 +2361,130 @@ static int i40iw_port_immutable(struct ib_device *ibdev, u8 port_num, return 0; } +static const char * const i40iw_hw_stat_names[] = { + // 32bit names + [I40IW_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards", + [I40IW_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts", + [I40IW_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes", + [I40IW_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards", + [I40IW_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts", + [I40IW_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes", + [I40IW_HW_STAT_INDEX_TCPRTXSEG] = "tcpRetransSegs", + [I40IW_HW_STAT_INDEX_TCPRXOPTERR] = "tcpInOptErrors", + [I40IW_HW_STAT_INDEX_TCPRXPROTOERR] = "tcpInProtoErrors", + // 64bit names + [I40IW_HW_STAT_INDEX_IP4RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] = + "ip4InOctets", + [I40IW_HW_STAT_INDEX_IP4RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] = + "ip4InPkts", + [I40IW_HW_STAT_INDEX_IP4RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] = + "ip4InReasmRqd", + [I40IW_HW_STAT_INDEX_IP4RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] = + "ip4InMcastPkts", + [I40IW_HW_STAT_INDEX_IP4TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] = + "ip4OutOctets", + [I40IW_HW_STAT_INDEX_IP4TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] = + "ip4OutPkts", + [I40IW_HW_STAT_INDEX_IP4TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] = + "ip4OutSegRqd", + [I40IW_HW_STAT_INDEX_IP4TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] = + "ip4OutMcastPkts", + [I40IW_HW_STAT_INDEX_IP6RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] = + "ip6InOctets", + [I40IW_HW_STAT_INDEX_IP6RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] = + "ip6InPkts", + [I40IW_HW_STAT_INDEX_IP6RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] = + "ip6InReasmRqd", + [I40IW_HW_STAT_INDEX_IP6RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] = + "ip6InMcastPkts", + [I40IW_HW_STAT_INDEX_IP6TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] = + "ip6OutOctets", + [I40IW_HW_STAT_INDEX_IP6TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] = + "ip6OutPkts", + [I40IW_HW_STAT_INDEX_IP6TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] = + "ip6OutSegRqd", + [I40IW_HW_STAT_INDEX_IP6TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] = + "ip6OutMcastPkts", + [I40IW_HW_STAT_INDEX_TCPRXSEGS + I40IW_HW_STAT_INDEX_MAX_32] = + "tcpInSegs", + [I40IW_HW_STAT_INDEX_TCPTXSEG + I40IW_HW_STAT_INDEX_MAX_32] = + "tcpOutSegs", + [I40IW_HW_STAT_INDEX_RDMARXRDS + I40IW_HW_STAT_INDEX_MAX_32] = + "iwInRdmaReads", + [I40IW_HW_STAT_INDEX_RDMARXSNDS + I40IW_HW_STAT_INDEX_MAX_32] = + "iwInRdmaSends", + [I40IW_HW_STAT_INDEX_RDMARXWRS + I40IW_HW_STAT_INDEX_MAX_32] = + "iwInRdmaWrites", + [I40IW_HW_STAT_INDEX_RDMATXRDS + I40IW_HW_STAT_INDEX_MAX_32] = + "iwOutRdmaReads", + [I40IW_HW_STAT_INDEX_RDMATXSNDS + I40IW_HW_STAT_INDEX_MAX_32] = + "iwOutRdmaSends", + [I40IW_HW_STAT_INDEX_RDMATXWRS + I40IW_HW_STAT_INDEX_MAX_32] = + "iwOutRdmaWrites", + [I40IW_HW_STAT_INDEX_RDMAVBND + I40IW_HW_STAT_INDEX_MAX_32] = + "iwRdmaBnd", + [I40IW_HW_STAT_INDEX_RDMAVINV + I40IW_HW_STAT_INDEX_MAX_32] = + "iwRdmaInv" +}; + /** - * i40iw_get_protocol_stats - Populates the rdma_stats structure - * @ibdev: ib dev struct - * @stats: iw protocol stats struct + * i40iw_alloc_hw_stats - Allocate a hw stats structure + * @ibdev: device pointer 
from stack + * @port_num: port number */ -static int i40iw_get_protocol_stats(struct ib_device *ibdev, - union rdma_protocol_stats *stats) +static struct rdma_hw_stats *i40iw_alloc_hw_stats(struct ib_device *ibdev, + u8 port_num) +{ + struct i40iw_device *iwdev = to_iwdev(ibdev); + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + int num_counters = I40IW_HW_STAT_INDEX_MAX_32 + + I40IW_HW_STAT_INDEX_MAX_64; + unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN; + + BUILD_BUG_ON(ARRAY_SIZE(i40iw_hw_stat_names) != + (I40IW_HW_STAT_INDEX_MAX_32 + + I40IW_HW_STAT_INDEX_MAX_64)); + + /* + * PFs get the default update lifespan, but VFs only update once + * per second + */ + if (!dev->is_pf) + lifespan = 1000; + return rdma_alloc_hw_stats_struct(i40iw_hw_stat_names, num_counters, + lifespan); +} + +/** + * i40iw_get_hw_stats - Populates the rdma_hw_stats structure + * @ibdev: device pointer from stack + * @stats: stats pointer from stack + * @port_num: port number + * @index: which hw counter the stack is requesting we update + */ +static int i40iw_get_hw_stats(struct ib_device *ibdev, + struct rdma_hw_stats *stats, + u8 port_num, int index) { struct i40iw_device *iwdev = to_iwdev(ibdev); struct i40iw_sc_dev *dev = &iwdev->sc_dev; struct i40iw_dev_pestat *devstat = &dev->dev_pestat; struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats; - struct timespec curr_time; - static struct timespec last_rd_time = {0, 0}; unsigned long flags; - curr_time = current_kernel_time(); - memset(stats, 0, sizeof(*stats)); - if (dev->is_pf) { spin_lock_irqsave(&devstat->stats_lock, flags); devstat->ops.iw_hw_stat_read_all(devstat, &devstat->hw_stats); spin_unlock_irqrestore(&devstat->stats_lock, flags); } else { - if (((u64)curr_time.tv_sec - (u64)last_rd_time.tv_sec) > 1) - if (i40iw_vchnl_vf_get_pe_stats(dev, &devstat->hw_stats)) - return -ENOSYS; + if (i40iw_vchnl_vf_get_pe_stats(dev, &devstat->hw_stats)) + return -ENOSYS; } - stats->iw.ipInReceives = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] + - hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6RXPKTS]; - stats->iw.ipInTruncatedPkts = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] + - hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC]; - stats->iw.ipInDiscards = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] + - hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD]; - stats->iw.ipOutNoRoutes = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] + - hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE]; - stats->iw.ipReasmReqds = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] + - hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS]; - stats->iw.ipFragCreates = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] + - hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS]; - stats->iw.ipInMcastPkts = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] + - hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS]; - stats->iw.ipOutMcastPkts = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] + - hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6TXMCPKTS]; - stats->iw.tcpOutSegs = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_TCPTXSEG]; - stats->iw.tcpInSegs = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_TCPRXSEGS]; - stats->iw.tcpRetransSegs = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_TCPRTXSEG]; - - last_rd_time = curr_time; - return 0; + memcpy(&stats->value[0], &hw_stats, sizeof(*hw_stats)); + + return stats->num_counters; } /** @@ -2551,7 +2623,8 @@ static struct i40iw_ib_device 
*i40iw_init_rdma_device(struct i40iw_device *iwdev iwibdev->ibdev.get_dma_mr = i40iw_get_dma_mr; iwibdev->ibdev.reg_user_mr = i40iw_reg_user_mr; iwibdev->ibdev.dereg_mr = i40iw_dereg_mr; - iwibdev->ibdev.get_protocol_stats = i40iw_get_protocol_stats; + iwibdev->ibdev.alloc_hw_stats = i40iw_alloc_hw_stats; + iwibdev->ibdev.get_hw_stats = i40iw_get_hw_stats; iwibdev->ibdev.query_device = i40iw_query_device; iwibdev->ibdev.create_ah = i40iw_create_ah; iwibdev->ibdev.destroy_ah = i40iw_destroy_ah; diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 82d7c4bf5970..ce4034071f9c 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -1308,21 +1308,6 @@ static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = { SYM_LSB(IntMask, fldname##17IntMask)), \ .msg = #fldname "_C", .sz = sizeof(#fldname "_C") } -static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = { - INTR_AUTO_P(SDmaInt), - INTR_AUTO_P(SDmaProgressInt), - INTR_AUTO_P(SDmaIdleInt), - INTR_AUTO_P(SDmaCleanupDone), - INTR_AUTO_C(RcvUrg), - INTR_AUTO_P(ErrInt), - INTR_AUTO(ErrInt), /* non-port-specific errs */ - INTR_AUTO(AssertGPIOInt), - INTR_AUTO_P(SendDoneInt), - INTR_AUTO(SendBufAvailInt), - INTR_AUTO_C(RcvAvail), - { .mask = 0, .sz = 0 } -}; - #define TXSYMPTOM_AUTO_P(fldname) \ { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \ .msg = #fldname, .sz = sizeof(#fldname) } diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c index 0bd18375d7df..d2ac29861af5 100644 --- a/drivers/infiniband/hw/qib/qib_mad.c +++ b/drivers/infiniband/hw/qib/qib_mad.c @@ -1172,11 +1172,13 @@ static int pma_get_classportinfo(struct ib_pma_mad *pmp, * Set the most significant bit of CM2 to indicate support for * congestion statistics */ - p->reserved[0] = dd->psxmitwait_supported << 7; + ib_set_cpi_capmask2(p, + dd->psxmitwait_supported << + (31 - IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE)); /* * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec. 
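/*
 * Worked example for the response-time comment above: the class port
 * info RespTimeValue field v encodes t = 4.096 us * 2^v, so v = 18
 * gives 4.096e-6 s * 262144 = 1.073741824 s, matching the comment.
 * Sketch of the conversion; the helper name is hypothetical:
 */
static u64 cpi_resp_time_ns(u8 v)
{
	return 4096ULL << v;	/* 4.096 us == 4096 ns, scaled by 2^v */
}
/* cpi_resp_time_ns(18) == 1073741824 ns */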
*/ - p->resp_time_value = 18; + ib_set_cpi_resp_time(p, 18); return reply((struct ib_smp *) pmp); } diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h index 6888f03c6d61..4f878151f81f 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.h +++ b/drivers/infiniband/hw/qib/qib_verbs.h @@ -159,6 +159,7 @@ struct qib_other_headers { } at; __be32 imm_data; __be32 aeth; + __be32 ieth; struct ib_atomic_eth atomic_eth; } u; } __packed; diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c index b1ffc8b4a6c0..6ca6fa80dd6e 100644 --- a/drivers/infiniband/sw/rdmavt/cq.c +++ b/drivers/infiniband/sw/rdmavt/cq.c @@ -525,6 +525,7 @@ int rvt_driver_cq_init(struct rvt_dev_info *rdi) return PTR_ERR(task); } + set_user_nice(task, MIN_NICE); cpu = cpumask_first(cpumask_of_node(rdi->dparms.node)); kthread_bind(task, cpu); wake_up_process(task); diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c index 0ff765bfd619..0f4d4500f45e 100644 --- a/drivers/infiniband/sw/rdmavt/mr.c +++ b/drivers/infiniband/sw/rdmavt/mr.c @@ -124,11 +124,13 @@ static int rvt_init_mregion(struct rvt_mregion *mr, struct ib_pd *pd, int count) { int m, i = 0; + struct rvt_dev_info *dev = ib_to_rvt(pd->device); mr->mapsz = 0; m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ; for (; i < m; i++) { - mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL); + mr->map[i] = kzalloc_node(sizeof(*mr->map[0]), GFP_KERNEL, + dev->dparms.node); if (!mr->map[i]) { rvt_deinit_mregion(mr); return -ENOMEM; diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 0f12c211c385..5fa4d4d81ee0 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -397,6 +397,7 @@ static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn) static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends) { unsigned n; + struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) rvt_put_ss(&qp->s_rdma_read_sge); @@ -431,7 +432,7 @@ static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends) if (qp->ibqp.qp_type != IB_QPT_RC) return; - for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) { + for (n = 0; n < rvt_max_atomic(rdi); n++) { struct rvt_ack_entry *e = &qp->s_ack_queue[n]; if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST && @@ -569,7 +570,12 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, qp->s_ssn = 1; qp->s_lsn = 0; qp->s_mig_state = IB_MIG_MIGRATED; - memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue)); + if (qp->s_ack_queue) + memset( + qp->s_ack_queue, + 0, + rvt_max_atomic(rdi) * + sizeof(*qp->s_ack_queue)); qp->r_head_ack_queue = 0; qp->s_tail_ack_queue = 0; qp->s_num_rd_atomic = 0; @@ -653,9 +659,9 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, if (gfp == GFP_NOIO) swq = __vmalloc( (init_attr->cap.max_send_wr + 1) * sz, - gfp, PAGE_KERNEL); + gfp | __GFP_ZERO, PAGE_KERNEL); else - swq = vmalloc_node( + swq = vzalloc_node( (init_attr->cap.max_send_wr + 1) * sz, rdi->dparms.node); if (!swq) @@ -677,6 +683,16 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, goto bail_swq; RCU_INIT_POINTER(qp->next, NULL); + if (init_attr->qp_type == IB_QPT_RC) { + qp->s_ack_queue = + kzalloc_node( + sizeof(*qp->s_ack_queue) * + rvt_max_atomic(rdi), + gfp, + rdi->dparms.node); + if (!qp->s_ack_queue) + goto bail_qp; + } /* * Driver needs to set up it's private QP structure and do any @@ -704,9 +720,9 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, 
qp->r_rq.wq = __vmalloc( sizeof(struct rvt_rwq) + qp->r_rq.size * sz, - gfp, PAGE_KERNEL); + gfp | __GFP_ZERO, PAGE_KERNEL); else - qp->r_rq.wq = vmalloc_node( + qp->r_rq.wq = vzalloc_node( sizeof(struct rvt_rwq) + qp->r_rq.size * sz, rdi->dparms.node); @@ -857,6 +873,7 @@ bail_driver_priv: rdi->driver_f.qp_priv_free(rdi, qp); bail_qp: + kfree(qp->s_ack_queue); kfree(qp); bail_swq: @@ -1284,6 +1301,7 @@ int rvt_destroy_qp(struct ib_qp *ibqp) vfree(qp->r_rq.wq); vfree(qp->s_wq); rdi->driver_f.qp_priv_free(rdi, qp); + kfree(qp->s_ack_queue); kfree(qp); return 0; } diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index caec8e9c4666..bab7db6fa9ab 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -92,6 +92,8 @@ enum { IPOIB_FLAG_UMCAST = 10, IPOIB_STOP_NEIGH_GC = 11, IPOIB_NEIGH_TBL_FLUSH = 12, + IPOIB_FLAG_DEV_ADDR_SET = 13, + IPOIB_FLAG_DEV_ADDR_CTRL = 14, IPOIB_MAX_BACKOFF_SECONDS = 16, @@ -392,6 +394,7 @@ struct ipoib_dev_priv { struct ipoib_ethtool_st ethtool; struct timer_list poll_timer; unsigned max_send_sge; + bool sm_fullmember_sendonly_support; }; struct ipoib_ah { @@ -476,6 +479,7 @@ void ipoib_reap_ah(struct work_struct *work); void ipoib_mark_paths_invalid(struct net_device *dev); void ipoib_flush_paths(struct net_device *dev); +int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv); struct ipoib_dev_priv *ipoib_intf_alloc(const char *format); int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 418e5a1c8744..45c40a17d6a6 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -997,6 +997,106 @@ static inline int update_child_pkey(struct ipoib_dev_priv *priv) return 0; } +/* + * Returns true if the device address of the ipoib interface has changed and the + * new address is a valid one (i.e., it is in the GID table); returns false otherwise. + */ +static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv) +{ + union ib_gid search_gid; + union ib_gid gid0; + union ib_gid *netdev_gid; + int err; + u16 index; + u8 port; + bool ret = false; + + netdev_gid = (union ib_gid *)(priv->dev->dev_addr + 4); + if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL)) + return false; + + netif_addr_lock(priv->dev); + + /* The subnet prefix may have changed, update it now so we won't have + * to do it later. + */ + priv->local_gid.global.subnet_prefix = gid0.global.subnet_prefix; + netdev_gid->global.subnet_prefix = gid0.global.subnet_prefix; + search_gid.global.subnet_prefix = gid0.global.subnet_prefix; + + search_gid.global.interface_id = priv->local_gid.global.interface_id; + + netif_addr_unlock(priv->dev); + + err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB, + priv->dev, &port, &index); + + netif_addr_lock(priv->dev); + + if (search_gid.global.interface_id != + priv->local_gid.global.interface_id) + /* There was a change while we were looking up the gid, bail + * here and let the next work sort this out. + */ + goto out; + + /* The next section of code needs some background: + * Per IB spec the port GUID can't change if the HCA is powered on. + * The port GUID is the basis for GID at index 0, which is the basis for + * the default device address of an ipoib interface.
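/*
 * Context for the dev_addr + 4 arithmetic in the function above: an
 * IPoIB link-layer address is 20 bytes (INFINIBAND_ALEN). As assumed
 * here (per RFC 4391), the first 4 bytes carry the flags and QPN and
 * the remaining 16 bytes are the port GID, which is why the GID
 * overlay starts at offset 4. Sketch (netdev is any struct net_device
 * carrying an IPoIB address):
 */
/* | flags+QPN (4 bytes) | subnet prefix (8 bytes) | interface ID (8 bytes) | */
union ib_gid *gid = (union ib_gid *)(netdev->dev_addr + 4);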
+ * + * So it seems the flow should be: + * if user_changed_dev_addr && gid in gid tbl + * set bit dev_addr_set + * return true + * else + * return false + * + * The issue is that there are devices that don't follow the spec, + * they change the port GUID when the HCA is powered, so in order + * not to break userspace applications, we need to check if the + * user wanted to control the device address and we assume that + * if he sets the device address back to be based on GID index 0, + * he no longer wishes to control it. + * + * If the user doesn't control the device address, + * IPOIB_FLAG_DEV_ADDR_SET is set, and ib_find_gid failed, it means + * the port GUID has changed and the GID at index 0 has changed, + * so we need to change priv->local_gid and priv->dev->dev_addr + * to reflect the new GID. + */ + if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) { + if (!err && port == priv->port) { + set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); + if (index == 0) + clear_bit(IPOIB_FLAG_DEV_ADDR_CTRL, + &priv->flags); + else + set_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags); + ret = true; + } else { + ret = false; + } + } else { + if (!err && port == priv->port) { + ret = true; + } else { + if (!test_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags)) { + memcpy(&priv->local_gid, &gid0, + sizeof(priv->local_gid)); + memcpy(priv->dev->dev_addr + 4, &gid0, + sizeof(priv->local_gid)); + ret = true; + } + } + } + +out: + netif_addr_unlock(priv->dev); + + return ret; +} + static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, enum ipoib_flush_level level, int nesting) @@ -1018,6 +1118,9 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) && level != IPOIB_FLUSH_HEAVY) { + /* Make sure the dev_addr is set even if not flushing */ + if (level == IPOIB_FLUSH_LIGHT) + ipoib_dev_addr_changed_valid(priv); ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n"); return; } @@ -1029,7 +1132,8 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, update_parent_pkey(priv); else update_child_pkey(priv); - } + } else if (level == IPOIB_FLUSH_LIGHT) + ipoib_dev_addr_changed_valid(priv); ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n"); return; } @@ -1081,7 +1185,8 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) { if (level >= IPOIB_FLUSH_NORMAL) ipoib_ib_dev_up(dev); - ipoib_mcast_restart_task(&priv->restart_task); + if (ipoib_dev_addr_changed_valid(priv)) + ipoib_mcast_restart_task(&priv->restart_task); } } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index b940ef1c19c7..2d7c16346648 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -99,6 +99,7 @@ static struct net_device *ipoib_get_net_dev_by_params( struct ib_device *dev, u8 port, u16 pkey, const union ib_gid *gid, const struct sockaddr *addr, void *client_data); +static int ipoib_set_mac(struct net_device *dev, void *addr); static struct ib_client ipoib_client = { .name = "ipoib", @@ -117,6 +118,8 @@ int ipoib_open(struct net_device *dev) set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); + priv->sm_fullmember_sendonly_support = false; + if (ipoib_ib_dev_open(dev)) { if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) return 0; @@ -629,6 +632,77 @@ void ipoib_mark_paths_invalid(struct net_device *dev) spin_unlock_irq(&priv->lock); } +struct classport_info_context { + struct ipoib_dev_priv
*priv; + struct completion done; + struct ib_sa_query *sa_query; +}; + +static void classport_info_query_cb(int status, struct ib_class_port_info *rec, + void *context) +{ + struct classport_info_context *cb_ctx = context; + struct ipoib_dev_priv *priv; + + WARN_ON(!context); + + priv = cb_ctx->priv; + + if (status || !rec) { + pr_debug("device: %s failed query classport_info status: %d\n", + priv->dev->name, status); + /* keeps the default, will try next mcast_restart */ + priv->sm_fullmember_sendonly_support = false; + goto out; + } + + if (ib_get_cpi_capmask2(rec) & + IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT) { + pr_debug("device: %s enabled fullmember-sendonly for sendonly MCG\n", + priv->dev->name); + priv->sm_fullmember_sendonly_support = true; + } else { + pr_debug("device: %s disabled fullmember-sendonly for sendonly MCG\n", + priv->dev->name); + priv->sm_fullmember_sendonly_support = false; + } + +out: + complete(&cb_ctx->done); +} + +int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv) +{ + struct classport_info_context *callback_context; + int ret; + + callback_context = kmalloc(sizeof(*callback_context), GFP_KERNEL); + if (!callback_context) + return -ENOMEM; + + callback_context->priv = priv; + init_completion(&callback_context->done); + + ret = ib_sa_classport_info_rec_query(&ipoib_sa_client, + priv->ca, priv->port, 3000, + GFP_KERNEL, + classport_info_query_cb, + callback_context, + &callback_context->sa_query); + if (ret < 0) { + pr_info("%s failed to send ib_sa_classport_info query, ret: %d\n", + priv->dev->name, ret); + kfree(callback_context); + return ret; + } + + /* wait for the callback to finish before returning */ + wait_for_completion(&callback_context->done); + kfree(callback_context); + + return ret; +} + void ipoib_flush_paths(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); @@ -1649,6 +1723,7 @@ static const struct net_device_ops ipoib_netdev_ops_pf = { .ndo_get_vf_config = ipoib_get_vf_config, .ndo_get_vf_stats = ipoib_get_vf_stats, .ndo_set_vf_guid = ipoib_set_vf_guid, + .ndo_set_mac_address = ipoib_set_mac, }; static const struct net_device_ops ipoib_netdev_ops_vf = { @@ -1771,6 +1846,70 @@ int ipoib_add_umcast_attr(struct net_device *dev) return device_create_file(&dev->dev, &dev_attr_umcast); } +static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid) +{ + struct ipoib_dev_priv *child_priv; + struct net_device *netdev = priv->dev; + + netif_addr_lock(netdev); + + memcpy(&priv->local_gid.global.interface_id, + &gid->global.interface_id, + sizeof(gid->global.interface_id)); + memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid)); + clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); + + netif_addr_unlock(netdev); + + if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { + down_read(&priv->vlan_rwsem); + list_for_each_entry(child_priv, &priv->child_intfs, list) + set_base_guid(child_priv, gid); + up_read(&priv->vlan_rwsem); + } +} + +static int ipoib_check_lladdr(struct net_device *dev, + struct sockaddr_storage *ss) +{ + union ib_gid *gid = (union ib_gid *)(ss->__data + 4); + int ret = 0; + + netif_addr_lock(dev); + + /* Make sure the QPN, reserved field and subnet prefix match the current + * lladdr; this also makes sure the lladdr is unicast.
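/*
 * ipoib_check_sm_sendonly_fullmember_support() above wraps an
 * asynchronous SA query in a synchronous call with a completion: the
 * caller initializes a completion, the callback complete()s it, and the
 * caller blocks in wait_for_completion(). A generic sketch of the
 * pattern, with hypothetical names:
 */
struct sync_query {
	struct completion done;
	int status;
};

static void query_done_cb(int status, void *context)
{
	struct sync_query *q = context;

	q->status = status;
	complete(&q->done);	/* wake the waiting caller */
}

/*
 * caller side: init_completion(&q->done); start the async query with
 * query_done_cb and q as callback and context; then
 * wait_for_completion(&q->done) and consume q->status.
 */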
+ */ + if (memcmp(dev->dev_addr, ss->__data, + 4 + sizeof(gid->global.subnet_prefix)) || + gid->global.interface_id == 0) + ret = -EINVAL; + + netif_addr_unlock(dev); + + return ret; +} + +static int ipoib_set_mac(struct net_device *dev, void *addr) +{ + struct ipoib_dev_priv *priv = netdev_priv(dev); + struct sockaddr_storage *ss = addr; + int ret; + + if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev)) + return -EBUSY; + + ret = ipoib_check_lladdr(dev, ss); + if (ret) + return ret; + + set_base_guid(priv, (union ib_gid *)(ss->__data + 4)); + + queue_work(ipoib_workqueue, &priv->flush_light); + + return 0; +} + static ssize_t create_child(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -1894,6 +2033,7 @@ static struct net_device *ipoib_add_port(const char *format, goto device_init_failed; } else memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid)); + set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); result = ipoib_dev_init(priv->dev, hca, port); if (result < 0) { diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 25889311b1e9..82fbc9442608 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -64,6 +64,9 @@ struct ipoib_mcast_iter { unsigned int send_only; }; +/* join state that allows creating mcg with sendonly member request */ +#define SENDONLY_FULLMEMBER_JOIN 8 + /* * This should be called with the priv->lock held */ @@ -326,12 +329,23 @@ void ipoib_mcast_carrier_on_task(struct work_struct *work) struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, carrier_on_task); struct ib_port_attr attr; + int ret; if (ib_query_port(priv->ca, priv->port, &attr) || attr.state != IB_PORT_ACTIVE) { ipoib_dbg(priv, "Keeping carrier off until IB port is active\n"); return; } + /* + * Check if we can send send-only MCGs with the sendonly-fullmember join + * state. This is done here, after the successful join to the broadcast + * group, because the broadcast group must always be joined first and is + * always re-joined if the SM changes substantially. + */ + ret = ipoib_check_sm_sendonly_fullmember_support(priv); + if (ret < 0) + pr_debug("%s failed query sm support for sendonly-fullmember (ret: %d)\n", + priv->dev->name, ret); /* * Take rtnl_lock to avoid racing with ipoib_stop() and @@ -515,22 +529,20 @@ static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast) rec.hop_limit = priv->broadcast->mcmember.hop_limit; /* - * Send-only IB Multicast joins do not work at the core - * IB layer yet, so we can't use them here. However, - * we are emulating an Ethernet multicast send, which - * does not require a multicast subscription and will - * still send properly. The most appropriate thing to + * Send-only IB Multicast joins work at the core IB layer but + * require specific SM support. + * We can use such joins here only if the current SM supports that feature. + * However, if not, we emulate an Ethernet multicast send, + * which does not require a multicast subscription and will + * still send properly. The most appropriate thing to * do is to create the group if it doesn't exist as that * most closely emulates the behavior, from a user space - * application perspecitive, of Ethernet multicast - * operation. For now, we do a full join, maybe later - * when the core IB layers support send only joins we - * will use them. + * application perspective, of Ethernet multicast operation.
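/*
 * For reference on the SENDONLY_FULLMEMBER_JOIN value used by the join
 * path here: JoinState in the SA MCMemberRecord is a 4-bit bitmask. A
 * sketch of the encoding as assumed here; the enum and its names are
 * illustrative, not a kernel API:
 */
enum {
	MC_JOIN_FULL_MEMBER          = 1 << 0,	/* the normal join / fallback */
	MC_JOIN_NON_MEMBER           = 1 << 1,
	MC_JOIN_SENDONLY_NON_MEMBER  = 1 << 2,	/* the old "#if 0" join_state = 4 */
	MC_JOIN_SENDONLY_FULL_MEMBER = 1 << 3,	/* SENDONLY_FULLMEMBER_JOIN == 8 */
};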
*/ -#if 0 - if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) - rec.join_state = 4; -#endif + if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) && + priv->sm_fullmember_sendonly_support) + /* SM supports sendonly-fullmember, otherwise fallback to full-member */ + rec.join_state = SENDONLY_FULLMEMBER_JOIN; } spin_unlock_irq(&priv->lock); @@ -570,11 +582,13 @@ void ipoib_mcast_join_task(struct work_struct *work) return; } priv->local_lid = port_attr.lid; + netif_addr_lock(dev); - if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid, NULL)) - ipoib_warn(priv, "ib_query_gid() failed\n"); - else - memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid)); + if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) { + netif_addr_unlock(dev); + return; + } + netif_addr_unlock(dev); spin_lock_irq(&priv->lock); if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c index b809c373e40e..1e7cbbaa15bd 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c @@ -307,5 +307,8 @@ void ipoib_event(struct ib_event_handler *handler, queue_work(ipoib_workqueue, &priv->flush_normal); } else if (record->event == IB_EVENT_PKEY_CHANGE) { queue_work(ipoib_workqueue, &priv->flush_heavy); + } else if (record->event == IB_EVENT_GID_CHANGE && + !test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) { + queue_work(ipoib_workqueue, &priv->flush_light); } } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index fca1a882de27..64a35595eab8 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c @@ -68,6 +68,8 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv, priv->pkey = pkey; memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN); + memcpy(&priv->local_gid, &ppriv->local_gid, sizeof(priv->local_gid)); + set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); priv->dev->broadcast[8] = pkey >> 8; priv->dev->broadcast[9] = pkey & 0xff; diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 897b5a4993e8..a990c04208c9 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -2596,9 +2596,19 @@ static void isert_free_conn(struct iscsi_conn *conn) isert_put_conn(isert_conn); } +static void isert_get_rx_pdu(struct iscsi_conn *conn) +{ + struct completion comp; + + init_completion(&comp); + + wait_for_completion_interruptible(&comp); +} + static struct iscsit_transport iser_target_transport = { .name = "IB/iSER", .transport_type = ISCSI_INFINIBAND, + .rdma_shutdown = true, .priv_size = sizeof(struct isert_cmd), .owner = THIS_MODULE, .iscsit_setup_np = isert_setup_np, @@ -2614,6 +2624,7 @@ static struct iscsit_transport iser_target_transport = { .iscsit_queue_data_in = isert_put_datain, .iscsit_queue_status = isert_put_response, .iscsit_aborted_task = isert_aborted_task, + .iscsit_get_rx_pdu = isert_get_rx_pdu, .iscsit_get_sup_prot_ops = isert_get_sup_prot_ops, }; diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 2843f1ae75bd..e68b20cba70b 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -254,8 +254,8 @@ static void srpt_get_class_port_info(struct ib_dm_mad *mad) memset(cif, 0, sizeof(*cif)); cif->base_version = 1; cif->class_version = 1; - 
cif->resp_time_value = 20; + ib_set_cpi_resp_time(cif, 20); mad->mad_hdr.status = 0; } @@ -1767,14 +1767,6 @@ static void __srpt_close_all_ch(struct srpt_device *sdev) } } -/** - * srpt_shutdown_session() - Whether or not a session may be shut down. - */ -static int srpt_shutdown_session(struct se_session *se_sess) -{ - return 1; -} - static void srpt_free_ch(struct kref *kref) { struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref); @@ -3064,7 +3056,6 @@ static const struct target_core_fabric_ops srpt_template = { .tpg_get_inst_index = srpt_tpg_get_inst_index, .release_cmd = srpt_release_cmd, .check_stop_free = srpt_check_stop_free, - .shutdown_session = srpt_shutdown_session, .close_session = srpt_close_session, .sess_get_index = srpt_sess_get_index, .sess_get_initiator_sid = NULL, diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 1142a93dd90b..804dbcc37d3f 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -87,7 +87,7 @@ #define DRIVER_AUTHOR "Marko Friedemann <mfr@bmx-chemnitz.de>" #define DRIVER_DESC "X-Box pad driver" -#define XPAD_PKT_LEN 32 +#define XPAD_PKT_LEN 64 /* xbox d-pads should map to buttons, as is required for DDR pads but we map them to axes when possible to simplify things */ @@ -129,6 +129,7 @@ static const struct xpad_device { { 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 }, { 0x045e, 0x02d1, "Microsoft X-Box One pad", 0, XTYPE_XBOXONE }, { 0x045e, 0x02dd, "Microsoft X-Box One pad (Firmware 2015)", 0, XTYPE_XBOXONE }, + { 0x045e, 0x02e3, "Microsoft X-Box One Elite pad", 0, XTYPE_XBOXONE }, { 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W }, { 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W }, { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX }, @@ -173,9 +174,11 @@ static const struct xpad_device { { 0x0e6f, 0x0006, "Edge wireless Controller", 0, XTYPE_XBOX }, { 0x0e6f, 0x0105, "HSM3 Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, { 0x0e6f, 0x0113, "Afterglow AX.1 Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, + { 0x0e6f, 0x0139, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0201, "Pelican PL-3601 'TSZ' Wired Xbox 360 Controller", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, + { 0x0e6f, 0x0146, "Rock Candy Wired Controller for Xbox One", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 }, { 0x0e8f, 0x0201, "SmartJoy Frag Xpad/PS2 adaptor", 0, XTYPE_XBOX }, @@ -183,6 +186,7 @@ static const struct xpad_device { { 0x0f0d, 0x000a, "Hori Co. 
DOA4 FightStick", 0, XTYPE_XBOX360 }, { 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x0f0d, 0x0067, "HORIPAD ONE", 0, XTYPE_XBOXONE }, { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX }, { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX }, { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX }, @@ -199,6 +203,7 @@ static const struct xpad_device { { 0x162e, 0xbeef, "Joytech Neo-Se Take2", 0, XTYPE_XBOX360 }, { 0x1689, 0xfd00, "Razer Onza Tournament Edition", 0, XTYPE_XBOX360 }, { 0x1689, 0xfd01, "Razer Onza Classic Edition", 0, XTYPE_XBOX360 }, + { 0x24c6, 0x542a, "Xbox ONE spectra", 0, XTYPE_XBOXONE }, { 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 }, { 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 }, { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, @@ -212,6 +217,8 @@ static const struct xpad_device { { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 }, { 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 }, + { 0x24c6, 0x541a, "PowerA Xbox One Mini Wired Controller", 0, XTYPE_XBOXONE }, + { 0x24c6, 0x543a, "PowerA Xbox One wired controller", 0, XTYPE_XBOXONE }, { 0x24c6, 0x5500, "Hori XBOX 360 EX 2 with Turbo", 0, XTYPE_XBOX360 }, { 0x24c6, 0x5501, "Hori Real Arcade Pro VX-SA", 0, XTYPE_XBOX360 }, { 0x24c6, 0x5506, "Hori SOULCALIBUR V Stick", 0, XTYPE_XBOX360 }, @@ -307,13 +314,16 @@ static struct usb_device_id xpad_table[] = { { USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */ XPAD_XBOXONE_VENDOR(0x0738), /* Mad Catz FightStick TE 2 */ XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */ + XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */ XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */ XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */ XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */ XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */ XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */ + XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */ XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */ XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */ + XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */ XPAD_XBOX360_VENDOR(0x1532), /* Razer Sabertooth */ XPAD_XBOX360_VENDOR(0x15e4), /* Numark X-Box 360 controllers */ XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */ @@ -457,6 +467,10 @@ static void xpad_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *d static void xpad360_process_packet(struct usb_xpad *xpad, struct input_dev *dev, u16 cmd, unsigned char *data) { + /* valid pad data */ + if (data[0] != 0x00) + return; + /* digital pad */ if (xpad->mapping & MAP_DPAD_TO_BUTTONS) { /* dpad as buttons (left, right, up, down) */ @@ -756,6 +770,7 @@ static bool xpad_prepare_next_out_packet(struct usb_xpad *xpad) if (packet) { memcpy(xpad->odata, packet->data, packet->len); xpad->irq_out->transfer_buffer_length = packet->len; + packet->pending = false; return true; } @@ -797,7 +812,6 @@ static void xpad_irq_out(struct urb *urb) switch (status) { case 0: /* success */ - xpad->out_packets[xpad->last_out_packet].pending = false; xpad->irq_out_active = xpad_prepare_next_out_packet(xpad); break; diff --git 
a/drivers/input/misc/max77693-haptic.c b/drivers/input/misc/max77693-haptic.c index 6d96bff32a0e..29ddeb7be84b 100644 --- a/drivers/input/misc/max77693-haptic.c +++ b/drivers/input/misc/max77693-haptic.c @@ -70,10 +70,13 @@ struct max77693_haptic { static int max77693_haptic_set_duty_cycle(struct max77693_haptic *haptic) { - int delta = (haptic->pwm_dev->period + haptic->pwm_duty) / 2; + struct pwm_args pargs; + int delta; int error; - error = pwm_config(haptic->pwm_dev, delta, haptic->pwm_dev->period); + pwm_get_args(haptic->pwm_dev, &pargs); + delta = (pargs.period + haptic->pwm_duty) / 2; + error = pwm_config(haptic->pwm_dev, delta, pargs.period); if (error) { dev_err(haptic->dev, "failed to configure pwm: %d\n", error); return error; @@ -234,6 +237,7 @@ static int max77693_haptic_play_effect(struct input_dev *dev, void *data, struct ff_effect *effect) { struct max77693_haptic *haptic = input_get_drvdata(dev); + struct pwm_args pargs; u64 period_mag_multi; haptic->magnitude = effect->u.rumble.strong_magnitude; @@ -245,7 +249,8 @@ static int max77693_haptic_play_effect(struct input_dev *dev, void *data, * The formula to convert magnitude to pwm_duty as follows: * - pwm_duty = (magnitude * pwm_period) / MAX_MAGNITUDE(0xFFFF) */ - period_mag_multi = (u64)haptic->pwm_dev->period * haptic->magnitude; + pwm_get_args(haptic->pwm_dev, &pargs); + period_mag_multi = (u64)pargs.period * haptic->magnitude; haptic->pwm_duty = (unsigned int)(period_mag_multi >> MAX_MAGNITUDE_SHIFT); @@ -329,6 +334,12 @@ static int max77693_haptic_probe(struct platform_device *pdev) return PTR_ERR(haptic->pwm_dev); } + /* + * FIXME: pwm_apply_args() should be removed when switching to the + * atomic PWM API. + */ + pwm_apply_args(haptic->pwm_dev); + haptic->motor_reg = devm_regulator_get(&pdev->dev, "haptic"); if (IS_ERR(haptic->motor_reg)) { dev_err(&pdev->dev, "failed to get regulator\n"); diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c index 8d6326d7e7be..99bc762881d5 100644 --- a/drivers/input/misc/max8997_haptic.c +++ b/drivers/input/misc/max8997_haptic.c @@ -306,6 +306,12 @@ static int max8997_haptic_probe(struct platform_device *pdev) error); goto err_free_mem; } + + /* + * FIXME: pwm_apply_args() should be removed when switching to + * the atomic PWM API. 
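+	 * Until then it applies the reference period and polarity that
+	 * the platform handed over with the PWM device, so that the
+	 * pwm_get_args() based duty computations above see valid values.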
+ */ + pwm_apply_args(chip->pwm); break; default: diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c index f2261ab54701..5f9655d49a65 100644 --- a/drivers/input/misc/pwm-beeper.c +++ b/drivers/input/misc/pwm-beeper.c @@ -20,21 +20,40 @@ #include <linux/platform_device.h> #include <linux/pwm.h> #include <linux/slab.h> +#include <linux/workqueue.h> struct pwm_beeper { struct input_dev *input; struct pwm_device *pwm; + struct work_struct work; unsigned long period; }; #define HZ_TO_NANOSECONDS(x) (1000000000UL/(x)) +static void __pwm_beeper_set(struct pwm_beeper *beeper) +{ + unsigned long period = beeper->period; + + if (period) { + pwm_config(beeper->pwm, period / 2, period); + pwm_enable(beeper->pwm); + } else + pwm_disable(beeper->pwm); +} + +static void pwm_beeper_work(struct work_struct *work) +{ + struct pwm_beeper *beeper = + container_of(work, struct pwm_beeper, work); + + __pwm_beeper_set(beeper); +} + static int pwm_beeper_event(struct input_dev *input, unsigned int type, unsigned int code, int value) { - int ret = 0; struct pwm_beeper *beeper = input_get_drvdata(input); - unsigned long period; if (type != EV_SND || value < 0) return -EINVAL; @@ -49,22 +68,31 @@ static int pwm_beeper_event(struct input_dev *input, return -EINVAL; } - if (value == 0) { - pwm_disable(beeper->pwm); - } else { - period = HZ_TO_NANOSECONDS(value); - ret = pwm_config(beeper->pwm, period / 2, period); - if (ret) - return ret; - ret = pwm_enable(beeper->pwm); - if (ret) - return ret; - beeper->period = period; - } + if (value == 0) + beeper->period = 0; + else + beeper->period = HZ_TO_NANOSECONDS(value); + + schedule_work(&beeper->work); return 0; } +static void pwm_beeper_stop(struct pwm_beeper *beeper) +{ + cancel_work_sync(&beeper->work); + + if (beeper->period) + pwm_disable(beeper->pwm); +} + +static void pwm_beeper_close(struct input_dev *input) +{ + struct pwm_beeper *beeper = input_get_drvdata(input); + + pwm_beeper_stop(beeper); +} + static int pwm_beeper_probe(struct platform_device *pdev) { unsigned long pwm_id = (unsigned long)dev_get_platdata(&pdev->dev); @@ -87,6 +115,14 @@ static int pwm_beeper_probe(struct platform_device *pdev) goto err_free; } + /* + * FIXME: pwm_apply_args() should be removed when switching to + * the atomic PWM API. 
+ */ + pwm_apply_args(beeper->pwm); + + INIT_WORK(&beeper->work, pwm_beeper_work); + beeper->input = input_allocate_device(); if (!beeper->input) { dev_err(&pdev->dev, "Failed to allocate input device\n"); @@ -106,6 +142,7 @@ static int pwm_beeper_probe(struct platform_device *pdev) beeper->input->sndbit[0] = BIT(SND_TONE) | BIT(SND_BELL); beeper->input->event = pwm_beeper_event; + beeper->input->close = pwm_beeper_close; input_set_drvdata(beeper->input, beeper); @@ -135,7 +172,6 @@ static int pwm_beeper_remove(struct platform_device *pdev) input_unregister_device(beeper->input); - pwm_disable(beeper->pwm); pwm_free(beeper->pwm); kfree(beeper); @@ -147,8 +183,7 @@ static int __maybe_unused pwm_beeper_suspend(struct device *dev) { struct pwm_beeper *beeper = dev_get_drvdata(dev); - if (beeper->period) - pwm_disable(beeper->pwm); + pwm_beeper_stop(beeper); return 0; } @@ -157,10 +192,8 @@ static int __maybe_unused pwm_beeper_resume(struct device *dev) { struct pwm_beeper *beeper = dev_get_drvdata(dev); - if (beeper->period) { - pwm_config(beeper->pwm, beeper->period / 2, beeper->period); - pwm_enable(beeper->pwm); - } + if (beeper->period) + __pwm_beeper_set(beeper); return 0; } diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index abe1a927b332..65ebbd111702 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -981,9 +981,15 @@ static long uinput_ioctl(struct file *file, unsigned int cmd, unsigned long arg) } #ifdef CONFIG_COMPAT + +#define UI_SET_PHYS_COMPAT _IOW(UINPUT_IOCTL_BASE, 108, compat_uptr_t) + static long uinput_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { + if (cmd == UI_SET_PHYS_COMPAT) + cmd = UI_SET_PHYS; + return uinput_ioctl_handler(file, cmd, arg, compat_ptr(arg)); } #endif diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c index 485794376ee5..d07dd29d4848 100644 --- a/drivers/input/touchscreen/sun4i-ts.c +++ b/drivers/input/touchscreen/sun4i-ts.c @@ -115,7 +115,6 @@ struct sun4i_ts_data { struct device *dev; struct input_dev *input; - struct thermal_zone_device *tz; void __iomem *base; unsigned int irq; bool ignore_fifo_data; @@ -366,10 +365,7 @@ static int sun4i_ts_probe(struct platform_device *pdev) if (IS_ERR(hwmon)) return PTR_ERR(hwmon); - ts->tz = thermal_zone_of_sensor_register(ts->dev, 0, ts, - &sun4i_ts_tz_ops); - if (IS_ERR(ts->tz)) - ts->tz = NULL; + devm_thermal_zone_of_sensor_register(ts->dev, 0, ts, &sun4i_ts_tz_ops); writel(TEMP_IRQ_EN(1), ts->base + TP_INT_FIFOC); @@ -377,7 +373,6 @@ static int sun4i_ts_probe(struct platform_device *pdev) error = input_register_device(ts->input); if (error) { writel(0, ts->base + TP_INT_FIFOC); - thermal_zone_of_sensor_unregister(ts->dev, ts->tz); return error; } } @@ -394,8 +389,6 @@ static int sun4i_ts_remove(struct platform_device *pdev) if (ts->input) input_unregister_device(ts->input); - thermal_zone_of_sensor_unregister(ts->dev, ts->tz); - /* Deactivate all IRQs */ writel(0, ts->base + TP_INT_FIFOC); diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index ebab33e77d67..94b68213c50d 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1477,7 +1477,7 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain, struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg; asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits); - if (IS_ERR_VALUE(asid)) + if (asid < 0) return asid; cfg->cdptr = dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3, @@ 
-1508,7 +1508,7 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain, struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg; vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits); - if (IS_ERR_VALUE(vmid)) + if (vmid < 0) return vmid; cfg->vmid = (u16)vmid; @@ -1569,7 +1569,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain) smmu_domain->pgtbl_ops = pgtbl_ops; ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg); - if (IS_ERR_VALUE(ret)) + if (ret < 0) free_io_pgtable_ops(pgtbl_ops); return ret; @@ -1642,7 +1642,7 @@ static void arm_smmu_detach_dev(struct device *dev) struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev); smmu_group->ste.bypass = true; - if (IS_ERR_VALUE(arm_smmu_install_ste_for_group(smmu_group))) + if (arm_smmu_install_ste_for_group(smmu_group) < 0) dev_warn(dev, "failed to install bypass STE\n"); smmu_group->domain = NULL; @@ -1694,7 +1694,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) smmu_group->ste.bypass = domain->type == IOMMU_DOMAIN_DMA; ret = arm_smmu_install_ste_for_group(smmu_group); - if (IS_ERR_VALUE(ret)) + if (ret < 0) smmu_group->domain = NULL; out_unlock: @@ -2235,7 +2235,7 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu) arm_smmu_evtq_handler, arm_smmu_evtq_thread, 0, "arm-smmu-v3-evtq", smmu); - if (IS_ERR_VALUE(ret)) + if (ret < 0) dev_warn(smmu->dev, "failed to enable evtq irq\n"); } @@ -2244,7 +2244,7 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu) ret = devm_request_irq(smmu->dev, irq, arm_smmu_cmdq_sync_handler, 0, "arm-smmu-v3-cmdq-sync", smmu); - if (IS_ERR_VALUE(ret)) + if (ret < 0) dev_warn(smmu->dev, "failed to enable cmdq-sync irq\n"); } @@ -2252,7 +2252,7 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu) if (irq) { ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler, 0, "arm-smmu-v3-gerror", smmu); - if (IS_ERR_VALUE(ret)) + if (ret < 0) dev_warn(smmu->dev, "failed to enable gerror irq\n"); } @@ -2264,7 +2264,7 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu) arm_smmu_priq_thread, 0, "arm-smmu-v3-priq", smmu); - if (IS_ERR_VALUE(ret)) + if (ret < 0) dev_warn(smmu->dev, "failed to enable priq irq\n"); else diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index e206ce7a4e4b..9345a3fcb706 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -950,7 +950,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, ret = __arm_smmu_alloc_bitmap(smmu->context_map, start, smmu->num_context_banks); - if (IS_ERR_VALUE(ret)) + if (ret < 0) goto out_unlock; cfg->cbndx = ret; @@ -989,7 +989,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED, "arm-smmu-context-fault", domain); - if (IS_ERR_VALUE(ret)) { + if (ret < 0) { dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n", cfg->irptndx, irq); cfg->irptndx = INVALID_IRPTNDX; @@ -1099,7 +1099,7 @@ static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu, for (i = 0; i < cfg->num_streamids; ++i) { int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0, smmu->num_mapping_groups); - if (IS_ERR_VALUE(idx)) { + if (idx < 0) { dev_err(smmu->dev, "failed to allocate free SMR\n"); goto err_free_smrs; } @@ -1233,7 +1233,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) /* Ensure that the domain is finalised */ ret = 
arm_smmu_init_domain_context(domain, smmu); - if (IS_ERR_VALUE(ret)) + if (ret < 0) return ret; /* diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index b2bfb9594508..a644d0cec2d8 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -33,6 +33,7 @@ #include <linux/dma-mapping.h> #include <linux/mempool.h> #include <linux/memory.h> +#include <linux/cpu.h> #include <linux/timer.h> #include <linux/io.h> #include <linux/iova.h> @@ -390,6 +391,7 @@ struct dmar_domain { * domain ids are 16 bit wide according * to VT-d spec, section 9.3 */ + bool has_iotlb_device; struct list_head devices; /* all devices' list */ struct iova_domain iovad; /* iova's that belong to this domain */ @@ -456,27 +458,32 @@ static LIST_HEAD(dmar_rmrr_units); static void flush_unmaps_timeout(unsigned long data); -static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0); +struct deferred_flush_entry { + unsigned long iova_pfn; + unsigned long nrpages; + struct dmar_domain *domain; + struct page *freelist; +}; #define HIGH_WATER_MARK 250 -struct deferred_flush_tables { +struct deferred_flush_table { int next; - struct iova *iova[HIGH_WATER_MARK]; - struct dmar_domain *domain[HIGH_WATER_MARK]; - struct page *freelist[HIGH_WATER_MARK]; + struct deferred_flush_entry entries[HIGH_WATER_MARK]; +}; + +struct deferred_flush_data { + spinlock_t lock; + int timer_on; + struct timer_list timer; + long size; + struct deferred_flush_table *tables; }; -static struct deferred_flush_tables *deferred_flush; +DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush); /* bitmap for indexing intel_iommus */ static int g_num_of_iommus; -static DEFINE_SPINLOCK(async_umap_flush_lock); -static LIST_HEAD(unmaps_to_do); - -static int timer_on; -static long list_size; - static void domain_exit(struct dmar_domain *domain); static void domain_remove_dev_info(struct dmar_domain *domain); static void dmar_remove_one_dev_info(struct dmar_domain *domain, @@ -1458,10 +1465,35 @@ iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu, return NULL; } +static void domain_update_iotlb(struct dmar_domain *domain) +{ + struct device_domain_info *info; + bool has_iotlb_device = false; + + assert_spin_locked(&device_domain_lock); + + list_for_each_entry(info, &domain->devices, link) { + struct pci_dev *pdev; + + if (!info->dev || !dev_is_pci(info->dev)) + continue; + + pdev = to_pci_dev(info->dev); + if (pdev->ats_enabled) { + has_iotlb_device = true; + break; + } + } + + domain->has_iotlb_device = has_iotlb_device; +} + static void iommu_enable_dev_iotlb(struct device_domain_info *info) { struct pci_dev *pdev; + assert_spin_locked(&device_domain_lock); + if (!info || !dev_is_pci(info->dev)) return; @@ -1481,6 +1513,7 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info) #endif if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) { info->ats_enabled = 1; + domain_update_iotlb(info->domain); info->ats_qdep = pci_ats_queue_depth(pdev); } } @@ -1489,6 +1522,8 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info) { struct pci_dev *pdev; + assert_spin_locked(&device_domain_lock); + if (!dev_is_pci(info->dev)) return; @@ -1497,6 +1532,7 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info) if (info->ats_enabled) { pci_disable_ats(pdev); info->ats_enabled = 0; + domain_update_iotlb(info->domain); } #ifdef CONFIG_INTEL_IOMMU_SVM if (info->pri_enabled) { @@ -1517,6 +1553,9 @@ static void iommu_flush_dev_iotlb(struct dmar_domain 
*domain, unsigned long flags; struct device_domain_info *info; + if (!domain->has_iotlb_device) + return; + spin_lock_irqsave(&device_domain_lock, flags); list_for_each_entry(info, &domain->devices, link) { if (!info->ats_enabled) @@ -1734,6 +1773,7 @@ static struct dmar_domain *alloc_domain(int flags) memset(domain, 0, sizeof(*domain)); domain->nid = -1; domain->flags = flags; + domain->has_iotlb_device = false; INIT_LIST_HEAD(&domain->devices); return domain; @@ -1918,8 +1958,12 @@ static void domain_exit(struct dmar_domain *domain) return; /* Flush any lazy unmaps that may reference this domain */ - if (!intel_iommu_strict) - flush_unmaps_timeout(0); + if (!intel_iommu_strict) { + int cpu; + + for_each_possible_cpu(cpu) + flush_unmaps_timeout(cpu); + } /* Remove associated devices and clear attached or cached domains */ rcu_read_lock(); @@ -3077,7 +3121,7 @@ static int __init init_dmars(void) bool copied_tables = false; struct device *dev; struct intel_iommu *iommu; - int i, ret; + int i, ret, cpu; /* * for each drhd @@ -3110,11 +3154,20 @@ static int __init init_dmars(void) goto error; } - deferred_flush = kzalloc(g_num_of_iommus * - sizeof(struct deferred_flush_tables), GFP_KERNEL); - if (!deferred_flush) { - ret = -ENOMEM; - goto free_g_iommus; + for_each_possible_cpu(cpu) { + struct deferred_flush_data *dfd = per_cpu_ptr(&deferred_flush, + cpu); + + dfd->tables = kzalloc(g_num_of_iommus * + sizeof(struct deferred_flush_table), + GFP_KERNEL); + if (!dfd->tables) { + ret = -ENOMEM; + goto free_g_iommus; + } + + spin_lock_init(&dfd->lock); + setup_timer(&dfd->timer, flush_unmaps_timeout, cpu); } for_each_active_iommu(iommu, drhd) { @@ -3291,19 +3344,20 @@ free_iommu: disable_dmar_iommu(iommu); free_dmar_iommu(iommu); } - kfree(deferred_flush); free_g_iommus: + for_each_possible_cpu(cpu) + kfree(per_cpu_ptr(&deferred_flush, cpu)->tables); kfree(g_iommus); error: return ret; } /* This takes a number of _MM_ pages, not VTD pages */ -static struct iova *intel_alloc_iova(struct device *dev, +static unsigned long intel_alloc_iova(struct device *dev, struct dmar_domain *domain, unsigned long nrpages, uint64_t dma_mask) { - struct iova *iova = NULL; + unsigned long iova_pfn = 0; /* Restrict dma_mask to the width that the iommu can handle */ dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask); @@ -3316,19 +3370,19 @@ static struct iova *intel_alloc_iova(struct device *dev, * DMA_BIT_MASK(32) and if that fails then try allocating * from higher range */ - iova = alloc_iova(&domain->iovad, nrpages, - IOVA_PFN(DMA_BIT_MASK(32)), 1); - if (iova) - return iova; + iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, + IOVA_PFN(DMA_BIT_MASK(32))); + if (iova_pfn) + return iova_pfn; } - iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1); - if (unlikely(!iova)) { + iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, IOVA_PFN(dma_mask)); + if (unlikely(!iova_pfn)) { pr_err("Allocating %ld-page iova for %s failed", nrpages, dev_name(dev)); - return NULL; + return 0; } - return iova; + return iova_pfn; } static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev) @@ -3426,7 +3480,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr, { struct dmar_domain *domain; phys_addr_t start_paddr; - struct iova *iova; + unsigned long iova_pfn; int prot = 0; int ret; struct intel_iommu *iommu; @@ -3444,8 +3498,8 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr, iommu = domain_get_iommu(domain); size = 
aligned_nrpages(paddr, size); - iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask); - if (!iova) + iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask); + if (!iova_pfn) goto error; /* @@ -3463,7 +3517,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr, * might have two guest_addr mapping to the same host paddr, but this * is not a big problem */ - ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo), + ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn), mm_to_dma_pfn(paddr_pfn), size, prot); if (ret) goto error; @@ -3471,18 +3525,18 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr, /* it's a non-present to present mapping. Only flush if caching mode */ if (cap_caching_mode(iommu->cap)) iommu_flush_iotlb_psi(iommu, domain, - mm_to_dma_pfn(iova->pfn_lo), + mm_to_dma_pfn(iova_pfn), size, 0, 1); else iommu_flush_write_buffer(iommu); - start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT; + start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT; start_paddr += paddr & ~PAGE_MASK; return start_paddr; error: - if (iova) - __free_iova(&domain->iovad, iova); + if (iova_pfn) + free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size)); pr_err("Device %s request: %zx@%llx dir %d --- failed\n", dev_name(dev), size, (unsigned long long)paddr, dir); return 0; @@ -3497,91 +3551,120 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page, dir, *dev->dma_mask); } -static void flush_unmaps(void) +static void flush_unmaps(struct deferred_flush_data *flush_data) { int i, j; - timer_on = 0; + flush_data->timer_on = 0; /* just flush them all */ for (i = 0; i < g_num_of_iommus; i++) { struct intel_iommu *iommu = g_iommus[i]; + struct deferred_flush_table *flush_table = + &flush_data->tables[i]; if (!iommu) continue; - if (!deferred_flush[i].next) + if (!flush_table->next) continue; /* In caching mode, global flushes turn emulation expensive */ if (!cap_caching_mode(iommu->cap)) iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); - for (j = 0; j < deferred_flush[i].next; j++) { + for (j = 0; j < flush_table->next; j++) { unsigned long mask; - struct iova *iova = deferred_flush[i].iova[j]; - struct dmar_domain *domain = deferred_flush[i].domain[j]; + struct deferred_flush_entry *entry = + &flush_table->entries[j]; + unsigned long iova_pfn = entry->iova_pfn; + unsigned long nrpages = entry->nrpages; + struct dmar_domain *domain = entry->domain; + struct page *freelist = entry->freelist; /* On real hardware multiple invalidations are expensive */ if (cap_caching_mode(iommu->cap)) iommu_flush_iotlb_psi(iommu, domain, - iova->pfn_lo, iova_size(iova), - !deferred_flush[i].freelist[j], 0); + mm_to_dma_pfn(iova_pfn), + nrpages, !freelist, 0); else { - mask = ilog2(mm_to_dma_pfn(iova_size(iova))); - iommu_flush_dev_iotlb(deferred_flush[i].domain[j], - (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask); + mask = ilog2(nrpages); + iommu_flush_dev_iotlb(domain, + (uint64_t)iova_pfn << PAGE_SHIFT, mask); } - __free_iova(&deferred_flush[i].domain[j]->iovad, iova); - if (deferred_flush[i].freelist[j]) - dma_free_pagelist(deferred_flush[i].freelist[j]); + free_iova_fast(&domain->iovad, iova_pfn, nrpages); + if (freelist) + dma_free_pagelist(freelist); } - deferred_flush[i].next = 0; + flush_table->next = 0; } - list_size = 0; + flush_data->size = 0; } -static void flush_unmaps_timeout(unsigned long data) +static void flush_unmaps_timeout(unsigned long cpuid) { + struct deferred_flush_data *flush_data 
= per_cpu_ptr(&deferred_flush, cpuid); unsigned long flags; - spin_lock_irqsave(&async_umap_flush_lock, flags); - flush_unmaps(); - spin_unlock_irqrestore(&async_umap_flush_lock, flags); + spin_lock_irqsave(&flush_data->lock, flags); + flush_unmaps(flush_data); + spin_unlock_irqrestore(&flush_data->lock, flags); } -static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist) +static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn, + unsigned long nrpages, struct page *freelist) { unsigned long flags; - int next, iommu_id; + int entry_id, iommu_id; struct intel_iommu *iommu; + struct deferred_flush_entry *entry; + struct deferred_flush_data *flush_data; + unsigned int cpuid; - spin_lock_irqsave(&async_umap_flush_lock, flags); - if (list_size == HIGH_WATER_MARK) - flush_unmaps(); + cpuid = get_cpu(); + flush_data = per_cpu_ptr(&deferred_flush, cpuid); + + /* Flush all CPUs' entries to avoid deferring too much. If + * this becomes a bottleneck, can just flush us, and rely on + * flush timer for the rest. + */ + if (flush_data->size == HIGH_WATER_MARK) { + int cpu; + + for_each_online_cpu(cpu) + flush_unmaps_timeout(cpu); + } + + spin_lock_irqsave(&flush_data->lock, flags); iommu = domain_get_iommu(dom); iommu_id = iommu->seq_id; - next = deferred_flush[iommu_id].next; - deferred_flush[iommu_id].domain[next] = dom; - deferred_flush[iommu_id].iova[next] = iova; - deferred_flush[iommu_id].freelist[next] = freelist; - deferred_flush[iommu_id].next++; + entry_id = flush_data->tables[iommu_id].next; + ++(flush_data->tables[iommu_id].next); - if (!timer_on) { - mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10)); - timer_on = 1; + entry = &flush_data->tables[iommu_id].entries[entry_id]; + entry->domain = dom; + entry->iova_pfn = iova_pfn; + entry->nrpages = nrpages; + entry->freelist = freelist; + + if (!flush_data->timer_on) { + mod_timer(&flush_data->timer, jiffies + msecs_to_jiffies(10)); + flush_data->timer_on = 1; } - list_size++; - spin_unlock_irqrestore(&async_umap_flush_lock, flags); + flush_data->size++; + spin_unlock_irqrestore(&flush_data->lock, flags); + + put_cpu(); } -static void intel_unmap(struct device *dev, dma_addr_t dev_addr) +static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size) { struct dmar_domain *domain; unsigned long start_pfn, last_pfn; - struct iova *iova; + unsigned long nrpages; + unsigned long iova_pfn; struct intel_iommu *iommu; struct page *freelist; @@ -3593,13 +3676,11 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr) iommu = domain_get_iommu(domain); - iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr)); - if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n", - (unsigned long long)dev_addr)) - return; + iova_pfn = IOVA_PFN(dev_addr); - start_pfn = mm_to_dma_pfn(iova->pfn_lo); - last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1; + nrpages = aligned_nrpages(dev_addr, size); + start_pfn = mm_to_dma_pfn(iova_pfn); + last_pfn = start_pfn + nrpages - 1; pr_debug("Device %s unmapping: pfn %lx-%lx\n", dev_name(dev), start_pfn, last_pfn); @@ -3608,12 +3689,12 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr) if (intel_iommu_strict) { iommu_flush_iotlb_psi(iommu, domain, start_pfn, - last_pfn - start_pfn + 1, !freelist, 0); + nrpages, !freelist, 0); /* free iova */ - __free_iova(&domain->iovad, iova); + free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages)); dma_free_pagelist(freelist); } else { - add_unmap(domain, iova, freelist); + 
add_unmap(domain, iova_pfn, nrpages, freelist); /* * queue up the release of the unmap to save the 1/6th of the * cpu used up by the iotlb flush operation... @@ -3625,7 +3706,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { - intel_unmap(dev, dev_addr); + intel_unmap(dev, dev_addr, size); } static void *intel_alloc_coherent(struct device *dev, size_t size, @@ -3684,7 +3765,7 @@ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr, size = PAGE_ALIGN(size); order = get_order(size); - intel_unmap(dev, dma_handle); + intel_unmap(dev, dma_handle, size); if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT)) __free_pages(page, order); } @@ -3693,7 +3774,16 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs) { - intel_unmap(dev, sglist[0].dma_address); + dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK; + unsigned long nrpages = 0; + struct scatterlist *sg; + int i; + + for_each_sg(sglist, sg, nelems, i) { + nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg)); + } + + intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT); } static int intel_nontranslate_map_sg(struct device *hddev, @@ -3717,7 +3807,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele struct dmar_domain *domain; size_t size = 0; int prot = 0; - struct iova *iova = NULL; + unsigned long iova_pfn; int ret; struct scatterlist *sg; unsigned long start_vpfn; @@ -3736,9 +3826,9 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele for_each_sg(sglist, sg, nelems, i) size += aligned_nrpages(sg->offset, sg->length); - iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), + iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), *dev->dma_mask); - if (!iova) { + if (!iova_pfn) { sglist->dma_length = 0; return 0; } @@ -3753,13 +3843,13 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) prot |= DMA_PTE_WRITE; - start_vpfn = mm_to_dma_pfn(iova->pfn_lo); + start_vpfn = mm_to_dma_pfn(iova_pfn); ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot); if (unlikely(ret)) { dma_pte_free_pagetable(domain, start_vpfn, start_vpfn + size - 1); - __free_iova(&domain->iovad, iova); + free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size)); return 0; } @@ -4498,6 +4588,46 @@ static struct notifier_block intel_iommu_memory_nb = { .priority = 0 }; +static void free_all_cpu_cached_iovas(unsigned int cpu) +{ + int i; + + for (i = 0; i < g_num_of_iommus; i++) { + struct intel_iommu *iommu = g_iommus[i]; + struct dmar_domain *domain; + u16 did; + + if (!iommu) + continue; + + for (did = 0; did < 0xffff; did++) { + domain = get_iommu_domain(iommu, did); + + if (!domain) + continue; + free_cpu_cached_iovas(cpu, &domain->iovad); + } + } +} + +static int intel_iommu_cpu_notifier(struct notifier_block *nfb, + unsigned long action, void *v) +{ + unsigned int cpu = (unsigned long)v; + + switch (action) { + case CPU_DEAD: + case CPU_DEAD_FROZEN: + free_all_cpu_cached_iovas(cpu); + flush_unmaps_timeout(cpu); + break; + } + return NOTIFY_OK; +} + +static struct notifier_block intel_iommu_cpu_nb = { + .notifier_call = intel_iommu_cpu_notifier, +}; static ssize_t intel_iommu_show_version(struct device *dev, struct device_attribute *attr, @@ -4631,7 +4761,6 @@ int 
__init intel_iommu_init(void)
 	up_write(&dmar_global_lock);
 	pr_info("Intel(R) Virtualization Technology for Directed I/O\n");

-	init_timer(&unmap_timer);
 #ifdef CONFIG_SWIOTLB
 	swiotlb = 0;
 #endif
@@ -4648,6 +4777,7 @@ int __init intel_iommu_init(void)
 	bus_register_notifier(&pci_bus_type, &device_nb);
 	if (si_domain && !hw_pass_through)
 		register_memory_notifier(&intel_iommu_memory_nb);
+	register_hotcpu_notifier(&intel_iommu_cpu_nb);

 	intel_iommu_enabled = 1;

diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index fa0adef32bd6..ba764a0835d3 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -20,6 +20,17 @@
 #include <linux/iova.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/bitops.h>
+
+static bool iova_rcache_insert(struct iova_domain *iovad,
+			       unsigned long pfn,
+			       unsigned long size);
+static unsigned long iova_rcache_get(struct iova_domain *iovad,
+				     unsigned long size,
+				     unsigned long limit_pfn);
+static void init_iova_rcaches(struct iova_domain *iovad);
+static void free_iova_rcaches(struct iova_domain *iovad);

 void
 init_iova_domain(struct iova_domain *iovad, unsigned long granule,
@@ -38,6 +49,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 	iovad->granule = granule;
 	iovad->start_pfn = start_pfn;
 	iovad->dma_32bit_pfn = pfn_32bit;
+	init_iova_rcaches(iovad);
 }
 EXPORT_SYMBOL_GPL(init_iova_domain);

@@ -291,33 +303,18 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
 }
 EXPORT_SYMBOL_GPL(alloc_iova);

-/**
- * find_iova - find's an iova for a given pfn
- * @iovad: - iova domain in question.
- * @pfn: - page frame number
- * This function finds and returns an iova belonging to the
- * given doamin which matches the given pfn.
- */
-struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
+static struct iova *
+private_find_iova(struct iova_domain *iovad, unsigned long pfn)
 {
-	unsigned long flags;
-	struct rb_node *node;
+	struct rb_node *node = iovad->rbroot.rb_node;
+
+	assert_spin_locked(&iovad->iova_rbtree_lock);

-	/* Take the lock so that no other thread is manipulating the rbtree */
-	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
-	node = iovad->rbroot.rb_node;
 	while (node) {
 		struct iova *iova = container_of(node, struct iova, node);

 		/* If pfn falls within iova's range, return iova */
 		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
-			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-			/* We are not holding the lock while this iova
-			 * is referenced by the caller as the same thread
-			 * which called this function also calls __free_iova()
-			 * and it is by design that only one thread can possibly
-			 * reference a particular iova and hence no conflict.
-			 */
 			return iova;
 		}

@@ -327,9 +324,35 @@ struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
 			node = node->rb_right;
 	}

-	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 	return NULL;
 }
+
+static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
+{
+	assert_spin_locked(&iovad->iova_rbtree_lock);
+	__cached_rbnode_delete_update(iovad, iova);
+	rb_erase(&iova->node, &iovad->rbroot);
+	free_iova_mem(iova);
+}
+
+/**
+ * find_iova - finds an iova for a given pfn
+ * @iovad: - iova domain in question.
+ * @pfn: - page frame number
+ * This function finds and returns an iova belonging to the
+ * given domain which matches the given pfn.
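+ * The rbtree lock is taken for the duration of the lookup; internal
+ * callers that already hold it use private_find_iova() directly.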
+ */
+struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
+{
+	unsigned long flags;
+	struct iova *iova;
+
+	/* Take the lock so that no other thread is manipulating the rbtree */
+	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+	iova = private_find_iova(iovad, pfn);
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+	return iova;
+}
 EXPORT_SYMBOL_GPL(find_iova);

 /**
@@ -344,10 +367,8 @@ __free_iova(struct iova_domain *iovad, struct iova *iova)
 	unsigned long flags;

 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
-	__cached_rbnode_delete_update(iovad, iova);
-	rb_erase(&iova->node, &iovad->rbroot);
+	private_free_iova(iovad, iova);
 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-	free_iova_mem(iova);
 }
 EXPORT_SYMBOL_GPL(__free_iova);

@@ -370,6 +391,63 @@ free_iova(struct iova_domain *iovad, unsigned long pfn)
 EXPORT_SYMBOL_GPL(free_iova);

 /**
+ * alloc_iova_fast - allocates an iova from rcache
+ * @iovad: - iova domain in question
+ * @size: - size of page frames to allocate
+ * @limit_pfn: - max limit address
+ * This function tries to satisfy an iova allocation from the rcache,
+ * and falls back to regular allocation on failure.
+ */
+unsigned long
+alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
+		unsigned long limit_pfn)
+{
+	bool flushed_rcache = false;
+	unsigned long iova_pfn;
+	struct iova *new_iova;
+
+	iova_pfn = iova_rcache_get(iovad, size, limit_pfn);
+	if (iova_pfn)
+		return iova_pfn;
+
+retry:
+	new_iova = alloc_iova(iovad, size, limit_pfn, true);
+	if (!new_iova) {
+		unsigned int cpu;
+
+		if (flushed_rcache)
+			return 0;
+
+		/* Try replenishing IOVAs by flushing rcache. */
+		flushed_rcache = true;
+		for_each_online_cpu(cpu)
+			free_cpu_cached_iovas(cpu, iovad);
+		goto retry;
+	}
+
+	return new_iova->pfn_lo;
+}
+EXPORT_SYMBOL_GPL(alloc_iova_fast);
+
+/**
+ * free_iova_fast - free iova pfn range into rcache
+ * @iovad: - iova domain in question.
+ * @pfn: - pfn that is allocated previously
+ * @size: - # of pages in range
+ * This function frees an iova range by trying to put it into the rcache,
+ * falling back to regular iova deallocation via free_iova() if this fails.
+ */
+void
+free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
+{
+	if (iova_rcache_insert(iovad, pfn, size))
+		return;
+
+	free_iova(iovad, pfn);
+}
+EXPORT_SYMBOL_GPL(free_iova_fast);
+
+/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
 * All the iova's in that domain are destroyed.
@@ -379,6 +457,7 @@ void put_iova_domain(struct iova_domain *iovad)
 	struct rb_node *node;
 	unsigned long flags;

+	free_iova_rcaches(iovad);
 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
 	node = rb_first(&iovad->rbroot);
 	while (node) {
@@ -550,5 +629,295 @@ error:
 	return NULL;
 }

+/*
+ * Magazine caches for IOVA ranges.  For an introduction to magazines,
+ * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
+ * Allocator to Many CPUs and Arbitrary Resources" by Bonwick and Adams.
+ * For simplicity, we use a static magazine size and don't implement the
+ * dynamic size tuning described in the paper.
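+ *
+ * Each CPU owns two magazines, 'loaded' and 'prev': frees push into
+ * 'loaded', allocations pop from it, and the two are swapped when one
+ * of them runs full or empty.  Full magazines overflow into a small
+ * global depot shared by all CPUs; only when the depot is exhausted
+ * (or full) does an allocation (or free) fall back to the rbtree.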
+ */ + +#define IOVA_MAG_SIZE 128 + +struct iova_magazine { + unsigned long size; + unsigned long pfns[IOVA_MAG_SIZE]; +}; + +struct iova_cpu_rcache { + spinlock_t lock; + struct iova_magazine *loaded; + struct iova_magazine *prev; +}; + +static struct iova_magazine *iova_magazine_alloc(gfp_t flags) +{ + return kzalloc(sizeof(struct iova_magazine), flags); +} + +static void iova_magazine_free(struct iova_magazine *mag) +{ + kfree(mag); +} + +static void +iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad) +{ + unsigned long flags; + int i; + + if (!mag) + return; + + spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); + + for (i = 0 ; i < mag->size; ++i) { + struct iova *iova = private_find_iova(iovad, mag->pfns[i]); + + BUG_ON(!iova); + private_free_iova(iovad, iova); + } + + spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); + + mag->size = 0; +} + +static bool iova_magazine_full(struct iova_magazine *mag) +{ + return (mag && mag->size == IOVA_MAG_SIZE); +} + +static bool iova_magazine_empty(struct iova_magazine *mag) +{ + return (!mag || mag->size == 0); +} + +static unsigned long iova_magazine_pop(struct iova_magazine *mag, + unsigned long limit_pfn) +{ + BUG_ON(iova_magazine_empty(mag)); + + if (mag->pfns[mag->size - 1] >= limit_pfn) + return 0; + + return mag->pfns[--mag->size]; +} + +static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn) +{ + BUG_ON(iova_magazine_full(mag)); + + mag->pfns[mag->size++] = pfn; +} + +static void init_iova_rcaches(struct iova_domain *iovad) +{ + struct iova_cpu_rcache *cpu_rcache; + struct iova_rcache *rcache; + unsigned int cpu; + int i; + + for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) { + rcache = &iovad->rcaches[i]; + spin_lock_init(&rcache->lock); + rcache->depot_size = 0; + rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size()); + if (WARN_ON(!rcache->cpu_rcaches)) + continue; + for_each_possible_cpu(cpu) { + cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu); + spin_lock_init(&cpu_rcache->lock); + cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL); + cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL); + } + } +} + +/* + * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and + * return true on success. Can fail if rcache is full and we can't free + * space, and free_iova() (our only caller) will then return the IOVA + * range to the rbtree instead. 
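+ * On overflow the full 'loaded' magazine is handed to the depot and
+ * replaced with a freshly allocated empty one; if the depot itself is
+ * full, the old magazine's ranges are freed back to the rbtree after
+ * the per-cpu lock has been dropped.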
+ */ +static bool __iova_rcache_insert(struct iova_domain *iovad, + struct iova_rcache *rcache, + unsigned long iova_pfn) +{ + struct iova_magazine *mag_to_free = NULL; + struct iova_cpu_rcache *cpu_rcache; + bool can_insert = false; + unsigned long flags; + + cpu_rcache = this_cpu_ptr(rcache->cpu_rcaches); + spin_lock_irqsave(&cpu_rcache->lock, flags); + + if (!iova_magazine_full(cpu_rcache->loaded)) { + can_insert = true; + } else if (!iova_magazine_full(cpu_rcache->prev)) { + swap(cpu_rcache->prev, cpu_rcache->loaded); + can_insert = true; + } else { + struct iova_magazine *new_mag = iova_magazine_alloc(GFP_ATOMIC); + + if (new_mag) { + spin_lock(&rcache->lock); + if (rcache->depot_size < MAX_GLOBAL_MAGS) { + rcache->depot[rcache->depot_size++] = + cpu_rcache->loaded; + } else { + mag_to_free = cpu_rcache->loaded; + } + spin_unlock(&rcache->lock); + + cpu_rcache->loaded = new_mag; + can_insert = true; + } + } + + if (can_insert) + iova_magazine_push(cpu_rcache->loaded, iova_pfn); + + spin_unlock_irqrestore(&cpu_rcache->lock, flags); + + if (mag_to_free) { + iova_magazine_free_pfns(mag_to_free, iovad); + iova_magazine_free(mag_to_free); + } + + return can_insert; +} + +static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn, + unsigned long size) +{ + unsigned int log_size = order_base_2(size); + + if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE) + return false; + + return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn); +} + +/* + * Caller wants to allocate a new IOVA range from 'rcache'. If we can + * satisfy the request, return a matching non-NULL range and remove + * it from the 'rcache'. + */ +static unsigned long __iova_rcache_get(struct iova_rcache *rcache, + unsigned long limit_pfn) +{ + struct iova_cpu_rcache *cpu_rcache; + unsigned long iova_pfn = 0; + bool has_pfn = false; + unsigned long flags; + + cpu_rcache = this_cpu_ptr(rcache->cpu_rcaches); + spin_lock_irqsave(&cpu_rcache->lock, flags); + + if (!iova_magazine_empty(cpu_rcache->loaded)) { + has_pfn = true; + } else if (!iova_magazine_empty(cpu_rcache->prev)) { + swap(cpu_rcache->prev, cpu_rcache->loaded); + has_pfn = true; + } else { + spin_lock(&rcache->lock); + if (rcache->depot_size > 0) { + iova_magazine_free(cpu_rcache->loaded); + cpu_rcache->loaded = rcache->depot[--rcache->depot_size]; + has_pfn = true; + } + spin_unlock(&rcache->lock); + } + + if (has_pfn) + iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn); + + spin_unlock_irqrestore(&cpu_rcache->lock, flags); + + return iova_pfn; +} + +/* + * Try to satisfy IOVA allocation range from rcache. Fail if requested + * size is too big or the DMA limit we are given isn't satisfied by the + * top element in the magazine. + */ +static unsigned long iova_rcache_get(struct iova_domain *iovad, + unsigned long size, + unsigned long limit_pfn) +{ + unsigned int log_size = order_base_2(size); + + if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE) + return 0; + + return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn); +} + +/* + * Free a cpu's rcache. 
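+ * Both magazines are emptied back into the rbtree and then freed;
+ * called via free_iova_rcaches() when the domain is destroyed.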
+ */ +static void free_cpu_iova_rcache(unsigned int cpu, struct iova_domain *iovad, + struct iova_rcache *rcache) +{ + struct iova_cpu_rcache *cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu); + unsigned long flags; + + spin_lock_irqsave(&cpu_rcache->lock, flags); + + iova_magazine_free_pfns(cpu_rcache->loaded, iovad); + iova_magazine_free(cpu_rcache->loaded); + + iova_magazine_free_pfns(cpu_rcache->prev, iovad); + iova_magazine_free(cpu_rcache->prev); + + spin_unlock_irqrestore(&cpu_rcache->lock, flags); +} + +/* + * free rcache data structures. + */ +static void free_iova_rcaches(struct iova_domain *iovad) +{ + struct iova_rcache *rcache; + unsigned long flags; + unsigned int cpu; + int i, j; + + for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) { + rcache = &iovad->rcaches[i]; + for_each_possible_cpu(cpu) + free_cpu_iova_rcache(cpu, iovad, rcache); + spin_lock_irqsave(&rcache->lock, flags); + free_percpu(rcache->cpu_rcaches); + for (j = 0; j < rcache->depot_size; ++j) { + iova_magazine_free_pfns(rcache->depot[j], iovad); + iova_magazine_free(rcache->depot[j]); + } + spin_unlock_irqrestore(&rcache->lock, flags); + } +} + +/* + * free all the IOVA ranges cached by a cpu (used when cpu is unplugged) + */ +void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad) +{ + struct iova_cpu_rcache *cpu_rcache; + struct iova_rcache *rcache; + unsigned long flags; + int i; + + for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) { + rcache = &iovad->rcaches[i]; + cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu); + spin_lock_irqsave(&cpu_rcache->lock, flags); + iova_magazine_free_pfns(cpu_rcache->loaded, iovad); + iova_magazine_free_pfns(cpu_rcache->prev, iovad); + spin_unlock_irqrestore(&cpu_rcache->lock, flags); + } +} + MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>"); MODULE_LICENSE("GPL"); diff --git a/drivers/irqchip/irq-clps711x.c b/drivers/irqchip/irq-clps711x.c index eb5eb0cd414d..2223b3f15d68 100644 --- a/drivers/irqchip/irq-clps711x.c +++ b/drivers/irqchip/irq-clps711x.c @@ -182,7 +182,7 @@ static int __init _clps711x_intc_init(struct device_node *np, writel_relaxed(0, clps711x_intc->intmr[2]); err = irq_alloc_descs(-1, 0, ARRAY_SIZE(clps711x_irqs), numa_node_id()); - if (IS_ERR_VALUE(err)) + if (err < 0) goto out_iounmap; clps711x_intc->ops.map = clps711x_intc_irq_map; diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 6bd881be24ea..5eb1f9e17a98 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -41,6 +41,7 @@ #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1) +#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2) #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) @@ -82,6 +83,7 @@ struct its_node { u64 flags; u32 ite_size; u32 device_ids; + int numa_node; }; #define ITS_ITT_ALIGN SZ_256 @@ -613,11 +615,23 @@ static void its_unmask_irq(struct irq_data *d) static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) { - unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask); + unsigned int cpu; + const struct cpumask *cpu_mask = cpu_online_mask; struct its_device *its_dev = irq_data_get_irq_chip_data(d); struct its_collection *target_col; u32 id = its_get_event_id(d); + /* lpi cannot be routed to a redistributor that is on a foreign node */ + if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { + if (its_dev->its->numa_node >= 0) { + cpu_mask = 
cpumask_of_node(its_dev->its->numa_node); + if (!cpumask_intersects(mask_val, cpu_mask)) + return -EINVAL; + } + } + + cpu = cpumask_any_and(mask_val, cpu_mask); + if (cpu >= nr_cpu_ids) return -EINVAL; @@ -1101,6 +1115,16 @@ static void its_cpu_init_collection(void) list_for_each_entry(its, &its_nodes, entry) { u64 target; + /* avoid cross node collections and its mapping */ + if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { + struct device_node *cpu_node; + + cpu_node = of_get_cpu_node(cpu, NULL); + if (its->numa_node != NUMA_NO_NODE && + its->numa_node != of_node_to_nid(cpu_node)) + continue; + } + /* * We now have to bind each collection to its target * redistributor. @@ -1351,9 +1375,14 @@ static void its_irq_domain_activate(struct irq_domain *domain, { struct its_device *its_dev = irq_data_get_irq_chip_data(d); u32 event = its_get_event_id(d); + const struct cpumask *cpu_mask = cpu_online_mask; + + /* get the cpu_mask of local node */ + if (its_dev->its->numa_node >= 0) + cpu_mask = cpumask_of_node(its_dev->its->numa_node); /* Bind the LPI to the first possible CPU */ - its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask); + its_dev->event_map.col_map[event] = cpumask_first(cpu_mask); /* Map the GIC IRQ and event to the device */ its_send_mapvi(its_dev, d->hwirq, event); @@ -1443,6 +1472,13 @@ static void __maybe_unused its_enable_quirk_cavium_22375(void *data) its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; } +static void __maybe_unused its_enable_quirk_cavium_23144(void *data) +{ + struct its_node *its = data; + + its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; +} + static const struct gic_quirk its_quirks[] = { #ifdef CONFIG_CAVIUM_ERRATUM_22375 { @@ -1452,6 +1488,14 @@ static const struct gic_quirk its_quirks[] = { .init = its_enable_quirk_cavium_22375, }, #endif +#ifdef CONFIG_CAVIUM_ERRATUM_23144 + { + .desc = "ITS: Cavium erratum 23144", + .iidr = 0xa100034c, /* ThunderX pass 1.x */ + .mask = 0xffff0fff, + .init = its_enable_quirk_cavium_23144, + }, +#endif { } }; @@ -1514,6 +1558,7 @@ static int __init its_probe(struct device_node *node, its->base = its_base; its->phys_base = res.start; its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1; + its->numa_node = of_node_to_nid(node); its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL); if (!its->cmd_base) { diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index fb042ba9a3db..2c5ba0e704bf 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -155,7 +155,7 @@ static void gic_enable_redist(bool enable) while (count--) { val = readl_relaxed(rbase + GICR_WAKER); - if (enable ^ (val & GICR_WAKER_ChildrenAsleep)) + if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep)) break; cpu_relax(); udelay(1); diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index b4e647179346..fbc4ae2afd29 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -1123,7 +1123,7 @@ static int __init __gic_init_bases(struct gic_chip_data *gic, int irq_start, irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, numa_node_id()); - if (IS_ERR_VALUE(irq_base)) { + if (irq_base < 0) { WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", irq_start); irq_base = irq_start; diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c index 9688d2e2a636..9e25d8ce08e5 100644 --- a/drivers/irqchip/irq-hip04.c +++ b/drivers/irqchip/irq-hip04.c @@ -402,7 +402,7 @@ hip04_of_init(struct device_node *node, struct device_node 
*parent) nr_irqs -= hwirq_base; /* calculate # of irqs to allocate */ irq_base = irq_alloc_descs(-1, hwirq_base, nr_irqs, numa_node_id()); - if (IS_ERR_VALUE(irq_base)) { + if (irq_base < 0) { pr_err("failed to allocate IRQ numbers\n"); return -EINVAL; } diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index c089f49b63fb..3b5e10aa48ab 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -968,7 +968,7 @@ static void __init __gic_init(unsigned long gic_base_addr, unsigned int cpu_vec, unsigned int irqbase, struct device_node *node) { - unsigned int gicconfig; + unsigned int gicconfig, cpu; unsigned int v[2]; __gic_base_addr = gic_base_addr; @@ -985,6 +985,14 @@ static void __init __gic_init(unsigned long gic_base_addr, gic_vpes = gic_vpes + 1; if (cpu_has_veic) { + /* Set EIC mode for all VPEs */ + for_each_present_cpu(cpu) { + gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), + mips_cm_vp_id(cpu)); + gic_write(GIC_REG(VPE_OTHER, GIC_VPE_CTL), + GIC_VPE_CTL_EIC_MODE_MSK); + } + /* Always use vector 1 in EIC mode */ gic_cpu_pin = 0; timer_cpu_pin = gic_cpu_pin; diff --git a/drivers/irqchip/irq-pic32-evic.c b/drivers/irqchip/irq-pic32-evic.c index e7155db01d55..73addb4b625b 100644 --- a/drivers/irqchip/irq-pic32-evic.c +++ b/drivers/irqchip/irq-pic32-evic.c @@ -91,7 +91,7 @@ static int pic32_set_type_edge(struct irq_data *data, /* set polarity for external interrupts only */ for (i = 0; i < ARRAY_SIZE(priv->ext_irqs); i++) { if (priv->ext_irqs[i] == data->hwirq) { - ret = pic32_set_ext_polarity(i + 1, flow_type); + ret = pic32_set_ext_polarity(i, flow_type); if (ret) return ret; } diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c index 1ccd2abed65f..1518ba31a80c 100644 --- a/drivers/irqchip/spear-shirq.c +++ b/drivers/irqchip/spear-shirq.c @@ -232,7 +232,7 @@ static int __init shirq_init(struct spear_shirq **shirq_blocks, int block_nr, nr_irqs += shirq_blocks[i]->nr_irqs; virq_base = irq_alloc_descs(-1, 0, nr_irqs, 0); - if (IS_ERR_VALUE(virq_base)) { + if (virq_base < 0) { pr_err("%s: irq desc alloc failed\n", __func__); goto err_unmap; } diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c index 4783bacb2e9d..a9145aa7f36a 100644 --- a/drivers/leds/leds-pwm.c +++ b/drivers/leds/leds-pwm.c @@ -91,6 +91,7 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv, struct led_pwm *led, struct device_node *child) { struct led_pwm_data *led_data = &priv->leds[priv->num_leds]; + struct pwm_args pargs; int ret; led_data->active_low = led->active_low; @@ -117,7 +118,15 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv, else led_data->cdev.brightness_set_blocking = led_pwm_set_blocking; - led_data->period = pwm_get_period(led_data->pwm); + /* + * FIXME: pwm_apply_args() should be removed when switching to the + * atomic PWM API. 
+ */ + pwm_apply_args(led_data->pwm); + + pwm_get_args(led_data->pwm, &pargs); + + led_data->period = pargs.period; if (!led_data->period && (led->pwm_period_ns > 0)) led_data->period = led->pwm_period_ns; diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index 8eeab72b93e2..ca4abe1ccd8d 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -64,7 +64,6 @@ #include "btree.h" #include <linux/blkdev.h> -#include <linux/freezer.h> #include <linux/kthread.h> #include <linux/random.h> #include <trace/events/bcache.h> @@ -288,7 +287,6 @@ do { \ if (kthread_should_stop()) \ return 0; \ \ - try_to_freeze(); \ schedule(); \ mutex_lock(&(ca)->set->bucket_lock); \ } \ diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 22b9e34ceb75..eab505ee0027 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -27,7 +27,6 @@ #include <linux/slab.h> #include <linux/bitops.h> -#include <linux/freezer.h> #include <linux/hash.h> #include <linux/kthread.h> #include <linux/prefetch.h> @@ -1787,7 +1786,6 @@ again: mutex_unlock(&c->bucket_lock); - try_to_freeze(); schedule(); } diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index b9346cd9cda1..60123677b382 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -12,7 +12,6 @@ #include "writeback.h" #include <linux/delay.h> -#include <linux/freezer.h> #include <linux/kthread.h> #include <trace/events/bcache.h> @@ -228,7 +227,6 @@ static void read_dirty(struct cached_dev *dc) */ while (!kthread_should_stop()) { - try_to_freeze(); w = bch_keybuf_next(&dc->writeback_keys); if (!w) @@ -433,7 +431,6 @@ static int bch_writeback_thread(void *arg) if (kthread_should_stop()) return 0; - try_to_freeze(); schedule(); continue; } diff --git a/drivers/media/i2c/adp1653.c b/drivers/media/i2c/adp1653.c index 9e1731c565e7..e191e295c951 100644 --- a/drivers/media/i2c/adp1653.c +++ b/drivers/media/i2c/adp1653.c @@ -95,7 +95,7 @@ static int adp1653_get_fault(struct adp1653_flash *flash) int rval; fault = i2c_smbus_read_byte_data(client, ADP1653_REG_FAULT); - if (IS_ERR_VALUE(fault)) + if (fault < 0) return fault; flash->fault |= fault; @@ -105,13 +105,13 @@ static int adp1653_get_fault(struct adp1653_flash *flash) /* Clear faults. 
*/ rval = i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL, 0); - if (IS_ERR_VALUE(rval)) + if (rval < 0) return rval; flash->led_mode->val = V4L2_FLASH_LED_MODE_NONE; rval = adp1653_update_hw(flash); - if (IS_ERR_VALUE(rval)) + if (rval) return rval; return flash->fault; @@ -158,7 +158,7 @@ static int adp1653_get_ctrl(struct v4l2_ctrl *ctrl) int rval; rval = adp1653_get_fault(flash); - if (IS_ERR_VALUE(rval)) + if (rval) return rval; ctrl->cur.val = 0; @@ -184,7 +184,7 @@ static int adp1653_set_ctrl(struct v4l2_ctrl *ctrl) int rval; rval = adp1653_get_fault(flash); - if (IS_ERR_VALUE(rval)) + if (rval) return rval; if ((rval & (ADP1653_REG_FAULT_FLT_SCP | ADP1653_REG_FAULT_FLT_OT | diff --git a/drivers/media/platform/s5p-tv/mixer_drv.c b/drivers/media/platform/s5p-tv/mixer_drv.c index 5ef67774971d..8a5d19469ddc 100644 --- a/drivers/media/platform/s5p-tv/mixer_drv.c +++ b/drivers/media/platform/s5p-tv/mixer_drv.c @@ -146,7 +146,7 @@ int mxr_power_get(struct mxr_device *mdev) /* returning 1 means that power is already enabled, * so zero success be returned */ - if (IS_ERR_VALUE(ret)) + if (ret < 0) return ret; return 0; } diff --git a/drivers/media/usb/dvb-usb-v2/af9015.c b/drivers/media/usb/dvb-usb-v2/af9015.c index 95a7388e89d4..09e0f58f6bb7 100644 --- a/drivers/media/usb/dvb-usb-v2/af9015.c +++ b/drivers/media/usb/dvb-usb-v2/af9015.c @@ -398,6 +398,8 @@ error: } #define AF9015_EEPROM_SIZE 256 +/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */ +#define GOLDEN_RATIO_PRIME_32 0x9e370001UL /* hash (and dump) eeprom */ static int af9015_eeprom_hash(struct dvb_usb_device *d) diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig index c61a284133e0..81ddb17575a9 100644 --- a/drivers/memory/Kconfig +++ b/drivers/memory/Kconfig @@ -51,6 +51,7 @@ config TI_EMIF config OMAP_GPMC bool + select GPIOLIB help This driver is for the General Purpose Memory Controller (GPMC) present on Texas Instruments SoCs (e.g. OMAP2+). 
GPMC allows diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c index 2a691da8c1c7..904b4af5f142 100644 --- a/drivers/memory/fsl_ifc.c +++ b/drivers/memory/fsl_ifc.c @@ -59,11 +59,11 @@ int fsl_ifc_find(phys_addr_t addr_base) { int i = 0; - if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs) + if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->gregs) return -ENODEV; for (i = 0; i < fsl_ifc_ctrl_dev->banks; i++) { - u32 cspr = ifc_in32(&fsl_ifc_ctrl_dev->regs->cspr_cs[i].cspr); + u32 cspr = ifc_in32(&fsl_ifc_ctrl_dev->gregs->cspr_cs[i].cspr); if (cspr & CSPR_V && (cspr & CSPR_BA) == convert_ifc_address(addr_base)) return i; @@ -75,7 +75,7 @@ EXPORT_SYMBOL(fsl_ifc_find); static int fsl_ifc_ctrl_init(struct fsl_ifc_ctrl *ctrl) { - struct fsl_ifc_regs __iomem *ifc = ctrl->regs; + struct fsl_ifc_global __iomem *ifc = ctrl->gregs; /* * Clear all the common status and event registers @@ -104,7 +104,7 @@ static int fsl_ifc_ctrl_remove(struct platform_device *dev) irq_dispose_mapping(ctrl->nand_irq); irq_dispose_mapping(ctrl->irq); - iounmap(ctrl->regs); + iounmap(ctrl->gregs); dev_set_drvdata(&dev->dev, NULL); kfree(ctrl); @@ -122,7 +122,7 @@ static DEFINE_SPINLOCK(nand_irq_lock); static u32 check_nand_stat(struct fsl_ifc_ctrl *ctrl) { - struct fsl_ifc_regs __iomem *ifc = ctrl->regs; + struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; unsigned long flags; u32 stat; @@ -157,7 +157,7 @@ static irqreturn_t fsl_ifc_nand_irq(int irqno, void *data) static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data) { struct fsl_ifc_ctrl *ctrl = data; - struct fsl_ifc_regs __iomem *ifc = ctrl->regs; + struct fsl_ifc_global __iomem *ifc = ctrl->gregs; u32 err_axiid, err_srcid, status, cs_err, err_addr; irqreturn_t ret = IRQ_NONE; @@ -215,6 +215,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev) { int ret = 0; int version, banks; + void __iomem *addr; dev_info(&dev->dev, "Freescale Integrated Flash Controller\n"); @@ -225,22 +226,13 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev) dev_set_drvdata(&dev->dev, fsl_ifc_ctrl_dev); /* IOMAP the entire IFC region */ - fsl_ifc_ctrl_dev->regs = of_iomap(dev->dev.of_node, 0); - if (!fsl_ifc_ctrl_dev->regs) { + fsl_ifc_ctrl_dev->gregs = of_iomap(dev->dev.of_node, 0); + if (!fsl_ifc_ctrl_dev->gregs) { dev_err(&dev->dev, "failed to get memory region\n"); ret = -ENODEV; goto err; } - version = ifc_in32(&fsl_ifc_ctrl_dev->regs->ifc_rev) & - FSL_IFC_VERSION_MASK; - banks = (version == FSL_IFC_VERSION_1_0_0) ? 4 : 8; - dev_info(&dev->dev, "IFC version %d.%d, %d banks\n", - version >> 24, (version >> 16) & 0xf, banks); - - fsl_ifc_ctrl_dev->version = version; - fsl_ifc_ctrl_dev->banks = banks; - if (of_property_read_bool(dev->dev.of_node, "little-endian")) { fsl_ifc_ctrl_dev->little_endian = true; dev_dbg(&dev->dev, "IFC REGISTERS are LITTLE endian\n"); @@ -249,8 +241,9 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev) dev_dbg(&dev->dev, "IFC REGISTERS are BIG endian\n"); } - version = ioread32be(&fsl_ifc_ctrl_dev->regs->ifc_rev) & + version = ifc_in32(&fsl_ifc_ctrl_dev->gregs->ifc_rev) & FSL_IFC_VERSION_MASK; + banks = (version == FSL_IFC_VERSION_1_0_0) ? 
4 : 8; dev_info(&dev->dev, "IFC version %d.%d, %d banks\n", version >> 24, (version >> 16) & 0xf, banks); @@ -258,6 +251,13 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev) fsl_ifc_ctrl_dev->version = version; fsl_ifc_ctrl_dev->banks = banks; + addr = fsl_ifc_ctrl_dev->gregs; + if (version >= FSL_IFC_VERSION_2_0_0) + addr += PGOFFSET_64K; + else + addr += PGOFFSET_4K; + fsl_ifc_ctrl_dev->rregs = addr; + /* get the Controller level irq */ fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0); if (fsl_ifc_ctrl_dev->irq == 0) { diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c index 21825ddce4a3..af4884ba6b7c 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c @@ -21,15 +21,15 @@ #include <linux/spinlock.h> #include <linux/io.h> #include <linux/module.h> +#include <linux/gpio/driver.h> #include <linux/interrupt.h> +#include <linux/irqdomain.h> #include <linux/platform_device.h> #include <linux/of.h> #include <linux/of_address.h> -#include <linux/of_mtd.h> #include <linux/of_device.h> #include <linux/of_platform.h> #include <linux/omap-gpmc.h> -#include <linux/mtd/nand.h> #include <linux/pm_runtime.h> #include <linux/platform_data/mtd-nand-omap2.h> @@ -81,6 +81,8 @@ #define GPMC_CONFIG_LIMITEDADDRESS BIT(1) +#define GPMC_STATUS_EMPTYWRITEBUFFERSTATUS BIT(0) + #define GPMC_CONFIG2_CSEXTRADELAY BIT(7) #define GPMC_CONFIG3_ADVEXTRADELAY BIT(7) #define GPMC_CONFIG4_OEEXTRADELAY BIT(7) @@ -92,6 +94,14 @@ #define GPMC_CS_SIZE 0x30 #define GPMC_BCH_SIZE 0x10 +/* + * The first 1MB of GPMC address space is typically mapped to + * the internal ROM. Never allocate the first page, to + * facilitate bug detection; even if we didn't boot from ROM. + * As GPMC minimum partition size is 16MB we can only start from + * there. 
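The comment above packs two constraints into one constant: the boot ROM shadows the low addresses, and GPMC partitions are 16 MiB-granular, so the first usable chip-select base is 0x1000000. A minimal sketch of the implied validity rule, reusing the GPMC_MEM_START and GPMC_CHUNK_SHIFT constants defined below (the helper itself is hypothetical):

#include <linux/types.h>

/* Hypothetical check: a chip-select base must sit at or above
 * GPMC_MEM_START and on a 16 MiB (1 << GPMC_CHUNK_SHIFT) boundary. */
static bool gpmc_cs_base_ok(resource_size_t base)
{
        return base >= GPMC_MEM_START &&
               !(base & ((1 << GPMC_CHUNK_SHIFT) - 1));
}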
+ */ +#define GPMC_MEM_START 0x1000000 #define GPMC_MEM_END 0x3FFFFFFF #define GPMC_CHUNK_SHIFT 24 /* 16 MB */ @@ -125,7 +135,6 @@ #define GPMC_CONFIG_RDY_BSY 0x00000001 #define GPMC_CONFIG_DEV_SIZE 0x00000002 #define GPMC_CONFIG_DEV_TYPE 0x00000003 -#define GPMC_SET_IRQ_STATUS 0x00000004 #define GPMC_CONFIG1_WRAPBURST_SUPP (1 << 31) #define GPMC_CONFIG1_READMULTIPLE_SUPP (1 << 30) @@ -174,16 +183,12 @@ #define GPMC_CONFIG_WRITEPROTECT 0x00000010 #define WR_RD_PIN_MONITORING 0x00600000 -#define GPMC_ENABLE_IRQ 0x0000000d - /* ECC commands */ #define GPMC_ECC_READ 0 /* Reset Hardware ECC for read */ #define GPMC_ECC_WRITE 1 /* Reset Hardware ECC for write */ #define GPMC_ECC_READSYN 2 /* Reset before syndrom is read back */ -/* XXX: Only NAND irq has been considered,currently these are the only ones used - */ -#define GPMC_NR_IRQ 2 +#define GPMC_NR_NAND_IRQS 2 /* number of NAND specific IRQs */ enum gpmc_clk_domain { GPMC_CD_FCLK, @@ -199,11 +204,6 @@ struct gpmc_cs_data { struct resource mem; }; -struct gpmc_client_irq { - unsigned irq; - u32 bitmask; -}; - /* Structure to save gpmc cs context */ struct gpmc_cs_config { u32 config1; @@ -231,9 +231,15 @@ struct omap3_gpmc_regs { struct gpmc_cs_config cs_context[GPMC_CS_NUM]; }; -static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ]; -static struct irq_chip gpmc_irq_chip; -static int gpmc_irq_start; +struct gpmc_device { + struct device *dev; + int irq; + struct irq_chip irq_chip; + struct gpio_chip gpio_chip; + int nirqs; +}; + +static struct irq_domain *gpmc_irq_domain; static struct resource gpmc_mem_root; static struct gpmc_cs_data gpmc_cs[GPMC_CS_NUM]; @@ -241,8 +247,6 @@ static DEFINE_SPINLOCK(gpmc_mem_lock); /* Define chip-selects as reserved by default until probe completes */ static unsigned int gpmc_cs_num = GPMC_CS_NUM; static unsigned int gpmc_nr_waitpins; -static struct device *gpmc_dev; -static int gpmc_irq; static resource_size_t phys_base, mem_size; static unsigned gpmc_capability; static void __iomem *gpmc_base; @@ -1054,14 +1058,6 @@ int gpmc_configure(int cmd, int wval) u32 regval; switch (cmd) { - case GPMC_ENABLE_IRQ: - gpmc_write_reg(GPMC_IRQENABLE, wval); - break; - - case GPMC_SET_IRQ_STATUS: - gpmc_write_reg(GPMC_IRQSTATUS, wval); - break; - case GPMC_CONFIG_WP: regval = gpmc_read_reg(GPMC_CONFIG); if (wval) @@ -1084,7 +1080,7 @@ void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs) { int i; - reg->gpmc_status = gpmc_base + GPMC_STATUS; + reg->gpmc_status = NULL; /* deprecated */ reg->gpmc_nand_command = gpmc_base + GPMC_CS0_OFFSET + GPMC_CS_NAND_COMMAND + GPMC_CS_SIZE * cs; reg->gpmc_nand_address = gpmc_base + GPMC_CS0_OFFSET + @@ -1118,87 +1114,201 @@ void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs) } } -int gpmc_get_client_irq(unsigned irq_config) +static bool gpmc_nand_writebuffer_empty(void) { - int i; + if (gpmc_read_reg(GPMC_STATUS) & GPMC_STATUS_EMPTYWRITEBUFFERSTATUS) + return true; - if (hweight32(irq_config) > 1) + return false; +} + +static struct gpmc_nand_ops nand_ops = { + .nand_writebuffer_empty = gpmc_nand_writebuffer_empty, +}; + +/** + * gpmc_omap_get_nand_ops - Get the GPMC NAND interface + * @regs: the GPMC NAND register map exclusive for NAND use. + * @cs: GPMC chip select number on which the NAND sits. The + * register map returned will be specific to this chip select. + * + * Returns NULL on error e.g. invalid cs. 
+ */ +struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *reg, int cs) +{ + if (cs >= gpmc_cs_num) + return NULL; + + gpmc_update_nand_reg(reg, cs); + + return &nand_ops; +} +EXPORT_SYMBOL_GPL(gpmc_omap_get_nand_ops); + +int gpmc_get_client_irq(unsigned irq_config) +{ + if (!gpmc_irq_domain) { + pr_warn("%s called before GPMC IRQ domain available\n", + __func__); return 0; + } - for (i = 0; i < GPMC_NR_IRQ; i++) - if (gpmc_client_irq[i].bitmask & irq_config) - return gpmc_client_irq[i].irq; + /* we restrict this to NAND IRQs only */ + if (irq_config >= GPMC_NR_NAND_IRQS) + return 0; - return 0; + return irq_create_mapping(gpmc_irq_domain, irq_config); } -static int gpmc_irq_endis(unsigned irq, bool endis) +static int gpmc_irq_endis(unsigned long hwirq, bool endis) { - int i; u32 regval; - for (i = 0; i < GPMC_NR_IRQ; i++) - if (irq == gpmc_client_irq[i].irq) { - regval = gpmc_read_reg(GPMC_IRQENABLE); - if (endis) - regval |= gpmc_client_irq[i].bitmask; - else - regval &= ~gpmc_client_irq[i].bitmask; - gpmc_write_reg(GPMC_IRQENABLE, regval); - break; - } + /* bits GPMC_NR_NAND_IRQS to 8 are reserved */ + if (hwirq >= GPMC_NR_NAND_IRQS) + hwirq += 8 - GPMC_NR_NAND_IRQS; + + regval = gpmc_read_reg(GPMC_IRQENABLE); + if (endis) + regval |= BIT(hwirq); + else + regval &= ~BIT(hwirq); + gpmc_write_reg(GPMC_IRQENABLE, regval); return 0; } static void gpmc_irq_disable(struct irq_data *p) { - gpmc_irq_endis(p->irq, false); + gpmc_irq_endis(p->hwirq, false); } static void gpmc_irq_enable(struct irq_data *p) { - gpmc_irq_endis(p->irq, true); + gpmc_irq_endis(p->hwirq, true); } -static void gpmc_irq_noop(struct irq_data *data) { } +static void gpmc_irq_mask(struct irq_data *d) +{ + gpmc_irq_endis(d->hwirq, false); +} -static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; } +static void gpmc_irq_unmask(struct irq_data *d) +{ + gpmc_irq_endis(d->hwirq, true); +} -static int gpmc_setup_irq(void) +static void gpmc_irq_edge_config(unsigned long hwirq, bool rising_edge) { - int i; u32 regval; - if (!gpmc_irq) + /* NAND IRQs polarity is not configurable */ + if (hwirq < GPMC_NR_NAND_IRQS) + return; + + /* WAITPIN starts at BIT 8 */ + hwirq += 8 - GPMC_NR_NAND_IRQS; + + regval = gpmc_read_reg(GPMC_CONFIG); + if (rising_edge) + regval &= ~BIT(hwirq); + else + regval |= BIT(hwirq); + + gpmc_write_reg(GPMC_CONFIG, regval); +} + +static void gpmc_irq_ack(struct irq_data *d) +{ + unsigned int hwirq = d->hwirq; + + /* skip reserved bits */ + if (hwirq >= GPMC_NR_NAND_IRQS) + hwirq += 8 - GPMC_NR_NAND_IRQS; + + /* Setting bit to 1 clears (or Acks) the interrupt */ + gpmc_write_reg(GPMC_IRQSTATUS, BIT(hwirq)); +} + +static int gpmc_irq_set_type(struct irq_data *d, unsigned int trigger) +{ + /* can't set type for NAND IRQs */ + if (d->hwirq < GPMC_NR_NAND_IRQS) return -EINVAL; - gpmc_irq_start = irq_alloc_descs(-1, 0, GPMC_NR_IRQ, 0); - if (gpmc_irq_start < 0) { - pr_err("irq_alloc_descs failed\n"); - return gpmc_irq_start; + /* We can support either rising or falling edge at a time */ + if (trigger == IRQ_TYPE_EDGE_FALLING) + gpmc_irq_edge_config(d->hwirq, false); + else if (trigger == IRQ_TYPE_EDGE_RISING) + gpmc_irq_edge_config(d->hwirq, true); + else + return -EINVAL; + + return 0; +} + +static int gpmc_irq_map(struct irq_domain *d, unsigned int virq, + irq_hw_number_t hw) +{ + struct gpmc_device *gpmc = d->host_data; + + irq_set_chip_data(virq, gpmc); + if (hw < GPMC_NR_NAND_IRQS) { + irq_modify_status(virq, IRQ_NOREQUEST, IRQ_NOAUTOEN); + irq_set_chip_and_handler(virq, 
&gpmc->irq_chip, + handle_simple_irq); + } else { + irq_set_chip_and_handler(virq, &gpmc->irq_chip, + handle_edge_irq); } - gpmc_irq_chip.name = "gpmc"; - gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret; - gpmc_irq_chip.irq_enable = gpmc_irq_enable; - gpmc_irq_chip.irq_disable = gpmc_irq_disable; - gpmc_irq_chip.irq_shutdown = gpmc_irq_noop; - gpmc_irq_chip.irq_ack = gpmc_irq_noop; - gpmc_irq_chip.irq_mask = gpmc_irq_noop; - gpmc_irq_chip.irq_unmask = gpmc_irq_noop; - - gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE; - gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT; - - for (i = 0; i < GPMC_NR_IRQ; i++) { - gpmc_client_irq[i].irq = gpmc_irq_start + i; - irq_set_chip_and_handler(gpmc_client_irq[i].irq, - &gpmc_irq_chip, handle_simple_irq); - irq_modify_status(gpmc_client_irq[i].irq, IRQ_NOREQUEST, - IRQ_NOAUTOEN); + return 0; +} + +static const struct irq_domain_ops gpmc_irq_domain_ops = { + .map = gpmc_irq_map, + .xlate = irq_domain_xlate_twocell, +}; + +static irqreturn_t gpmc_handle_irq(int irq, void *data) +{ + int hwirq, virq; + u32 regval, regvalx; + struct gpmc_device *gpmc = data; + + regval = gpmc_read_reg(GPMC_IRQSTATUS); + regvalx = regval; + + if (!regval) + return IRQ_NONE; + + for (hwirq = 0; hwirq < gpmc->nirqs; hwirq++) { + /* skip reserved status bits */ + if (hwirq == GPMC_NR_NAND_IRQS) + regvalx >>= 8 - GPMC_NR_NAND_IRQS; + + if (regvalx & BIT(hwirq)) { + virq = irq_find_mapping(gpmc_irq_domain, hwirq); + if (!virq) { + dev_warn(gpmc->dev, + "spurious irq detected hwirq %d, virq %d\n", + hwirq, virq); + } + + generic_handle_irq(virq); + } } + gpmc_write_reg(GPMC_IRQSTATUS, regval); + + return IRQ_HANDLED; +} + +static int gpmc_setup_irq(struct gpmc_device *gpmc) +{ + u32 regval; + int rc; + /* Disable interrupts */ gpmc_write_reg(GPMC_IRQENABLE, 0); @@ -1206,22 +1316,45 @@ static int gpmc_setup_irq(void) regval = gpmc_read_reg(GPMC_IRQSTATUS); gpmc_write_reg(GPMC_IRQSTATUS, regval); - return request_irq(gpmc_irq, gpmc_handle_irq, 0, "gpmc", NULL); + gpmc->irq_chip.name = "gpmc"; + gpmc->irq_chip.irq_enable = gpmc_irq_enable; + gpmc->irq_chip.irq_disable = gpmc_irq_disable; + gpmc->irq_chip.irq_ack = gpmc_irq_ack; + gpmc->irq_chip.irq_mask = gpmc_irq_mask; + gpmc->irq_chip.irq_unmask = gpmc_irq_unmask; + gpmc->irq_chip.irq_set_type = gpmc_irq_set_type; + + gpmc_irq_domain = irq_domain_add_linear(gpmc->dev->of_node, + gpmc->nirqs, + &gpmc_irq_domain_ops, + gpmc); + if (!gpmc_irq_domain) { + dev_err(gpmc->dev, "IRQ domain add failed\n"); + return -ENODEV; + } + + rc = request_irq(gpmc->irq, gpmc_handle_irq, 0, "gpmc", gpmc); + if (rc) { + dev_err(gpmc->dev, "failed to request irq %d: %d\n", + gpmc->irq, rc); + irq_domain_remove(gpmc_irq_domain); + gpmc_irq_domain = NULL; + } + + return rc; } -static int gpmc_free_irq(void) +static int gpmc_free_irq(struct gpmc_device *gpmc) { - int i; + int hwirq; - if (gpmc_irq) - free_irq(gpmc_irq, NULL); + free_irq(gpmc->irq, gpmc); - for (i = 0; i < GPMC_NR_IRQ; i++) { - irq_set_handler(gpmc_client_irq[i].irq, NULL); - irq_set_chip(gpmc_client_irq[i].irq, &no_irq_chip); - } + for (hwirq = 0; hwirq < gpmc->nirqs; hwirq++) + irq_dispose_mapping(irq_find_mapping(gpmc_irq_domain, hwirq)); - irq_free_descs(gpmc_irq_start, GPMC_NR_IRQ); + irq_domain_remove(gpmc_irq_domain); + gpmc_irq_domain = NULL; return 0; } @@ -1242,12 +1375,7 @@ static void gpmc_mem_init(void) { int cs; - /* - * The first 1MB of GPMC address space is typically mapped to - * the internal ROM. 
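The bit arithmetic repeated in the IRQ hunks above follows one mapping: NAND events sit in bits 0..GPMC_NR_NAND_IRQS-1 of IRQSTATUS/IRQENABLE, the bits up to 7 are reserved, and WAITPIN edge events begin at bit 8. A small sketch of that mapping (the helper name is illustrative):

/* Translate a GPMC hwirq number into its register bit. */
static u32 gpmc_hwirq_to_regbit(unsigned long hwirq)
{
        if (hwirq >= GPMC_NR_NAND_IRQS)
                hwirq += 8 - GPMC_NR_NAND_IRQS; /* skip reserved bits */

        return BIT(hwirq);
}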
Never allocate the first page, to - * facilitate bug detection; even if we didn't boot from ROM. - */ - gpmc_mem_root.start = SZ_1M; + gpmc_mem_root.start = GPMC_MEM_START; gpmc_mem_root.end = GPMC_MEM_END; /* Reserve all regions that has been set up by bootloader */ @@ -1796,105 +1924,6 @@ static void __maybe_unused gpmc_read_timings_dt(struct device_node *np, of_property_read_bool(np, "gpmc,time-para-granularity"); } -#if IS_ENABLED(CONFIG_MTD_NAND) - -static const char * const nand_xfer_types[] = { - [NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled", - [NAND_OMAP_POLLED] = "polled", - [NAND_OMAP_PREFETCH_DMA] = "prefetch-dma", - [NAND_OMAP_PREFETCH_IRQ] = "prefetch-irq", -}; - -static int gpmc_probe_nand_child(struct platform_device *pdev, - struct device_node *child) -{ - u32 val; - const char *s; - struct gpmc_timings gpmc_t; - struct omap_nand_platform_data *gpmc_nand_data; - - if (of_property_read_u32(child, "reg", &val) < 0) { - dev_err(&pdev->dev, "%s has no 'reg' property\n", - child->full_name); - return -ENODEV; - } - - gpmc_nand_data = devm_kzalloc(&pdev->dev, sizeof(*gpmc_nand_data), - GFP_KERNEL); - if (!gpmc_nand_data) - return -ENOMEM; - - gpmc_nand_data->cs = val; - gpmc_nand_data->of_node = child; - - /* Detect availability of ELM module */ - gpmc_nand_data->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0); - if (gpmc_nand_data->elm_of_node == NULL) - gpmc_nand_data->elm_of_node = - of_parse_phandle(child, "elm_id", 0); - - /* select ecc-scheme for NAND */ - if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) { - pr_err("%s: ti,nand-ecc-opt not found\n", __func__); - return -ENODEV; - } - - if (!strcmp(s, "sw")) - gpmc_nand_data->ecc_opt = OMAP_ECC_HAM1_CODE_SW; - else if (!strcmp(s, "ham1") || - !strcmp(s, "hw") || !strcmp(s, "hw-romcode")) - gpmc_nand_data->ecc_opt = - OMAP_ECC_HAM1_CODE_HW; - else if (!strcmp(s, "bch4")) - if (gpmc_nand_data->elm_of_node) - gpmc_nand_data->ecc_opt = - OMAP_ECC_BCH4_CODE_HW; - else - gpmc_nand_data->ecc_opt = - OMAP_ECC_BCH4_CODE_HW_DETECTION_SW; - else if (!strcmp(s, "bch8")) - if (gpmc_nand_data->elm_of_node) - gpmc_nand_data->ecc_opt = - OMAP_ECC_BCH8_CODE_HW; - else - gpmc_nand_data->ecc_opt = - OMAP_ECC_BCH8_CODE_HW_DETECTION_SW; - else if (!strcmp(s, "bch16")) - if (gpmc_nand_data->elm_of_node) - gpmc_nand_data->ecc_opt = - OMAP_ECC_BCH16_CODE_HW; - else - pr_err("%s: BCH16 requires ELM support\n", __func__); - else - pr_err("%s: ti,nand-ecc-opt invalid value\n", __func__); - - /* select data transfer mode for NAND controller */ - if (!of_property_read_string(child, "ti,nand-xfer-type", &s)) - for (val = 0; val < ARRAY_SIZE(nand_xfer_types); val++) - if (!strcasecmp(s, nand_xfer_types[val])) { - gpmc_nand_data->xfer_type = val; - break; - } - - gpmc_nand_data->flash_bbt = of_get_nand_on_flash_bbt(child); - - val = of_get_nand_bus_width(child); - if (val == 16) - gpmc_nand_data->devsize = NAND_BUSWIDTH_16; - - gpmc_read_timings_dt(child, &gpmc_t); - gpmc_nand_init(gpmc_nand_data, &gpmc_t); - - return 0; -} -#else -static int gpmc_probe_nand_child(struct platform_device *pdev, - struct device_node *child) -{ - return 0; -} -#endif - #if IS_ENABLED(CONFIG_MTD_ONENAND) static int gpmc_probe_onenand_child(struct platform_device *pdev, struct device_node *child) @@ -1950,6 +1979,8 @@ static int gpmc_probe_generic_child(struct platform_device *pdev, const char *name; int ret, cs; u32 val; + struct gpio_desc *waitpin_desc = NULL; + struct gpmc_device *gpmc = platform_get_drvdata(pdev); if (of_property_read_u32(child, "reg", 
&cs) < 0) { dev_err(&pdev->dev, "%s has no 'reg' property\n", @@ -2010,23 +2041,80 @@ static int gpmc_probe_generic_child(struct platform_device *pdev, if (ret < 0) { dev_err(&pdev->dev, "cannot remap GPMC CS %d to %pa\n", cs, &res.start); + if (res.start < GPMC_MEM_START) { + dev_info(&pdev->dev, + "GPMC CS %d start cannot be lesser than 0x%x\n", + cs, GPMC_MEM_START); + } else if (res.end > GPMC_MEM_END) { + dev_info(&pdev->dev, + "GPMC CS %d end cannot be greater than 0x%x\n", + cs, GPMC_MEM_END); + } goto err; } - ret = of_property_read_u32(child, "bank-width", &gpmc_s.device_width); - if (ret < 0) - goto err; + if (of_node_cmp(child->name, "nand") == 0) { + /* Warn about older DT blobs with no compatible property */ + if (!of_property_read_bool(child, "compatible")) { + dev_warn(&pdev->dev, + "Incompatible NAND node: missing compatible"); + ret = -EINVAL; + goto err; + } + } + + if (of_device_is_compatible(child, "ti,omap2-nand")) { + /* NAND specific setup */ + val = 8; + of_property_read_u32(child, "nand-bus-width", &val); + switch (val) { + case 8: + gpmc_s.device_width = GPMC_DEVWIDTH_8BIT; + break; + case 16: + gpmc_s.device_width = GPMC_DEVWIDTH_16BIT; + break; + default: + dev_err(&pdev->dev, "%s: invalid 'nand-bus-width'\n", + child->name); + ret = -EINVAL; + goto err; + } + + /* disable write protect */ + gpmc_configure(GPMC_CONFIG_WP, 0); + gpmc_s.device_nand = true; + } else { + ret = of_property_read_u32(child, "bank-width", + &gpmc_s.device_width); + if (ret < 0) + goto err; + } + + /* Reserve wait pin if it is required and valid */ + if (gpmc_s.wait_on_read || gpmc_s.wait_on_write) { + unsigned int wait_pin = gpmc_s.wait_pin; + + waitpin_desc = gpiochip_request_own_desc(&gpmc->gpio_chip, + wait_pin, "WAITPIN"); + if (IS_ERR(waitpin_desc)) { + dev_err(&pdev->dev, "invalid wait-pin: %d\n", wait_pin); + ret = PTR_ERR(waitpin_desc); + goto err; + } + } gpmc_cs_show_timings(cs, "before gpmc_cs_program_settings"); + ret = gpmc_cs_program_settings(cs, &gpmc_s); if (ret < 0) - goto err; + goto err_cs; ret = gpmc_cs_set_timings(cs, &gpmc_t, &gpmc_s); if (ret) { dev_err(&pdev->dev, "failed to set gpmc timings for: %s\n", child->name); - goto err; + goto err_cs; } /* Clear limited address i.e. 
enable A26-A11 */ @@ -2057,16 +2145,81 @@ err_child_fail: dev_err(&pdev->dev, "failed to create gpmc child %s\n", child->name); ret = -ENODEV; +err_cs: + if (waitpin_desc) + gpiochip_free_own_desc(waitpin_desc); + err: gpmc_cs_free(cs); return ret; } +static int gpmc_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) +{ + return 1; /* we're input only */ +} + +static int gpmc_gpio_direction_input(struct gpio_chip *chip, + unsigned int offset) +{ + return 0; /* we're input only */ +} + +static int gpmc_gpio_direction_output(struct gpio_chip *chip, + unsigned int offset, int value) +{ + return -EINVAL; /* we're input only */ +} + +static void gpmc_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) +{ +} + +static int gpmc_gpio_get(struct gpio_chip *chip, unsigned int offset) +{ + u32 reg; + + offset += 8; + + reg = gpmc_read_reg(GPMC_STATUS) & BIT(offset); + + return !!reg; +} + +static int gpmc_gpio_init(struct gpmc_device *gpmc) +{ + int ret; + + gpmc->gpio_chip.parent = gpmc->dev; + gpmc->gpio_chip.owner = THIS_MODULE; + gpmc->gpio_chip.label = DEVICE_NAME; + gpmc->gpio_chip.ngpio = gpmc_nr_waitpins; + gpmc->gpio_chip.get_direction = gpmc_gpio_get_direction; + gpmc->gpio_chip.direction_input = gpmc_gpio_direction_input; + gpmc->gpio_chip.direction_output = gpmc_gpio_direction_output; + gpmc->gpio_chip.set = gpmc_gpio_set; + gpmc->gpio_chip.get = gpmc_gpio_get; + gpmc->gpio_chip.base = -1; + + ret = gpiochip_add(&gpmc->gpio_chip); + if (ret < 0) { + dev_err(gpmc->dev, "could not register gpio chip: %d\n", ret); + return ret; + } + + return 0; +} + +static void gpmc_gpio_exit(struct gpmc_device *gpmc) +{ + gpiochip_remove(&gpmc->gpio_chip); +} + static int gpmc_probe_dt(struct platform_device *pdev) { int ret; - struct device_node *child; const struct of_device_id *of_id = of_match_device(gpmc_dt_ids, &pdev->dev); @@ -2094,17 +2247,26 @@ static int gpmc_probe_dt(struct platform_device *pdev) return ret; } + return 0; +} + +static int gpmc_probe_dt_children(struct platform_device *pdev) +{ + int ret; + struct device_node *child; + for_each_available_child_of_node(pdev->dev.of_node, child) { if (!child->name) continue; - if (of_node_cmp(child->name, "nand") == 0) - ret = gpmc_probe_nand_child(pdev, child); - else if (of_node_cmp(child->name, "onenand") == 0) + if (of_node_cmp(child->name, "onenand") == 0) ret = gpmc_probe_onenand_child(pdev, child); else ret = gpmc_probe_generic_child(pdev, child); + + if (ret) + return ret; } return 0; @@ -2114,6 +2276,11 @@ static int gpmc_probe_dt(struct platform_device *pdev) { return 0; } + +static int gpmc_probe_dt_children(struct platform_device *pdev) +{ + return 0; +} #endif static int gpmc_probe(struct platform_device *pdev) @@ -2121,6 +2288,14 @@ static int gpmc_probe(struct platform_device *pdev) int rc; u32 l; struct resource *res; + struct gpmc_device *gpmc; + + gpmc = devm_kzalloc(&pdev->dev, sizeof(*gpmc), GFP_KERNEL); + if (!gpmc) + return -ENOMEM; + + gpmc->dev = &pdev->dev; + platform_set_drvdata(pdev, gpmc); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) @@ -2134,15 +2309,16 @@ static int gpmc_probe(struct platform_device *pdev) return PTR_ERR(gpmc_base); res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (res == NULL) - dev_warn(&pdev->dev, "Failed to get resource: irq\n"); - else - gpmc_irq = res->start; + if (!res) { + dev_err(&pdev->dev, "Failed to get resource: irq\n"); + return -ENOENT; + } + + gpmc->irq = res->start; gpmc_l3_clk = devm_clk_get(&pdev->dev, "fck"); if 
(IS_ERR(gpmc_l3_clk)) { dev_err(&pdev->dev, "Failed to get GPMC fck\n"); - gpmc_irq = 0; return PTR_ERR(gpmc_l3_clk); } @@ -2151,11 +2327,18 @@ static int gpmc_probe(struct platform_device *pdev) return -EINVAL; } + if (pdev->dev.of_node) { + rc = gpmc_probe_dt(pdev); + if (rc) + return rc; + } else { + gpmc_cs_num = GPMC_CS_NUM; + gpmc_nr_waitpins = GPMC_NR_WAITPINS; + } + pm_runtime_enable(&pdev->dev); pm_runtime_get_sync(&pdev->dev); - gpmc_dev = &pdev->dev; - l = gpmc_read_reg(GPMC_REVISION); /* @@ -2174,36 +2357,51 @@ static int gpmc_probe(struct platform_device *pdev) gpmc_capability = GPMC_HAS_WR_ACCESS | GPMC_HAS_WR_DATA_MUX_BUS; if (GPMC_REVISION_MAJOR(l) > 0x5) gpmc_capability |= GPMC_HAS_MUX_AAD; - dev_info(gpmc_dev, "GPMC revision %d.%d\n", GPMC_REVISION_MAJOR(l), + dev_info(gpmc->dev, "GPMC revision %d.%d\n", GPMC_REVISION_MAJOR(l), GPMC_REVISION_MINOR(l)); gpmc_mem_init(); - - if (gpmc_setup_irq() < 0) - dev_warn(gpmc_dev, "gpmc_setup_irq failed\n"); - - if (!pdev->dev.of_node) { - gpmc_cs_num = GPMC_CS_NUM; - gpmc_nr_waitpins = GPMC_NR_WAITPINS; + rc = gpmc_gpio_init(gpmc); + if (rc) + goto gpio_init_failed; + + gpmc->nirqs = GPMC_NR_NAND_IRQS + gpmc_nr_waitpins; + rc = gpmc_setup_irq(gpmc); + if (rc) { + dev_err(gpmc->dev, "gpmc_setup_irq failed\n"); + goto setup_irq_failed; } - rc = gpmc_probe_dt(pdev); + rc = gpmc_probe_dt_children(pdev); if (rc < 0) { - pm_runtime_put_sync(&pdev->dev); - dev_err(gpmc_dev, "failed to probe DT parameters\n"); - return rc; + dev_err(gpmc->dev, "failed to probe DT children\n"); + goto dt_children_failed; } return 0; + +dt_children_failed: + gpmc_free_irq(gpmc); +setup_irq_failed: + gpmc_gpio_exit(gpmc); +gpio_init_failed: + gpmc_mem_exit(); + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + return rc; } static int gpmc_remove(struct platform_device *pdev) { - gpmc_free_irq(); + struct gpmc_device *gpmc = platform_get_drvdata(pdev); + + gpmc_free_irq(gpmc); + gpmc_gpio_exit(gpmc); gpmc_mem_exit(); pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); - gpmc_dev = NULL; + return 0; } @@ -2249,25 +2447,6 @@ static __exit void gpmc_exit(void) postcore_initcall(gpmc_init); module_exit(gpmc_exit); -static irqreturn_t gpmc_handle_irq(int irq, void *dev) -{ - int i; - u32 regval; - - regval = gpmc_read_reg(GPMC_IRQSTATUS); - - if (!regval) - return IRQ_NONE; - - for (i = 0; i < GPMC_NR_IRQ; i++) - if (regval & gpmc_client_irq[i].bitmask) - generic_handle_irq(gpmc_client_irq[i].irq); - - gpmc_write_reg(GPMC_IRQSTATUS, regval); - - return IRQ_HANDLED; -} - static struct omap3_gpmc_regs gpmc_context; void omap3_gpmc_save_context(void) diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c index 40e51b0baa46..b46c0cfc27d9 100644 --- a/drivers/mfd/twl4030-irq.c +++ b/drivers/mfd/twl4030-irq.c @@ -696,7 +696,7 @@ int twl4030_init_irq(struct device *dev, int irq_num) nr_irqs = TWL4030_PWR_NR_IRQS + TWL4030_CORE_NR_IRQS; irq_base = irq_alloc_descs(-1, 0, nr_irqs, 0); - if (IS_ERR_VALUE(irq_base)) { + if (irq_base < 0) { dev_err(dev, "Fail to allocate IRQ descs\n"); return irq_base; } diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index ddc96206288a..e62fde3ac431 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -618,6 +618,10 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev, ioc_err = __mmc_blk_ioctl_cmd(card, md, idata); + /* Always switch back to main area after RPMB access */ + if (md->area_type & MMC_BLK_DATA_AREA_RPMB) + mmc_blk_part_switch(card, 
dev_get_drvdata(&card->dev)); + mmc_put_card(card); err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata); @@ -685,6 +689,10 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev, for (i = 0; i < num_of_cmds && !ioc_err; i++) ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]); + /* Always switch back to main area after RPMB access */ + if (md->area_type & MMC_BLK_DATA_AREA_RPMB) + mmc_blk_part_switch(card, dev_get_drvdata(&card->dev)); + mmc_put_card(card); /* copy to user if data and response */ @@ -748,16 +756,25 @@ static inline int mmc_blk_part_switch(struct mmc_card *card, if (mmc_card_mmc(card)) { u8 part_config = card->ext_csd.part_config; + if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) + mmc_retune_pause(card->host); + part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK; part_config |= md->part_type; ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG, part_config, card->ext_csd.part_time); - if (ret) + if (ret) { + if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) + mmc_retune_unpause(card->host); return ret; + } card->ext_csd.part_config = part_config; + + if (main_md->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB) + mmc_retune_unpause(card->host); } main_md->part_curr = md->part_type; @@ -2519,11 +2536,12 @@ static const struct mmc_fixup blk_fixups[] = MMC_QUIRK_BLK_NO_CMD23), /* - * Some Micron MMC cards needs longer data read timeout than - * indicated in CSD. + * Some MMC cards need longer data read timeout than indicated in CSD. */ MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc, MMC_QUIRK_LONG_READ_TIME), + MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_LONG_READ_TIME), /* * On these Samsung MoviNAND parts, performing secure erase or diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 99275e40bf2f..8b4dfd45433b 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -875,11 +875,11 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card) /* * Some cards require longer data read timeout than indicated in CSD. * Address this by setting the read timeout to a "reasonably high" - * value. For the cards tested, 300ms has proven enough. If necessary, + * value. For the cards tested, 600ms has proven enough. If necessary, * this value can be increased if other problematic cards require this. */ if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) { - data->timeout_ns = 300000000; + data->timeout_ns = 600000000; data->timeout_clks = 0; } diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index e0a3ee16c0d3..1be42fab1a30 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -68,8 +68,32 @@ void mmc_retune_enable(struct mmc_host *host) jiffies + host->retune_period * HZ); } +/* + * Pause re-tuning for a small set of operations. The pause begins after the + * next command and after first doing re-tuning. 
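mmc_retune_pause()/mmc_retune_unpause() exist so that an RPMB access sequence (paired with the mmc_blk_part_switch() hunks above) cannot be interleaved with re-tuning. A hedged usage sketch; the command sequence itself is elided:

/* Illustrative caller: bracket commands that must not be interrupted
 * by re-tuning, e.g. an RPMB read-counter/write pair. */
mmc_retune_pause(card->host);

/* ... issue the atomic command sequence here ... */

mmc_retune_unpause(card->host);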
+ */ +void mmc_retune_pause(struct mmc_host *host) +{ + if (!host->retune_paused) { + host->retune_paused = 1; + mmc_retune_needed(host); + mmc_retune_hold(host); + } +} +EXPORT_SYMBOL(mmc_retune_pause); + +void mmc_retune_unpause(struct mmc_host *host) +{ + if (host->retune_paused) { + host->retune_paused = 0; + mmc_retune_release(host); + } +} +EXPORT_SYMBOL(mmc_retune_unpause); + void mmc_retune_disable(struct mmc_host *host) { + mmc_retune_unpause(host); host->can_retune = 0; del_timer_sync(&host->retune_timer); host->retune_now = 0; diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index b81b08f81325..5d438ad3ee32 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -1276,7 +1276,7 @@ static int mmc_select_hs200(struct mmc_card *card) * switch to HS200 mode if bus width is set successfully. */ err = mmc_select_bus_width(card); - if (!IS_ERR_VALUE(err)) { + if (err >= 0) { val = EXT_CSD_TIMING_HS200 | card->drive_strength << EXT_CSD_DRV_STR_SHIFT; err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, @@ -1583,7 +1583,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, } else if (mmc_card_hs(card)) { /* Select the desired bus width optionally */ err = mmc_select_bus_width(card); - if (!IS_ERR_VALUE(err)) { + if (err >= 0) { err = mmc_select_hs_ddr(card); if (err) goto free_card; diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c index 8c20b81cafd8..358b0dc853b0 100644 --- a/drivers/mmc/host/dw_mmc-rockchip.c +++ b/drivers/mmc/host/dw_mmc-rockchip.c @@ -66,6 +66,70 @@ static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios) /* Make sure we use phases which we can enumerate with */ if (!IS_ERR(priv->sample_clk)) clk_set_phase(priv->sample_clk, priv->default_sample_phase); + + /* + * Set the drive phase offset based on speed mode to achieve hold times. + * + * NOTE: this is _not_ a value that is dynamically tuned and is also + * _not_ a value that will vary from board to board. It is a value + * that could vary between different SoC models if they had massively + * different output clock delays inside their dw_mmc IP block (delay_o), + * but since it's OK to overshoot a little we don't need to do complex + * calculations and can pick values that will just work for everyone. + * + * When picking values we'll stick with picking 0/90/180/270 since + * those can be made very accurately on all known Rockchip SoCs. + * + * Note that these values match values from the DesignWare Databook + * tables for the most part except for SDR12 and "ID mode". For those + * two modes the databook calculations assume a clock in of 50MHz. As + * seen above, we always use a clock in rate that is exactly the + * card's input clock (times RK3288_CLKGEN_DIV, but that gets divided + * back out before the controller sees it). + * + * From measurement of a single device, it appears that delay_o is + * about .5 ns. Since we try to leave a bit of margin, it's expected + * that numbers here will be fine even with much larger delay_o + * (the 1.4 ns assumed by the DesignWare Databook would result in the + * same results, for instance). + */ + if (!IS_ERR(priv->drv_clk)) { + int phase; + + /* + * In almost all cases a 90 degree phase offset will provide + * sufficient hold times across all valid input clock rates + * assuming delay_o is not absurd for a given SoC. We'll use + * that as a default. 
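The 1.67 ns figure quoted above is just phase/360 of the clock period. A small helper showing the arithmetic (illustrative only; div64_u64() keeps the intermediate math in 64 bits on 32-bit ARM):

#include <linux/math64.h>

/* Delay contributed by a drive-phase offset, in picoseconds:
 * 90 degrees at 150 MHz -> 90 * 1e12 / (150e6 * 360) = 1666 ps. */
static u64 phase_delay_ps(u32 degrees, u32 clk_hz)
{
        return div64_u64((u64)degrees * 1000000000000ULL,
                         (u64)clk_hz * 360);
}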
+ */ + phase = 90; + + switch (ios->timing) { + case MMC_TIMING_MMC_DDR52: + /* + * Since clock in rate with MMC_DDR52 is doubled when + * bus width is 8 we need to double the phase offset + * to get the same timings. + */ + if (ios->bus_width == MMC_BUS_WIDTH_8) + phase = 180; + break; + case MMC_TIMING_UHS_SDR104: + case MMC_TIMING_MMC_HS200: + /* + * In the case of 150 MHz clock (typical max for + * Rockchip SoCs), 90 degree offset will add a delay + * of 1.67 ns. That will meet min hold time of .8 ns + * as long as clock output delay is < .87 ns. On + * SoCs measured this seems to be OK, but it doesn't + * hurt to give margin here, so we use 180. + */ + phase = 180; + break; + } + + clk_set_phase(priv->drv_clk, phase); + } } #define NUM_PHASES 360 @@ -233,10 +297,10 @@ static int dw_mci_rockchip_init(struct dw_mci *host) /* Common capabilities of RK3288 SoC */ static unsigned long dw_mci_rk3288_dwmmc_caps[4] = { - MMC_CAP_ERASE, - MMC_CAP_ERASE, - MMC_CAP_ERASE, - MMC_CAP_ERASE, + MMC_CAP_ERASE | MMC_CAP_CMD23, + MMC_CAP_ERASE | MMC_CAP_CMD23, + MMC_CAP_ERASE | MMC_CAP_CMD23, + MMC_CAP_ERASE | MMC_CAP_CMD23, }; static const struct dw_mci_drv_data rk2928_drv_data = { diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 9dd1bd358434..2cc6123b1df9 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -1431,7 +1431,7 @@ static int dw_mci_get_ro(struct mmc_host *mmc) int gpio_ro = mmc_gpio_get_ro(mmc); /* Use platform get_ro function, else try on board write protect */ - if (!IS_ERR_VALUE(gpio_ro)) + if (gpio_ro >= 0) read_only = gpio_ro; else read_only = @@ -1454,7 +1454,7 @@ static int dw_mci_get_cd(struct mmc_host *mmc) if ((mmc->caps & MMC_CAP_NEEDS_POLL) || (mmc->caps & MMC_CAP_NONREMOVABLE)) present = 1; - else if (!IS_ERR_VALUE(gpio_cd)) + else if (gpio_cd >= 0) present = gpio_cd; else present = (mci_readl(slot->host, CDETECT) & (1 << slot->id)) @@ -2595,13 +2595,13 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) /* Useful defaults if platform data is unset. 
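The max_blk_size changes just below drop 65536 to 65535 because BLKSIZ is a 16-bit register field; a hedged note on the failure mode being avoided:

/* BLKSIZ holds 16 bits: a 65536-byte block would program 0x10000,
 * whose low 16 bits are 0. U16_MAX is therefore the true ceiling. */
mmc->max_blk_size = 65535;      /* not 65536 */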
*/ if (host->use_dma == TRANS_MODE_IDMAC) { mmc->max_segs = host->ring_size; - mmc->max_blk_size = 65536; + mmc->max_blk_size = 65535; mmc->max_seg_size = 0x1000; mmc->max_req_size = mmc->max_seg_size * host->ring_size; mmc->max_blk_count = mmc->max_req_size / 512; } else if (host->use_dma == TRANS_MODE_EDMAC) { mmc->max_segs = 64; - mmc->max_blk_size = 65536; + mmc->max_blk_size = 65535; mmc->max_blk_count = 65535; mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; @@ -2609,7 +2609,7 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) } else { /* TRANS_MODE_PIO */ mmc->max_segs = 64; - mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */ + mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */ mmc->max_blk_count = 512; mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; @@ -2927,7 +2927,7 @@ static void dw_mci_enable_cd(struct dw_mci *host) if (slot->mmc->caps & MMC_CAP_NEEDS_POLL) return; - if (IS_ERR_VALUE(mmc_gpio_get_cd(slot->mmc))) + if (mmc_gpio_get_cd(slot->mmc) < 0) break; } if (i == host->num_slots) diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index b2d70ba6caa7..458ffb7637e5 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -274,7 +274,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = { .chip = &sdhci_acpi_chip_int, .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR | - MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY, + MMC_CAP_WAIT_WHILE_BUSY, .caps2 = MMC_CAP2_HC_ERASE_SZ, .flags = SDHCI_ACPI_RUNTIME_PM, .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, @@ -289,7 +289,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = { SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON, .caps = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD | - MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY, + MMC_CAP_WAIT_WHILE_BUSY, .flags = SDHCI_ACPI_RUNTIME_PM, .pm_caps = MMC_PM_KEEP_POWER, .probe_slot = sdhci_acpi_sdio_probe_slot, @@ -301,7 +301,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = { .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON | SDHCI_QUIRK2_STOP_WITH_TC, - .caps = MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY, + .caps = MMC_CAP_WAIT_WHILE_BUSY, .probe_slot = sdhci_acpi_sd_probe_slot, }; @@ -378,7 +378,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; acpi_handle handle = ACPI_HANDLE(dev); - struct acpi_device *device; + struct acpi_device *device, *child; struct sdhci_acpi_host *c; struct sdhci_host *host; struct resource *iomem; @@ -390,6 +390,11 @@ static int sdhci_acpi_probe(struct platform_device *pdev) if (acpi_bus_get_device(handle, &device)) return -ENODEV; + /* Power on the SDHCI controller and its children */ + acpi_device_fix_up_power(device); + list_for_each_entry(child, &device->children, node) + acpi_device_fix_up_power(child); + if (acpi_bus_get_status(device) || !device->status.present) return -ENODEV; diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 2d300d87cda8..9d3ae1f4bd3c 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -1011,7 +1011,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, if (ret) return ret; - if (!IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc))) + if (mmc_gpio_get_cd(host->mmc) >= 0) host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; return 0; diff --git 
a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c index 25f779e09d8e..d4cef713d246 100644 --- a/drivers/mmc/host/sdhci-of-at91.c +++ b/drivers/mmc/host/sdhci-of-at91.c @@ -289,7 +289,7 @@ static int sdhci_at91_probe(struct platform_device *pdev) * to enable polling via device tree with broken-cd property. */ if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) && - IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc))) { + mmc_gpio_get_cd(host->mmc) < 0) { host->mmc->caps |= MMC_CAP_NEEDS_POLL; host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; } diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 97d4eebd6bf5..a4dbf7421edc 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -356,7 +356,6 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) { slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR | - MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY; slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ; slot->hw_reset = sdhci_pci_int_hw_reset; @@ -372,15 +371,13 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot) { slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE | - MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY; return 0; } static int byt_sd_probe_slot(struct sdhci_pci_slot *slot) { - slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST | - MMC_CAP_WAIT_WHILE_BUSY; + slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY; slot->cd_con_id = NULL; slot->cd_idx = 0; slot->cd_override_level = true; diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index e010ea4eb6f5..0e3d7c056cb1 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -1624,7 +1624,7 @@ static int sdhci_get_cd(struct mmc_host *mmc) * Try slot gpio detect, if defined it take precedence * over build in controller functionality */ - if (!IS_ERR_VALUE(gpio_cd)) + if (gpio_cd >= 0) return !!gpio_cd; /* If polling, assume that the card is always present. 
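The sdhci conversions here follow the same series-wide rule as the irqchip hunks: mmc_gpio_get_cd() returns a plain int (0/1, or a negative errno when no CD GPIO is wired), so a sign test is the correct check where IS_ERR_VALUE(), which is only meaningful for unsigned long values in the -MAX_ERRNO range, was not. A condensed, illustrative sketch of the resulting card-detect precedence:

#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>

/* Illustrative decision order only; real drivers fall through to a
 * controller-specific present-state read at the end. */
static int example_get_cd(struct mmc_host *mmc)
{
        int gpio_cd = mmc_gpio_get_cd(mmc);

        if (gpio_cd >= 0)               /* CD GPIO takes precedence */
                return !!gpio_cd;

        if (mmc->caps & MMC_CAP_NEEDS_POLL)
                return 1;               /* polling: assume present */

        return 0;                       /* placeholder fallback */
}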
*/ @@ -3077,7 +3077,7 @@ int sdhci_add_host(struct sdhci_host *host) if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && !(mmc->caps & MMC_CAP_NONREMOVABLE) && - IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc))) + mmc_gpio_get_cd(host->mmc) < 0) mmc->caps |= MMC_CAP_NEEDS_POLL; /* If there are external regulators, get them */ diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index 7fc8b7aa83f0..2ee4c21ec55e 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -970,8 +970,8 @@ static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = { [SDXC_CLK_400K] = { .output = 180, .sample = 180 }, [SDXC_CLK_25M] = { .output = 180, .sample = 75 }, [SDXC_CLK_50M] = { .output = 150, .sample = 120 }, - [SDXC_CLK_50M_DDR] = { .output = 90, .sample = 120 }, - [SDXC_CLK_50M_DDR_8BIT] = { .output = 90, .sample = 120 }, + [SDXC_CLK_50M_DDR] = { .output = 54, .sample = 36 }, + [SDXC_CLK_50M_DDR_8BIT] = { .output = 72, .sample = 72 }, }; static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, @@ -1129,11 +1129,6 @@ static int sunxi_mmc_probe(struct platform_device *pdev) MMC_CAP_1_8V_DDR | MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ; - /* TODO MMC DDR is not working on A80 */ - if (of_device_is_compatible(pdev->dev.of_node, - "allwinner,sun9i-a80-mmc")) - mmc->caps &= ~MMC_CAP_1_8V_DDR; - ret = mmc_of_parse(mmc); if (ret) goto error_free_dma; diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig index 3b3dabce58de..bbfa1f129266 100644 --- a/drivers/mtd/chips/Kconfig +++ b/drivers/mtd/chips/Kconfig @@ -115,6 +115,7 @@ config MTD_MAP_BANK_WIDTH_16 config MTD_MAP_BANK_WIDTH_32 bool "Support 256-bit buswidth" if MTD_CFI_GEOMETRY + select MTD_COMPLEX_MAPPINGS if HAS_IOMEM default n help If you wish to support CFI devices on a physical bus which is diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c index 347bb83db864..1c65c15b31a1 100644 --- a/drivers/mtd/devices/bcm47xxsflash.c +++ b/drivers/mtd/devices/bcm47xxsflash.c @@ -2,6 +2,7 @@ #include <linux/module.h> #include <linux/slab.h> #include <linux/delay.h> +#include <linux/ioport.h> #include <linux/mtd/mtd.h> #include <linux/platform_device.h> #include <linux/bcma/bcma.h> @@ -109,8 +110,7 @@ static int bcm47xxsflash_read(struct mtd_info *mtd, loff_t from, size_t len, if ((from + len) > mtd->size) return -EINVAL; - memcpy_fromio(buf, (void __iomem *)KSEG0ADDR(b47s->window + from), - len); + memcpy_fromio(buf, b47s->window + from, len); *retlen = len; return len; @@ -275,15 +275,33 @@ static void bcm47xxsflash_bcma_cc_write(struct bcm47xxsflash *b47s, u16 offset, static int bcm47xxsflash_bcma_probe(struct platform_device *pdev) { - struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev); + struct device *dev = &pdev->dev; + struct bcma_sflash *sflash = dev_get_platdata(dev); struct bcm47xxsflash *b47s; + struct resource *res; int err; - b47s = devm_kzalloc(&pdev->dev, sizeof(*b47s), GFP_KERNEL); + b47s = devm_kzalloc(dev, sizeof(*b47s), GFP_KERNEL); if (!b47s) return -ENOMEM; sflash->priv = b47s; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "invalid resource\n"); + return -EINVAL; + } + if (!devm_request_mem_region(dev, res->start, resource_size(res), + res->name)) { + dev_err(dev, "can't request region for resource %pR\n", res); + return -EBUSY; + } + b47s->window = ioremap_cache(res->start, resource_size(res)); + if (!b47s->window) { + dev_err(dev, "ioremap failed for resource %pR\n", res); + return -ENOMEM; + } + 
b47s->bcma_cc = container_of(sflash, struct bcma_drv_cc, sflash); b47s->cc_read = bcm47xxsflash_bcma_cc_read; b47s->cc_write = bcm47xxsflash_bcma_cc_write; @@ -297,7 +315,6 @@ static int bcm47xxsflash_bcma_probe(struct platform_device *pdev) break; } - b47s->window = sflash->window; b47s->blocksize = sflash->blocksize; b47s->numblocks = sflash->numblocks; b47s->size = sflash->size; @@ -306,6 +323,7 @@ static int bcm47xxsflash_bcma_probe(struct platform_device *pdev) err = mtd_device_parse_register(&b47s->mtd, probes, NULL, NULL, 0); if (err) { pr_err("Failed to register MTD device: %d\n", err); + iounmap(b47s->window); return err; } @@ -321,6 +339,7 @@ static int bcm47xxsflash_bcma_remove(struct platform_device *pdev) struct bcm47xxsflash *b47s = sflash->priv; mtd_device_unregister(&b47s->mtd); + iounmap(b47s->window); return 0; } diff --git a/drivers/mtd/devices/bcm47xxsflash.h b/drivers/mtd/devices/bcm47xxsflash.h index fe93daf4f489..1564b62b412e 100644 --- a/drivers/mtd/devices/bcm47xxsflash.h +++ b/drivers/mtd/devices/bcm47xxsflash.h @@ -65,7 +65,8 @@ struct bcm47xxsflash { enum bcm47xxsflash_type type; - u32 window; + void __iomem *window; + u32 blocksize; u16 numblocks; u32 size; diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c index e7b2e439696c..b833e6cc684c 100644 --- a/drivers/mtd/devices/docg3.c +++ b/drivers/mtd/devices/docg3.c @@ -67,16 +67,40 @@ module_param(reliable_mode, uint, 0); MODULE_PARM_DESC(reliable_mode, "Set the docg3 mode (0=normal MLC, 1=fast, " "2=reliable) : MLC normal operations are in normal mode"); -/** - * struct docg3_oobinfo - DiskOnChip G3 OOB layout - * @eccbytes: 8 bytes are used (1 for Hamming ECC, 7 for BCH ECC) - * @eccpos: ecc positions (byte 7 is Hamming ECC, byte 8-14 are BCH ECC) - * @oobfree: free pageinfo bytes (byte 0 until byte 6, byte 15 - */ -static struct nand_ecclayout docg3_oobinfo = { - .eccbytes = 8, - .eccpos = {7, 8, 9, 10, 11, 12, 13, 14}, - .oobfree = {{0, 7}, {15, 1} }, +static int docg3_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + if (section) + return -ERANGE; + + /* byte 7 is Hamming ECC, byte 8-14 are BCH ECC */ + oobregion->offset = 7; + oobregion->length = 8; + + return 0; +} + +static int docg3_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + if (section > 1) + return -ERANGE; + + /* free bytes: byte 0 until byte 6, byte 15 */ + if (!section) { + oobregion->offset = 0; + oobregion->length = 7; + } else { + oobregion->offset = 15; + oobregion->length = 1; + } + + return 0; +} + +static const struct mtd_ooblayout_ops nand_ooblayout_docg3_ops = { + .ecc = docg3_ooblayout_ecc, + .free = docg3_ooblayout_free, }; static inline u8 doc_readb(struct docg3 *docg3, u16 reg) @@ -1857,7 +1881,7 @@ static int __init doc_set_driver_info(int chip_id, struct mtd_info *mtd) mtd->_read_oob = doc_read_oob; mtd->_write_oob = doc_write_oob; mtd->_block_isbad = doc_block_isbad; - mtd->ecclayout = &docg3_oobinfo; + mtd_set_ooblayout(mtd, &nand_ooblayout_docg3_ops); mtd->oobavail = 8; mtd->ecc_strength = DOC_ECC_BCH_T; diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index c9c3b7fa3051..9d6854467651 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c @@ -131,6 +131,28 @@ static int m25p80_read(struct spi_nor *nor, loff_t from, size_t len, /* convert the dummy cycles to the number of bytes */ dummy /= 8; + if (spi_flash_read_supported(spi)) { + struct spi_flash_read_message msg; + int ret; + + 
memset(&msg, 0, sizeof(msg)); + + msg.buf = buf; + msg.from = from; + msg.len = len; + msg.read_opcode = nor->read_opcode; + msg.addr_width = nor->addr_width; + msg.dummy_bytes = dummy; + /* TODO: Support other combinations */ + msg.opcode_nbits = SPI_NBITS_SINGLE; + msg.addr_nbits = SPI_NBITS_SINGLE; + msg.data_nbits = m25p80_rx_nbits(nor); + + ret = spi_flash_read(spi, &msg); + *retlen = msg.retlen; + return ret; + } + spi_message_init(&m); memset(t, 0, (sizeof t)); diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c index 708b7e8c8b18..220f9200fa52 100644 --- a/drivers/mtd/devices/pmc551.c +++ b/drivers/mtd/devices/pmc551.c @@ -353,7 +353,7 @@ static int pmc551_write(struct mtd_info *mtd, loff_t to, size_t len, * mechanism * returns the size of the memory region found. */ -static int fixup_pmc551(struct pci_dev *dev) +static int __init fixup_pmc551(struct pci_dev *dev) { #ifdef CONFIG_MTD_PMC551_BUGFIX u32 dram_data; diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c index 0455166f05fa..4f206a99164c 100644 --- a/drivers/mtd/maps/ck804xrom.c +++ b/drivers/mtd/maps/ck804xrom.c @@ -112,8 +112,8 @@ static void ck804xrom_cleanup(struct ck804xrom_window *window) } -static int ck804xrom_init_one(struct pci_dev *pdev, - const struct pci_device_id *ent) +static int __init ck804xrom_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) { static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL }; u8 byte; diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c index 76ed651b515b..9646b0766ce0 100644 --- a/drivers/mtd/maps/esb2rom.c +++ b/drivers/mtd/maps/esb2rom.c @@ -144,8 +144,8 @@ static void esb2rom_cleanup(struct esb2rom_window *window) pci_dev_put(window->pdev); } -static int esb2rom_init_one(struct pci_dev *pdev, - const struct pci_device_id *ent) +static int __init esb2rom_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) { static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL }; struct esb2rom_window *window = &esb2rom_window; diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c index 8636bba42200..e17d02ae03f0 100644 --- a/drivers/mtd/maps/ichxrom.c +++ b/drivers/mtd/maps/ichxrom.c @@ -84,8 +84,8 @@ static void ichxrom_cleanup(struct ichxrom_window *window) } -static int ichxrom_init_one(struct pci_dev *pdev, - const struct pci_device_id *ent) +static int __init ichxrom_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) { static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL }; struct ichxrom_window *window = &ichxrom_window; diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c index c1af83db5202..00a8190797ec 100644 --- a/drivers/mtd/maps/uclinux.c +++ b/drivers/mtd/maps/uclinux.c @@ -4,11 +4,13 @@ * uclinux.c -- generic memory mapped MTD driver for uclinux * * (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com) + * + * License: GPL */ /****************************************************************************/ -#include <linux/module.h> +#include <linux/moduleparam.h> #include <linux/types.h> #include <linux/init.h> #include <linux/kernel.h> @@ -117,27 +119,6 @@ static int __init uclinux_mtd_init(void) return(0); } - -/****************************************************************************/ - -static void __exit uclinux_mtd_cleanup(void) -{ - if (uclinux_ram_mtdinfo) { - mtd_device_unregister(uclinux_ram_mtdinfo); - map_destroy(uclinux_ram_mtdinfo); - uclinux_ram_mtdinfo = NULL; - } - if (uclinux_ram_map.virt) 
-	uclinux_ram_map.virt = 0;
-}
-
-/****************************************************************************/
-
-module_init(uclinux_mtd_init);
-module_exit(uclinux_mtd_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com>");
-MODULE_DESCRIPTION("Generic MTD for uClinux");
+device_initcall(uclinux_mtd_init);

/****************************************************************************/
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 6d19835b80a9..2a47a3f0e730 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -465,35 +465,108 @@ static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
 }

 /*
- * Copies (and truncates, if necessary) data from the larger struct,
- * nand_ecclayout, to the smaller, deprecated layout struct,
- * nand_ecclayout_user. This is necessary only to support the deprecated
- * API ioctl ECCGETLAYOUT while allowing all new functionality to use
- * nand_ecclayout flexibly (i.e. the struct may change size in new
- * releases without requiring major rewrites).
+ * Copies (and truncates, if necessary) OOB layout information to the
+ * deprecated layout struct, nand_ecclayout_user. This is necessary only to
+ * support the deprecated API ioctl ECCGETLAYOUT while allowing all new
+ * functionality to use mtd_ooblayout_ops flexibly (i.e. mtd_ooblayout_ops
+ * can describe any kind of OOB layout with almost zero overhead from a
+ * memory usage point of view).
 */
-static int shrink_ecclayout(const struct nand_ecclayout *from,
-			    struct nand_ecclayout_user *to)
+static int shrink_ecclayout(struct mtd_info *mtd,
+			    struct nand_ecclayout_user *to)
 {
-	int i;
+	struct mtd_oob_region oobregion;
+	int i, section = 0, ret;

-	if (!from || !to)
+	if (!mtd || !to)
 		return -EINVAL;

 	memset(to, 0, sizeof(*to));

-	to->eccbytes = min((int)from->eccbytes, MTD_MAX_ECCPOS_ENTRIES);
-	for (i = 0; i < to->eccbytes; i++)
-		to->eccpos[i] = from->eccpos[i];
+	to->eccbytes = 0;
+	for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) {
+		u32 eccpos;
+
+		ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
+		if (ret < 0) {
+			if (ret != -ERANGE)
+				return ret;
+
+			break;
+		}
+
+		eccpos = oobregion.offset;
+		for (; i < MTD_MAX_ECCPOS_ENTRIES &&
+		       eccpos < oobregion.offset + oobregion.length; i++) {
+			to->eccpos[i] = eccpos++;
+			to->eccbytes++;
+		}
+	}

 	for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
-		if (from->oobfree[i].length == 0 &&
-		    from->oobfree[i].offset == 0)
+		ret = mtd_ooblayout_free(mtd, i, &oobregion);
+		if (ret < 0) {
+			if (ret != -ERANGE)
+				return ret;
+
+			break;
+		}
+
+		to->oobfree[i].offset = oobregion.offset;
+		to->oobfree[i].length = oobregion.length;
+		to->oobavail += to->oobfree[i].length;
+	}
+
+	return 0;
+}
+
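The only consumer of shrink_ecclayout() is the deprecated ECCGETLAYOUT ioctl handled further down in mtdchar_ioctl(). A minimal userspace sketch of the path this conversion keeps working -- the ioctl and struct nand_ecclayout_user come from the mtd-abi.h UAPI header; /dev/mtd0 is a placeholder device node:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/mtd-abi.h>

int main(void)
{
	static struct nand_ecclayout_user lay;	/* large struct; keep it off the stack */
	int fd = open("/dev/mtd0", O_RDONLY);	/* placeholder NAND mtdchar device */

	if (fd < 0 || ioctl(fd, ECCGETLAYOUT, &lay) < 0) {
		perror("ECCGETLAYOUT");
		return 1;
	}

	/* eccpos[] was rebuilt from the mtd_ooblayout_ecc() sections */
	printf("eccbytes=%u oobavail=%u first eccpos=%u\n",
	       lay.eccbytes, lay.oobavail, lay.eccpos[0]);
	close(fd);
	return 0;
}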
+static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to)
+{
+	struct mtd_oob_region oobregion;
+	int i, section = 0, ret;
+
+	if (!mtd || !to)
+		return -EINVAL;
+
+	memset(to, 0, sizeof(*to));
+
+	to->eccbytes = 0;
+	for (i = 0; i < ARRAY_SIZE(to->eccpos);) {
+		u32 eccpos;
+
+		ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
+		if (ret < 0) {
+			if (ret != -ERANGE)
+				return ret;
+
 			break;
-		to->oobavail += from->oobfree[i].length;
-		to->oobfree[i] = from->oobfree[i];
+		}
+
+		if (oobregion.length + i > ARRAY_SIZE(to->eccpos))
+			return -EINVAL;
+
+		eccpos = oobregion.offset;
+		for (; eccpos < oobregion.offset + oobregion.length; i++) {
+			to->eccpos[i] = eccpos++;
+			to->eccbytes++;
+		}
 	}

+	for (i = 0; i < 8; i++) {
+		ret = mtd_ooblayout_free(mtd, i, &oobregion);
+		if (ret < 0) {
+			if (ret != -ERANGE)
+				return ret;
+
+			break;
+		}
+
+		to->oobfree[i][0] = oobregion.offset;
+		to->oobfree[i][1] = oobregion.length;
+	}
+
+	to->useecc = MTD_NANDECC_AUTOPLACE;
+
 	return 0;
 }
@@ -815,16 +888,12 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
 	{
 		struct nand_oobinfo oi;

-		if (!mtd->ecclayout)
+		if (!mtd->ooblayout)
 			return -EOPNOTSUPP;
-		if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
-			return -EINVAL;
-		oi.useecc = MTD_NANDECC_AUTOPLACE;
-		memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
-		memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
-		       sizeof(oi.oobfree));
-		oi.eccbytes = mtd->ecclayout->eccbytes;
+		ret = get_oobinfo(mtd, &oi);
+		if (ret)
+			return ret;

 		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
 			return -EFAULT;
@@ -913,14 +982,14 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
 	{
 		struct nand_ecclayout_user *usrlay;

-		if (!mtd->ecclayout)
+		if (!mtd->ooblayout)
 			return -EOPNOTSUPP;

 		usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
 		if (!usrlay)
 			return -ENOMEM;

-		shrink_ecclayout(mtd->ecclayout, usrlay);
+		shrink_ecclayout(mtd, usrlay);

 		if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
 			ret = -EFAULT;
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 239a8c806b67..d573606b91c2 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -777,7 +777,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to c
 	}

-	concat->mtd.ecclayout = subdev[0]->ecclayout;
+	mtd_set_ooblayout(&concat->mtd, subdev[0]->ooblayout);

 	concat->num_subdev = num_devs;
 	concat->mtd.name = name;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index bee180bd11e7..e3936b847c6b 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -1016,6 +1016,366 @@ int mtd_write_oob(struct mtd_info *mtd, loff_t to,
 }
 EXPORT_SYMBOL_GPL(mtd_write_oob);

+/**
+ * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
+ * @mtd: MTD device structure
+ * @section: ECC section. Depending on the layout you may have all the ECC
+ *	     bytes stored in a single contiguous section, or one section
+ *	     per ECC chunk (and sometimes several sections for a single
+ *	     ECC chunk)
+ * @oobecc: OOB region struct filled with the appropriate ECC position
+ *	    information
+ *
+ * This function returns ECC section information in the OOB area. If you want
+ * to get all the ECC bytes information, then you should call
+ * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
+		      struct mtd_oob_region *oobecc)
+{
+	memset(oobecc, 0, sizeof(*oobecc));
+
+	if (!mtd || section < 0)
+		return -EINVAL;
+
+	if (!mtd->ooblayout || !mtd->ooblayout->ecc)
+		return -ENOTSUPP;
+
+	return mtd->ooblayout->ecc(mtd, section, oobecc);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
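A minimal in-kernel sketch of the iteration pattern the kerneldoc above prescribes; dump_ecc_layout() is a hypothetical helper, not part of this patch:

static void dump_ecc_layout(struct mtd_info *mtd)
{
	struct mtd_oob_region oobecc;
	int section = 0;

	/* walk the ECC sections until the layout reports -ERANGE */
	while (!mtd_ooblayout_ecc(mtd, section, &oobecc)) {
		pr_info("ECC section %d: offset %u, length %u\n",
			section, oobecc.offset, oobecc.length);
		section++;
	}
}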
+
+/**
+ * mtd_ooblayout_free - Get the OOB region definition of a specific free
+ *			section
+ * @mtd: MTD device structure
+ * @section: Free section you are interested in. Depending on the layout
+ *	     you may have all the free bytes stored in a single contiguous
+ *	     section, or one section per ECC chunk plus an extra section
+ *	     for the remaining bytes (or other funky layout).
+ * @oobfree: OOB region struct filled with the appropriate free position
+ *	     information
+ *
+ * This function returns free bytes position in the OOB area. If you want
+ * to get all the free bytes information, then you should call
+ * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_free(struct mtd_info *mtd, int section,
+		       struct mtd_oob_region *oobfree)
+{
+	memset(oobfree, 0, sizeof(*oobfree));
+
+	if (!mtd || section < 0)
+		return -EINVAL;
+
+	if (!mtd->ooblayout || !mtd->ooblayout->free)
+		return -ENOTSUPP;
+
+	return mtd->ooblayout->free(mtd, section, oobfree);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
+
+/**
+ * mtd_ooblayout_find_region - Find the region attached to a specific byte
+ * @mtd: mtd info structure
+ * @byte: the byte we are searching for
+ * @sectionp: pointer where the section id will be stored
+ * @oobregion: used to retrieve the ECC position
+ * @iter: iterator function. Should be either mtd_ooblayout_free or
+ *	  mtd_ooblayout_ecc depending on the region type you're searching for
+ *
+ * This function returns the section id and oobregion information of a
+ * specific byte. For example, say you want to know where the 4th ECC byte is
+ * stored, you'll use:
+ *
+ * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
+				int *sectionp, struct mtd_oob_region *oobregion,
+				int (*iter)(struct mtd_info *,
+					    int section,
+					    struct mtd_oob_region *oobregion))
+{
+	int pos = 0, ret, section = 0;
+
+	memset(oobregion, 0, sizeof(*oobregion));
+
+	while (1) {
+		ret = iter(mtd, section, oobregion);
+		if (ret)
+			return ret;
+
+		if (pos + oobregion->length > byte)
+			break;
+
+		pos += oobregion->length;
+		section++;
+	}
+
+	/*
+	 * Adjust region info to make it start at the beginning of the
+	 * 'start' ECC byte.
+	 */
+	oobregion->offset += byte - pos;
+	oobregion->length -= byte - pos;
+	*sectionp = section;
+
+	return 0;
+}
+
+/**
+ * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
+ *				  ECC byte
+ * @mtd: mtd info structure
+ * @eccbyte: the byte we are searching for
+ * @sectionp: pointer where the section id will be stored
+ * @oobregion: OOB region information
+ *
+ * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
+ * byte.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
+				 int *section,
+				 struct mtd_oob_region *oobregion)
+{
+	return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
+					 mtd_ooblayout_ecc);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
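For instance, a driver that knows ECC byte N of a page is corrupted could translate that index into an absolute OOB offset with the helper just exported; a hypothetical sketch:

static int ecc_byte_to_oob_offset(struct mtd_info *mtd, int eccbyte)
{
	struct mtd_oob_region oobregion;
	int section, ret;

	ret = mtd_ooblayout_find_eccregion(mtd, eccbyte, &section,
					   &oobregion);
	if (ret)
		return ret;

	/* the region was adjusted to start at the requested byte */
	return oobregion.offset;
}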
+
+/**
+ * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
+ * @mtd: mtd info structure
+ * @buf: destination buffer to store OOB bytes
+ * @oobbuf: OOB buffer
+ * @start: first byte to retrieve
+ * @nbytes: number of bytes to retrieve
+ * @iter: section iterator
+ *
+ * Extract bytes attached to a specific category (ECC or free)
+ * from the OOB buffer and copy them into buf.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
+				const u8 *oobbuf, int start, int nbytes,
+				int (*iter)(struct mtd_info *,
+					    int section,
+					    struct mtd_oob_region *oobregion))
+{
+	struct mtd_oob_region oobregion = { };
+	int section = 0, ret;
+
+	ret = mtd_ooblayout_find_region(mtd, start, &section,
+					&oobregion, iter);
+
+	while (!ret) {
+		int cnt;
+
+		cnt = oobregion.length > nbytes ? nbytes : oobregion.length;
+		memcpy(buf, oobbuf + oobregion.offset, cnt);
+		buf += cnt;
+		nbytes -= cnt;
+
+		if (!nbytes)
+			break;
+
+		ret = iter(mtd, ++section, &oobregion);
+	}
+
+	return ret;
+}
+
+/**
+ * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
+ * @mtd: mtd info structure
+ * @buf: source buffer to get OOB bytes from
+ * @oobbuf: OOB buffer
+ * @start: first OOB byte to set
+ * @nbytes: number of OOB bytes to set
+ * @iter: section iterator
+ *
+ * Fill the OOB buffer with data provided in buf. The category (ECC or free)
+ * is selected by passing the appropriate iterator.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
+				u8 *oobbuf, int start, int nbytes,
+				int (*iter)(struct mtd_info *,
+					    int section,
+					    struct mtd_oob_region *oobregion))
+{
+	struct mtd_oob_region oobregion = { };
+	int section = 0, ret;
+
+	ret = mtd_ooblayout_find_region(mtd, start, &section,
+					&oobregion, iter);
+
+	while (!ret) {
+		int cnt;
+
+		cnt = oobregion.length > nbytes ? nbytes : oobregion.length;
+		memcpy(oobbuf + oobregion.offset, buf, cnt);
+		buf += cnt;
+		nbytes -= cnt;
+
+		if (!nbytes)
+			break;
+
+		ret = iter(mtd, ++section, &oobregion);
+	}
+
+	return ret;
+}
+
+/**
+ * mtd_ooblayout_count_bytes - count the number of bytes in an OOB category
+ * @mtd: mtd info structure
+ * @iter: category iterator
+ *
+ * Count the number of bytes in a given category.
+ *
+ * Returns a positive value on success, a negative error code otherwise.
+ */
+static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
+				int (*iter)(struct mtd_info *,
+					    int section,
+					    struct mtd_oob_region *oobregion))
+{
+	struct mtd_oob_region oobregion = { };
+	int section = 0, ret, nbytes = 0;
+
+	while (1) {
+		ret = iter(mtd, section++, &oobregion);
+		if (ret) {
+			if (ret == -ERANGE)
+				ret = nbytes;
+			break;
+		}
+
+		nbytes += oobregion.length;
+	}
+
+	return ret;
+}
+
+/**
+ * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
+ * @mtd: mtd info structure
+ * @eccbuf: destination buffer to store ECC bytes
+ * @oobbuf: OOB buffer
+ * @start: first ECC byte to retrieve
+ * @nbytes: number of ECC bytes to retrieve
+ *
+ * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
+			       const u8 *oobbuf, int start, int nbytes)
+{
+	return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
+				       mtd_ooblayout_ecc);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
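Drivers typically pair this with a raw OOB read: copy chip->oob_poi (or any raw OOB snapshot) through the layout to obtain the ECC bytes in layout order. A hypothetical sketch, using mtd_ooblayout_count_eccbytes() which is introduced just below:

static int extract_page_ecc(struct mtd_info *mtd, const u8 *oobbuf,
			    u8 *eccbuf)
{
	int eccbytes = mtd_ooblayout_count_eccbytes(mtd);

	if (eccbytes < 0)
		return eccbytes;

	/* gather every ECC byte, starting from ECC byte 0 */
	return mtd_ooblayout_get_eccbytes(mtd, eccbuf, oobbuf, 0, eccbytes);
}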
+
+/**
+ * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
+ * @mtd: mtd info structure
+ * @eccbuf: source buffer to get ECC bytes from
+ * @oobbuf: OOB buffer
+ * @start: first ECC byte to set
+ * @nbytes: number of ECC bytes to set
+ *
+ * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
+			       u8 *oobbuf, int start, int nbytes)
+{
+	return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
+				       mtd_ooblayout_ecc);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
+
+/**
+ * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
+ * @mtd: mtd info structure
+ * @databuf: destination buffer to store data bytes
+ * @oobbuf: OOB buffer
+ * @start: first data byte to retrieve
+ * @nbytes: number of data bytes to retrieve
+ *
+ * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
+				const u8 *oobbuf, int start, int nbytes)
+{
+	return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
+				       mtd_ooblayout_free);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
+
+/**
+ * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
+ * @mtd: mtd info structure
+ * @databuf: source buffer to get data bytes from
+ * @oobbuf: OOB buffer
+ * @start: first data byte to set
+ * @nbytes: number of data bytes to set
+ *
+ * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
+				u8 *oobbuf, int start, int nbytes)
+{
+	return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
+				       mtd_ooblayout_free);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
+
+/**
+ * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
+ * @mtd: mtd info structure
+ *
+ * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
+ *
+ * Returns the number of free bytes on success, a negative error code
+ * otherwise.
+ */
+int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
+{
+	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
+
+/**
+ * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
+ * @mtd: mtd info structure
+ *
+ * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
+ *
+ * Returns the number of ECC bytes on success, a negative error code
+ * otherwise.
+ */
+int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
+{
+	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
+
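Taken together, the two counters give a cheap sanity check that a freshly registered layout actually fits the device's OOB area; check_ooblayout() is a hypothetical sketch, not part of the patch:

static int check_ooblayout(struct mtd_info *mtd)
{
	int eccbytes = mtd_ooblayout_count_eccbytes(mtd);
	int freebytes = mtd_ooblayout_count_freebytes(mtd);

	if (eccbytes < 0 || freebytes < 0)
		return -EINVAL;

	/* ECC and free regions may not oversubscribe the OOB area */
	if (eccbytes + freebytes > mtd->oobsize)
		return -EINVAL;

	return 0;
}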
 /*
 * Method to access the protection register area, present in some flash
 * devices. The user data is one time programmable but the factory data is read
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 08de4b2cf0f5..1f13e32556f8 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -317,6 +317,27 @@ static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
 	return res;
 }

+static int part_ooblayout_ecc(struct mtd_info *mtd, int section,
+			      struct mtd_oob_region *oobregion)
+{
+	struct mtd_part *part = mtd_to_part(mtd);
+
+	return mtd_ooblayout_ecc(part->master, section, oobregion);
+}
+
+static int part_ooblayout_free(struct mtd_info *mtd, int section,
+			       struct mtd_oob_region *oobregion)
+{
+	struct mtd_part *part = mtd_to_part(mtd);
+
+	return mtd_ooblayout_free(part->master, section, oobregion);
+}
+
+static const struct mtd_ooblayout_ops part_ooblayout_ops = {
+	.ecc = part_ooblayout_ecc,
+	.free = part_ooblayout_free,
+};
+
 static inline void free_partition(struct mtd_part *p)
 {
 	kfree(p->mtd.name);
@@ -533,7 +554,7 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
 			part->name);
 	}

-	slave->mtd.ecclayout = master->ecclayout;
+	mtd_set_ooblayout(&slave->mtd, &part_ooblayout_ops);
 	slave->mtd.ecc_step_size = master->ecc_step_size;
 	slave->mtd.ecc_strength = master->ecc_strength;
 	slave->mtd.bitflip_threshold = master->bitflip_threshold;
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index 68b58c85789c..78e12cc8bac2 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -224,6 +224,7 @@ static int ams_delta_init(struct platform_device *pdev)
 	/* 25 us command delay time */
 	this->chip_delay = 30;
 	this->ecc.mode = NAND_ECC_SOFT;
+	this->ecc.algo = NAND_ECC_HAMMING;

 	platform_set_drvdata(pdev, io_base);
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 20cbaabb2959..68b9160108c9 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -36,7 +36,6 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_gpio.h>
-#include <linux/of_mtd.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/nand.h>
 #include <linux/mtd/partitions.h>
@@ -68,34 +67,44 @@ struct atmel_nand_caps {
 	uint8_t pmecc_max_correction;
 };

-struct atmel_nand_nfc_caps {
-	uint32_t rb_mask;
-};
-
-/* oob layout for large page size
+/*
+ * oob layout for large page size
 * bad block info is on bytes 0 and 1
 * the bytes have to be consecutive to avoid
 * several NAND_CMD_RNDOUT during read
- */
-static struct nand_ecclayout atmel_oobinfo_large = {
-	.eccbytes = 4,
-	.eccpos = {60, 61, 62, 63},
-	.oobfree = {
-		{2, 58}
-	},
-};
-
-/* oob layout for small page size
+ *
+ * oob layout for small page size
 * bad block info is on bytes 4 and 5
 * the bytes have to be consecutive to avoid
 * several NAND_CMD_RNDOUT during read
 */
-static struct nand_ecclayout atmel_oobinfo_small = {
-	.eccbytes = 4,
-	.eccpos = {0, 1, 2, 3},
-	.oobfree = {
-		{6, 10}
-	},
+static int atmel_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
+				  struct mtd_oob_region *oobregion)
+{
+	if (section)
+		return -ERANGE;
+
+	oobregion->length = 4;
+	oobregion->offset = 0;
+
+	return 0;
+}
+
+static int atmel_ooblayout_free_sp(struct mtd_info *mtd, int section,
+				   struct mtd_oob_region *oobregion)
+{
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = 6;
+	oobregion->length = mtd->oobsize - oobregion->offset;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops atmel_ooblayout_sp_ops = {
+	.ecc = atmel_ooblayout_ecc_sp,
+	.free = atmel_ooblayout_free_sp,
 };

 struct atmel_nfc {
@@
-116,7 +125,6 @@ struct atmel_nfc { /* Point to the sram bank which include readed data via NFC */ void *data_in_sram; bool will_write_sram; - const struct atmel_nand_nfc_caps *caps; }; static struct atmel_nfc nand_nfc; @@ -163,8 +171,6 @@ struct atmel_nand_host { int *pmecc_delta; }; -static struct nand_ecclayout atmel_pmecc_oobinfo; - /* * Enable NAND. */ @@ -434,14 +440,13 @@ err_buf: static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len) { struct nand_chip *chip = mtd_to_nand(mtd); - struct atmel_nand_host *host = nand_get_controller_data(chip); if (use_dma && len > mtd->oobsize) /* only use DMA for bigger than oob size: better performances */ if (atmel_nand_dma_op(mtd, buf, len, 1) == 0) return; - if (host->board.bus_width_16) + if (chip->options & NAND_BUSWIDTH_16) atmel_read_buf16(mtd, buf, len); else atmel_read_buf8(mtd, buf, len); @@ -450,14 +455,13 @@ static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len) static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len) { struct nand_chip *chip = mtd_to_nand(mtd); - struct atmel_nand_host *host = nand_get_controller_data(chip); if (use_dma && len > mtd->oobsize) /* only use DMA for bigger than oob size: better performances */ if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0) return; - if (host->board.bus_width_16) + if (chip->options & NAND_BUSWIDTH_16) atmel_write_buf16(mtd, buf, len); else atmel_write_buf8(mtd, buf, len); @@ -483,22 +487,6 @@ static int pmecc_get_ecc_bytes(int cap, int sector_size) return (m * cap + 7) / 8; } -static void pmecc_config_ecc_layout(struct nand_ecclayout *layout, - int oobsize, int ecc_len) -{ - int i; - - layout->eccbytes = ecc_len; - - /* ECC will occupy the last ecc_len bytes continuously */ - for (i = 0; i < ecc_len; i++) - layout->eccpos[i] = oobsize - ecc_len + i; - - layout->oobfree[0].offset = PMECC_OOB_RESERVED_BYTES; - layout->oobfree[0].length = - oobsize - ecc_len - layout->oobfree[0].offset; -} - static void __iomem *pmecc_get_alpha_to(struct atmel_nand_host *host) { int table_size; @@ -836,13 +824,16 @@ static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc, dev_dbg(host->dev, "Bit flip in data area, byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n", pos, bit_pos, err_byte, *(buf + byte_pos)); } else { + struct mtd_oob_region oobregion; + /* Bit flip in OOB area */ tmp = sector_num * nand_chip->ecc.bytes + (byte_pos - sector_size); err_byte = ecc[tmp]; ecc[tmp] ^= (1 << bit_pos); - pos = tmp + nand_chip->ecc.layout->eccpos[0]; + mtd_ooblayout_ecc(mtd, 0, &oobregion); + pos = tmp + oobregion.offset; dev_dbg(host->dev, "Bit flip in OOB, oob_byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n", pos, bit_pos, err_byte, ecc[tmp]); } @@ -863,17 +854,6 @@ static int pmecc_correction(struct mtd_info *mtd, u32 pmecc_stat, uint8_t *buf, uint8_t *buf_pos; int max_bitflips = 0; - /* If can correct bitfilps from erased page, do the normal check */ - if (host->caps->pmecc_correct_erase_page) - goto normal_check; - - for (i = 0; i < nand_chip->ecc.total; i++) - if (ecc[i] != 0xff) - goto normal_check; - /* Erased page, return OK */ - return 0; - -normal_check: for (i = 0; i < nand_chip->ecc.steps; i++) { err_nbr = 0; if (pmecc_stat & 0x1) { @@ -884,16 +864,30 @@ normal_check: pmecc_get_sigma(mtd); err_nbr = pmecc_err_location(mtd); - if (err_nbr == -1) { + if (err_nbr >= 0) { + pmecc_correct_data(mtd, buf_pos, ecc, i, + nand_chip->ecc.bytes, + err_nbr); + } else if (!host->caps->pmecc_correct_erase_page) { + u8 *ecc_pos = ecc + (i * 
nand_chip->ecc.bytes); + + /* Try to detect erased pages */ + err_nbr = nand_check_erased_ecc_chunk(buf_pos, + host->pmecc_sector_size, + ecc_pos, + nand_chip->ecc.bytes, + NULL, 0, + nand_chip->ecc.strength); + } + + if (err_nbr < 0) { dev_err(host->dev, "PMECC: Too many errors\n"); mtd->ecc_stats.failed++; return -EIO; - } else { - pmecc_correct_data(mtd, buf_pos, ecc, i, - nand_chip->ecc.bytes, err_nbr); - mtd->ecc_stats.corrected += err_nbr; - max_bitflips = max_t(int, max_bitflips, err_nbr); } + + mtd->ecc_stats.corrected += err_nbr; + max_bitflips = max_t(int, max_bitflips, err_nbr); } pmecc_stat >>= 1; } @@ -931,7 +925,6 @@ static int atmel_nand_pmecc_read_page(struct mtd_info *mtd, struct atmel_nand_host *host = nand_get_controller_data(chip); int eccsize = chip->ecc.size * chip->ecc.steps; uint8_t *oob = chip->oob_poi; - uint32_t *eccpos = chip->ecc.layout->eccpos; uint32_t stat; unsigned long end_time; int bitflips = 0; @@ -953,7 +946,11 @@ static int atmel_nand_pmecc_read_page(struct mtd_info *mtd, stat = pmecc_readl_relaxed(host->ecc, ISR); if (stat != 0) { - bitflips = pmecc_correction(mtd, stat, buf, &oob[eccpos[0]]); + struct mtd_oob_region oobregion; + + mtd_ooblayout_ecc(mtd, 0, &oobregion); + bitflips = pmecc_correction(mtd, stat, buf, + &oob[oobregion.offset]); if (bitflips < 0) /* uncorrectable errors */ return 0; @@ -967,8 +964,8 @@ static int atmel_nand_pmecc_write_page(struct mtd_info *mtd, int page) { struct atmel_nand_host *host = nand_get_controller_data(chip); - uint32_t *eccpos = chip->ecc.layout->eccpos; - int i, j; + struct mtd_oob_region oobregion = { }; + int i, j, section = 0; unsigned long end_time; if (!host->nfc || !host->nfc->write_by_sram) { @@ -987,11 +984,14 @@ static int atmel_nand_pmecc_write_page(struct mtd_info *mtd, for (i = 0; i < chip->ecc.steps; i++) { for (j = 0; j < chip->ecc.bytes; j++) { - int pos; + if (!oobregion.length) + mtd_ooblayout_ecc(mtd, section, &oobregion); - pos = i * chip->ecc.bytes + j; - chip->oob_poi[eccpos[pos]] = + chip->oob_poi[oobregion.offset] = pmecc_readb_ecc_relaxed(host->ecc, i, j); + oobregion.length--; + oobregion.offset++; + section++; } } chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); @@ -1003,8 +1003,9 @@ static void atmel_pmecc_core_init(struct mtd_info *mtd) { struct nand_chip *nand_chip = mtd_to_nand(mtd); struct atmel_nand_host *host = nand_get_controller_data(nand_chip); + int eccbytes = mtd_ooblayout_count_eccbytes(mtd); uint32_t val = 0; - struct nand_ecclayout *ecc_layout; + struct mtd_oob_region oobregion; pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST); pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE); @@ -1054,11 +1055,11 @@ static void atmel_pmecc_core_init(struct mtd_info *mtd) | PMECC_CFG_AUTO_DISABLE); pmecc_writel(host->ecc, CFG, val); - ecc_layout = nand_chip->ecc.layout; pmecc_writel(host->ecc, SAREA, mtd->oobsize - 1); - pmecc_writel(host->ecc, SADDR, ecc_layout->eccpos[0]); + mtd_ooblayout_ecc(mtd, 0, &oobregion); + pmecc_writel(host->ecc, SADDR, oobregion.offset); pmecc_writel(host->ecc, EADDR, - ecc_layout->eccpos[ecc_layout->eccbytes - 1]); + oobregion.offset + eccbytes - 1); /* See datasheet about PMECC Clock Control Register */ pmecc_writel(host->ecc, CLK, 2); pmecc_writel(host->ecc, IDR, 0xff); @@ -1206,6 +1207,7 @@ static int atmel_pmecc_nand_init_params(struct platform_device *pdev, dev_warn(host->dev, "Can't get I/O resource regs for PMECC controller, rolling back on software ECC\n"); nand_chip->ecc.mode = NAND_ECC_SOFT; + nand_chip->ecc.algo = NAND_ECC_HAMMING; return 0; } @@ 
-1280,11 +1282,8 @@ static int atmel_pmecc_nand_init_params(struct platform_device *pdev, err_no = -EINVAL; goto err; } - pmecc_config_ecc_layout(&atmel_pmecc_oobinfo, - mtd->oobsize, - nand_chip->ecc.total); - nand_chip->ecc.layout = &atmel_pmecc_oobinfo; + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); break; default: dev_warn(host->dev, @@ -1292,6 +1291,7 @@ static int atmel_pmecc_nand_init_params(struct platform_device *pdev, /* page size not handled by HW ECC */ /* switching back to soft ECC */ nand_chip->ecc.mode = NAND_ECC_SOFT; + nand_chip->ecc.algo = NAND_ECC_HAMMING; return 0; } @@ -1359,12 +1359,12 @@ static int atmel_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip, { int eccsize = chip->ecc.size; int eccbytes = chip->ecc.bytes; - uint32_t *eccpos = chip->ecc.layout->eccpos; uint8_t *p = buf; uint8_t *oob = chip->oob_poi; uint8_t *ecc_pos; int stat; unsigned int max_bitflips = 0; + struct mtd_oob_region oobregion = {}; /* * Errata: ALE is incorrectly wired up to the ECC controller @@ -1382,19 +1382,20 @@ static int atmel_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip, chip->read_buf(mtd, p, eccsize); /* move to ECC position if needed */ - if (eccpos[0] != 0) { - /* This only works on large pages - * because the ECC controller waits for - * NAND_CMD_RNDOUTSTART after the - * NAND_CMD_RNDOUT. - * anyway, for small pages, the eccpos[0] == 0 + mtd_ooblayout_ecc(mtd, 0, &oobregion); + if (oobregion.offset != 0) { + /* + * This only works on large pages because the ECC controller + * waits for NAND_CMD_RNDOUTSTART after the NAND_CMD_RNDOUT. + * Anyway, for small pages, the first ECC byte is at offset + * 0 in the OOB area. */ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, - mtd->writesize + eccpos[0], -1); + mtd->writesize + oobregion.offset, -1); } /* the ECC controller needs to read the ECC just after the data */ - ecc_pos = oob + eccpos[0]; + ecc_pos = oob + oobregion.offset; chip->read_buf(mtd, ecc_pos, eccbytes); /* check if there's an error */ @@ -1504,58 +1505,17 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode) ecc_writel(host->ecc, CR, ATMEL_ECC_RST); } -static int atmel_of_init_port(struct atmel_nand_host *host, - struct device_node *np) +static int atmel_of_init_ecc(struct atmel_nand_host *host, + struct device_node *np) { - u32 val; u32 offset[2]; - int ecc_mode; - struct atmel_nand_data *board = &host->board; - enum of_gpio_flags flags = 0; - - host->caps = (struct atmel_nand_caps *) - of_device_get_match_data(host->dev); - - if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) { - if (val >= 32) { - dev_err(host->dev, "invalid addr-offset %u\n", val); - return -EINVAL; - } - board->ale = val; - } - - if (of_property_read_u32(np, "atmel,nand-cmd-offset", &val) == 0) { - if (val >= 32) { - dev_err(host->dev, "invalid cmd-offset %u\n", val); - return -EINVAL; - } - board->cle = val; - } - - ecc_mode = of_get_nand_ecc_mode(np); - - board->ecc_mode = ecc_mode < 0 ? 
NAND_ECC_SOFT : ecc_mode; - - board->on_flash_bbt = of_get_nand_on_flash_bbt(np); - - board->has_dma = of_property_read_bool(np, "atmel,nand-has-dma"); - - if (of_get_nand_bus_width(np) == 16) - board->bus_width_16 = 1; - - board->rdy_pin = of_get_gpio_flags(np, 0, &flags); - board->rdy_pin_active_low = (flags == OF_GPIO_ACTIVE_LOW); - - board->enable_pin = of_get_gpio(np, 1); - board->det_pin = of_get_gpio(np, 2); + u32 val; host->has_pmecc = of_property_read_bool(np, "atmel,has-pmecc"); - /* load the nfc driver if there is */ - of_platform_populate(np, NULL, NULL, host->dev); - - if (!(board->ecc_mode == NAND_ECC_HW) || !host->has_pmecc) - return 0; /* Not using PMECC */ + /* Not using PMECC */ + if (!(host->nand_chip.ecc.mode == NAND_ECC_HW) || !host->has_pmecc) + return 0; /* use PMECC, get correction capability, sector size and lookup * table offset. @@ -1596,16 +1556,65 @@ static int atmel_of_init_port(struct atmel_nand_host *host, /* Will build a lookup table and initialize the offset later */ return 0; } + if (!offset[0] && !offset[1]) { dev_err(host->dev, "Invalid PMECC lookup table offset\n"); return -EINVAL; } + host->pmecc_lookup_table_offset_512 = offset[0]; host->pmecc_lookup_table_offset_1024 = offset[1]; return 0; } +static int atmel_of_init_port(struct atmel_nand_host *host, + struct device_node *np) +{ + u32 val; + struct atmel_nand_data *board = &host->board; + enum of_gpio_flags flags = 0; + + host->caps = (struct atmel_nand_caps *) + of_device_get_match_data(host->dev); + + if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) { + if (val >= 32) { + dev_err(host->dev, "invalid addr-offset %u\n", val); + return -EINVAL; + } + board->ale = val; + } + + if (of_property_read_u32(np, "atmel,nand-cmd-offset", &val) == 0) { + if (val >= 32) { + dev_err(host->dev, "invalid cmd-offset %u\n", val); + return -EINVAL; + } + board->cle = val; + } + + board->has_dma = of_property_read_bool(np, "atmel,nand-has-dma"); + + board->rdy_pin = of_get_gpio_flags(np, 0, &flags); + board->rdy_pin_active_low = (flags == OF_GPIO_ACTIVE_LOW); + + board->enable_pin = of_get_gpio(np, 1); + board->det_pin = of_get_gpio(np, 2); + + /* load the nfc driver if there is */ + of_platform_populate(np, NULL, NULL, host->dev); + + /* + * Initialize ECC mode to NAND_ECC_SOFT so that we have a correct value + * even if the nand-ecc-mode property is not defined. 
+ */ + host->nand_chip.ecc.mode = NAND_ECC_SOFT; + host->nand_chip.ecc.algo = NAND_ECC_HAMMING; + + return 0; +} + static int atmel_hw_nand_init_params(struct platform_device *pdev, struct atmel_nand_host *host) { @@ -1618,6 +1627,7 @@ static int atmel_hw_nand_init_params(struct platform_device *pdev, dev_err(host->dev, "Can't get I/O resource regs, use software ECC\n"); nand_chip->ecc.mode = NAND_ECC_SOFT; + nand_chip->ecc.algo = NAND_ECC_HAMMING; return 0; } @@ -1631,25 +1641,26 @@ static int atmel_hw_nand_init_params(struct platform_device *pdev, /* set ECC page size and oob layout */ switch (mtd->writesize) { case 512: - nand_chip->ecc.layout = &atmel_oobinfo_small; + mtd_set_ooblayout(mtd, &atmel_ooblayout_sp_ops); ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_528); break; case 1024: - nand_chip->ecc.layout = &atmel_oobinfo_large; + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_1056); break; case 2048: - nand_chip->ecc.layout = &atmel_oobinfo_large; + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_2112); break; case 4096: - nand_chip->ecc.layout = &atmel_oobinfo_large; + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_4224); break; default: /* page size not handled by HW ECC */ /* switching back to soft ECC */ nand_chip->ecc.mode = NAND_ECC_SOFT; + nand_chip->ecc.algo = NAND_ECC_HAMMING; return 0; } @@ -1699,9 +1710,9 @@ static irqreturn_t hsmc_interrupt(int irq, void *dev_id) nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_XFR_DONE); ret = IRQ_HANDLED; } - if (pending & host->nfc->caps->rb_mask) { + if (pending & NFC_SR_RB_EDGE) { complete(&host->nfc->comp_ready); - nfc_writel(host->nfc->hsmc_regs, IDR, host->nfc->caps->rb_mask); + nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_RB_EDGE); ret = IRQ_HANDLED; } if (pending & NFC_SR_CMD_DONE) { @@ -1719,7 +1730,7 @@ static void nfc_prepare_interrupt(struct atmel_nand_host *host, u32 flag) if (flag & NFC_SR_XFR_DONE) init_completion(&host->nfc->comp_xfer_done); - if (flag & host->nfc->caps->rb_mask) + if (flag & NFC_SR_RB_EDGE) init_completion(&host->nfc->comp_ready); if (flag & NFC_SR_CMD_DONE) @@ -1737,7 +1748,7 @@ static int nfc_wait_interrupt(struct atmel_nand_host *host, u32 flag) if (flag & NFC_SR_XFR_DONE) comp[index++] = &host->nfc->comp_xfer_done; - if (flag & host->nfc->caps->rb_mask) + if (flag & NFC_SR_RB_EDGE) comp[index++] = &host->nfc->comp_ready; if (flag & NFC_SR_CMD_DONE) @@ -1805,7 +1816,7 @@ static int nfc_device_ready(struct mtd_info *mtd) dev_err(host->dev, "Lost the interrupt flags: 0x%08x\n", mask & status); - return status & host->nfc->caps->rb_mask; + return status & NFC_SR_RB_EDGE; } static void nfc_select_chip(struct mtd_info *mtd, int chip) @@ -1978,8 +1989,8 @@ static void nfc_nand_command(struct mtd_info *mtd, unsigned int command, } /* fall through */ default: - nfc_prepare_interrupt(host, host->nfc->caps->rb_mask); - nfc_wait_interrupt(host, host->nfc->caps->rb_mask); + nfc_prepare_interrupt(host, NFC_SR_RB_EDGE); + nfc_wait_interrupt(host, NFC_SR_RB_EDGE); } } @@ -2147,6 +2158,19 @@ static int atmel_nand_probe(struct platform_device *pdev) } else { memcpy(&host->board, dev_get_platdata(&pdev->dev), sizeof(struct atmel_nand_data)); + nand_chip->ecc.mode = host->board.ecc_mode; + + /* + * When using software ECC every supported avr32 board means + * Hamming algorithm. If that ever changes we'll need to add + * ecc_algo field to the struct atmel_nand_data. 
+ */ + if (nand_chip->ecc.mode == NAND_ECC_SOFT) + nand_chip->ecc.algo = NAND_ECC_HAMMING; + + /* 16-bit bus width */ + if (host->board.bus_width_16) + nand_chip->options |= NAND_BUSWIDTH_16; } /* link the private data structures */ @@ -2188,11 +2212,8 @@ static int atmel_nand_probe(struct platform_device *pdev) nand_chip->cmd_ctrl = atmel_nand_cmd_ctrl; } - nand_chip->ecc.mode = host->board.ecc_mode; nand_chip->chip_delay = 40; /* 40us command delay time */ - if (host->board.bus_width_16) /* 16-bit bus width */ - nand_chip->options |= NAND_BUSWIDTH_16; nand_chip->read_buf = atmel_read_buf; nand_chip->write_buf = atmel_write_buf; @@ -2225,11 +2246,6 @@ static int atmel_nand_probe(struct platform_device *pdev) } } - if (host->board.on_flash_bbt || on_flash_bbt) { - dev_info(&pdev->dev, "Use On Flash BBT\n"); - nand_chip->bbt_options |= NAND_BBT_USE_FLASH; - } - if (!host->board.has_dma) use_dma = 0; @@ -2256,6 +2272,18 @@ static int atmel_nand_probe(struct platform_device *pdev) goto err_scan_ident; } + if (host->board.on_flash_bbt || on_flash_bbt) + nand_chip->bbt_options |= NAND_BBT_USE_FLASH; + + if (nand_chip->bbt_options & NAND_BBT_USE_FLASH) + dev_info(&pdev->dev, "Use On Flash BBT\n"); + + if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) { + res = atmel_of_init_ecc(host, pdev->dev.of_node); + if (res) + goto err_hw_ecc; + } + if (nand_chip->ecc.mode == NAND_ECC_HW) { if (host->has_pmecc) res = atmel_pmecc_nand_init_params(pdev, host); @@ -2393,11 +2421,6 @@ static int atmel_nand_nfc_probe(struct platform_device *pdev) } } - nfc->caps = (const struct atmel_nand_nfc_caps *) - of_device_get_match_data(&pdev->dev); - if (!nfc->caps) - return -ENODEV; - nfc_writel(nfc->hsmc_regs, IDR, 0xffffffff); nfc_readl(nfc->hsmc_regs, SR); /* clear the NFC_SR */ @@ -2426,17 +2449,8 @@ static int atmel_nand_nfc_remove(struct platform_device *pdev) return 0; } -static const struct atmel_nand_nfc_caps sama5d3_nfc_caps = { - .rb_mask = NFC_SR_RB_EDGE0, -}; - -static const struct atmel_nand_nfc_caps sama5d4_nfc_caps = { - .rb_mask = NFC_SR_RB_EDGE3, -}; - static const struct of_device_id atmel_nand_nfc_match[] = { - { .compatible = "atmel,sama5d3-nfc", .data = &sama5d3_nfc_caps }, - { .compatible = "atmel,sama5d4-nfc", .data = &sama5d4_nfc_caps }, + { .compatible = "atmel,sama5d3-nfc" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, atmel_nand_nfc_match); diff --git a/drivers/mtd/nand/atmel_nand_nfc.h b/drivers/mtd/nand/atmel_nand_nfc.h index 0bbc1fa97dba..4d5d26221a7e 100644 --- a/drivers/mtd/nand/atmel_nand_nfc.h +++ b/drivers/mtd/nand/atmel_nand_nfc.h @@ -42,8 +42,7 @@ #define NFC_SR_UNDEF (1 << 21) #define NFC_SR_AWB (1 << 22) #define NFC_SR_ASE (1 << 23) -#define NFC_SR_RB_EDGE0 (1 << 24) -#define NFC_SR_RB_EDGE3 (1 << 27) +#define NFC_SR_RB_EDGE (1 << 24) #define ATMEL_HSMC_NFC_IER 0x0c #define ATMEL_HSMC_NFC_IDR 0x10 diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c index 341ea4904164..9bf6d9915694 100644 --- a/drivers/mtd/nand/au1550nd.c +++ b/drivers/mtd/nand/au1550nd.c @@ -459,6 +459,7 @@ static int au1550nd_probe(struct platform_device *pdev) /* 30 us command delay time */ this->chip_delay = 30; this->ecc.mode = NAND_ECC_SOFT; + this->ecc.algo = NAND_ECC_HAMMING; if (pd->devwidth) this->options |= NAND_BUSWIDTH_16; diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c index 7f6b30e615b7..37da4236ab90 100644 --- a/drivers/mtd/nand/bf5xx_nand.c +++ b/drivers/mtd/nand/bf5xx_nand.c @@ -109,28 +109,33 @@ static const unsigned short bfin_nfc_pin_req[] = 0}; 
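The one-line additions to ams-delta, au1550nd and (below) bf5xx and cmx270 all encode the same rule: NAND_ECC_SOFT no longer implies an algorithm, so a board driver must name one explicitly. The pattern in isolation, as a hypothetical probe excerpt:

static void board_nand_ecc_setup(struct nand_chip *chip)
{
	/* soft ECC now requires an explicit algorithm selection */
	chip->ecc.mode = NAND_ECC_SOFT;
	chip->ecc.algo = NAND_ECC_HAMMING;	/* or NAND_ECC_BCH */
}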
#ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC -static struct nand_ecclayout bootrom_ecclayout = { - .eccbytes = 24, - .eccpos = { - 0x8 * 0, 0x8 * 0 + 1, 0x8 * 0 + 2, - 0x8 * 1, 0x8 * 1 + 1, 0x8 * 1 + 2, - 0x8 * 2, 0x8 * 2 + 1, 0x8 * 2 + 2, - 0x8 * 3, 0x8 * 3 + 1, 0x8 * 3 + 2, - 0x8 * 4, 0x8 * 4 + 1, 0x8 * 4 + 2, - 0x8 * 5, 0x8 * 5 + 1, 0x8 * 5 + 2, - 0x8 * 6, 0x8 * 6 + 1, 0x8 * 6 + 2, - 0x8 * 7, 0x8 * 7 + 1, 0x8 * 7 + 2 - }, - .oobfree = { - { 0x8 * 0 + 3, 5 }, - { 0x8 * 1 + 3, 5 }, - { 0x8 * 2 + 3, 5 }, - { 0x8 * 3 + 3, 5 }, - { 0x8 * 4 + 3, 5 }, - { 0x8 * 5 + 3, 5 }, - { 0x8 * 6 + 3, 5 }, - { 0x8 * 7 + 3, 5 }, - } +static int bootrom_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + if (section > 7) + return -ERANGE; + + oobregion->offset = section * 8; + oobregion->length = 3; + + return 0; +} + +static int bootrom_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + if (section > 7) + return -ERANGE; + + oobregion->offset = (section * 8) + 3; + oobregion->length = 5; + + return 0; +} + +static const struct mtd_ooblayout_ops bootrom_ooblayout_ops = { + .ecc = bootrom_ooblayout_ecc, + .free = bootrom_ooblayout_free, }; #endif @@ -800,7 +805,7 @@ static int bf5xx_nand_probe(struct platform_device *pdev) /* setup hardware ECC data struct */ if (hardware_ecc) { #ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC - chip->ecc.layout = &bootrom_ecclayout; + mtd_set_ooblayout(mtd, &bootrom_ooblayout_ops); #endif chip->read_buf = bf5xx_nand_dma_read_buf; chip->write_buf = bf5xx_nand_dma_write_buf; @@ -812,6 +817,7 @@ static int bf5xx_nand_probe(struct platform_device *pdev) chip->ecc.write_page_raw = bf5xx_nand_write_page_raw; } else { chip->ecc.mode = NAND_ECC_SOFT; + chip->ecc.algo = NAND_ECC_HAMMING; } /* scan hardware nand chip and setup mtd info data struct */ diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c index e0528397306a..b76ad7c0144f 100644 --- a/drivers/mtd/nand/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/brcmnand/brcmnand.c @@ -32,7 +32,6 @@ #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> #include <linux/of.h> -#include <linux/of_mtd.h> #include <linux/of_platform.h> #include <linux/slab.h> #include <linux/list.h> @@ -601,7 +600,7 @@ static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val) static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl) { - if (ctrl->nand_version < 0x0700) + if (ctrl->nand_version < 0x0602) return 24; return 0; } @@ -781,127 +780,183 @@ static inline bool is_hamming_ecc(struct brcmnand_cfg *cfg) } /* - * Returns a nand_ecclayout strucutre for the given layout/configuration. - * Returns NULL on failure. + * Set mtd->ooblayout to the appropriate mtd_ooblayout_ops given + * the layout/configuration. + * Returns -ERRCODE on failure. 
*/ -static struct nand_ecclayout *brcmnand_create_layout(int ecc_level, - struct brcmnand_host *host) +static int brcmnand_hamming_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) { + struct nand_chip *chip = mtd_to_nand(mtd); + struct brcmnand_host *host = nand_get_controller_data(chip); struct brcmnand_cfg *cfg = &host->hwcfg; - int i, j; - struct nand_ecclayout *layout; - int req; - int sectors; - int sas; - int idx1, idx2; - - layout = devm_kzalloc(&host->pdev->dev, sizeof(*layout), GFP_KERNEL); - if (!layout) - return NULL; - - sectors = cfg->page_size / (512 << cfg->sector_size_1k); - sas = cfg->spare_area_size << cfg->sector_size_1k; - - /* Hamming */ - if (is_hamming_ecc(cfg)) { - for (i = 0, idx1 = 0, idx2 = 0; i < sectors; i++) { - /* First sector of each page may have BBI */ - if (i == 0) { - layout->oobfree[idx2].offset = i * sas + 1; - /* Small-page NAND use byte 6 for BBI */ - if (cfg->page_size == 512) - layout->oobfree[idx2].offset--; - layout->oobfree[idx2].length = 5; - } else { - layout->oobfree[idx2].offset = i * sas; - layout->oobfree[idx2].length = 6; - } - idx2++; - layout->eccpos[idx1++] = i * sas + 6; - layout->eccpos[idx1++] = i * sas + 7; - layout->eccpos[idx1++] = i * sas + 8; - layout->oobfree[idx2].offset = i * sas + 9; - layout->oobfree[idx2].length = 7; - idx2++; - /* Leave zero-terminated entry for OOBFREE */ - if (idx1 >= MTD_MAX_ECCPOS_ENTRIES_LARGE || - idx2 >= MTD_MAX_OOBFREE_ENTRIES_LARGE - 1) - break; - } + int sas = cfg->spare_area_size << cfg->sector_size_1k; + int sectors = cfg->page_size / (512 << cfg->sector_size_1k); - return layout; - } + if (section >= sectors) + return -ERANGE; - /* - * CONTROLLER_VERSION: - * < v5.0: ECC_REQ = ceil(BCH_T * 13/8) - * >= v5.0: ECC_REQ = ceil(BCH_T * 14/8) - * But we will just be conservative. 
-	 */
-	req = DIV_ROUND_UP(ecc_level * 14, 8);
-	if (req >= sas) {
-		dev_err(&host->pdev->dev,
-			"error: ECC too large for OOB (ECC bytes %d, spare sector %d)\n",
-			req, sas);
-		return NULL;
-	}
+	oobregion->offset = (section * sas) + 6;
+	oobregion->length = 3;
+
+	return 0;
+}
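Worked numbers for the callback above, assuming a typical 2 KiB page with 16 spare bytes per 512-byte sector (sas = 16, sectors = 4): the 3-byte Hamming ECC regions land at offsets 6, 22, 38 and 54. A standalone sketch of the same arithmetic, illustrative only:

#include <stdio.h>

int main(void)
{
	int sas = 16, sectors = 4, section;

	/* brcmnand keeps 3 Hamming ECC bytes at bytes 6-8 of each
	 * sector's spare area */
	for (section = 0; section < sectors; section++)
		printf("ECC section %d: offset %d, length 3\n",
		       section, (section * sas) + 6);
	return 0;
}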
+
+static int brcmnand_hamming_ooblayout_free(struct mtd_info *mtd, int section,
+					   struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct brcmnand_host *host = nand_get_controller_data(chip);
+	struct brcmnand_cfg *cfg = &host->hwcfg;
+	int sas = cfg->spare_area_size << cfg->sector_size_1k;
+	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
-	layout->eccbytes = req * sectors;
-	for (i = 0, idx1 = 0, idx2 = 0; i < sectors; i++) {
-		for (j = sas - req; j < sas && idx1 <
-		     MTD_MAX_ECCPOS_ENTRIES_LARGE; j++, idx1++)
-			layout->eccpos[idx1] = i * sas + j;
+	if (section >= sectors * 2)
+		return -ERANGE;
+
+	oobregion->offset = (section / 2) * sas;
+
+	if (section & 1) {
+		oobregion->offset += 9;
+		oobregion->length = 7;
+	} else {
+		oobregion->length = 6;

 		/* First sector of each page may have BBI */
-		if (i == 0) {
-			if (cfg->page_size == 512 && (sas - req >= 6)) {
-				/* Small-page NAND use byte 6 for BBI */
-				layout->oobfree[idx2].offset = 0;
-				layout->oobfree[idx2].length = 5;
-				idx2++;
-				if (sas - req > 6) {
-					layout->oobfree[idx2].offset = 6;
-					layout->oobfree[idx2].length =
-						sas - req - 6;
-					idx2++;
-				}
-			} else if (sas > req + 1) {
-				layout->oobfree[idx2].offset = i * sas + 1;
-				layout->oobfree[idx2].length = sas - req - 1;
-				idx2++;
-			}
-		} else if (sas > req) {
-			layout->oobfree[idx2].offset = i * sas;
-			layout->oobfree[idx2].length = sas - req;
-			idx2++;
+		if (!section) {
+			/*
+			 * Small-page NAND uses byte 6 for BBI while large-page
+			 * NAND uses byte 0.
+			 */
+			if (cfg->page_size > 512)
+				oobregion->offset++;
+			oobregion->length--;
+		}
+	}
-	return layout;
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops brcmnand_hamming_ooblayout_ops = {
+	.ecc = brcmnand_hamming_ooblayout_ecc,
+	.free = brcmnand_hamming_ooblayout_free,
+};
+
+static int brcmnand_bch_ooblayout_ecc(struct mtd_info *mtd, int section,
+				      struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct brcmnand_host *host = nand_get_controller_data(chip);
+	struct brcmnand_cfg *cfg = &host->hwcfg;
+	int sas = cfg->spare_area_size << cfg->sector_size_1k;
+	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
+
+	if (section >= sectors)
+		return -ERANGE;
+
+	oobregion->offset = ((section + 1) * sas) - chip->ecc.bytes;
+	oobregion->length = chip->ecc.bytes;
+
+	return 0;
+}
+
+static int brcmnand_bch_ooblayout_free_lp(struct mtd_info *mtd, int section,
+					  struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct brcmnand_host *host = nand_get_controller_data(chip);
+	struct brcmnand_cfg *cfg = &host->hwcfg;
+	int sas = cfg->spare_area_size << cfg->sector_size_1k;
+	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
+
+	if (section >= sectors)
+		return -ERANGE;
+
+	if (sas <= chip->ecc.bytes)
+		return 0;
+
+	oobregion->offset = section * sas;
+	oobregion->length = sas - chip->ecc.bytes;
+
+	if (!section) {
+		oobregion->offset++;
+		oobregion->length--;
+	}
+
+	return 0;
 }

-static struct nand_ecclayout *brcmstb_choose_ecc_layout(
-		struct brcmnand_host *host)
+static int brcmnand_bch_ooblayout_free_sp(struct mtd_info *mtd, int section,
+					  struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct brcmnand_host *host = nand_get_controller_data(chip);
+	struct brcmnand_cfg *cfg = &host->hwcfg;
+	int sas = cfg->spare_area_size << cfg->sector_size_1k;
+
+	if (section > 1 || sas - chip->ecc.bytes < 6 ||
+	    (section && sas - chip->ecc.bytes == 6))
+		return -ERANGE;
+
+	if (!section) {
+		oobregion->offset = 0;
+		oobregion->length = 5;
+	} else {
+		oobregion->offset = 6;
+		oobregion->length = sas - chip->ecc.bytes - 6;
+	}
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops brcmnand_bch_lp_ooblayout_ops = {
+	.ecc = brcmnand_bch_ooblayout_ecc,
+	.free = brcmnand_bch_ooblayout_free_lp,
+};
+
+static const struct mtd_ooblayout_ops brcmnand_bch_sp_ooblayout_ops = {
+	.ecc = brcmnand_bch_ooblayout_ecc,
+	.free = brcmnand_bch_ooblayout_free_sp,
+};
+
+static int brcmstb_choose_ecc_layout(struct brcmnand_host *host)
 {
-	struct nand_ecclayout *layout;
 	struct brcmnand_cfg *p = &host->hwcfg;
+	struct mtd_info *mtd = nand_to_mtd(&host->chip);
+	struct nand_ecc_ctrl *ecc = &host->chip.ecc;
 	unsigned int ecc_level = p->ecc_level;
+	int sas = p->spare_area_size << p->sector_size_1k;
+	int sectors = p->page_size / (512 << p->sector_size_1k);

 	if (p->sector_size_1k)
 		ecc_level <<= 1;

-	layout = brcmnand_create_layout(ecc_level, host);
-	if (!layout) {
+	if (is_hamming_ecc(p)) {
+		ecc->bytes = 3 * sectors;
+		mtd_set_ooblayout(mtd, &brcmnand_hamming_ooblayout_ops);
+		return 0;
+	}
+
+	/*
+	 * CONTROLLER_VERSION:
+	 *   < v5.0: ECC_REQ = ceil(BCH_T * 13/8)
+	 *  >= v5.0: ECC_REQ = ceil(BCH_T * 14/8)
+	 * But we will just be conservative.
+ */ + ecc->bytes = DIV_ROUND_UP(ecc_level * 14, 8); + if (p->page_size == 512) + mtd_set_ooblayout(mtd, &brcmnand_bch_sp_ooblayout_ops); + else + mtd_set_ooblayout(mtd, &brcmnand_bch_lp_ooblayout_ops); + + if (ecc->bytes >= sas) { dev_err(&host->pdev->dev, - "no proper ecc_layout for this NAND cfg\n"); - return NULL; + "error: ECC too large for OOB (ECC bytes %d, spare sector %d)\n", + ecc->bytes, sas); + return -EINVAL; } - return layout; + return 0; } static void brcmnand_wp(struct mtd_info *mtd, int wp) @@ -1870,9 +1925,31 @@ static int brcmnand_setup_dev(struct brcmnand_host *host) cfg->col_adr_bytes = 2; cfg->blk_adr_bytes = get_blk_adr_bytes(mtd->size, mtd->writesize); + if (chip->ecc.mode != NAND_ECC_HW) { + dev_err(ctrl->dev, "only HW ECC supported; selected: %d\n", + chip->ecc.mode); + return -EINVAL; + } + + if (chip->ecc.algo == NAND_ECC_UNKNOWN) { + if (chip->ecc.strength == 1 && chip->ecc.size == 512) + /* Default to Hamming for 1-bit ECC, if unspecified */ + chip->ecc.algo = NAND_ECC_HAMMING; + else + /* Otherwise, BCH */ + chip->ecc.algo = NAND_ECC_BCH; + } + + if (chip->ecc.algo == NAND_ECC_HAMMING && (chip->ecc.strength != 1 || + chip->ecc.size != 512)) { + dev_err(ctrl->dev, "invalid Hamming params: %d bits per %d bytes\n", + chip->ecc.strength, chip->ecc.size); + return -EINVAL; + } + switch (chip->ecc.size) { case 512: - if (chip->ecc.strength == 1) /* Hamming */ + if (chip->ecc.algo == NAND_ECC_HAMMING) cfg->ecc_level = 15; else cfg->ecc_level = chip->ecc.strength; @@ -2001,8 +2078,8 @@ static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn) */ chip->options |= NAND_USE_BOUNCE_BUFFER; - if (of_get_nand_on_flash_bbt(dn)) - chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB; + if (chip->bbt_options & NAND_BBT_USE_FLASH) + chip->bbt_options |= NAND_BBT_NO_OOB; if (brcmnand_setup_dev(host)) return -ENXIO; @@ -2011,9 +2088,9 @@ static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn) /* only use our internal HW threshold */ mtd->bitflip_threshold = 1; - chip->ecc.layout = brcmstb_choose_ecc_layout(host); - if (!chip->ecc.layout) - return -ENXIO; + ret = brcmstb_choose_ecc_layout(host); + if (ret) + return ret; if (nand_scan_tail(mtd)) return -ENXIO; @@ -2115,6 +2192,7 @@ static const struct of_device_id brcmnand_of_match[] = { { .compatible = "brcm,brcmnand-v5.0" }, { .compatible = "brcm,brcmnand-v6.0" }, { .compatible = "brcm,brcmnand-v6.1" }, + { .compatible = "brcm,brcmnand-v6.2" }, { .compatible = "brcm,brcmnand-v7.0" }, { .compatible = "brcm,brcmnand-v7.1" }, {}, diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c index e553aff68987..0b0c93702abb 100644 --- a/drivers/mtd/nand/cafe_nand.c +++ b/drivers/mtd/nand/cafe_nand.c @@ -459,10 +459,37 @@ static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip, return max_bitflips; } -static struct nand_ecclayout cafe_oobinfo_2048 = { - .eccbytes = 14, - .eccpos = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, - .oobfree = {{14, 50}} +static int cafe_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + + if (section) + return -ERANGE; + + oobregion->offset = 0; + oobregion->length = chip->ecc.total; + + return 0; +} + +static int cafe_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + + if (section) + return -ERANGE; + + oobregion->offset = chip->ecc.total; + 
oobregion->length = mtd->oobsize - chip->ecc.total; + + return 0; +} + +static const struct mtd_ooblayout_ops cafe_ooblayout_ops = { + .ecc = cafe_ooblayout_ecc, + .free = cafe_ooblayout_free, }; /* Ick. The BBT code really ought to be able to work this bit out @@ -494,12 +521,6 @@ static struct nand_bbt_descr cafe_bbt_mirror_descr_2048 = { .pattern = cafe_mirror_pattern_2048 }; -static struct nand_ecclayout cafe_oobinfo_512 = { - .eccbytes = 14, - .eccpos = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, - .oobfree = {{14, 2}} -}; - static struct nand_bbt_descr cafe_bbt_main_descr_512 = { .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE | NAND_BBT_2BIT | NAND_BBT_VERSION, @@ -743,12 +764,11 @@ static int cafe_nand_probe(struct pci_dev *pdev, cafe->ctl2 |= 1<<29; /* 2KiB page size */ /* Set up ECC according to the type of chip we found */ + mtd_set_ooblayout(mtd, &cafe_ooblayout_ops); if (mtd->writesize == 2048) { - cafe->nand.ecc.layout = &cafe_oobinfo_2048; cafe->nand.bbt_td = &cafe_bbt_main_descr_2048; cafe->nand.bbt_md = &cafe_bbt_mirror_descr_2048; } else if (mtd->writesize == 512) { - cafe->nand.ecc.layout = &cafe_oobinfo_512; cafe->nand.bbt_td = &cafe_bbt_main_descr_512; cafe->nand.bbt_md = &cafe_bbt_mirror_descr_512; } else { diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c index 6f97ebba52c4..49133783ca53 100644 --- a/drivers/mtd/nand/cmx270_nand.c +++ b/drivers/mtd/nand/cmx270_nand.c @@ -187,6 +187,7 @@ static int __init cmx270_init(void) /* 15 us command delay time */ this->chip_delay = 20; this->ecc.mode = NAND_ECC_SOFT; + this->ecc.algo = NAND_ECC_HAMMING; /* read/write functions */ this->read_byte = cmx270_read_byte; diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c index 8cb821b6686e..cc07ba0f044d 100644 --- a/drivers/mtd/nand/davinci_nand.c +++ b/drivers/mtd/nand/davinci_nand.c @@ -34,7 +34,6 @@ #include <linux/slab.h> #include <linux/of_device.h> #include <linux/of.h> -#include <linux/of_mtd.h> #include <linux/platform_data/mtd-davinci.h> #include <linux/platform_data/mtd-davinci-aemif.h> @@ -54,7 +53,6 @@ */ struct davinci_nand_info { struct nand_chip chip; - struct nand_ecclayout ecclayout; struct device *dev; struct clk *clk; @@ -480,63 +478,46 @@ static int nand_davinci_dev_ready(struct mtd_info *mtd) * ten ECC bytes plus the manufacturer's bad block marker byte, and * and not overlapping the default BBT markers. */ -static struct nand_ecclayout hwecc4_small = { - .eccbytes = 10, - .eccpos = { 0, 1, 2, 3, 4, - /* offset 5 holds the badblock marker */ - 6, 7, - 13, 14, 15, }, - .oobfree = { - {.offset = 8, .length = 5, }, - {.offset = 16, }, - }, -}; +static int hwecc4_ooblayout_small_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + if (section > 2) + return -ERANGE; + + if (!section) { + oobregion->offset = 0; + oobregion->length = 5; + } else if (section == 1) { + oobregion->offset = 6; + oobregion->length = 2; + } else { + oobregion->offset = 13; + oobregion->length = 3; + } -/* An ECC layout for using 4-bit ECC with large-page (2048bytes) flash, - * storing ten ECC bytes plus the manufacturer's bad block marker byte, - * and not overlapping the default BBT markers. 
- */ -static struct nand_ecclayout hwecc4_2048 = { - .eccbytes = 40, - .eccpos = { - /* at the end of spare sector */ - 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, - 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, - 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, - 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - }, - .oobfree = { - /* 2 bytes at offset 0 hold manufacturer badblock markers */ - {.offset = 2, .length = 22, }, - /* 5 bytes at offset 8 hold BBT markers */ - /* 8 bytes at offset 16 hold JFFS2 clean markers */ - }, -}; + return 0; +} -/* - * An ECC layout for using 4-bit ECC with large-page (4096bytes) flash, - * storing ten ECC bytes plus the manufacturer's bad block marker byte, - * and not overlapping the default BBT markers. - */ -static struct nand_ecclayout hwecc4_4096 = { - .eccbytes = 80, - .eccpos = { - /* at the end of spare sector */ - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, - 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, - 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, - 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, - 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, - 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, - 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, - }, - .oobfree = { - /* 2 bytes at offset 0 hold manufacturer badblock markers */ - {.offset = 2, .length = 46, }, - /* 5 bytes at offset 8 hold BBT markers */ - /* 8 bytes at offset 16 hold JFFS2 clean markers */ - }, +static int hwecc4_ooblayout_small_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + if (section > 1) + return -ERANGE; + + if (!section) { + oobregion->offset = 8; + oobregion->length = 5; + } else { + oobregion->offset = 16; + oobregion->length = mtd->oobsize - 16; + } + + return 0; +} + +static const struct mtd_ooblayout_ops hwecc4_small_ooblayout_ops = { + .ecc = hwecc4_ooblayout_small_ecc, + .free = hwecc4_ooblayout_small_free, }; #if defined(CONFIG_OF) @@ -577,8 +558,6 @@ static struct davinci_nand_pdata "ti,davinci-mask-chipsel", &prop)) pdata->mask_chipsel = prop; if (!of_property_read_string(pdev->dev.of_node, - "nand-ecc-mode", &mode) || - !of_property_read_string(pdev->dev.of_node, "ti,davinci-ecc-mode", &mode)) { if (!strncmp("none", mode, 4)) pdata->ecc_mode = NAND_ECC_NONE; @@ -591,14 +570,11 @@ static struct davinci_nand_pdata "ti,davinci-ecc-bits", &prop)) pdata->ecc_bits = prop; - prop = of_get_nand_bus_width(pdev->dev.of_node); - if (0 < prop || !of_property_read_u32(pdev->dev.of_node, - "ti,davinci-nand-buswidth", &prop)) - if (prop == 16) - pdata->options |= NAND_BUSWIDTH_16; + if (!of_property_read_u32(pdev->dev.of_node, + "ti,davinci-nand-buswidth", &prop) && prop == 16) + pdata->options |= NAND_BUSWIDTH_16; + if (of_property_read_bool(pdev->dev.of_node, - "nand-on-flash-bbt") || - of_property_read_bool(pdev->dev.of_node, "ti,davinci-nand-use-bbt")) pdata->bbt_options = NAND_BBT_USE_FLASH; @@ -628,7 +604,6 @@ static int nand_davinci_probe(struct platform_device *pdev) void __iomem *base; int ret; uint32_t val; - nand_ecc_modes_t ecc_mode; struct mtd_info *mtd; pdata = nand_davinci_get_pdata(pdev); @@ -712,13 +687,53 @@ static int nand_davinci_probe(struct platform_device *pdev) info->chip.write_buf = nand_davinci_write_buf; /* Use board-specific ECC config */ - ecc_mode = pdata->ecc_mode; + info->chip.ecc.mode = pdata->ecc_mode; ret = -EINVAL; - switch (ecc_mode) { + + info->clk = devm_clk_get(&pdev->dev, "aemif"); + if (IS_ERR(info->clk)) { + ret = PTR_ERR(info->clk); + dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret); + 
return ret; + } + + ret = clk_prepare_enable(info->clk); + if (ret < 0) { + dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n", + ret); + goto err_clk_enable; + } + + spin_lock_irq(&davinci_nand_lock); + + /* put CSxNAND into NAND mode */ + val = davinci_nand_readl(info, NANDFCR_OFFSET); + val |= BIT(info->core_chipsel); + davinci_nand_writel(info, NANDFCR_OFFSET, val); + + spin_unlock_irq(&davinci_nand_lock); + + /* Scan to find existence of the device(s) */ + ret = nand_scan_ident(mtd, pdata->mask_chipsel ? 2 : 1, NULL); + if (ret < 0) { + dev_dbg(&pdev->dev, "no NAND chip(s) found\n"); + goto err; + } + + switch (info->chip.ecc.mode) { case NAND_ECC_NONE: + pdata->ecc_bits = 0; + break; case NAND_ECC_SOFT: pdata->ecc_bits = 0; + /* + * This driver expects Hamming based ECC when ecc_mode is set + * to NAND_ECC_SOFT. Force ecc.algo to NAND_ECC_HAMMING to + * avoid adding an extra ->ecc_algo field to + * davinci_nand_pdata. + */ + info->chip.ecc.algo = NAND_ECC_HAMMING; break; case NAND_ECC_HW: if (pdata->ecc_bits == 4) { @@ -754,37 +769,6 @@ static int nand_davinci_probe(struct platform_device *pdev) default: return -EINVAL; } - info->chip.ecc.mode = ecc_mode; - - info->clk = devm_clk_get(&pdev->dev, "aemif"); - if (IS_ERR(info->clk)) { - ret = PTR_ERR(info->clk); - dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret); - return ret; - } - - ret = clk_prepare_enable(info->clk); - if (ret < 0) { - dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n", - ret); - goto err_clk_enable; - } - - spin_lock_irq(&davinci_nand_lock); - - /* put CSxNAND into NAND mode */ - val = davinci_nand_readl(info, NANDFCR_OFFSET); - val |= BIT(info->core_chipsel); - davinci_nand_writel(info, NANDFCR_OFFSET, val); - - spin_unlock_irq(&davinci_nand_lock); - - /* Scan to find existence of the device(s) */ - ret = nand_scan_ident(mtd, pdata->mask_chipsel ? 2 : 1, NULL); - if (ret < 0) { - dev_dbg(&pdev->dev, "no NAND chip(s) found\n"); - goto err; - } /* Update ECC layout if needed ... for 1-bit HW ECC, the default * is OK, but it allocates 6 bytes when only 3 are needed (for @@ -805,26 +789,14 @@ static int nand_davinci_probe(struct platform_device *pdev) * table marker fits in the free bytes. 
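/*
 * Sketch of the probe flow the davinci reordering above converges on
 * (a fictional "foo" driver; only the two scan calls already used in
 * this patch are assumed): nand_scan_ident() runs first so the ECC
 * choice can key off the geometry the chip actually reports, and
 * nand_scan_tail() then applies it.
 */
static int foo_nand_scan(struct mtd_info *mtd, struct nand_chip *chip)
{
	int ret;

	/* identify the chip; fills mtd->writesize and mtd->oobsize */
	ret = nand_scan_ident(mtd, 1, NULL);
	if (ret < 0)
		return ret;

	/* pick an ECC config from the detected geometry */
	chip->ecc.mode = NAND_ECC_SOFT;
	chip->ecc.algo = NAND_ECC_HAMMING;

	/* complete the scan with the chosen ECC settings */
	return nand_scan_tail(mtd);
}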
*/ if (chunks == 1) { - info->ecclayout = hwecc4_small; - info->ecclayout.oobfree[1].length = mtd->oobsize - 16; - goto syndrome_done; - } - if (chunks == 4) { - info->ecclayout = hwecc4_2048; - info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST; - goto syndrome_done; - } - if (chunks == 8) { - info->ecclayout = hwecc4_4096; + mtd_set_ooblayout(mtd, &hwecc4_small_ooblayout_ops); + } else if (chunks == 4 || chunks == 8) { + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST; - goto syndrome_done; + } else { + ret = -EIO; + goto err; } - - ret = -EIO; - goto err; - -syndrome_done: - info->chip.ecc.layout = &info->ecclayout; } ret = nand_scan_tail(mtd); @@ -850,7 +822,7 @@ err: err_clk_enable: spin_lock_irq(&davinci_nand_lock); - if (ecc_mode == NAND_ECC_HW_SYNDROME) + if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME) ecc4_busy = false; spin_unlock_irq(&davinci_nand_lock); return ret; diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c index 30bf5f690f78..0476ae8776d9 100644 --- a/drivers/mtd/nand/denali.c +++ b/drivers/mtd/nand/denali.c @@ -1374,13 +1374,41 @@ static void denali_hw_init(struct denali_nand_info *denali) * correction */ #define ECC_8BITS 14 -static struct nand_ecclayout nand_8bit_oob = { - .eccbytes = 14, -}; - #define ECC_15BITS 26 -static struct nand_ecclayout nand_15bit_oob = { - .eccbytes = 26, + +static int denali_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct denali_nand_info *denali = mtd_to_denali(mtd); + struct nand_chip *chip = mtd_to_nand(mtd); + + if (section) + return -ERANGE; + + oobregion->offset = denali->bbtskipbytes; + oobregion->length = chip->ecc.total; + + return 0; +} + +static int denali_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct denali_nand_info *denali = mtd_to_denali(mtd); + struct nand_chip *chip = mtd_to_nand(mtd); + + if (section) + return -ERANGE; + + oobregion->offset = chip->ecc.total + denali->bbtskipbytes; + oobregion->length = mtd->oobsize - oobregion->offset; + + return 0; +} + +static const struct mtd_ooblayout_ops denali_ooblayout_ops = { + .ecc = denali_ooblayout_ecc, + .free = denali_ooblayout_free, }; static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' }; @@ -1561,7 +1589,6 @@ int denali_init(struct denali_nand_info *denali) ECC_SECTOR_SIZE)))) { /* if MLC OOB size is large enough, use 15bit ECC*/ denali->nand.ecc.strength = 15; - denali->nand.ecc.layout = &nand_15bit_oob; denali->nand.ecc.bytes = ECC_15BITS; iowrite32(15, denali->flash_reg + ECC_CORRECTION); } else if (mtd->oobsize < (denali->bbtskipbytes + @@ -1571,20 +1598,13 @@ int denali_init(struct denali_nand_info *denali) goto failed_req_irq; } else { denali->nand.ecc.strength = 8; - denali->nand.ecc.layout = &nand_8bit_oob; denali->nand.ecc.bytes = ECC_8BITS; iowrite32(8, denali->flash_reg + ECC_CORRECTION); } + mtd_set_ooblayout(mtd, &denali_ooblayout_ops); denali->nand.ecc.bytes *= denali->devnum; denali->nand.ecc.strength *= denali->devnum; - denali->nand.ecc.layout->eccbytes *= - mtd->writesize / ECC_SECTOR_SIZE; - denali->nand.ecc.layout->oobfree[0].offset = - denali->bbtskipbytes + denali->nand.ecc.layout->eccbytes; - denali->nand.ecc.layout->oobfree[0].length = - mtd->oobsize - denali->nand.ecc.layout->eccbytes - - denali->bbtskipbytes; /* * Let driver know the total blocks number and how many blocks diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c index 547c1002941d..a023ab9e9cbf 100644 --- 
a/drivers/mtd/nand/diskonchip.c +++ b/drivers/mtd/nand/diskonchip.c @@ -950,20 +950,50 @@ static int doc200x_correct_data(struct mtd_info *mtd, u_char *dat, //u_char mydatabuf[528]; -/* The strange out-of-order .oobfree list below is a (possibly unneeded) - * attempt to retain compatibility. It used to read: - * .oobfree = { {8, 8} } - * Since that leaves two bytes unusable, it was changed. But the following - * scheme might affect existing jffs2 installs by moving the cleanmarker: - * .oobfree = { {6, 10} } - * jffs2 seems to handle the above gracefully, but the current scheme seems - * safer. The only problem with it is that any code that parses oobfree must - * be able to handle out-of-order segments. - */ -static struct nand_ecclayout doc200x_oobinfo = { - .eccbytes = 6, - .eccpos = {0, 1, 2, 3, 4, 5}, - .oobfree = {{8, 8}, {6, 2}} +static int doc200x_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + if (section) + return -ERANGE; + + oobregion->offset = 0; + oobregion->length = 6; + + return 0; +} + +static int doc200x_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + if (section > 1) + return -ERANGE; + + /* + * The strange out-of-order free bytes definition is a (possibly + * unneeded) attempt to retain compatibility. It used to read: + * .oobfree = { {8, 8} } + * Since that leaves two bytes unusable, it was changed. But the + * following scheme might affect existing jffs2 installs by moving the + * cleanmarker: + * .oobfree = { {6, 10} } + * jffs2 seems to handle the above gracefully, but the current scheme + * seems safer. The only problem with it is that any code retrieving + * free bytes position must be able to handle out-of-order segments. + */ + if (!section) { + oobregion->offset = 8; + oobregion->length = 8; + } else { + oobregion->offset = 6; + oobregion->length = 2; + } + + return 0; +} + +static const struct mtd_ooblayout_ops doc200x_ooblayout_ops = { + .ecc = doc200x_ooblayout_ecc, + .free = doc200x_ooblayout_free, }; /* Find the (I)NFTL Media Header, and optionally also the mirror media header. @@ -1537,6 +1567,7 @@ static int __init doc_probe(unsigned long physadr) nand->bbt_md = nand->bbt_td + 1; mtd->owner = THIS_MODULE; + mtd_set_ooblayout(mtd, &doc200x_ooblayout_ops); nand_set_controller_data(nand, doc); nand->select_chip = doc200x_select_chip; @@ -1548,7 +1579,6 @@ static int __init doc_probe(unsigned long physadr) nand->ecc.calculate = doc200x_calculate_ecc; nand->ecc.correct = doc200x_correct_data; - nand->ecc.layout = &doc200x_oobinfo; nand->ecc.mode = NAND_ECC_HW_SYNDROME; nand->ecc.size = 512; nand->ecc.bytes = 6; diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c index d86a60e1bbcb..47316998017f 100644 --- a/drivers/mtd/nand/docg4.c +++ b/drivers/mtd/nand/docg4.c @@ -222,10 +222,33 @@ struct docg4_priv { * Bytes 8 - 14 are hw-generated ecc covering entire page + oob bytes 0 - 14. * Byte 15 (the last) is used by the driver as a "page written" flag. 
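/*
 * The doc200x comment above keeps its free bytes deliberately out of
 * order ({8,8} before {6,2}).  A consumer that packs the free OOB
 * bytes of a raw buffer must therefore follow the section order the
 * layout reports rather than assume ascending offsets.  Minimal
 * sketch (the helper name and caller-supplied "oob" buffer are
 * assumptions):
 */
static int pack_free_oob(struct mtd_info *mtd, u8 *dst, const u8 *oob)
{
	struct mtd_oob_region r;
	int section = 0, copied = 0;

	while (!mtd_ooblayout_free(mtd, section++, &r)) {
		memcpy(dst + copied, oob + r.offset, r.length);
		copied += r.length;
	}

	return copied;	/* number of free bytes packed */
}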
*/ -static struct nand_ecclayout docg4_oobinfo = { - .eccbytes = 9, - .eccpos = {7, 8, 9, 10, 11, 12, 13, 14, 15}, - .oobfree = { {.offset = 2, .length = 5} } +static int docg4_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + if (section) + return -ERANGE; + + oobregion->offset = 7; + oobregion->length = 9; + + return 0; +} + +static int docg4_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + if (section) + return -ERANGE; + + oobregion->offset = 2; + oobregion->length = 5; + + return 0; +} + +static const struct mtd_ooblayout_ops docg4_ooblayout_ops = { + .ecc = docg4_ooblayout_ecc, + .free = docg4_ooblayout_free, }; /* @@ -1209,6 +1232,7 @@ static void __init init_mtd_structs(struct mtd_info *mtd) mtd->writesize = DOCG4_PAGE_SIZE; mtd->erasesize = DOCG4_BLOCK_SIZE; mtd->oobsize = DOCG4_OOB_SIZE; + mtd_set_ooblayout(mtd, &docg4_ooblayout_ops); nand->chipsize = DOCG4_CHIP_SIZE; nand->chip_shift = DOCG4_CHIP_SHIFT; nand->bbt_erase_shift = nand->phys_erase_shift = DOCG4_ERASE_SHIFT; @@ -1217,7 +1241,6 @@ static void __init init_mtd_structs(struct mtd_info *mtd) nand->pagemask = 0x3ffff; nand->badblockpos = NAND_LARGE_BADBLOCK_POS; nand->badblockbits = 8; - nand->ecc.layout = &docg4_oobinfo; nand->ecc.mode = NAND_ECC_HW_SYNDROME; nand->ecc.size = DOCG4_PAGE_SIZE; nand->ecc.prepad = 8; diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c index 059d5f7ec124..60a88f24c6b3 100644 --- a/drivers/mtd/nand/fsl_elbc_nand.c +++ b/drivers/mtd/nand/fsl_elbc_nand.c @@ -79,32 +79,53 @@ struct fsl_elbc_fcm_ctrl { /* These map to the positions used by the FCM hardware ECC generator */ -/* Small Page FLASH with FMR[ECCM] = 0 */ -static struct nand_ecclayout fsl_elbc_oob_sp_eccm0 = { - .eccbytes = 3, - .eccpos = {6, 7, 8}, - .oobfree = { {0, 5}, {9, 7} }, -}; +static int fsl_elbc_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct fsl_elbc_mtd *priv = nand_get_controller_data(chip); -/* Small Page FLASH with FMR[ECCM] = 1 */ -static struct nand_ecclayout fsl_elbc_oob_sp_eccm1 = { - .eccbytes = 3, - .eccpos = {8, 9, 10}, - .oobfree = { {0, 5}, {6, 2}, {11, 5} }, -}; + if (section >= chip->ecc.steps) + return -ERANGE; -/* Large Page FLASH with FMR[ECCM] = 0 */ -static struct nand_ecclayout fsl_elbc_oob_lp_eccm0 = { - .eccbytes = 12, - .eccpos = {6, 7, 8, 22, 23, 24, 38, 39, 40, 54, 55, 56}, - .oobfree = { {1, 5}, {9, 13}, {25, 13}, {41, 13}, {57, 7} }, -}; + oobregion->offset = (16 * section) + 6; + if (priv->fmr & FMR_ECCM) + oobregion->offset += 2; -/* Large Page FLASH with FMR[ECCM] = 1 */ -static struct nand_ecclayout fsl_elbc_oob_lp_eccm1 = { - .eccbytes = 12, - .eccpos = {8, 9, 10, 24, 25, 26, 40, 41, 42, 56, 57, 58}, - .oobfree = { {1, 7}, {11, 13}, {27, 13}, {43, 13}, {59, 5} }, + oobregion->length = chip->ecc.bytes; + + return 0; +} + +static int fsl_elbc_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct fsl_elbc_mtd *priv = nand_get_controller_data(chip); + + if (section > chip->ecc.steps) + return -ERANGE; + + if (!section) { + oobregion->offset = 0; + if (mtd->writesize > 512) + oobregion->offset++; + oobregion->length = (priv->fmr & FMR_ECCM) ? 7 : 5; + } else { + oobregion->offset = (16 * section) - + ((priv->fmr & FMR_ECCM) ? 
5 : 7); + if (section < chip->ecc.steps) + oobregion->length = 13; + else + oobregion->length = mtd->oobsize - oobregion->offset; + } + + return 0; +} + +static const struct mtd_ooblayout_ops fsl_elbc_ooblayout_ops = { + .ecc = fsl_elbc_ooblayout_ecc, + .free = fsl_elbc_ooblayout_free, }; /* @@ -657,8 +678,8 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd) chip->ecc.bytes); dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.total = %d\n", chip->ecc.total); - dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.layout = %p\n", - chip->ecc.layout); + dev_dbg(priv->dev, "fsl_elbc_init: mtd->ooblayout = %p\n", + mtd->ooblayout); dev_dbg(priv->dev, "fsl_elbc_init: mtd->flags = %08x\n", mtd->flags); dev_dbg(priv->dev, "fsl_elbc_init: mtd->size = %lld\n", mtd->size); dev_dbg(priv->dev, "fsl_elbc_init: mtd->erasesize = %d\n", @@ -675,14 +696,6 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd) } else if (mtd->writesize == 2048) { priv->page_size = 1; setbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS); - /* adjust ecc setup if needed */ - if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) == - BR_DECC_CHK_GEN) { - chip->ecc.size = 512; - chip->ecc.layout = (priv->fmr & FMR_ECCM) ? - &fsl_elbc_oob_lp_eccm1 : - &fsl_elbc_oob_lp_eccm0; - } } else { dev_err(priv->dev, "fsl_elbc_init: page size %d is not supported\n", @@ -780,15 +793,14 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv) if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) == BR_DECC_CHK_GEN) { chip->ecc.mode = NAND_ECC_HW; - /* put in small page settings and adjust later if needed */ - chip->ecc.layout = (priv->fmr & FMR_ECCM) ? - &fsl_elbc_oob_sp_eccm1 : &fsl_elbc_oob_sp_eccm0; + mtd_set_ooblayout(mtd, &fsl_elbc_ooblayout_ops); chip->ecc.size = 512; chip->ecc.bytes = 3; chip->ecc.strength = 1; } else { /* otherwise fall back to default software ECC */ chip->ecc.mode = NAND_ECC_SOFT; + chip->ecc.algo = NAND_ECC_HAMMING; } return 0; diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c index 43f5a3a4873f..4e9e5fd8faf3 100644 --- a/drivers/mtd/nand/fsl_ifc_nand.c +++ b/drivers/mtd/nand/fsl_ifc_nand.c @@ -67,136 +67,6 @@ struct fsl_ifc_nand_ctrl { static struct fsl_ifc_nand_ctrl *ifc_nand_ctrl; -/* 512-byte page with 4-bit ECC, 8-bit */ -static struct nand_ecclayout oob_512_8bit_ecc4 = { - .eccbytes = 8, - .eccpos = {8, 9, 10, 11, 12, 13, 14, 15}, - .oobfree = { {0, 5}, {6, 2} }, -}; - -/* 512-byte page with 4-bit ECC, 16-bit */ -static struct nand_ecclayout oob_512_16bit_ecc4 = { - .eccbytes = 8, - .eccpos = {8, 9, 10, 11, 12, 13, 14, 15}, - .oobfree = { {2, 6}, }, -}; - -/* 2048-byte page size with 4-bit ECC */ -static struct nand_ecclayout oob_2048_ecc4 = { - .eccbytes = 32, - .eccpos = { - 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, - 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, - }, - .oobfree = { {2, 6}, {40, 24} }, -}; - -/* 4096-byte page size with 4-bit ECC */ -static struct nand_ecclayout oob_4096_ecc4 = { - .eccbytes = 64, - .eccpos = { - 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, - 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, - 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, - 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 66, 67, 68, 69, 70, 71, - }, - .oobfree = { {2, 6}, {72, 56} }, -}; - -/* 4096-byte page size with 8-bit ECC -- requires 218-byte OOB */ -static struct nand_ecclayout oob_4096_ecc8 = { - .eccbytes = 128, - .eccpos = { - 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 
23, - 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, - 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, - 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 66, 67, 68, 69, 70, 71, - 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, - 88, 89, 90, 91, 92, 93, 94, 95, - 96, 97, 98, 99, 100, 101, 102, 103, - 104, 105, 106, 107, 108, 109, 110, 111, - 112, 113, 114, 115, 116, 117, 118, 119, - 120, 121, 122, 123, 124, 125, 126, 127, - 128, 129, 130, 131, 132, 133, 134, 135, - }, - .oobfree = { {2, 6}, {136, 82} }, -}; - -/* 8192-byte page size with 4-bit ECC */ -static struct nand_ecclayout oob_8192_ecc4 = { - .eccbytes = 128, - .eccpos = { - 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, - 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, - 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, - 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 66, 67, 68, 69, 70, 71, - 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, - 88, 89, 90, 91, 92, 93, 94, 95, - 96, 97, 98, 99, 100, 101, 102, 103, - 104, 105, 106, 107, 108, 109, 110, 111, - 112, 113, 114, 115, 116, 117, 118, 119, - 120, 121, 122, 123, 124, 125, 126, 127, - 128, 129, 130, 131, 132, 133, 134, 135, - }, - .oobfree = { {2, 6}, {136, 208} }, -}; - -/* 8192-byte page size with 8-bit ECC -- requires 218-byte OOB */ -static struct nand_ecclayout oob_8192_ecc8 = { - .eccbytes = 256, - .eccpos = { - 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, - 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, - 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, - 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 66, 67, 68, 69, 70, 71, - 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, - 88, 89, 90, 91, 92, 93, 94, 95, - 96, 97, 98, 99, 100, 101, 102, 103, - 104, 105, 106, 107, 108, 109, 110, 111, - 112, 113, 114, 115, 116, 117, 118, 119, - 120, 121, 122, 123, 124, 125, 126, 127, - 128, 129, 130, 131, 132, 133, 134, 135, - 136, 137, 138, 139, 140, 141, 142, 143, - 144, 145, 146, 147, 148, 149, 150, 151, - 152, 153, 154, 155, 156, 157, 158, 159, - 160, 161, 162, 163, 164, 165, 166, 167, - 168, 169, 170, 171, 172, 173, 174, 175, - 176, 177, 178, 179, 180, 181, 182, 183, - 184, 185, 186, 187, 188, 189, 190, 191, - 192, 193, 194, 195, 196, 197, 198, 199, - 200, 201, 202, 203, 204, 205, 206, 207, - 208, 209, 210, 211, 212, 213, 214, 215, - 216, 217, 218, 219, 220, 221, 222, 223, - 224, 225, 226, 227, 228, 229, 230, 231, - 232, 233, 234, 235, 236, 237, 238, 239, - 240, 241, 242, 243, 244, 245, 246, 247, - 248, 249, 250, 251, 252, 253, 254, 255, - 256, 257, 258, 259, 260, 261, 262, 263, - }, - .oobfree = { {2, 6}, {264, 80} }, -}; - /* * Generic flash bbt descriptors */ @@ -223,6 +93,57 @@ static struct nand_bbt_descr bbt_mirror_descr = { .pattern = mirror_pattern, }; +static int fsl_ifc_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + + if (section) + return -ERANGE; + + oobregion->offset = 8; + oobregion->length = chip->ecc.total; + + return 0; +} + +static int fsl_ifc_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + + if (section > 1) + return -ERANGE; + + if (mtd->writesize == 512 && + !(chip->options & NAND_BUSWIDTH_16)) { + if (!section) { + oobregion->offset = 0; + oobregion->length = 5; + } else { + oobregion->offset = 6; + 
oobregion->length = 2; + } + + return 0; + } + + if (!section) { + oobregion->offset = 2; + oobregion->length = 6; + } else { + oobregion->offset = chip->ecc.total + 8; + oobregion->length = mtd->oobsize - oobregion->offset; + } + + return 0; +} + +static const struct mtd_ooblayout_ops fsl_ifc_ooblayout_ops = { + .ecc = fsl_ifc_ooblayout_ecc, + .free = fsl_ifc_ooblayout_free, +}; + /* * Set up the IFC hardware block and page address fields, and the ifc nand * structure addr field to point to the correct IFC buffer in memory @@ -232,7 +153,7 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob) struct nand_chip *chip = mtd_to_nand(mtd); struct fsl_ifc_mtd *priv = nand_get_controller_data(chip); struct fsl_ifc_ctrl *ctrl = priv->ctrl; - struct fsl_ifc_regs __iomem *ifc = ctrl->regs; + struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; int buf_num; ifc_nand_ctrl->page = page_addr; @@ -257,18 +178,22 @@ static int is_blank(struct mtd_info *mtd, unsigned int bufnum) u8 __iomem *addr = priv->vbase + bufnum * (mtd->writesize * 2); u32 __iomem *mainarea = (u32 __iomem *)addr; u8 __iomem *oob = addr + mtd->writesize; - int i; + struct mtd_oob_region oobregion = { }; + int i, section = 0; for (i = 0; i < mtd->writesize / 4; i++) { if (__raw_readl(&mainarea[i]) != 0xffffffff) return 0; } - for (i = 0; i < chip->ecc.layout->eccbytes; i++) { - int pos = chip->ecc.layout->eccpos[i]; + mtd_ooblayout_ecc(mtd, section++, &oobregion); + while (oobregion.length) { + for (i = 0; i < oobregion.length; i++) { + if (__raw_readb(&oob[oobregion.offset + i]) != 0xff) + return 0; + } - if (__raw_readb(&oob[pos]) != 0xff) - return 0; + mtd_ooblayout_ecc(mtd, section++, &oobregion); } return 1; @@ -295,7 +220,7 @@ static void fsl_ifc_run_command(struct mtd_info *mtd) struct fsl_ifc_mtd *priv = nand_get_controller_data(chip); struct fsl_ifc_ctrl *ctrl = priv->ctrl; struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl; - struct fsl_ifc_regs __iomem *ifc = ctrl->regs; + struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; u32 eccstat[4]; int i; @@ -371,7 +296,7 @@ static void fsl_ifc_do_read(struct nand_chip *chip, { struct fsl_ifc_mtd *priv = nand_get_controller_data(chip); struct fsl_ifc_ctrl *ctrl = priv->ctrl; - struct fsl_ifc_regs __iomem *ifc = ctrl->regs; + struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; /* Program FIR/IFC_NAND_FCR0 for Small/Large page */ if (mtd->writesize > 512) { @@ -411,7 +336,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, struct nand_chip *chip = mtd_to_nand(mtd); struct fsl_ifc_mtd *priv = nand_get_controller_data(chip); struct fsl_ifc_ctrl *ctrl = priv->ctrl; - struct fsl_ifc_regs __iomem *ifc = ctrl->regs; + struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; /* clear the read buffer */ ifc_nand_ctrl->read_bytes = 0; @@ -723,7 +648,7 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip) { struct fsl_ifc_mtd *priv = nand_get_controller_data(chip); struct fsl_ifc_ctrl *ctrl = priv->ctrl; - struct fsl_ifc_regs __iomem *ifc = ctrl->regs; + struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; u32 nand_fsr; /* Use READ_STATUS command, but wait for the device to be ready */ @@ -808,8 +733,8 @@ static int fsl_ifc_chip_init_tail(struct mtd_info *mtd) chip->ecc.bytes); dev_dbg(priv->dev, "%s: nand->ecc.total = %d\n", __func__, chip->ecc.total); - dev_dbg(priv->dev, "%s: nand->ecc.layout = %p\n", __func__, - chip->ecc.layout); + dev_dbg(priv->dev, "%s: mtd->ooblayout = %p\n", __func__, + mtd->ooblayout); dev_dbg(priv->dev, "%s: 
mtd->flags = %08x\n", __func__, mtd->flags); dev_dbg(priv->dev, "%s: mtd->size = %lld\n", __func__, mtd->size); dev_dbg(priv->dev, "%s: mtd->erasesize = %d\n", __func__, @@ -825,39 +750,42 @@ static int fsl_ifc_chip_init_tail(struct mtd_info *mtd) static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv) { struct fsl_ifc_ctrl *ctrl = priv->ctrl; - struct fsl_ifc_regs __iomem *ifc = ctrl->regs; + struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs; + struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs; uint32_t csor = 0, csor_8k = 0, csor_ext = 0; uint32_t cs = priv->bank; /* Save CSOR and CSOR_ext */ - csor = ifc_in32(&ifc->csor_cs[cs].csor); - csor_ext = ifc_in32(&ifc->csor_cs[cs].csor_ext); + csor = ifc_in32(&ifc_global->csor_cs[cs].csor); + csor_ext = ifc_in32(&ifc_global->csor_cs[cs].csor_ext); /* chage PageSize 8K and SpareSize 1K*/ csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000; - ifc_out32(csor_8k, &ifc->csor_cs[cs].csor); - ifc_out32(0x0000400, &ifc->csor_cs[cs].csor_ext); + ifc_out32(csor_8k, &ifc_global->csor_cs[cs].csor); + ifc_out32(0x0000400, &ifc_global->csor_cs[cs].csor_ext); /* READID */ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | - (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | - (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT), - &ifc->ifc_nand.nand_fir0); + (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | + (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT), + &ifc_runtime->ifc_nand.nand_fir0); ifc_out32(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT, - &ifc->ifc_nand.nand_fcr0); - ifc_out32(0x0, &ifc->ifc_nand.row3); + &ifc_runtime->ifc_nand.nand_fcr0); + ifc_out32(0x0, &ifc_runtime->ifc_nand.row3); - ifc_out32(0x0, &ifc->ifc_nand.nand_fbcr); + ifc_out32(0x0, &ifc_runtime->ifc_nand.nand_fbcr); /* Program ROW0/COL0 */ - ifc_out32(0x0, &ifc->ifc_nand.row0); - ifc_out32(0x0, &ifc->ifc_nand.col0); + ifc_out32(0x0, &ifc_runtime->ifc_nand.row0); + ifc_out32(0x0, &ifc_runtime->ifc_nand.col0); /* set the chip select for NAND Transaction */ - ifc_out32(cs << IFC_NAND_CSEL_SHIFT, &ifc->ifc_nand.nand_csel); + ifc_out32(cs << IFC_NAND_CSEL_SHIFT, + &ifc_runtime->ifc_nand.nand_csel); /* start read seq */ - ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt); + ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, + &ifc_runtime->ifc_nand.nandseq_strt); /* wait for command complete flag or timeout */ wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat, @@ -867,17 +795,17 @@ static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv) printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n"); /* Restore CSOR and CSOR_ext */ - ifc_out32(csor, &ifc->csor_cs[cs].csor); - ifc_out32(csor_ext, &ifc->csor_cs[cs].csor_ext); + ifc_out32(csor, &ifc_global->csor_cs[cs].csor); + ifc_out32(csor_ext, &ifc_global->csor_cs[cs].csor_ext); } static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) { struct fsl_ifc_ctrl *ctrl = priv->ctrl; - struct fsl_ifc_regs __iomem *ifc = ctrl->regs; + struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs; + struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs; struct nand_chip *chip = &priv->chip; struct mtd_info *mtd = nand_to_mtd(&priv->chip); - struct nand_ecclayout *layout; u32 csor; /* Fill in fsl_ifc_mtd structure */ @@ -886,7 +814,8 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) /* fill in nand_chip structure */ /* set up function call table */ - if ((ifc_in32(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16) + if ((ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr)) + & CSPR_PORT_SIZE_16) chip->read_byte = fsl_ifc_read_byte16; else 
chip->read_byte = fsl_ifc_read_byte; @@ -900,13 +829,14 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) chip->bbt_td = &bbt_main_descr; chip->bbt_md = &bbt_mirror_descr; - ifc_out32(0x0, &ifc->ifc_nand.ncfgr); + ifc_out32(0x0, &ifc_runtime->ifc_nand.ncfgr); /* set up nand options */ chip->bbt_options = NAND_BBT_USE_FLASH; chip->options = NAND_NO_SUBPAGE_WRITE; - if (ifc_in32(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) { + if (ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr) + & CSPR_PORT_SIZE_16) { chip->read_byte = fsl_ifc_read_byte16; chip->options |= NAND_BUSWIDTH_16; } else { @@ -919,20 +849,11 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) chip->ecc.read_page = fsl_ifc_read_page; chip->ecc.write_page = fsl_ifc_write_page; - csor = ifc_in32(&ifc->csor_cs[priv->bank].csor); - - /* Hardware generates ECC per 512 Bytes */ - chip->ecc.size = 512; - chip->ecc.bytes = 8; - chip->ecc.strength = 4; + csor = ifc_in32(&ifc_global->csor_cs[priv->bank].csor); switch (csor & CSOR_NAND_PGS_MASK) { case CSOR_NAND_PGS_512: - if (chip->options & NAND_BUSWIDTH_16) { - layout = &oob_512_16bit_ecc4; - } else { - layout = &oob_512_8bit_ecc4; - + if (!(chip->options & NAND_BUSWIDTH_16)) { /* Avoid conflict with bad block marker */ bbt_main_descr.offs = 0; bbt_mirror_descr.offs = 0; @@ -942,35 +863,16 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) break; case CSOR_NAND_PGS_2K: - layout = &oob_2048_ecc4; priv->bufnum_mask = 3; break; case CSOR_NAND_PGS_4K: - if ((csor & CSOR_NAND_ECC_MODE_MASK) == - CSOR_NAND_ECC_MODE_4) { - layout = &oob_4096_ecc4; - } else { - layout = &oob_4096_ecc8; - chip->ecc.bytes = 16; - chip->ecc.strength = 8; - } - priv->bufnum_mask = 1; break; case CSOR_NAND_PGS_8K: - if ((csor & CSOR_NAND_ECC_MODE_MASK) == - CSOR_NAND_ECC_MODE_4) { - layout = &oob_8192_ecc4; - } else { - layout = &oob_8192_ecc8; - chip->ecc.bytes = 16; - chip->ecc.strength = 8; - } - priv->bufnum_mask = 0; - break; + break; default: dev_err(priv->dev, "bad csor %#x: bad page size\n", csor); @@ -980,9 +882,20 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) /* Must also set CSOR_NAND_ECC_ENC_EN if DEC_EN set */ if (csor & CSOR_NAND_ECC_DEC_EN) { chip->ecc.mode = NAND_ECC_HW; - chip->ecc.layout = layout; + mtd_set_ooblayout(mtd, &fsl_ifc_ooblayout_ops); + + /* Hardware generates ECC per 512 Bytes */ + chip->ecc.size = 512; + if ((csor & CSOR_NAND_ECC_MODE_MASK) == CSOR_NAND_ECC_MODE_4) { + chip->ecc.bytes = 8; + chip->ecc.strength = 4; + } else { + chip->ecc.bytes = 16; + chip->ecc.strength = 8; + } } else { chip->ecc.mode = NAND_ECC_SOFT; + chip->ecc.algo = NAND_ECC_HAMMING; } if (ctrl->version == FSL_IFC_VERSION_1_1_0) @@ -1007,10 +920,10 @@ static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv) return 0; } -static int match_bank(struct fsl_ifc_regs __iomem *ifc, int bank, +static int match_bank(struct fsl_ifc_global __iomem *ifc_global, int bank, phys_addr_t addr) { - u32 cspr = ifc_in32(&ifc->cspr_cs[bank].cspr); + u32 cspr = ifc_in32(&ifc_global->cspr_cs[bank].cspr); if (!(cspr & CSPR_V)) return 0; @@ -1024,7 +937,7 @@ static DEFINE_MUTEX(fsl_ifc_nand_mutex); static int fsl_ifc_nand_probe(struct platform_device *dev) { - struct fsl_ifc_regs __iomem *ifc; + struct fsl_ifc_runtime __iomem *ifc; struct fsl_ifc_mtd *priv; struct resource res; static const char *part_probe_types[] @@ -1034,9 +947,9 @@ static int fsl_ifc_nand_probe(struct platform_device *dev) struct device_node *node = dev->dev.of_node; struct mtd_info *mtd; - if (!fsl_ifc_ctrl_dev || 
!fsl_ifc_ctrl_dev->regs) + if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->rregs) return -ENODEV; - ifc = fsl_ifc_ctrl_dev->regs; + ifc = fsl_ifc_ctrl_dev->rregs; /* get, allocate and map the memory resource */ ret = of_address_to_resource(node, 0, &res); @@ -1047,7 +960,7 @@ static int fsl_ifc_nand_probe(struct platform_device *dev) /* find which chip select it is connected to */ for (bank = 0; bank < fsl_ifc_ctrl_dev->banks; bank++) { - if (match_bank(ifc, bank, res.start)) + if (match_bank(fsl_ifc_ctrl_dev->gregs, bank, res.start)) break; } diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c index cafd12de7276..d85fa2555b68 100644 --- a/drivers/mtd/nand/fsl_upm.c +++ b/drivers/mtd/nand/fsl_upm.c @@ -170,6 +170,7 @@ static int fun_chip_init(struct fsl_upm_nand *fun, fun->chip.read_buf = fun_read_buf; fun->chip.write_buf = fun_write_buf; fun->chip.ecc.mode = NAND_ECC_SOFT; + fun->chip.ecc.algo = NAND_ECC_HAMMING; if (fun->mchip_count > 1) fun->chip.select_chip = fun_select_chip; diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c index 1bdcd4fa26d4..d4f454a4b35e 100644 --- a/drivers/mtd/nand/fsmc_nand.c +++ b/drivers/mtd/nand/fsmc_nand.c @@ -39,210 +39,41 @@ #include <linux/amba/bus.h> #include <mtd/mtd-abi.h> -static struct nand_ecclayout fsmc_ecc1_128_layout = { - .eccbytes = 24, - .eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52, - 66, 67, 68, 82, 83, 84, 98, 99, 100, 114, 115, 116}, - .oobfree = { - {.offset = 8, .length = 8}, - {.offset = 24, .length = 8}, - {.offset = 40, .length = 8}, - {.offset = 56, .length = 8}, - {.offset = 72, .length = 8}, - {.offset = 88, .length = 8}, - {.offset = 104, .length = 8}, - {.offset = 120, .length = 8} - } -}; +static int fsmc_ecc1_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); -static struct nand_ecclayout fsmc_ecc1_64_layout = { - .eccbytes = 12, - .eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52}, - .oobfree = { - {.offset = 8, .length = 8}, - {.offset = 24, .length = 8}, - {.offset = 40, .length = 8}, - {.offset = 56, .length = 8}, - } -}; + if (section >= chip->ecc.steps) + return -ERANGE; -static struct nand_ecclayout fsmc_ecc1_16_layout = { - .eccbytes = 3, - .eccpos = {2, 3, 4}, - .oobfree = { - {.offset = 8, .length = 8}, - } -}; + oobregion->offset = (section * 16) + 2; + oobregion->length = 3; -/* - * ECC4 layout for NAND of pagesize 8192 bytes & OOBsize 256 bytes. 13*16 bytes - * of OB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block and 46 - * bytes are free for use. 
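/*
 * Cross-check for the 1-bit (ecc1) fsmc conversion above, using only
 * numbers from the tables being deleted: the callbacks place 3 ECC
 * bytes at (section * 16) + 2 and an 8-byte free run at
 * (section * 16) + 8.  For a 128-byte OOB (8 steps of 512 bytes) that
 * yields ECC triplets starting at 2, 18, 34, 50, 66, 82, 98, 114 and
 * free runs {8,8}, {24,8}, ..., {120,8} -- byte for byte what
 * fsmc_ecc1_128_layout declared.
 */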
- */ -static struct nand_ecclayout fsmc_ecc4_256_layout = { - .eccbytes = 208, - .eccpos = { 2, 3, 4, 5, 6, 7, 8, - 9, 10, 11, 12, 13, 14, - 18, 19, 20, 21, 22, 23, 24, - 25, 26, 27, 28, 29, 30, - 34, 35, 36, 37, 38, 39, 40, - 41, 42, 43, 44, 45, 46, - 50, 51, 52, 53, 54, 55, 56, - 57, 58, 59, 60, 61, 62, - 66, 67, 68, 69, 70, 71, 72, - 73, 74, 75, 76, 77, 78, - 82, 83, 84, 85, 86, 87, 88, - 89, 90, 91, 92, 93, 94, - 98, 99, 100, 101, 102, 103, 104, - 105, 106, 107, 108, 109, 110, - 114, 115, 116, 117, 118, 119, 120, - 121, 122, 123, 124, 125, 126, - 130, 131, 132, 133, 134, 135, 136, - 137, 138, 139, 140, 141, 142, - 146, 147, 148, 149, 150, 151, 152, - 153, 154, 155, 156, 157, 158, - 162, 163, 164, 165, 166, 167, 168, - 169, 170, 171, 172, 173, 174, - 178, 179, 180, 181, 182, 183, 184, - 185, 186, 187, 188, 189, 190, - 194, 195, 196, 197, 198, 199, 200, - 201, 202, 203, 204, 205, 206, - 210, 211, 212, 213, 214, 215, 216, - 217, 218, 219, 220, 221, 222, - 226, 227, 228, 229, 230, 231, 232, - 233, 234, 235, 236, 237, 238, - 242, 243, 244, 245, 246, 247, 248, - 249, 250, 251, 252, 253, 254 - }, - .oobfree = { - {.offset = 15, .length = 3}, - {.offset = 31, .length = 3}, - {.offset = 47, .length = 3}, - {.offset = 63, .length = 3}, - {.offset = 79, .length = 3}, - {.offset = 95, .length = 3}, - {.offset = 111, .length = 3}, - {.offset = 127, .length = 3}, - {.offset = 143, .length = 3}, - {.offset = 159, .length = 3}, - {.offset = 175, .length = 3}, - {.offset = 191, .length = 3}, - {.offset = 207, .length = 3}, - {.offset = 223, .length = 3}, - {.offset = 239, .length = 3}, - {.offset = 255, .length = 1} - } -}; + return 0; +} -/* - * ECC4 layout for NAND of pagesize 4096 bytes & OOBsize 224 bytes. 13*8 bytes - * of OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block & 118 - * bytes are free for use. - */ -static struct nand_ecclayout fsmc_ecc4_224_layout = { - .eccbytes = 104, - .eccpos = { 2, 3, 4, 5, 6, 7, 8, - 9, 10, 11, 12, 13, 14, - 18, 19, 20, 21, 22, 23, 24, - 25, 26, 27, 28, 29, 30, - 34, 35, 36, 37, 38, 39, 40, - 41, 42, 43, 44, 45, 46, - 50, 51, 52, 53, 54, 55, 56, - 57, 58, 59, 60, 61, 62, - 66, 67, 68, 69, 70, 71, 72, - 73, 74, 75, 76, 77, 78, - 82, 83, 84, 85, 86, 87, 88, - 89, 90, 91, 92, 93, 94, - 98, 99, 100, 101, 102, 103, 104, - 105, 106, 107, 108, 109, 110, - 114, 115, 116, 117, 118, 119, 120, - 121, 122, 123, 124, 125, 126 - }, - .oobfree = { - {.offset = 15, .length = 3}, - {.offset = 31, .length = 3}, - {.offset = 47, .length = 3}, - {.offset = 63, .length = 3}, - {.offset = 79, .length = 3}, - {.offset = 95, .length = 3}, - {.offset = 111, .length = 3}, - {.offset = 127, .length = 97} - } -}; +static int fsmc_ecc1_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); -/* - * ECC4 layout for NAND of pagesize 4096 bytes & OOBsize 128 bytes. 13*8 bytes - * of OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block & 22 - * bytes are free for use. 
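/*
 * Sanity arithmetic for the 4-bit layouts deleted here, using only the
 * figures from their own comments: each 512-byte step needs 13 ECC
 * bytes and bytes 0-1 hold the bad-block marker, so
 *
 *	free = oobsize - 13 * (writesize / 512) - 2
 *
 *	oobsize 256 (8 KiB page, 16 steps): 256 - 208 - 2 =  46
 *	oobsize 224 (4 KiB page,  8 steps): 224 - 104 - 2 = 118
 *	oobsize 128 (4 KiB page,  8 steps): 128 - 104 - 2 =  22
 *
 * matching the free-byte counts the comments quote.  The callback pair
 * added below (fsmc_ecc4_ooblayout_ecc/_free) derives the same regions
 * from chip->ecc.steps and chip->ecc.bytes at run time.
 */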
- */ -static struct nand_ecclayout fsmc_ecc4_128_layout = { - .eccbytes = 104, - .eccpos = { 2, 3, 4, 5, 6, 7, 8, - 9, 10, 11, 12, 13, 14, - 18, 19, 20, 21, 22, 23, 24, - 25, 26, 27, 28, 29, 30, - 34, 35, 36, 37, 38, 39, 40, - 41, 42, 43, 44, 45, 46, - 50, 51, 52, 53, 54, 55, 56, - 57, 58, 59, 60, 61, 62, - 66, 67, 68, 69, 70, 71, 72, - 73, 74, 75, 76, 77, 78, - 82, 83, 84, 85, 86, 87, 88, - 89, 90, 91, 92, 93, 94, - 98, 99, 100, 101, 102, 103, 104, - 105, 106, 107, 108, 109, 110, - 114, 115, 116, 117, 118, 119, 120, - 121, 122, 123, 124, 125, 126 - }, - .oobfree = { - {.offset = 15, .length = 3}, - {.offset = 31, .length = 3}, - {.offset = 47, .length = 3}, - {.offset = 63, .length = 3}, - {.offset = 79, .length = 3}, - {.offset = 95, .length = 3}, - {.offset = 111, .length = 3}, - {.offset = 127, .length = 1} - } -}; + if (section >= chip->ecc.steps) + return -ERANGE; -/* - * ECC4 layout for NAND of pagesize 2048 bytes & OOBsize 64 bytes. 13*4 bytes of - * OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block and 10 - * bytes are free for use. - */ -static struct nand_ecclayout fsmc_ecc4_64_layout = { - .eccbytes = 52, - .eccpos = { 2, 3, 4, 5, 6, 7, 8, - 9, 10, 11, 12, 13, 14, - 18, 19, 20, 21, 22, 23, 24, - 25, 26, 27, 28, 29, 30, - 34, 35, 36, 37, 38, 39, 40, - 41, 42, 43, 44, 45, 46, - 50, 51, 52, 53, 54, 55, 56, - 57, 58, 59, 60, 61, 62, - }, - .oobfree = { - {.offset = 15, .length = 3}, - {.offset = 31, .length = 3}, - {.offset = 47, .length = 3}, - {.offset = 63, .length = 1}, - } -}; + oobregion->offset = (section * 16) + 8; -/* - * ECC4 layout for NAND of pagesize 512 bytes & OOBsize 16 bytes. 13 bytes of - * OOB size is reserved for ECC, Byte no. 4 & 5 reserved for bad block and One - * byte is free for use. - */ -static struct nand_ecclayout fsmc_ecc4_16_layout = { - .eccbytes = 13, - .eccpos = { 0, 1, 2, 3, 6, 7, 8, - 9, 10, 11, 12, 13, 14 - }, - .oobfree = { - {.offset = 15, .length = 1}, - } + if (section < chip->ecc.steps - 1) + oobregion->length = 8; + else + oobregion->length = mtd->oobsize - oobregion->offset; + + return 0; +} + +static const struct mtd_ooblayout_ops fsmc_ecc1_ooblayout_ops = { + .ecc = fsmc_ecc1_ooblayout_ecc, + .free = fsmc_ecc1_ooblayout_free, }; /* @@ -250,28 +81,46 @@ static struct nand_ecclayout fsmc_ecc4_16_layout = { * There are 13 bytes of ecc for every 512 byte block and it has to be read * consecutively and immediately after the 512 byte data block for hardware to * generate the error bit offsets in 512 byte data. - * Managing the ecc bytes in the following way makes it easier for software to - * read ecc bytes consecutive to data bytes. 
This way is similar to - * oobfree structure maintained already in generic nand driver */ -static struct fsmc_eccplace fsmc_ecc4_lp_place = { - .eccplace = { - {.offset = 2, .length = 13}, - {.offset = 18, .length = 13}, - {.offset = 34, .length = 13}, - {.offset = 50, .length = 13}, - {.offset = 66, .length = 13}, - {.offset = 82, .length = 13}, - {.offset = 98, .length = 13}, - {.offset = 114, .length = 13} - } -}; +static int fsmc_ecc4_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); -static struct fsmc_eccplace fsmc_ecc4_sp_place = { - .eccplace = { - {.offset = 0, .length = 4}, - {.offset = 6, .length = 9} - } + if (section >= chip->ecc.steps) + return -ERANGE; + + oobregion->length = chip->ecc.bytes; + + if (!section && mtd->writesize <= 512) + oobregion->offset = 0; + else + oobregion->offset = (section * 16) + 2; + + return 0; +} + +static int fsmc_ecc4_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + + if (section >= chip->ecc.steps) + return -ERANGE; + + oobregion->offset = (section * 16) + 15; + + if (section < chip->ecc.steps - 1) + oobregion->length = 3; + else + oobregion->length = mtd->oobsize - oobregion->offset; + + return 0; +} + +static const struct mtd_ooblayout_ops fsmc_ecc4_ooblayout_ops = { + .ecc = fsmc_ecc4_ooblayout_ecc, + .free = fsmc_ecc4_ooblayout_free, }; /** @@ -283,7 +132,6 @@ static struct fsmc_eccplace fsmc_ecc4_sp_place = { * @partitions: Partition info for a NAND Flash. * @nr_partitions: Total number of partition of a NAND flash. * - * @ecc_place: ECC placing locations in oobfree type format. * @bank: Bank number for probed device. * @clk: Clock structure for FSMC. 
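/*
 * With the static tables gone the layout can no longer be checked by
 * eye, but the generated regions can still be bounds-checked against
 * the real OOB size at probe time.  Minimal sketch (the helper name is
 * an assumption; a fuller check would also reject overlapping
 * regions):
 */
static int check_ooblayout_fits(struct mtd_info *mtd)
{
	struct mtd_oob_region r;
	int section = 0;

	while (!mtd_ooblayout_ecc(mtd, section++, &r))
		if (r.offset + r.length > mtd->oobsize)
			return -EINVAL;

	section = 0;
	while (!mtd_ooblayout_free(mtd, section++, &r))
		if (r.offset + r.length > mtd->oobsize)
			return -EINVAL;

	return 0;
}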
* @@ -303,7 +151,6 @@ struct fsmc_nand_data { struct mtd_partition *partitions; unsigned int nr_partitions; - struct fsmc_eccplace *ecc_place; unsigned int bank; struct device *dev; enum access_mode mode; @@ -710,8 +557,6 @@ static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf, static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf, int oob_required, int page) { - struct fsmc_nand_data *host = mtd_to_fsmc(mtd); - struct fsmc_eccplace *ecc_place = host->ecc_place; int i, j, s, stat, eccsize = chip->ecc.size; int eccbytes = chip->ecc.bytes; int eccsteps = chip->ecc.steps; @@ -734,9 +579,15 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, chip->read_buf(mtd, p, eccsize); for (j = 0; j < eccbytes;) { - off = ecc_place->eccplace[group].offset; - len = ecc_place->eccplace[group].length; - group++; + struct mtd_oob_region oobregion; + int ret; + + ret = mtd_ooblayout_ecc(mtd, group++, &oobregion); + if (ret) + return ret; + + off = oobregion.offset; + len = oobregion.length; /* * length is intentionally kept a higher multiple of 2 @@ -1084,24 +935,10 @@ static int __init fsmc_nand_probe(struct platform_device *pdev) if (AMBA_REV_BITS(host->pid) >= 8) { switch (mtd->oobsize) { case 16: - nand->ecc.layout = &fsmc_ecc4_16_layout; - host->ecc_place = &fsmc_ecc4_sp_place; - break; case 64: - nand->ecc.layout = &fsmc_ecc4_64_layout; - host->ecc_place = &fsmc_ecc4_lp_place; - break; case 128: - nand->ecc.layout = &fsmc_ecc4_128_layout; - host->ecc_place = &fsmc_ecc4_lp_place; - break; case 224: - nand->ecc.layout = &fsmc_ecc4_224_layout; - host->ecc_place = &fsmc_ecc4_lp_place; - break; case 256: - nand->ecc.layout = &fsmc_ecc4_256_layout; - host->ecc_place = &fsmc_ecc4_lp_place; break; default: dev_warn(&pdev->dev, "No oob scheme defined for oobsize %d\n", @@ -1109,6 +946,8 @@ static int __init fsmc_nand_probe(struct platform_device *pdev) ret = -EINVAL; goto err_probe; } + + mtd_set_ooblayout(mtd, &fsmc_ecc4_ooblayout_ops); } else { switch (nand->ecc.mode) { case NAND_ECC_HW: @@ -1119,9 +958,11 @@ static int __init fsmc_nand_probe(struct platform_device *pdev) nand->ecc.strength = 1; break; - case NAND_ECC_SOFT_BCH: - dev_info(&pdev->dev, "Using 4-bit SW BCH ECC scheme\n"); - break; + case NAND_ECC_SOFT: + if (nand->ecc.algo == NAND_ECC_BCH) { + dev_info(&pdev->dev, "Using 4-bit SW BCH ECC scheme\n"); + break; + } default: dev_err(&pdev->dev, "Unsupported ECC mode!\n"); @@ -1132,16 +973,13 @@ static int __init fsmc_nand_probe(struct platform_device *pdev) * Don't set layout for BCH4 SW ECC. This will be * generated later in nand_bch_init() later. 
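/*
 * The NAND_ECC_SOFT_BCH mode is retired throughout this series in
 * favour of describing software ECC as a (mode, algo) pair, as the
 * fsmc hunk above does.  Minimal sketch of the two configurations
 * (the function name is an assumption):
 */
static void foo_select_soft_ecc(struct nand_chip *chip, bool use_bch)
{
	chip->ecc.mode = NAND_ECC_SOFT;
	/* BCH layouts are generated later, in nand_bch_init() */
	chip->ecc.algo = use_bch ? NAND_ECC_BCH : NAND_ECC_HAMMING;
}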
*/ - if (nand->ecc.mode != NAND_ECC_SOFT_BCH) { + if (nand->ecc.mode == NAND_ECC_HW) { switch (mtd->oobsize) { case 16: - nand->ecc.layout = &fsmc_ecc1_16_layout; - break; case 64: - nand->ecc.layout = &fsmc_ecc1_64_layout; - break; case 128: - nand->ecc.layout = &fsmc_ecc1_128_layout; + mtd_set_ooblayout(mtd, + &fsmc_ecc1_ooblayout_ops); break; default: dev_warn(&pdev->dev, diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c index ded658fc7d73..6317f6836022 100644 --- a/drivers/mtd/nand/gpio.c +++ b/drivers/mtd/nand/gpio.c @@ -273,6 +273,7 @@ static int gpio_nand_probe(struct platform_device *pdev) nand_set_flash_node(chip, pdev->dev.of_node); chip->IO_ADDR_W = chip->IO_ADDR_R; chip->ecc.mode = NAND_ECC_SOFT; + chip->ecc.algo = NAND_ECC_HAMMING; chip->options = gpiomtd->plat.options; chip->chip_delay = gpiomtd->plat.chip_delay; chip->cmd_ctrl = gpio_nand_cmd_ctrl; diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c index 8122c699ccf2..6e461560c6a8 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c @@ -25,7 +25,6 @@ #include <linux/mtd/partitions.h> #include <linux/of.h> #include <linux/of_device.h> -#include <linux/of_mtd.h> #include "gpmi-nand.h" #include "bch-regs.h" @@ -47,10 +46,44 @@ static struct nand_bbt_descr gpmi_bbt_descr = { * We may change the layout if we can get the ECC info from the datasheet, * else we will use all the (page + OOB). */ -static struct nand_ecclayout gpmi_hw_ecclayout = { - .eccbytes = 0, - .eccpos = { 0, }, - .oobfree = { {.offset = 0, .length = 0} } +static int gpmi_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct gpmi_nand_data *this = nand_get_controller_data(chip); + struct bch_geometry *geo = &this->bch_geometry; + + if (section) + return -ERANGE; + + oobregion->offset = 0; + oobregion->length = geo->page_size - mtd->writesize; + + return 0; +} + +static int gpmi_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct gpmi_nand_data *this = nand_get_controller_data(chip); + struct bch_geometry *geo = &this->bch_geometry; + + if (section) + return -ERANGE; + + /* The available oob size we have. */ + if (geo->page_size < mtd->writesize + mtd->oobsize) { + oobregion->offset = geo->page_size - mtd->writesize; + oobregion->length = mtd->oobsize - oobregion->offset; + } + + return 0; +} + +static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = { + .ecc = gpmi_ooblayout_ecc, + .free = gpmi_ooblayout_free, }; static const struct gpmi_devdata gpmi_devdata_imx23 = { @@ -141,7 +174,6 @@ static int set_geometry_by_ecc_info(struct gpmi_nand_data *this) struct bch_geometry *geo = &this->bch_geometry; struct nand_chip *chip = &this->nand; struct mtd_info *mtd = nand_to_mtd(chip); - struct nand_oobfree *of = gpmi_hw_ecclayout.oobfree; unsigned int block_mark_bit_offset; if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0)) @@ -229,12 +261,6 @@ static int set_geometry_by_ecc_info(struct gpmi_nand_data *this) geo->page_size = mtd->writesize + geo->metadata_size + (geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8; - /* The available oob size we have. 
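/*
 * Worked example for the gpmi free-OOB computation above (the chip
 * geometry is assumed for illustration): take a 2048+64 page,
 * metadata_size = 10, gf_len = 13 and eight 512-byte chunks.
 *
 *	strength 4: page_size = 2048 + 10 + (13 * 4 * 8) / 8 = 2110
 *	  2110 < 2048 + 64, so free OOB remains:
 *	  offset = 2110 - 2048 = 62, length = 64 - 62 = 2
 *
 *	strength 8: page_size = 2048 + 10 + (13 * 8 * 8) / 8 = 2162
 *	  2162 >= 2112, the BCH data swallows the whole OOB and
 *	  gpmi_ooblayout_free() reports a zero-length region -- which
 *	  is why gpmi_ecc_write_oob() below refuses with -EPERM when
 *	  of.length is 0.
 */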
*/ - if (geo->page_size < mtd->writesize + mtd->oobsize) { - of->offset = geo->page_size - mtd->writesize; - of->length = mtd->oobsize - of->offset; - } - geo->payload_size = mtd->writesize; geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4); @@ -797,6 +823,7 @@ static void gpmi_free_dma_buffer(struct gpmi_nand_data *this) this->cmd_buffer = NULL; this->data_buffer_dma = NULL; + this->raw_buffer = NULL; this->page_buffer_virt = NULL; this->page_buffer_size = 0; } @@ -1037,14 +1064,87 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip, /* Loop over status bytes, accumulating ECC status. */ status = auxiliary_virt + nfc_geo->auxiliary_status_offset; + read_page_swap_end(this, buf, nfc_geo->payload_size, + this->payload_virt, this->payload_phys, + nfc_geo->payload_size, + payload_virt, payload_phys); + for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) { if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED)) continue; if (*status == STATUS_UNCORRECTABLE) { + int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len; + u8 *eccbuf = this->raw_buffer; + int offset, bitoffset; + int eccbytes; + int flips; + + /* Read ECC bytes into our internal raw_buffer */ + offset = nfc_geo->metadata_size * 8; + offset += ((8 * nfc_geo->ecc_chunk_size) + eccbits) * (i + 1); + offset -= eccbits; + bitoffset = offset % 8; + eccbytes = DIV_ROUND_UP(offset + eccbits, 8); + offset /= 8; + eccbytes -= offset; + chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1); + chip->read_buf(mtd, eccbuf, eccbytes); + + /* + * ECC data are not byte aligned and we may have + * in-band data in the first and last byte of + * eccbuf. Set non-eccbits to one so that + * nand_check_erased_ecc_chunk() does not count them + * as bitflips. + */ + if (bitoffset) + eccbuf[0] |= GENMASK(bitoffset - 1, 0); + + bitoffset = (bitoffset + eccbits) % 8; + if (bitoffset) + eccbuf[eccbytes - 1] |= GENMASK(7, bitoffset); + + /* + * The ECC hardware has an uncorrectable ECC status + * code in case we have bitflips in an erased page. As + * nothing was written into this subpage the ECC is + * obviously wrong and we can not trust it. We assume + * at this point that we are reading an erased page and + * try to correct the bitflips in buffer up to + * ecc_strength bitflips. If this is a page with random + * data, we exceed this number of bitflips and have a + * ECC failure. Otherwise we use the corrected buffer. 
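/*
 * Minimal usage sketch for nand_check_erased_ecc_chunk() as called
 * above (buffer names are assumptions): the helper counts the 0 bits
 * across data + ECC (+ optional extra OOB such as chunk 0's metadata)
 * and either treats the chunk as erased -- rewriting it to all 0xff
 * and returning the bitflip count -- or returns a negative error once
 * the threshold is exceeded, meaning the page really is corrupt.
 */
static int foo_handle_uncorrectable(struct mtd_info *mtd,
				    u8 *data, int data_len,
				    u8 *ecc, int ecc_len, int strength)
{
	int flips = nand_check_erased_ecc_chunk(data, data_len,
						ecc, ecc_len,
						NULL, 0, /* no extra OOB */
						strength);

	if (flips < 0) {
		mtd->ecc_stats.failed++;	/* genuine ECC failure */
		return 0;
	}

	mtd->ecc_stats.corrected += flips;	/* erased page, fixed up */
	return flips;
}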
+ */ + if (i == 0) { + /* The first block includes metadata */ + flips = nand_check_erased_ecc_chunk( + buf + i * nfc_geo->ecc_chunk_size, + nfc_geo->ecc_chunk_size, + eccbuf, eccbytes, + auxiliary_virt, + nfc_geo->metadata_size, + nfc_geo->ecc_strength); + } else { + flips = nand_check_erased_ecc_chunk( + buf + i * nfc_geo->ecc_chunk_size, + nfc_geo->ecc_chunk_size, + eccbuf, eccbytes, + NULL, 0, + nfc_geo->ecc_strength); + } + + if (flips > 0) { + max_bitflips = max_t(unsigned int, max_bitflips, + flips); + mtd->ecc_stats.corrected += flips; + continue; + } + mtd->ecc_stats.failed++; continue; } + mtd->ecc_stats.corrected += *status; max_bitflips = max_t(unsigned int, max_bitflips, *status); } @@ -1064,11 +1164,6 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip, chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0]; } - read_page_swap_end(this, buf, nfc_geo->payload_size, - this->payload_virt, this->payload_phys, - nfc_geo->payload_size, - payload_virt, payload_phys); - return max_bitflips; } @@ -1327,18 +1422,19 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip, static int gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page) { - struct nand_oobfree *of = mtd->ecclayout->oobfree; + struct mtd_oob_region of = { }; int status = 0; /* Do we have available oob area? */ - if (!of->length) + mtd_ooblayout_free(mtd, 0, &of); + if (!of.length) return -EPERM; if (!nand_is_slc(chip)) return -EPERM; - chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize + of->offset, page); - chip->write_buf(mtd, chip->oob_poi + of->offset, of->length); + chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize + of.offset, page); + chip->write_buf(mtd, chip->oob_poi + of.offset, of.length); chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); status = chip->waitfunc(mtd, chip); @@ -1840,6 +1936,7 @@ static void gpmi_nand_exit(struct gpmi_nand_data *this) static int gpmi_init_last(struct gpmi_nand_data *this) { struct nand_chip *chip = &this->nand; + struct mtd_info *mtd = nand_to_mtd(chip); struct nand_ecc_ctrl *ecc = &chip->ecc; struct bch_geometry *bch_geo = &this->bch_geometry; int ret; @@ -1861,7 +1958,7 @@ static int gpmi_init_last(struct gpmi_nand_data *this) ecc->mode = NAND_ECC_HW; ecc->size = bch_geo->ecc_chunk_size; ecc->strength = bch_geo->ecc_strength; - ecc->layout = &gpmi_hw_ecclayout; + mtd_set_ooblayout(mtd, &gpmi_ooblayout_ops); /* * We only enable the subpage read when: @@ -1914,16 +2011,6 @@ static int gpmi_nand_init(struct gpmi_nand_data *this) /* Set up swap_block_mark, must be set before the gpmi_set_geometry() */ this->swap_block_mark = !GPMI_IS_MX23(this); - if (of_get_nand_on_flash_bbt(this->dev->of_node)) { - chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB; - - if (of_property_read_bool(this->dev->of_node, - "fsl,no-blockmark-swap")) - this->swap_block_mark = false; - } - dev_dbg(this->dev, "Blockmark swapping %sabled\n", - this->swap_block_mark ? "en" : "dis"); - /* * Allocate a temporary DMA buffer for reading ID in the * nand_scan_ident(). @@ -1938,6 +2025,16 @@ static int gpmi_nand_init(struct gpmi_nand_data *this) if (ret) goto err_out; + if (chip->bbt_options & NAND_BBT_USE_FLASH) { + chip->bbt_options |= NAND_BBT_NO_OOB; + + if (of_property_read_bool(this->dev->of_node, + "fsl,no-blockmark-swap")) + this->swap_block_mark = false; + } + dev_dbg(this->dev, "Blockmark swapping %sabled\n", + this->swap_block_mark ? 
"en" : "dis"); + ret = gpmi_init_last(this); if (ret) goto err_out; diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c index 96502b624cfb..9432546f4cd4 100644 --- a/drivers/mtd/nand/hisi504_nand.c +++ b/drivers/mtd/nand/hisi504_nand.c @@ -19,7 +19,6 @@ * GNU General Public License for more details. */ #include <linux/of.h> -#include <linux/of_mtd.h> #include <linux/mtd/mtd.h> #include <linux/sizes.h> #include <linux/clk.h> @@ -631,8 +630,28 @@ static void hisi_nfc_host_init(struct hinfc_host *host) hinfc_write(host, HINFC504_INTEN_DMA, HINFC504_INTEN); } -static struct nand_ecclayout nand_ecc_2K_16bits = { - .oobfree = { {2, 6} }, +static int hisi_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + /* FIXME: add ECC bytes position */ + return -ENOTSUPP; +} + +static int hisi_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + if (section) + return -ERANGE; + + oobregion->offset = 2; + oobregion->length = 6; + + return 0; +} + +static const struct mtd_ooblayout_ops hisi_ooblayout_ops = { + .ecc = hisi_ooblayout_ecc, + .free = hisi_ooblayout_free, }; static int hisi_nfc_ecc_probe(struct hinfc_host *host) @@ -642,10 +661,9 @@ static int hisi_nfc_ecc_probe(struct hinfc_host *host) struct device *dev = host->dev; struct nand_chip *chip = &host->chip; struct mtd_info *mtd = nand_to_mtd(chip); - struct device_node *np = host->dev->of_node; - size = of_get_nand_ecc_step_size(np); - strength = of_get_nand_ecc_strength(np); + size = chip->ecc.size; + strength = chip->ecc.strength; if (size != 1024) { dev_err(dev, "error ecc size: %d\n", size); return -EINVAL; @@ -668,7 +686,7 @@ static int hisi_nfc_ecc_probe(struct hinfc_host *host) case 16: ecc_bits = 6; if (mtd->writesize == 2048) - chip->ecc.layout = &nand_ecc_2K_16bits; + mtd_set_ooblayout(mtd, &hisi_ooblayout_ops); /* TODO: add more page size support */ break; @@ -695,7 +713,7 @@ static int hisi_nfc_ecc_probe(struct hinfc_host *host) static int hisi_nfc_probe(struct platform_device *pdev) { - int ret = 0, irq, buswidth, flag, max_chips = HINFC504_MAX_CHIP; + int ret = 0, irq, flag, max_chips = HINFC504_MAX_CHIP; struct device *dev = &pdev->dev; struct hinfc_host *host; struct nand_chip *chip; @@ -747,12 +765,6 @@ static int hisi_nfc_probe(struct platform_device *pdev) chip->read_buf = hisi_nfc_read_buf; chip->chip_delay = HINFC504_CHIP_DELAY; - chip->ecc.mode = of_get_nand_ecc_mode(np); - - buswidth = of_get_nand_bus_width(np); - if (buswidth == 16) - chip->options |= NAND_BUSWIDTH_16; - hisi_nfc_host_init(host); ret = devm_request_irq(dev, irq, hinfc_irq_handle, 0x0, "nandc", host); diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c index 673ceb2a0b44..5551c36adbdf 100644 --- a/drivers/mtd/nand/jz4740_nand.c +++ b/drivers/mtd/nand/jz4740_nand.c @@ -221,7 +221,6 @@ static int jz_nand_correct_ecc_rs(struct mtd_info *mtd, uint8_t *dat, struct jz_nand *nand = mtd_to_jz_nand(mtd); int i, error_count, index; uint32_t reg, status, error; - uint32_t t; unsigned int timeout = 1000; for (i = 0; i < 9; ++i) @@ -476,7 +475,7 @@ static int jz_nand_probe(struct platform_device *pdev) } if (pdata && pdata->ident_callback) { - pdata->ident_callback(pdev, chip, &pdata->partitions, + pdata->ident_callback(pdev, mtd, &pdata->partitions, &pdata->num_partitions); } diff --git a/drivers/mtd/nand/jz4780_bch.c b/drivers/mtd/nand/jz4780_bch.c index 755499c6650e..d74f4ba4a6f4 100644 --- a/drivers/mtd/nand/jz4780_bch.c +++ 
b/drivers/mtd/nand/jz4780_bch.c @@ -287,7 +287,6 @@ static struct jz4780_bch *jz4780_bch_get(struct device_node *np) bch = platform_get_drvdata(pdev); clk_prepare_enable(bch->clk); - bch->dev = &pdev->dev; return bch; } diff --git a/drivers/mtd/nand/jz4780_nand.c b/drivers/mtd/nand/jz4780_nand.c index e1c016c9d32d..daf3c4217f4d 100644 --- a/drivers/mtd/nand/jz4780_nand.c +++ b/drivers/mtd/nand/jz4780_nand.c @@ -17,7 +17,6 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/gpio/consumer.h> -#include <linux/of_mtd.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/mtd/mtd.h> @@ -56,8 +55,6 @@ struct jz4780_nand_chip { struct nand_chip chip; struct list_head chip_list; - struct nand_ecclayout ecclayout; - struct gpio_desc *busy_gpio; struct gpio_desc *wp_gpio; unsigned int reading: 1; @@ -165,8 +162,7 @@ static int jz4780_nand_init_ecc(struct jz4780_nand_chip *nand, struct device *de struct nand_chip *chip = &nand->chip; struct mtd_info *mtd = nand_to_mtd(chip); struct jz4780_nand_controller *nfc = to_jz4780_nand_controller(chip->controller); - struct nand_ecclayout *layout = &nand->ecclayout; - u32 start, i; + int eccbytes; chip->ecc.bytes = fls((1 + 8) * chip->ecc.size) * (chip->ecc.strength / 8); @@ -183,7 +179,6 @@ static int jz4780_nand_init_ecc(struct jz4780_nand_chip *nand, struct device *de chip->ecc.correct = jz4780_nand_ecc_correct; /* fall through */ case NAND_ECC_SOFT: - case NAND_ECC_SOFT_BCH: dev_info(dev, "using %s (strength %d, size %d, bytes %d)\n", (nfc->bch) ? "hardware BCH" : "software ECC", chip->ecc.strength, chip->ecc.size, chip->ecc.bytes); @@ -201,23 +196,17 @@ static int jz4780_nand_init_ecc(struct jz4780_nand_chip *nand, struct device *de return 0; /* Generate ECC layout. ECC codes are right aligned in the OOB area. 
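/*
 * The jz4780 hunk continuing below swaps a hand-rolled right-aligned
 * layout for the generic nand_ooblayout_lp_ops.  From the code being
 * deleted, the equivalent regions are:
 *
 *	ECC : offset = oobsize - eccbytes, length = eccbytes
 *	free: offset = 2, length = oobsize - eccbytes - 2
 *
 * Illustrative numbers (assumed, not from the patch): writesize 2048,
 * ecc.size 1024, strength 24 gives ecc.bytes = fls(9 * 1024) * 3 = 42,
 * so eccbytes = 2 * 42 = 84 and, with a 128-byte OOB, ECC occupies
 * bytes 44..127 while bytes 2..43 stay free.
 */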
*/ - layout->eccbytes = mtd->writesize / chip->ecc.size * chip->ecc.bytes; + eccbytes = mtd->writesize / chip->ecc.size * chip->ecc.bytes; - if (layout->eccbytes > mtd->oobsize - 2) { + if (eccbytes > mtd->oobsize - 2) { dev_err(dev, "invalid ECC config: required %d ECC bytes, but only %d are available", - layout->eccbytes, mtd->oobsize - 2); + eccbytes, mtd->oobsize - 2); return -EINVAL; } - start = mtd->oobsize - layout->eccbytes; - for (i = 0; i < layout->eccbytes; i++) - layout->eccpos[i] = start + i; - - layout->oobfree[0].offset = 2; - layout->oobfree[0].length = mtd->oobsize - layout->eccbytes - 2; + mtd->ooblayout = &nand_ooblayout_lp_ops; - chip->ecc.layout = layout; return 0; } diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c index d8c3e7afcc0b..852388171f20 100644 --- a/drivers/mtd/nand/lpc32xx_mlc.c +++ b/drivers/mtd/nand/lpc32xx_mlc.c @@ -35,7 +35,6 @@ #include <linux/completion.h> #include <linux/interrupt.h> #include <linux/of.h> -#include <linux/of_mtd.h> #include <linux/of_gpio.h> #include <linux/mtd/lpc32xx_mlc.h> #include <linux/io.h> @@ -139,22 +138,37 @@ struct lpc32xx_nand_cfg_mlc { unsigned num_parts; }; -static struct nand_ecclayout lpc32xx_nand_oob = { - .eccbytes = 40, - .eccpos = { 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 54, 55, 56, 57, 58, 59, 60, 61, 62, 63 }, - .oobfree = { - { .offset = 0, - .length = 6, }, - { .offset = 16, - .length = 6, }, - { .offset = 32, - .length = 6, }, - { .offset = 48, - .length = 6, }, - }, +static int lpc32xx_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *nand_chip = mtd_to_nand(mtd); + + if (section >= nand_chip->ecc.steps) + return -ERANGE; + + oobregion->offset = ((section + 1) * 16) - nand_chip->ecc.bytes; + oobregion->length = nand_chip->ecc.bytes; + + return 0; +} + +static int lpc32xx_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *nand_chip = mtd_to_nand(mtd); + + if (section >= nand_chip->ecc.steps) + return -ERANGE; + + oobregion->offset = 16 * section; + oobregion->length = 16 - nand_chip->ecc.bytes; + + return 0; +} + +static const struct mtd_ooblayout_ops lpc32xx_ooblayout_ops = { + .ecc = lpc32xx_ooblayout_ecc, + .free = lpc32xx_ooblayout_free, }; static struct nand_bbt_descr lpc32xx_nand_bbt = { @@ -713,6 +727,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) nand_chip->ecc.write_oob = lpc32xx_write_oob; nand_chip->ecc.read_oob = lpc32xx_read_oob; nand_chip->ecc.strength = 4; + nand_chip->ecc.bytes = 10; nand_chip->waitfunc = lpc32xx_waitfunc; nand_chip->options = NAND_NO_SUBPAGE_WRITE; @@ -751,7 +766,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) nand_chip->ecc.mode = NAND_ECC_HW; nand_chip->ecc.size = 512; - nand_chip->ecc.layout = &lpc32xx_nand_oob; + mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops); host->mlcsubpages = mtd->writesize / 512; /* initially clear interrupt status */ diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c index 3b8f3735f3e8..8d3edc34958e 100644 --- a/drivers/mtd/nand/lpc32xx_slc.c +++ b/drivers/mtd/nand/lpc32xx_slc.c @@ -35,7 +35,6 @@ #include <linux/mtd/nand_ecc.h> #include <linux/gpio.h> #include <linux/of.h> -#include <linux/of_mtd.h> #include <linux/of_gpio.h> #include <linux/mtd/lpc32xx_slc.h> @@ -146,13 +145,38 @@ * NAND ECC Layout for small page NAND devices * Note: For large and huge page devices, 
the default layouts are used */ -static struct nand_ecclayout lpc32xx_nand_oob_16 = { - .eccbytes = 6, - .eccpos = {10, 11, 12, 13, 14, 15}, - .oobfree = { - { .offset = 0, .length = 4 }, - { .offset = 6, .length = 4 }, - }, +static int lpc32xx_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + if (section) + return -ERANGE; + + oobregion->length = 6; + oobregion->offset = 10; + + return 0; +} + +static int lpc32xx_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + if (section > 1) + return -ERANGE; + + if (!section) { + oobregion->offset = 0; + oobregion->length = 4; + } else { + oobregion->offset = 6; + oobregion->length = 4; + } + + return 0; +} + +static const struct mtd_ooblayout_ops lpc32xx_ooblayout_ops = { + .ecc = lpc32xx_ooblayout_ecc, + .free = lpc32xx_ooblayout_free, }; static u8 bbt_pattern[] = {'B', 'b', 't', '0' }; @@ -194,7 +218,6 @@ struct lpc32xx_nand_cfg_slc { uint32_t rwidth; uint32_t rhold; uint32_t rsetup; - bool use_bbt; int wp_gpio; struct mtd_partition *parts; unsigned num_parts; @@ -604,7 +627,8 @@ static int lpc32xx_nand_read_page_syndrome(struct mtd_info *mtd, int oob_required, int page) { struct lpc32xx_nand_host *host = nand_get_controller_data(chip); - int stat, i, status; + struct mtd_oob_region oobregion = { }; + int stat, i, status, error; uint8_t *oobecc, tmpecc[LPC32XX_ECC_SAVE_SIZE]; /* Issue read command */ @@ -620,7 +644,11 @@ static int lpc32xx_nand_read_page_syndrome(struct mtd_info *mtd, lpc32xx_slc_ecc_copy(tmpecc, (uint32_t *) host->ecc_buf, chip->ecc.steps); /* Pointer to ECC data retrieved from NAND spare area */ - oobecc = chip->oob_poi + chip->ecc.layout->eccpos[0]; + error = mtd_ooblayout_ecc(mtd, 0, &oobregion); + if (error) + return error; + + oobecc = chip->oob_poi + oobregion.offset; for (i = 0; i < chip->ecc.steps; i++) { stat = chip->ecc.correct(mtd, buf, oobecc, @@ -666,7 +694,8 @@ static int lpc32xx_nand_write_page_syndrome(struct mtd_info *mtd, int oob_required, int page) { struct lpc32xx_nand_host *host = nand_get_controller_data(chip); - uint8_t *pb = chip->oob_poi + chip->ecc.layout->eccpos[0]; + struct mtd_oob_region oobregion = { }; + uint8_t *pb; int error; /* Write data, calculate ECC on outbound data */ @@ -678,6 +707,11 @@ static int lpc32xx_nand_write_page_syndrome(struct mtd_info *mtd, * The calculated ECC needs some manual work done to it before * committing it to NAND. Process the calculated ECC and place * the resultant values directly into the OOB buffer. */ + error = mtd_ooblayout_ecc(mtd, 0, &oobregion); + if (error) + return error; + + pb = chip->oob_poi + oobregion.offset; lpc32xx_slc_ecc_copy(pb, (uint32_t *)host->ecc_buf, chip->ecc.steps); /* Write ECC data to device */ @@ -747,7 +781,6 @@ static struct lpc32xx_nand_cfg_slc *lpc32xx_parse_dt(struct device *dev) return NULL; } - ncfg->use_bbt = of_get_nand_on_flash_bbt(np); ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0); return ncfg; @@ -875,26 +908,22 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) * custom BBT marker layout. 
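Both lpc32xx drivers now describe their OOB reservation through section callbacks rather than fixed tables. A short sketch (assuming the ops above have been registered with mtd_set_ooblayout()) of how the free space is enumerated, which is also how the core derives mtd->oobavail:

	struct mtd_oob_region region = { };
	int section = 0, avail = 0;

	/* mtd_ooblayout_free() returns -ERANGE past the last section. */
	while (!mtd_ooblayout_free(mtd, section++, &region))
		avail += region.length;

For the small-page SLC layout above this visits {0,4} and {6,4}, i.e. 8 free bytes, matching the deleted lpc32xx_nand_oob_16 table.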
*/ if (mtd->writesize <= 512) - chip->ecc.layout = &lpc32xx_nand_oob_16; + mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops); /* These sizes remain the same regardless of page size */ chip->ecc.size = 256; chip->ecc.bytes = LPC32XX_SLC_DEV_ECC_BYTES; chip->ecc.prepad = chip->ecc.postpad = 0; - /* Avoid extra scan if using BBT, setup BBT support */ - if (host->ncfg->use_bbt) { - chip->bbt_options |= NAND_BBT_USE_FLASH; - - /* - * Use a custom BBT marker setup for small page FLASH that - * won't interfere with the ECC layout. Large and huge page - * FLASH use the standard layout. - */ - if (mtd->writesize <= 512) { - chip->bbt_td = &bbt_smallpage_main_descr; - chip->bbt_md = &bbt_smallpage_mirror_descr; - } + /* + * Use a custom BBT marker setup for small page FLASH that + * won't interfere with the ECC layout. Large and huge page + * FLASH use the standard layout. + */ + if ((chip->bbt_options & NAND_BBT_USE_FLASH) && + mtd->writesize <= 512) { + chip->bbt_td = &bbt_smallpage_main_descr; + chip->bbt_md = &bbt_smallpage_mirror_descr; } /* diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c index 5d7843ffff6a..7eacb2f545f5 100644 --- a/drivers/mtd/nand/mpc5121_nfc.c +++ b/drivers/mtd/nand/mpc5121_nfc.c @@ -710,6 +710,7 @@ static int mpc5121_nfc_probe(struct platform_device *op) chip->select_chip = mpc5121_nfc_select_chip; chip->bbt_options = NAND_BBT_USE_FLASH; chip->ecc.mode = NAND_ECC_SOFT; + chip->ecc.algo = NAND_ECC_HAMMING; /* Support external chip-select logic on ADS5121 board */ if (of_machine_is_compatible("fsl,mpc5121ads")) { diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c index 854c832597aa..5173fadc9a4e 100644 --- a/drivers/mtd/nand/mxc_nand.c +++ b/drivers/mtd/nand/mxc_nand.c @@ -34,7 +34,6 @@ #include <linux/completion.h> #include <linux/of.h> #include <linux/of_device.h> -#include <linux/of_mtd.h> #include <asm/mach/flash.h> #include <linux/platform_data/mtd-mxc_nand.h> @@ -149,7 +148,7 @@ struct mxc_nand_devtype_data { int (*check_int)(struct mxc_nand_host *); void (*irq_control)(struct mxc_nand_host *, int); u32 (*get_ecc_status)(struct mxc_nand_host *); - struct nand_ecclayout *ecclayout_512, *ecclayout_2k, *ecclayout_4k; + const struct mtd_ooblayout_ops *ooblayout; void (*select_chip)(struct mtd_info *mtd, int chip); int (*correct_data)(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc); @@ -200,73 +199,6 @@ struct mxc_nand_host { struct mxc_nand_platform_data pdata; }; -/* OOB placement block for use with hardware ecc generation */ -static struct nand_ecclayout nandv1_hw_eccoob_smallpage = { - .eccbytes = 5, - .eccpos = {6, 7, 8, 9, 10}, - .oobfree = {{0, 5}, {12, 4}, } -}; - -static struct nand_ecclayout nandv1_hw_eccoob_largepage = { - .eccbytes = 20, - .eccpos = {6, 7, 8, 9, 10, 22, 23, 24, 25, 26, - 38, 39, 40, 41, 42, 54, 55, 56, 57, 58}, - .oobfree = {{2, 4}, {11, 10}, {27, 10}, {43, 10}, {59, 5}, } -}; - -/* OOB description for 512 byte pages with 16 byte OOB */ -static struct nand_ecclayout nandv2_hw_eccoob_smallpage = { - .eccbytes = 1 * 9, - .eccpos = { - 7, 8, 9, 10, 11, 12, 13, 14, 15 - }, - .oobfree = { - {.offset = 0, .length = 5} - } -}; - -/* OOB description for 2048 byte pages with 64 byte OOB */ -static struct nand_ecclayout nandv2_hw_eccoob_largepage = { - .eccbytes = 4 * 9, - .eccpos = { - 7, 8, 9, 10, 11, 12, 13, 14, 15, - 23, 24, 25, 26, 27, 28, 29, 30, 31, - 39, 40, 41, 42, 43, 44, 45, 46, 47, - 55, 56, 57, 58, 59, 60, 61, 62, 63 - }, - .oobfree = { - {.offset = 2, .length = 4}, - 
{.offset = 16, .length = 7}, - {.offset = 32, .length = 7}, - {.offset = 48, .length = 7} - } -}; - -/* OOB description for 4096 byte pages with 128 byte OOB */ -static struct nand_ecclayout nandv2_hw_eccoob_4k = { - .eccbytes = 8 * 9, - .eccpos = { - 7, 8, 9, 10, 11, 12, 13, 14, 15, - 23, 24, 25, 26, 27, 28, 29, 30, 31, - 39, 40, 41, 42, 43, 44, 45, 46, 47, - 55, 56, 57, 58, 59, 60, 61, 62, 63, - 71, 72, 73, 74, 75, 76, 77, 78, 79, - 87, 88, 89, 90, 91, 92, 93, 94, 95, - 103, 104, 105, 106, 107, 108, 109, 110, 111, - 119, 120, 121, 122, 123, 124, 125, 126, 127, - }, - .oobfree = { - {.offset = 2, .length = 4}, - {.offset = 16, .length = 7}, - {.offset = 32, .length = 7}, - {.offset = 48, .length = 7}, - {.offset = 64, .length = 7}, - {.offset = 80, .length = 7}, - {.offset = 96, .length = 7}, - {.offset = 112, .length = 7}, - } -}; - static const char * const part_probes[] = { "cmdlinepart", "RedBoot", "ofpart", NULL }; @@ -942,6 +874,99 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr) } } +static int mxc_v1_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *nand_chip = mtd_to_nand(mtd); + + if (section >= nand_chip->ecc.steps) + return -ERANGE; + + oobregion->offset = (section * 16) + 6; + oobregion->length = nand_chip->ecc.bytes; + + return 0; +} + +static int mxc_v1_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *nand_chip = mtd_to_nand(mtd); + + if (section > nand_chip->ecc.steps) + return -ERANGE; + + if (!section) { + if (mtd->writesize <= 512) { + oobregion->offset = 0; + oobregion->length = 5; + } else { + oobregion->offset = 2; + oobregion->length = 4; + } + } else { + oobregion->offset = ((section - 1) * 16) + + nand_chip->ecc.bytes + 6; + if (section < nand_chip->ecc.steps) + oobregion->length = (section * 16) + 6 - + oobregion->offset; + else + oobregion->length = mtd->oobsize - oobregion->offset; + } + + return 0; +} + +static const struct mtd_ooblayout_ops mxc_v1_ooblayout_ops = { + .ecc = mxc_v1_ooblayout_ecc, + .free = mxc_v1_ooblayout_free, +}; + +static int mxc_v2_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *nand_chip = mtd_to_nand(mtd); + int stepsize = nand_chip->ecc.bytes == 9 ? 16 : 26; + + if (section >= nand_chip->ecc.steps) + return -ERANGE; + + oobregion->offset = (section * stepsize) + 7; + oobregion->length = nand_chip->ecc.bytes; + + return 0; +} + +static int mxc_v2_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *nand_chip = mtd_to_nand(mtd); + int stepsize = nand_chip->ecc.bytes == 9 ? 16 : 26; + + if (section > nand_chip->ecc.steps) + return -ERANGE; + + if (!section) { + if (mtd->writesize <= 512) { + oobregion->offset = 0; + oobregion->length = 5; + } else { + oobregion->offset = 2; + oobregion->length = 4; + } + } else { + oobregion->offset = section * stepsize; + oobregion->length = 7; + } + + return 0; +} + +static const struct mtd_ooblayout_ops mxc_v2_ooblayout_ops = { + .ecc = mxc_v2_ooblayout_ecc, + .free = mxc_v2_ooblayout_free, +}; + /* * v2 and v3 type controllers can do 4bit or 8bit ecc depending * on how much oob the nand chip has. 
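The v2 callbacks encode the same geometry the deleted tables spelled out. A worked check (illustrative; assumes a 2048-byte page with 64-byte OOB and 9 ECC bytes per step, so stepsize = 16 and ecc.steps = 4):

	struct mtd_oob_region r;
	int s;

	for (s = 0; !mtd_ooblayout_ecc(mtd, s, &r); s++)
		pr_info("ecc[%d] = {%u, %u}\n", s, r.offset, r.length);
	/*
	 * Prints {7,9} {23,9} {39,9} {55,9}, the eccpos rows of the old
	 * nandv2_hw_eccoob_largepage table, while the free callback yields
	 * {2,4} {16,7} {32,7} {48,7}, matching its oobfree entries.
	 */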
For 8bit ecc we need at least @@ -959,23 +984,6 @@ static int get_eccsize(struct mtd_info *mtd) return 8; } -static void ecc_8bit_layout_4k(struct nand_ecclayout *layout) -{ - int i, j; - - layout->eccbytes = 8*18; - for (i = 0; i < 8; i++) - for (j = 0; j < 18; j++) - layout->eccpos[i*18 + j] = i*26 + j + 7; - - layout->oobfree[0].offset = 2; - layout->oobfree[0].length = 4; - for (i = 1; i < 8; i++) { - layout->oobfree[i].offset = i*26; - layout->oobfree[i].length = 7; - } -} - static void preset_v1(struct mtd_info *mtd) { struct nand_chip *nand_chip = mtd_to_nand(mtd); @@ -1269,9 +1277,7 @@ static const struct mxc_nand_devtype_data imx21_nand_devtype_data = { .check_int = check_int_v1_v2, .irq_control = irq_control_v1_v2, .get_ecc_status = get_ecc_status_v1, - .ecclayout_512 = &nandv1_hw_eccoob_smallpage, - .ecclayout_2k = &nandv1_hw_eccoob_largepage, - .ecclayout_4k = &nandv1_hw_eccoob_smallpage, /* XXX: needs fix */ + .ooblayout = &mxc_v1_ooblayout_ops, .select_chip = mxc_nand_select_chip_v1_v3, .correct_data = mxc_nand_correct_data_v1, .irqpending_quirk = 1, @@ -1294,9 +1300,7 @@ static const struct mxc_nand_devtype_data imx27_nand_devtype_data = { .check_int = check_int_v1_v2, .irq_control = irq_control_v1_v2, .get_ecc_status = get_ecc_status_v1, - .ecclayout_512 = &nandv1_hw_eccoob_smallpage, - .ecclayout_2k = &nandv1_hw_eccoob_largepage, - .ecclayout_4k = &nandv1_hw_eccoob_smallpage, /* XXX: needs fix */ + .ooblayout = &mxc_v1_ooblayout_ops, .select_chip = mxc_nand_select_chip_v1_v3, .correct_data = mxc_nand_correct_data_v1, .irqpending_quirk = 0, @@ -1320,9 +1324,7 @@ static const struct mxc_nand_devtype_data imx25_nand_devtype_data = { .check_int = check_int_v1_v2, .irq_control = irq_control_v1_v2, .get_ecc_status = get_ecc_status_v2, - .ecclayout_512 = &nandv2_hw_eccoob_smallpage, - .ecclayout_2k = &nandv2_hw_eccoob_largepage, - .ecclayout_4k = &nandv2_hw_eccoob_4k, + .ooblayout = &mxc_v2_ooblayout_ops, .select_chip = mxc_nand_select_chip_v2, .correct_data = mxc_nand_correct_data_v2_v3, .irqpending_quirk = 0, @@ -1346,9 +1348,7 @@ static const struct mxc_nand_devtype_data imx51_nand_devtype_data = { .check_int = check_int_v3, .irq_control = irq_control_v3, .get_ecc_status = get_ecc_status_v3, - .ecclayout_512 = &nandv2_hw_eccoob_smallpage, - .ecclayout_2k = &nandv2_hw_eccoob_largepage, - .ecclayout_4k = &nandv2_hw_eccoob_smallpage, /* XXX: needs fix */ + .ooblayout = &mxc_v2_ooblayout_ops, .select_chip = mxc_nand_select_chip_v1_v3, .correct_data = mxc_nand_correct_data_v2_v3, .irqpending_quirk = 0, @@ -1373,9 +1373,7 @@ static const struct mxc_nand_devtype_data imx53_nand_devtype_data = { .check_int = check_int_v3, .irq_control = irq_control_v3, .get_ecc_status = get_ecc_status_v3, - .ecclayout_512 = &nandv2_hw_eccoob_smallpage, - .ecclayout_2k = &nandv2_hw_eccoob_largepage, - .ecclayout_4k = &nandv2_hw_eccoob_smallpage, /* XXX: needs fix */ + .ooblayout = &mxc_v2_ooblayout_ops, .select_chip = mxc_nand_select_chip_v1_v3, .correct_data = mxc_nand_correct_data_v2_v3, .irqpending_quirk = 0, @@ -1461,25 +1459,12 @@ MODULE_DEVICE_TABLE(of, mxcnd_dt_ids); static int __init mxcnd_probe_dt(struct mxc_nand_host *host) { struct device_node *np = host->dev->of_node; - struct mxc_nand_platform_data *pdata = &host->pdata; const struct of_device_id *of_id = of_match_device(mxcnd_dt_ids, host->dev); - int buswidth; if (!np) return 1; - if (of_get_nand_ecc_mode(np) >= 0) - pdata->hw_ecc = 1; - - pdata->flash_bbt = of_get_nand_on_flash_bbt(np); - - buswidth = of_get_nand_bus_width(np); - if 
(buswidth < 0) - return buswidth; - - pdata->width = buswidth / 8; - host->devtype_data = of_id->data; return 0; @@ -1576,27 +1561,22 @@ static int mxcnd_probe(struct platform_device *pdev) this->select_chip = host->devtype_data->select_chip; this->ecc.size = 512; - this->ecc.layout = host->devtype_data->ecclayout_512; + mtd_set_ooblayout(mtd, host->devtype_data->ooblayout); if (host->pdata.hw_ecc) { - this->ecc.calculate = mxc_nand_calculate_ecc; - this->ecc.hwctl = mxc_nand_enable_hwecc; - this->ecc.correct = host->devtype_data->correct_data; this->ecc.mode = NAND_ECC_HW; } else { this->ecc.mode = NAND_ECC_SOFT; + this->ecc.algo = NAND_ECC_HAMMING; } /* NAND bus width determines access functions used by upper layer */ if (host->pdata.width == 2) this->options |= NAND_BUSWIDTH_16; - if (host->pdata.flash_bbt) { - this->bbt_td = &bbt_main_descr; - this->bbt_md = &bbt_mirror_descr; - /* update flash based bbt */ + /* update flash based bbt */ + if (host->pdata.flash_bbt) this->bbt_options |= NAND_BBT_USE_FLASH; - } init_completion(&host->op_completion); @@ -1637,6 +1617,26 @@ static int mxcnd_probe(struct platform_device *pdev) goto escan; } + switch (this->ecc.mode) { + case NAND_ECC_HW: + this->ecc.calculate = mxc_nand_calculate_ecc; + this->ecc.hwctl = mxc_nand_enable_hwecc; + this->ecc.correct = host->devtype_data->correct_data; + break; + + case NAND_ECC_SOFT: + break; + + default: + err = -EINVAL; + goto escan; + } + + if (this->bbt_options & NAND_BBT_USE_FLASH) { + this->bbt_td = &bbt_main_descr; + this->bbt_md = &bbt_mirror_descr; + } + /* allocate the right size buffer now */ devm_kfree(&pdev->dev, (void *)host->data_buf); host->data_buf = devm_kzalloc(&pdev->dev, mtd->writesize + mtd->oobsize, @@ -1649,12 +1649,11 @@ static int mxcnd_probe(struct platform_device *pdev) /* Call preset again, with correct writesize this time */ host->devtype_data->preset(mtd); - if (mtd->writesize == 2048) - this->ecc.layout = host->devtype_data->ecclayout_2k; - else if (mtd->writesize == 4096) { - this->ecc.layout = host->devtype_data->ecclayout_4k; - if (get_eccsize(mtd) == 8) - ecc_8bit_layout_4k(this->ecc.layout); + if (!this->ecc.bytes) { + if (host->eccsize == 8) + this->ecc.bytes = 18; + else if (host->eccsize == 4) + this->ecc.bytes = 9; } /* diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index ba4f603e0537..0b0dc29d2af7 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -45,56 +45,98 @@ #include <linux/bitops.h> #include <linux/io.h> #include <linux/mtd/partitions.h> -#include <linux/of_mtd.h> +#include <linux/of.h> + +static int nand_get_device(struct mtd_info *mtd, int new_state); + +static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, + struct mtd_oob_ops *ops); /* Define default oob placement schemes for large and small page devices */ -static struct nand_ecclayout nand_oob_8 = { - .eccbytes = 3, - .eccpos = {0, 1, 2}, - .oobfree = { - {.offset = 3, - .length = 2}, - {.offset = 6, - .length = 2} } -}; +static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct nand_ecc_ctrl *ecc = &chip->ecc; -static struct nand_ecclayout nand_oob_16 = { - .eccbytes = 6, - .eccpos = {0, 1, 2, 3, 6, 7}, - .oobfree = { - {.offset = 8, - . 
length = 8} } -}; + if (section > 1) + return -ERANGE; -static struct nand_ecclayout nand_oob_64 = { - .eccbytes = 24, - .eccpos = { - 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, - 56, 57, 58, 59, 60, 61, 62, 63}, - .oobfree = { - {.offset = 2, - .length = 38} } -}; + if (!section) { + oobregion->offset = 0; + oobregion->length = 4; + } else { + oobregion->offset = 6; + oobregion->length = ecc->total - 4; + } + + return 0; +} + +static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + if (section > 1) + return -ERANGE; + + if (mtd->oobsize == 16) { + if (section) + return -ERANGE; + + oobregion->length = 8; + oobregion->offset = 8; + } else { + oobregion->length = 2; + if (!section) + oobregion->offset = 3; + else + oobregion->offset = 6; + } + + return 0; +} -static struct nand_ecclayout nand_oob_128 = { - .eccbytes = 48, - .eccpos = { - 80, 81, 82, 83, 84, 85, 86, 87, - 88, 89, 90, 91, 92, 93, 94, 95, - 96, 97, 98, 99, 100, 101, 102, 103, - 104, 105, 106, 107, 108, 109, 110, 111, - 112, 113, 114, 115, 116, 117, 118, 119, - 120, 121, 122, 123, 124, 125, 126, 127}, - .oobfree = { - {.offset = 2, - .length = 78} } +const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = { + .ecc = nand_ooblayout_ecc_sp, + .free = nand_ooblayout_free_sp, }; +EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops); -static int nand_get_device(struct mtd_info *mtd, int new_state); +static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct nand_ecc_ctrl *ecc = &chip->ecc; -static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, - struct mtd_oob_ops *ops); + if (section) + return -ERANGE; + + oobregion->length = ecc->total; + oobregion->offset = mtd->oobsize - oobregion->length; + + return 0; +} + +static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct nand_ecc_ctrl *ecc = &chip->ecc; + + if (section) + return -ERANGE; + + oobregion->length = mtd->oobsize - ecc->total - 2; + oobregion->offset = 2; + + return 0; +} + +const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = { + .ecc = nand_ooblayout_ecc_lp, + .free = nand_ooblayout_free_lp, +}; +EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops); static int check_offs_len(struct mtd_info *mtd, loff_t ofs, uint64_t len) @@ -1279,13 +1321,12 @@ static int nand_read_page_raw_syndrome(struct mtd_info *mtd, static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf, int oob_required, int page) { - int i, eccsize = chip->ecc.size; + int i, eccsize = chip->ecc.size, ret; int eccbytes = chip->ecc.bytes; int eccsteps = chip->ecc.steps; uint8_t *p = buf; uint8_t *ecc_calc = chip->buffers->ecccalc; uint8_t *ecc_code = chip->buffers->ecccode; - uint32_t *eccpos = chip->ecc.layout->eccpos; unsigned int max_bitflips = 0; chip->ecc.read_page_raw(mtd, chip, buf, 1, page); @@ -1293,8 +1334,10 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) chip->ecc.calculate(mtd, p, &ecc_calc[i]); - for (i = 0; i < chip->ecc.total; i++) - ecc_code[i] = chip->oob_poi[eccpos[i]]; + ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, + chip->ecc.total); + if (ret) + return ret; eccsteps = chip->ecc.steps; p = buf; @@ -1326,14 +1369,14 @@ static int nand_read_subpage(struct mtd_info *mtd, struct 
nand_chip *chip, uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi, int page) { - int start_step, end_step, num_steps; - uint32_t *eccpos = chip->ecc.layout->eccpos; + int start_step, end_step, num_steps, ret; uint8_t *p; int data_col_addr, i, gaps = 0; int datafrag_len, eccfrag_len, aligned_len, aligned_pos; int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1; - int index; + int index, section = 0; unsigned int max_bitflips = 0; + struct mtd_oob_region oobregion = { }; /* Column address within the page aligned to ECC size (256bytes) */ start_step = data_offs / chip->ecc.size; @@ -1361,12 +1404,13 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, * The performance is faster if we position offsets according to * ecc.pos. Let's make sure that there are no gaps in ECC positions. */ - for (i = 0; i < eccfrag_len - 1; i++) { - if (eccpos[i + index] + 1 != eccpos[i + index + 1]) { - gaps = 1; - break; - } - } + ret = mtd_ooblayout_find_eccregion(mtd, index, §ion, &oobregion); + if (ret) + return ret; + + if (oobregion.length < eccfrag_len) + gaps = 1; + if (gaps) { chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1); chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); @@ -1375,20 +1419,23 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, * Send the command to read the particular ECC bytes take care * about buswidth alignment in read_buf. */ - aligned_pos = eccpos[index] & ~(busw - 1); + aligned_pos = oobregion.offset & ~(busw - 1); aligned_len = eccfrag_len; - if (eccpos[index] & (busw - 1)) + if (oobregion.offset & (busw - 1)) aligned_len++; - if (eccpos[index + (num_steps * chip->ecc.bytes)] & (busw - 1)) + if ((oobregion.offset + (num_steps * chip->ecc.bytes)) & + (busw - 1)) aligned_len++; chip->cmdfunc(mtd, NAND_CMD_RNDOUT, - mtd->writesize + aligned_pos, -1); + mtd->writesize + aligned_pos, -1); chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len); } - for (i = 0; i < eccfrag_len; i++) - chip->buffers->ecccode[i] = chip->oob_poi[eccpos[i + index]]; + ret = mtd_ooblayout_get_eccbytes(mtd, chip->buffers->ecccode, + chip->oob_poi, index, eccfrag_len); + if (ret) + return ret; p = bufpoi + data_col_addr; for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) { @@ -1429,13 +1476,12 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf, int oob_required, int page) { - int i, eccsize = chip->ecc.size; + int i, eccsize = chip->ecc.size, ret; int eccbytes = chip->ecc.bytes; int eccsteps = chip->ecc.steps; uint8_t *p = buf; uint8_t *ecc_calc = chip->buffers->ecccalc; uint8_t *ecc_code = chip->buffers->ecccode; - uint32_t *eccpos = chip->ecc.layout->eccpos; unsigned int max_bitflips = 0; for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { @@ -1445,8 +1491,10 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, } chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); - for (i = 0; i < chip->ecc.total; i++) - ecc_code[i] = chip->oob_poi[eccpos[i]]; + ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, + chip->ecc.total); + if (ret) + return ret; eccsteps = chip->ecc.steps; p = buf; @@ -1491,12 +1539,11 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf, int oob_required, int page) { - int i, eccsize = chip->ecc.size; + int 
i, eccsize = chip->ecc.size, ret; int eccbytes = chip->ecc.bytes; int eccsteps = chip->ecc.steps; uint8_t *p = buf; uint8_t *ecc_code = chip->buffers->ecccode; - uint32_t *eccpos = chip->ecc.layout->eccpos; uint8_t *ecc_calc = chip->buffers->ecccalc; unsigned int max_bitflips = 0; @@ -1505,8 +1552,10 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd, chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); - for (i = 0; i < chip->ecc.total; i++) - ecc_code[i] = chip->oob_poi[eccpos[i]]; + ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, + chip->ecc.total); + if (ret) + return ret; for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { int stat; @@ -1607,14 +1656,17 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip, /** * nand_transfer_oob - [INTERN] Transfer oob to client buffer - * @chip: nand chip structure + * @mtd: mtd info structure * @oob: oob destination address * @ops: oob ops structure * @len: size of oob to transfer */ -static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob, +static uint8_t *nand_transfer_oob(struct mtd_info *mtd, uint8_t *oob, struct mtd_oob_ops *ops, size_t len) { + struct nand_chip *chip = mtd_to_nand(mtd); + int ret; + switch (ops->mode) { case MTD_OPS_PLACE_OOB: @@ -1622,31 +1674,12 @@ static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob, memcpy(oob, chip->oob_poi + ops->ooboffs, len); return oob + len; - case MTD_OPS_AUTO_OOB: { - struct nand_oobfree *free = chip->ecc.layout->oobfree; - uint32_t boffs = 0, roffs = ops->ooboffs; - size_t bytes = 0; - - for (; free->length && len; free++, len -= bytes) { - /* Read request not from offset 0? */ - if (unlikely(roffs)) { - if (roffs >= free->length) { - roffs -= free->length; - continue; - } - boffs = free->offset + roffs; - bytes = min_t(size_t, len, - (free->length - roffs)); - roffs = 0; - } else { - bytes = min_t(size_t, len, free->length); - boffs = free->offset; - } - memcpy(oob, chip->oob_poi + boffs, bytes); - oob += bytes; - } - return oob; - } + case MTD_OPS_AUTO_OOB: + ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi, + ops->ooboffs, len); + BUG_ON(ret); + return oob + len; + default: BUG(); } @@ -1780,7 +1813,7 @@ read_retry: int toread = min(oobreadlen, max_oobsize); if (toread) { - oob = nand_transfer_oob(chip, + oob = nand_transfer_oob(mtd, oob, ops, toread); oobreadlen -= toread; } @@ -1893,13 +1926,13 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len, * @chip: nand chip info structure * @page: page number to read */ -static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, - int page) +int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page) { chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); return 0; } +EXPORT_SYMBOL(nand_read_oob_std); /** * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC @@ -1908,8 +1941,8 @@ static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, * @chip: nand chip info structure * @page: page number to read */ -static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, - int page) +int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, + int page) { int length = mtd->oobsize; int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad; @@ -1937,6 +1970,7 @@ static int nand_read_oob_syndrome(struct mtd_info *mtd, struct 
nand_chip *chip, return 0; } +EXPORT_SYMBOL(nand_read_oob_syndrome); /** * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function @@ -1944,8 +1978,7 @@ static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, * @chip: nand chip info structure * @page: page number to write */ -static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, - int page) +int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page) { int status = 0; const uint8_t *buf = chip->oob_poi; @@ -1960,6 +1993,7 @@ static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, return status & NAND_STATUS_FAIL ? -EIO : 0; } +EXPORT_SYMBOL(nand_write_oob_std); /** * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC @@ -1968,8 +2002,8 @@ static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, * @chip: nand chip info structure * @page: page number to write */ -static int nand_write_oob_syndrome(struct mtd_info *mtd, - struct nand_chip *chip, int page) +int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, + int page) { int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad; int eccsize = chip->ecc.size, length = mtd->oobsize; @@ -2019,6 +2053,7 @@ static int nand_write_oob_syndrome(struct mtd_info *mtd, return status & NAND_STATUS_FAIL ? -EIO : 0; } +EXPORT_SYMBOL(nand_write_oob_syndrome); /** * nand_do_read_oob - [INTERN] NAND read out-of-band @@ -2078,7 +2113,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, break; len = min(len, readlen); - buf = nand_transfer_oob(chip, buf, ops, len); + buf = nand_transfer_oob(mtd, buf, ops, len); if (chip->options & NAND_NEED_READRDY) { /* Apply delay or wait for ready/busy pin */ @@ -2237,19 +2272,20 @@ static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf, int oob_required, int page) { - int i, eccsize = chip->ecc.size; + int i, eccsize = chip->ecc.size, ret; int eccbytes = chip->ecc.bytes; int eccsteps = chip->ecc.steps; uint8_t *ecc_calc = chip->buffers->ecccalc; const uint8_t *p = buf; - uint32_t *eccpos = chip->ecc.layout->eccpos; /* Software ECC calculation */ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) chip->ecc.calculate(mtd, p, &ecc_calc[i]); - for (i = 0; i < chip->ecc.total; i++) - chip->oob_poi[eccpos[i]] = ecc_calc[i]; + ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0, + chip->ecc.total); + if (ret) + return ret; return chip->ecc.write_page_raw(mtd, chip, buf, 1, page); } @@ -2266,12 +2302,11 @@ static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf, int oob_required, int page) { - int i, eccsize = chip->ecc.size; + int i, eccsize = chip->ecc.size, ret; int eccbytes = chip->ecc.bytes; int eccsteps = chip->ecc.steps; uint8_t *ecc_calc = chip->buffers->ecccalc; const uint8_t *p = buf; - uint32_t *eccpos = chip->ecc.layout->eccpos; for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { chip->ecc.hwctl(mtd, NAND_ECC_WRITE); @@ -2279,8 +2314,10 @@ static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, chip->ecc.calculate(mtd, p, &ecc_calc[i]); } - for (i = 0; i < chip->ecc.total; i++) - chip->oob_poi[eccpos[i]] = ecc_calc[i]; + ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0, + chip->ecc.total); + if (ret) + return ret; chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); @@ -2308,11 +2345,10 @@ static int 
nand_write_subpage_hwecc(struct mtd_info *mtd, int ecc_size = chip->ecc.size; int ecc_bytes = chip->ecc.bytes; int ecc_steps = chip->ecc.steps; - uint32_t *eccpos = chip->ecc.layout->eccpos; uint32_t start_step = offset / ecc_size; uint32_t end_step = (offset + data_len - 1) / ecc_size; int oob_bytes = mtd->oobsize / ecc_steps; - int step, i; + int step, ret; for (step = 0; step < ecc_steps; step++) { /* configure controller for WRITE access */ @@ -2340,8 +2376,10 @@ static int nand_write_subpage_hwecc(struct mtd_info *mtd, /* copy calculated ECC for whole page to chip->buffer->oob */ /* this include masked-value(0xFF) for unwritten subpages */ ecc_calc = chip->buffers->ecccalc; - for (i = 0; i < chip->ecc.total; i++) - chip->oob_poi[eccpos[i]] = ecc_calc[i]; + ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0, + chip->ecc.total); + if (ret) + return ret; /* write OOB buffer to NAND device */ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); @@ -2478,6 +2516,7 @@ static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len, struct mtd_oob_ops *ops) { struct nand_chip *chip = mtd_to_nand(mtd); + int ret; /* * Initialise to all 0xFF, to avoid the possibility of left over OOB @@ -2492,31 +2531,12 @@ static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len, memcpy(chip->oob_poi + ops->ooboffs, oob, len); return oob + len; - case MTD_OPS_AUTO_OOB: { - struct nand_oobfree *free = chip->ecc.layout->oobfree; - uint32_t boffs = 0, woffs = ops->ooboffs; - size_t bytes = 0; - - for (; free->length && len; free++, len -= bytes) { - /* Write request not from offset 0? */ - if (unlikely(woffs)) { - if (woffs >= free->length) { - woffs -= free->length; - continue; - } - boffs = free->offset + woffs; - bytes = min_t(size_t, len, - (free->length - woffs)); - woffs = 0; - } else { - bytes = min_t(size_t, len, free->length); - boffs = free->offset; - } - memcpy(chip->oob_poi + boffs, oob, bytes); - oob += bytes; - } - return oob; - } + case MTD_OPS_AUTO_OOB: + ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi, + ops->ooboffs, len); + BUG_ON(ret); + return oob + len; + default: BUG(); } @@ -3951,10 +3971,115 @@ ident_done: return type; } +static const char * const nand_ecc_modes[] = { + [NAND_ECC_NONE] = "none", + [NAND_ECC_SOFT] = "soft", + [NAND_ECC_HW] = "hw", + [NAND_ECC_HW_SYNDROME] = "hw_syndrome", + [NAND_ECC_HW_OOB_FIRST] = "hw_oob_first", +}; + +static int of_get_nand_ecc_mode(struct device_node *np) +{ + const char *pm; + int err, i; + + err = of_property_read_string(np, "nand-ecc-mode", &pm); + if (err < 0) + return err; + + for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++) + if (!strcasecmp(pm, nand_ecc_modes[i])) + return i; + + /* + * For backward compatibility we support few obsoleted values that don't + * have their mappings into nand_ecc_modes_t anymore (they were merged + * with other enums). + */ + if (!strcasecmp(pm, "soft_bch")) + return NAND_ECC_SOFT; + + return -ENODEV; +} + +static const char * const nand_ecc_algos[] = { + [NAND_ECC_HAMMING] = "hamming", + [NAND_ECC_BCH] = "bch", +}; + +static int of_get_nand_ecc_algo(struct device_node *np) +{ + const char *pm; + int err, i; + + err = of_property_read_string(np, "nand-ecc-algo", &pm); + if (!err) { + for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++) + if (!strcasecmp(pm, nand_ecc_algos[i])) + return i; + return -ENODEV; + } + + /* + * For backward compatibility we also read "nand-ecc-mode" checking + * for some obsoleted values that were specifying ECC algorithm. 
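To make the compatibility path concrete, a hypothetical device-tree fragment (illustrative only, not part of the patch):

	/*
	 *   nand@0 {
	 *           ...
	 *           nand-ecc-mode = "soft_bch";	// obsolete value
	 *   };
	 *
	 * of_get_nand_ecc_mode() maps "soft_bch" to NAND_ECC_SOFT and
	 * of_get_nand_ecc_algo() maps it to NAND_ECC_BCH, so existing
	 * device trees keep selecting software BCH without change.
	 */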
+ */ + err = of_property_read_string(np, "nand-ecc-mode", &pm); + if (err < 0) + return err; + + if (!strcasecmp(pm, "soft")) + return NAND_ECC_HAMMING; + else if (!strcasecmp(pm, "soft_bch")) + return NAND_ECC_BCH; + + return -ENODEV; +} + +static int of_get_nand_ecc_step_size(struct device_node *np) +{ + int ret; + u32 val; + + ret = of_property_read_u32(np, "nand-ecc-step-size", &val); + return ret ? ret : val; +} + +static int of_get_nand_ecc_strength(struct device_node *np) +{ + int ret; + u32 val; + + ret = of_property_read_u32(np, "nand-ecc-strength", &val); + return ret ? ret : val; +} + +static int of_get_nand_bus_width(struct device_node *np) +{ + u32 val; + + if (of_property_read_u32(np, "nand-bus-width", &val)) + return 8; + + switch (val) { + case 8: + case 16: + return val; + default: + return -EIO; + } +} + +static bool of_get_nand_on_flash_bbt(struct device_node *np) +{ + return of_property_read_bool(np, "nand-on-flash-bbt"); +} + static int nand_dt_init(struct nand_chip *chip) { struct device_node *dn = nand_get_flash_node(chip); - int ecc_mode, ecc_strength, ecc_step; + int ecc_mode, ecc_algo, ecc_strength, ecc_step; if (!dn) return 0; @@ -3966,6 +4091,7 @@ static int nand_dt_init(struct nand_chip *chip) chip->bbt_options |= NAND_BBT_USE_FLASH; ecc_mode = of_get_nand_ecc_mode(dn); + ecc_algo = of_get_nand_ecc_algo(dn); ecc_strength = of_get_nand_ecc_strength(dn); ecc_step = of_get_nand_ecc_step_size(dn); @@ -3978,6 +4104,9 @@ static int nand_dt_init(struct nand_chip *chip) if (ecc_mode >= 0) chip->ecc.mode = ecc_mode; + if (ecc_algo >= 0) + chip->ecc.algo = ecc_algo; + if (ecc_strength >= 0) chip->ecc.strength = ecc_strength; @@ -4054,6 +4183,82 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips, } EXPORT_SYMBOL(nand_scan_ident); +static int nand_set_ecc_soft_ops(struct mtd_info *mtd) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct nand_ecc_ctrl *ecc = &chip->ecc; + + if (WARN_ON(ecc->mode != NAND_ECC_SOFT)) + return -EINVAL; + + switch (ecc->algo) { + case NAND_ECC_HAMMING: + ecc->calculate = nand_calculate_ecc; + ecc->correct = nand_correct_data; + ecc->read_page = nand_read_page_swecc; + ecc->read_subpage = nand_read_subpage; + ecc->write_page = nand_write_page_swecc; + ecc->read_page_raw = nand_read_page_raw; + ecc->write_page_raw = nand_write_page_raw; + ecc->read_oob = nand_read_oob_std; + ecc->write_oob = nand_write_oob_std; + if (!ecc->size) + ecc->size = 256; + ecc->bytes = 3; + ecc->strength = 1; + return 0; + case NAND_ECC_BCH: + if (!mtd_nand_has_bch()) { + WARN(1, "CONFIG_MTD_NAND_ECC_BCH not enabled\n"); + return -EINVAL; + } + ecc->calculate = nand_bch_calculate_ecc; + ecc->correct = nand_bch_correct_data; + ecc->read_page = nand_read_page_swecc; + ecc->read_subpage = nand_read_subpage; + ecc->write_page = nand_write_page_swecc; + ecc->read_page_raw = nand_read_page_raw; + ecc->write_page_raw = nand_write_page_raw; + ecc->read_oob = nand_read_oob_std; + ecc->write_oob = nand_write_oob_std; + /* + * Board driver should supply ecc.size and ecc.strength + * values to select how many bits are correctable. + * Otherwise, default to 4 bits for large page devices. + */ + if (!ecc->size && (mtd->oobsize >= 64)) { + ecc->size = 512; + ecc->strength = 4; + } + + /* + * if no ecc placement scheme was provided pickup the default + * large page one. 
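With NAND_ECC_SOFT_BCH gone, a board driver selects software BCH by pairing the mode with an explicit algorithm. A minimal sketch (the size/strength values are illustrative; the nandsim hunk further down does the same):

	chip->ecc.mode = NAND_ECC_SOFT;
	chip->ecc.algo = NAND_ECC_BCH;
	chip->ecc.size = 512;		/* codeword size */
	chip->ecc.strength = 4;		/* correctable bits per codeword */

nand_scan_tail() then routes this configuration through nand_set_ecc_soft_ops().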
+ */ + if (!mtd->ooblayout) { + /* handle large page devices only */ + if (mtd->oobsize < 64) { + WARN(1, "OOB layout is required when using software BCH on small pages\n"); + return -EINVAL; + } + + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); + } + + /* See nand_bch_init() for details. */ + ecc->bytes = 0; + ecc->priv = nand_bch_init(mtd); + if (!ecc->priv) { + WARN(1, "BCH ECC initialization failed!\n"); + return -EINVAL; + } + return 0; + default: + WARN(1, "Unsupported ECC algorithm!\n"); + return -EINVAL; + } +} + /* * Check if the chip configuration meet the datasheet requirements. @@ -4098,14 +4303,15 @@ static bool nand_ecc_strength_good(struct mtd_info *mtd) */ int nand_scan_tail(struct mtd_info *mtd) { - int i; struct nand_chip *chip = mtd_to_nand(mtd); struct nand_ecc_ctrl *ecc = &chip->ecc; struct nand_buffers *nbuf; + int ret; /* New bad blocks should be marked in OOB, flash-based BBT, or both */ - BUG_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) && - !(chip->bbt_options & NAND_BBT_USE_FLASH)); + if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) && + !(chip->bbt_options & NAND_BBT_USE_FLASH))) + return -EINVAL; if (!(chip->options & NAND_OWN_BUFFERS)) { nbuf = kzalloc(sizeof(*nbuf) + mtd->writesize @@ -4128,24 +4334,22 @@ int nand_scan_tail(struct mtd_info *mtd) /* * If no default placement scheme is given, select an appropriate one. */ - if (!ecc->layout && (ecc->mode != NAND_ECC_SOFT_BCH)) { + if (!mtd->ooblayout && + !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) { switch (mtd->oobsize) { case 8: - ecc->layout = &nand_oob_8; - break; case 16: - ecc->layout = &nand_oob_16; + mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops); break; case 64: - ecc->layout = &nand_oob_64; - break; case 128: - ecc->layout = &nand_oob_128; + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); break; default: - pr_warn("No oob scheme defined for oobsize %d\n", - mtd->oobsize); - BUG(); + WARN(1, "No oob scheme defined for oobsize %d\n", + mtd->oobsize); + ret = -EINVAL; + goto err_free; } } @@ -4161,8 +4365,9 @@ int nand_scan_tail(struct mtd_info *mtd) case NAND_ECC_HW_OOB_FIRST: /* Similar to NAND_ECC_HW, but a separate read_page handle */ if (!ecc->calculate || !ecc->correct || !ecc->hwctl) { - pr_warn("No ECC functions supplied; hardware ECC not possible\n"); - BUG(); + WARN(1, "No ECC functions supplied; hardware ECC not possible\n"); + ret = -EINVAL; + goto err_free; } if (!ecc->read_page) ecc->read_page = nand_read_page_hwecc_oob_first; @@ -4192,8 +4397,9 @@ int nand_scan_tail(struct mtd_info *mtd) ecc->read_page == nand_read_page_hwecc || !ecc->write_page || ecc->write_page == nand_write_page_hwecc)) { - pr_warn("No ECC functions supplied; hardware ECC not possible\n"); - BUG(); + WARN(1, "No ECC functions supplied; hardware ECC not possible\n"); + ret = -EINVAL; + goto err_free; } /* Use standard syndrome read/write page function? 
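Because the BUG() calls in this path have become graceful failures, misconfigurations that used to crash the kernel now surface as error codes, so the caller-side contract is simply (a sketch):

	int err = nand_scan_tail(mtd);
	if (err)
		return err;	/* e.g. missing ECC hooks now yield -EINVAL */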
*/ if (!ecc->read_page) @@ -4211,61 +4417,22 @@ int nand_scan_tail(struct mtd_info *mtd) if (mtd->writesize >= ecc->size) { if (!ecc->strength) { - pr_warn("Driver must set ecc.strength when using hardware ECC\n"); - BUG(); + WARN(1, "Driver must set ecc.strength when using hardware ECC\n"); + ret = -EINVAL; + goto err_free; } break; } pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n", ecc->size, mtd->writesize); ecc->mode = NAND_ECC_SOFT; + ecc->algo = NAND_ECC_HAMMING; case NAND_ECC_SOFT: - ecc->calculate = nand_calculate_ecc; - ecc->correct = nand_correct_data; - ecc->read_page = nand_read_page_swecc; - ecc->read_subpage = nand_read_subpage; - ecc->write_page = nand_write_page_swecc; - ecc->read_page_raw = nand_read_page_raw; - ecc->write_page_raw = nand_write_page_raw; - ecc->read_oob = nand_read_oob_std; - ecc->write_oob = nand_write_oob_std; - if (!ecc->size) - ecc->size = 256; - ecc->bytes = 3; - ecc->strength = 1; - break; - - case NAND_ECC_SOFT_BCH: - if (!mtd_nand_has_bch()) { - pr_warn("CONFIG_MTD_NAND_ECC_BCH not enabled\n"); - BUG(); - } - ecc->calculate = nand_bch_calculate_ecc; - ecc->correct = nand_bch_correct_data; - ecc->read_page = nand_read_page_swecc; - ecc->read_subpage = nand_read_subpage; - ecc->write_page = nand_write_page_swecc; - ecc->read_page_raw = nand_read_page_raw; - ecc->write_page_raw = nand_write_page_raw; - ecc->read_oob = nand_read_oob_std; - ecc->write_oob = nand_write_oob_std; - /* - * Board driver should supply ecc.size and ecc.strength values - * to select how many bits are correctable. Otherwise, default - * to 4 bits for large page devices. - */ - if (!ecc->size && (mtd->oobsize >= 64)) { - ecc->size = 512; - ecc->strength = 4; - } - - /* See nand_bch_init() for details. */ - ecc->bytes = 0; - ecc->priv = nand_bch_init(mtd); - if (!ecc->priv) { - pr_warn("BCH ECC initialization failed!\n"); - BUG(); + ret = nand_set_ecc_soft_ops(mtd); + if (ret) { + ret = -EINVAL; + goto err_free; } break; @@ -4283,8 +4450,9 @@ int nand_scan_tail(struct mtd_info *mtd) break; default: - pr_warn("Invalid NAND_ECC_MODE %d\n", ecc->mode); - BUG(); + WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode); + ret = -EINVAL; + goto err_free; } /* For many systems, the standard OOB write also works for raw */ @@ -4293,20 +4461,9 @@ int nand_scan_tail(struct mtd_info *mtd) if (!ecc->write_oob_raw) ecc->write_oob_raw = ecc->write_oob; - /* - * The number of bytes available for a client to place data into - * the out of band area. - */ - mtd->oobavail = 0; - if (ecc->layout) { - for (i = 0; ecc->layout->oobfree[i].length; i++) - mtd->oobavail += ecc->layout->oobfree[i].length; - } - - /* ECC sanity check: warn if it's too weak */ - if (!nand_ecc_strength_good(mtd)) - pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n", - mtd->name); + /* propagate ecc info to mtd_info */ + mtd->ecc_strength = ecc->strength; + mtd->ecc_step_size = ecc->size; /* * Set the number of read / write steps for one page depending on ECC @@ -4314,11 +4471,27 @@ int nand_scan_tail(struct mtd_info *mtd) */ ecc->steps = mtd->writesize / ecc->size; if (ecc->steps * ecc->size != mtd->writesize) { - pr_warn("Invalid ECC parameters\n"); - BUG(); + WARN(1, "Invalid ECC parameters\n"); + ret = -EINVAL; + goto err_free; } ecc->total = ecc->steps * ecc->bytes; + /* + * The number of bytes available for a client to place data into + * the out of band area. 
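Client-side view (a sketch; offs and my_oob_buf are hypothetical): an MTD_OPS_AUTO_OOB request is placed into the free regions counted here, so mtd->oobavail bounds how much OOB data a caller may pass per page:

	struct mtd_oob_ops ops = {
		.mode	= MTD_OPS_AUTO_OOB,
		.ooblen	= mtd->oobavail,	/* at most the counted free bytes */
		.oobbuf	= my_oob_buf,		/* hypothetical buffer */
	};

	int err = mtd_write_oob(mtd, offs, &ops);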
+ */ + ret = mtd_ooblayout_count_freebytes(mtd); + if (ret < 0) + ret = 0; + + mtd->oobavail = ret; + + /* ECC sanity check: warn if it's too weak */ + if (!nand_ecc_strength_good(mtd)) + pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n", + mtd->name); + /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */ if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) { switch (ecc->steps) { @@ -4343,7 +4516,6 @@ int nand_scan_tail(struct mtd_info *mtd) /* Large page NAND with SOFT_ECC should support subpage reads */ switch (ecc->mode) { case NAND_ECC_SOFT: - case NAND_ECC_SOFT_BCH: if (chip->page_shift > 9) chip->options |= NAND_SUBPAGE_READ; break; @@ -4375,10 +4547,6 @@ int nand_scan_tail(struct mtd_info *mtd) mtd->_block_markbad = nand_block_markbad; mtd->writebufsize = mtd->writesize; - /* propagate ecc info to mtd_info */ - mtd->ecclayout = ecc->layout; - mtd->ecc_strength = ecc->strength; - mtd->ecc_step_size = ecc->size; /* * Initialize bitflip_threshold to its default prior scan_bbt() call. * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be @@ -4393,6 +4561,10 @@ int nand_scan_tail(struct mtd_info *mtd) /* Build bad block table */ return chip->scan_bbt(mtd); +err_free: + if (!(chip->options & NAND_OWN_BUFFERS)) + kfree(chip->buffers); + return ret; } EXPORT_SYMBOL(nand_scan_tail); @@ -4436,7 +4608,8 @@ void nand_release(struct mtd_info *mtd) { struct nand_chip *chip = mtd_to_nand(mtd); - if (chip->ecc.mode == NAND_ECC_SOFT_BCH) + if (chip->ecc.mode == NAND_ECC_SOFT && + chip->ecc.algo == NAND_ECC_BCH) nand_bch_free((struct nand_bch_control *)chip->ecc.priv); mtd_device_unregister(mtd); diff --git a/drivers/mtd/nand/nand_bch.c b/drivers/mtd/nand/nand_bch.c index b585bae37929..44763f87eae4 100644 --- a/drivers/mtd/nand/nand_bch.c +++ b/drivers/mtd/nand/nand_bch.c @@ -32,13 +32,11 @@ /** * struct nand_bch_control - private NAND BCH control structure * @bch: BCH control structure - * @ecclayout: private ecc layout for this BCH configuration * @errloc: error location array * @eccmask: XOR ecc mask, allows erased pages to be decoded as valid */ struct nand_bch_control { struct bch_control *bch; - struct nand_ecclayout ecclayout; unsigned int *errloc; unsigned char *eccmask; }; @@ -124,7 +122,6 @@ struct nand_bch_control *nand_bch_init(struct mtd_info *mtd) { struct nand_chip *nand = mtd_to_nand(mtd); unsigned int m, t, eccsteps, i; - struct nand_ecclayout *layout = nand->ecc.layout; struct nand_bch_control *nbc = NULL; unsigned char *erased_page; unsigned int eccsize = nand->ecc.size; @@ -161,34 +158,10 @@ struct nand_bch_control *nand_bch_init(struct mtd_info *mtd) eccsteps = mtd->writesize/eccsize; - /* if no ecc placement scheme was provided, build one */ - if (!layout) { - - /* handle large page devices only */ - if (mtd->oobsize < 64) { - printk(KERN_WARNING "must provide an oob scheme for " - "oobsize %d\n", mtd->oobsize); - goto fail; - } - - layout = &nbc->ecclayout; - layout->eccbytes = eccsteps*eccbytes; - - /* reserve 2 bytes for bad block marker */ - if (layout->eccbytes+2 > mtd->oobsize) { - printk(KERN_WARNING "no suitable oob scheme available " - "for oobsize %d eccbytes %u\n", mtd->oobsize, - eccbytes); - goto fail; - } - /* put ecc bytes at oob tail */ - for (i = 0; i < layout->eccbytes; i++) - layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i; - - layout->oobfree[0].offset = 2; - layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes; - - nand->ecc.layout = 
layout; + /* Check that we have an oob layout description. */ + if (!mtd->ooblayout) { + pr_warn("missing oob scheme"); + goto fail; } /* sanity checks */ @@ -196,7 +169,18 @@ struct nand_bch_control *nand_bch_init(struct mtd_info *mtd) printk(KERN_WARNING "eccsize %u is too large\n", eccsize); goto fail; } - if (layout->eccbytes != (eccsteps*eccbytes)) { + + /* + * ecc->steps and ecc->total might be used by mtd->ooblayout->ecc(), + * which is called by mtd_ooblayout_count_eccbytes(). + * Make sure they are properly initialized before calling + * mtd_ooblayout_count_eccbytes(). + * FIXME: we should probably rework the sequencing in nand_scan_tail() + * to avoid setting those fields twice. + */ + nand->ecc.steps = eccsteps; + nand->ecc.total = eccsteps * eccbytes; + if (mtd_ooblayout_count_eccbytes(mtd) != (eccsteps*eccbytes)) { printk(KERN_WARNING "invalid ecc layout\n"); goto fail; } diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c index a58169a28741..1eb934414eb5 100644 --- a/drivers/mtd/nand/nandsim.c +++ b/drivers/mtd/nand/nandsim.c @@ -569,7 +569,7 @@ static void nandsim_debugfs_remove(struct nandsim *ns) * * RETURNS: 0 if success, -ENOMEM if memory alloc fails. */ -static int alloc_device(struct nandsim *ns) +static int __init alloc_device(struct nandsim *ns) { struct file *cfile; int i, err; @@ -654,7 +654,7 @@ static void free_device(struct nandsim *ns) } } -static char *get_partition_name(int i) +static char __init *get_partition_name(int i) { return kasprintf(GFP_KERNEL, "NAND simulator partition %d", i); } @@ -664,7 +664,7 @@ static char *get_partition_name(int i) * * RETURNS: 0 if success, -ERRNO if failure. */ -static int init_nandsim(struct mtd_info *mtd) +static int __init init_nandsim(struct mtd_info *mtd) { struct nand_chip *chip = mtd_to_nand(mtd); struct nandsim *ns = nand_get_controller_data(chip); @@ -2261,6 +2261,7 @@ static int __init ns_init_module(void) chip->read_buf = ns_nand_read_buf; chip->read_word = ns_nand_read_word; chip->ecc.mode = NAND_ECC_SOFT; + chip->ecc.algo = NAND_ECC_HAMMING; /* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */ /* and 'badblocks' parameters to work */ chip->options |= NAND_SKIP_BBTSCAN; @@ -2338,7 +2339,8 @@ static int __init ns_init_module(void) retval = -EINVAL; goto error; } - chip->ecc.mode = NAND_ECC_SOFT_BCH; + chip->ecc.mode = NAND_ECC_SOFT; + chip->ecc.algo = NAND_ECC_BCH; chip->ecc.size = 512; chip->ecc.strength = bch; chip->ecc.bytes = eccbytes; diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c index dbc5b571c2bb..8f64011d32ef 100644 --- a/drivers/mtd/nand/nuc900_nand.c +++ b/drivers/mtd/nand/nuc900_nand.c @@ -261,6 +261,7 @@ static int nuc900_nand_probe(struct platform_device *pdev) chip->chip_delay = 50; chip->options = 0; chip->ecc.mode = NAND_ECC_SOFT; + chip->ecc.algo = NAND_ECC_HAMMING; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); nuc900_nand->reg = devm_ioremap_resource(&pdev->dev, res); diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index 0749ca1a1456..08e158895635 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c @@ -12,6 +12,7 @@ #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/delay.h> +#include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/jiffies.h> @@ -28,6 +29,7 @@ #include <linux/mtd/nand_bch.h> #include <linux/platform_data/elm.h> +#include <linux/omap-gpmc.h> #include <linux/platform_data/mtd-nand-omap2.h> #define DRIVER_NAME 
"omap2-nand" @@ -151,13 +153,17 @@ static struct nand_hw_control omap_gpmc_controller = { }; struct omap_nand_info { - struct omap_nand_platform_data *pdata; struct nand_chip nand; struct platform_device *pdev; int gpmc_cs; - unsigned long phys_base; + bool dev_ready; + enum nand_io xfer_type; + int devsize; enum omap_ecc ecc_opt; + struct device_node *elm_of_node; + + unsigned long phys_base; struct completion comp; struct dma_chan *dma; int gpmc_irq_fifo; @@ -168,12 +174,14 @@ struct omap_nand_info { } iomode; u_char *buf; int buf_len; + /* Interface to GPMC */ struct gpmc_nand_regs reg; - /* generated at runtime depending on ECC algorithm and layout selected */ - struct nand_ecclayout oobinfo; + struct gpmc_nand_ops *ops; + bool flash_bbt; /* fields specific for BCHx_HW ECC scheme */ struct device *elm_dev; - struct device_node *of_node; + /* NAND ready gpio */ + struct gpio_desc *ready_gpiod; }; static inline struct omap_nand_info *mtd_to_omap(struct mtd_info *mtd) @@ -208,7 +216,7 @@ static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode, */ val = ((cs << PREFETCH_CONFIG1_CS_SHIFT) | PREFETCH_FIFOTHRESHOLD(fifo_th) | ENABLE_PREFETCH | - (dma_mode << DMA_MPU_MODE_SHIFT) | (0x1 & is_write)); + (dma_mode << DMA_MPU_MODE_SHIFT) | (is_write & 0x1)); writel(val, info->reg.gpmc_prefetch_config1); /* Start the prefetch engine */ @@ -288,14 +296,13 @@ static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len) { struct omap_nand_info *info = mtd_to_omap(mtd); u_char *p = (u_char *)buf; - u32 status = 0; + bool status; while (len--) { iowrite8(*p++, info->nand.IO_ADDR_W); /* wait until buffer is available for write */ do { - status = readl(info->reg.gpmc_status) & - STATUS_BUFF_EMPTY; + status = info->ops->nand_writebuffer_empty(); } while (!status); } } @@ -323,7 +330,7 @@ static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len) { struct omap_nand_info *info = mtd_to_omap(mtd); u16 *p = (u16 *) buf; - u32 status = 0; + bool status; /* FIXME try bursts of writesw() or DMA ... 
*/ len >>= 1; @@ -331,8 +338,7 @@ static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len) iowrite16(*p++, info->nand.IO_ADDR_W); /* wait until buffer is available for write */ do { - status = readl(info->reg.gpmc_status) & - STATUS_BUFF_EMPTY; + status = info->ops->nand_writebuffer_empty(); } while (!status); } } @@ -467,17 +473,8 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, int ret; u32 val; - if (addr >= high_memory) { - struct page *p1; - - if (((size_t)addr & PAGE_MASK) != - ((size_t)(addr + len - 1) & PAGE_MASK)) - goto out_copy; - p1 = vmalloc_to_page(addr); - if (!p1) - goto out_copy; - addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK); - } + if (!virt_addr_valid(addr)) + goto out_copy; sg_init_one(&sg, addr, len); n = dma_map_sg(info->dma->device->dev, &sg, 1, dir); @@ -497,6 +494,11 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, tx->callback_param = &info->comp; dmaengine_submit(tx); + init_completion(&info->comp); + + /* setup and start DMA using dma_addr */ + dma_async_issue_pending(info->dma); + /* configure and start prefetch transfer */ ret = omap_prefetch_enable(info->gpmc_cs, PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write, info); @@ -504,10 +506,6 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, /* PFPW engine is busy, use cpu copy method */ goto out_copy_unmap; - init_completion(&info->comp); - dma_async_issue_pending(info->dma); - - /* setup and start DMA using dma_addr */ wait_for_completion(&info->comp); tim = 0; limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS)); @@ -1017,21 +1015,16 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip) } /** - * omap_dev_ready - calls the platform specific dev_ready function + * omap_dev_ready - checks the NAND Ready GPIO line * @mtd: MTD device structure + * + * Returns true if ready and false if busy. 
*/ static int omap_dev_ready(struct mtd_info *mtd) { - unsigned int val = 0; struct omap_nand_info *info = mtd_to_omap(mtd); - val = readl(info->reg.gpmc_status); - - if ((val & 0x100) == 0x100) { - return 1; - } else { - return 0; - } + return gpiod_get_value(info->ready_gpiod); } /** @@ -1495,9 +1488,8 @@ static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data, static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf, int oob_required, int page) { - int i; + int ret; uint8_t *ecc_calc = chip->buffers->ecccalc; - uint32_t *eccpos = chip->ecc.layout->eccpos; /* Enable GPMC ecc engine */ chip->ecc.hwctl(mtd, NAND_ECC_WRITE); @@ -1508,8 +1500,10 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip, /* Update ecc vector from GPMC result registers */ chip->ecc.calculate(mtd, buf, &ecc_calc[0]); - for (i = 0; i < chip->ecc.total; i++) - chip->oob_poi[eccpos[i]] = ecc_calc[i]; + ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0, + chip->ecc.total); + if (ret) + return ret; /* Write ecc vector to OOB area */ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); @@ -1536,10 +1530,7 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip, { uint8_t *ecc_calc = chip->buffers->ecccalc; uint8_t *ecc_code = chip->buffers->ecccode; - uint32_t *eccpos = chip->ecc.layout->eccpos; - uint8_t *oob = &chip->oob_poi[eccpos[0]]; - uint32_t oob_pos = mtd->writesize + chip->ecc.layout->eccpos[0]; - int stat; + int stat, ret; unsigned int max_bitflips = 0; /* Enable GPMC ecc engine */ @@ -1549,13 +1540,18 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip, chip->read_buf(mtd, buf, mtd->writesize); /* Read oob bytes */ - chip->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_pos, -1); - chip->read_buf(mtd, oob, chip->ecc.total); + chip->cmdfunc(mtd, NAND_CMD_RNDOUT, + mtd->writesize + BADBLOCK_MARKER_LENGTH, -1); + chip->read_buf(mtd, chip->oob_poi + BADBLOCK_MARKER_LENGTH, + chip->ecc.total); /* Calculate ecc bytes */ chip->ecc.calculate(mtd, buf, ecc_calc); - memcpy(ecc_code, &chip->oob_poi[eccpos[0]], chip->ecc.total); + ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, + chip->ecc.total); + if (ret) + return ret; stat = chip->ecc.correct(mtd, buf, ecc_code, ecc_calc); @@ -1630,7 +1626,7 @@ static bool omap2_nand_ecc_check(struct omap_nand_info *info, "CONFIG_MTD_NAND_OMAP_BCH not enabled\n"); return false; } - if (ecc_needs_elm && !is_elm_present(info, pdata->elm_of_node)) { + if (ecc_needs_elm && !is_elm_present(info, info->elm_of_node)) { dev_err(&info->pdev->dev, "ELM not available\n"); return false; } @@ -1638,43 +1634,227 @@ static bool omap2_nand_ecc_check(struct omap_nand_info *info, return true; } +static const char * const nand_xfer_types[] = { + [NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled", + [NAND_OMAP_POLLED] = "polled", + [NAND_OMAP_PREFETCH_DMA] = "prefetch-dma", + [NAND_OMAP_PREFETCH_IRQ] = "prefetch-irq", +}; + +static int omap_get_dt_info(struct device *dev, struct omap_nand_info *info) +{ + struct device_node *child = dev->of_node; + int i; + const char *s; + u32 cs; + + if (of_property_read_u32(child, "reg", &cs) < 0) { + dev_err(dev, "reg not found in DT\n"); + return -EINVAL; + } + + info->gpmc_cs = cs; + + /* detect availability of ELM module. 
Won't be present pre-OMAP4 */ + info->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0); + if (!info->elm_of_node) + dev_dbg(dev, "ti,elm-id not in DT\n"); + + /* select ecc-scheme for NAND */ + if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) { + dev_err(dev, "ti,nand-ecc-opt not found\n"); + return -EINVAL; + } + + if (!strcmp(s, "sw")) { + info->ecc_opt = OMAP_ECC_HAM1_CODE_SW; + } else if (!strcmp(s, "ham1") || + !strcmp(s, "hw") || !strcmp(s, "hw-romcode")) { + info->ecc_opt = OMAP_ECC_HAM1_CODE_HW; + } else if (!strcmp(s, "bch4")) { + if (info->elm_of_node) + info->ecc_opt = OMAP_ECC_BCH4_CODE_HW; + else + info->ecc_opt = OMAP_ECC_BCH4_CODE_HW_DETECTION_SW; + } else if (!strcmp(s, "bch8")) { + if (info->elm_of_node) + info->ecc_opt = OMAP_ECC_BCH8_CODE_HW; + else + info->ecc_opt = OMAP_ECC_BCH8_CODE_HW_DETECTION_SW; + } else if (!strcmp(s, "bch16")) { + info->ecc_opt = OMAP_ECC_BCH16_CODE_HW; + } else { + dev_err(dev, "unrecognized value for ti,nand-ecc-opt\n"); + return -EINVAL; + } + + /* select data transfer mode */ + if (!of_property_read_string(child, "ti,nand-xfer-type", &s)) { + for (i = 0; i < ARRAY_SIZE(nand_xfer_types); i++) { + if (!strcasecmp(s, nand_xfer_types[i])) { + info->xfer_type = i; + return 0; + } + } + + dev_err(dev, "unrecognized value for ti,nand-xfer-type\n"); + return -EINVAL; + } + + return 0; +} + +static int omap_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct omap_nand_info *info = mtd_to_omap(mtd); + struct nand_chip *chip = &info->nand; + int off = BADBLOCK_MARKER_LENGTH; + + if (info->ecc_opt == OMAP_ECC_HAM1_CODE_HW && + !(chip->options & NAND_BUSWIDTH_16)) + off = 1; + + if (section) + return -ERANGE; + + oobregion->offset = off; + oobregion->length = chip->ecc.total; + + return 0; +} + +static int omap_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct omap_nand_info *info = mtd_to_omap(mtd); + struct nand_chip *chip = &info->nand; + int off = BADBLOCK_MARKER_LENGTH; + + if (info->ecc_opt == OMAP_ECC_HAM1_CODE_HW && + !(chip->options & NAND_BUSWIDTH_16)) + off = 1; + + if (section) + return -ERANGE; + + off += chip->ecc.total; + if (off >= mtd->oobsize) + return -ERANGE; + + oobregion->offset = off; + oobregion->length = mtd->oobsize - off; + + return 0; +} + +static const struct mtd_ooblayout_ops omap_ooblayout_ops = { + .ecc = omap_ooblayout_ecc, + .free = omap_ooblayout_free, +}; + +static int omap_sw_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + int off = BADBLOCK_MARKER_LENGTH; + + if (section >= chip->ecc.steps) + return -ERANGE; + + /* + * When SW correction is employed, one OMAP specific marker byte is + * reserved after each ECC step. + */ + oobregion->offset = off + (section * (chip->ecc.bytes + 1)); + oobregion->length = chip->ecc.bytes; + + return 0; +} + +static int omap_sw_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + int off = BADBLOCK_MARKER_LENGTH; + + if (section) + return -ERANGE; + + /* + * When SW correction is employed, one OMAP specific marker byte is + * reserved after each ECC step. 
+ */ + off += ((chip->ecc.bytes + 1) * chip->ecc.steps); + if (off >= mtd->oobsize) + return -ERANGE; + + oobregion->offset = off; + oobregion->length = mtd->oobsize - off; + + return 0; +} + +static const struct mtd_ooblayout_ops omap_sw_ooblayout_ops = { + .ecc = omap_sw_ooblayout_ecc, + .free = omap_sw_ooblayout_free, +}; + static int omap_nand_probe(struct platform_device *pdev) { struct omap_nand_info *info; - struct omap_nand_platform_data *pdata; + struct omap_nand_platform_data *pdata = NULL; struct mtd_info *mtd; struct nand_chip *nand_chip; - struct nand_ecclayout *ecclayout; int err; - int i; dma_cap_mask_t mask; unsigned sig; - unsigned oob_index; struct resource *res; - - pdata = dev_get_platdata(&pdev->dev); - if (pdata == NULL) { - dev_err(&pdev->dev, "platform data missing\n"); - return -ENODEV; - } + struct device *dev = &pdev->dev; + int min_oobbytes = BADBLOCK_MARKER_LENGTH; + int oobbytes_per_step; info = devm_kzalloc(&pdev->dev, sizeof(struct omap_nand_info), GFP_KERNEL); if (!info) return -ENOMEM; + info->pdev = pdev; + + if (dev->of_node) { + if (omap_get_dt_info(dev, info)) + return -EINVAL; + } else { + pdata = dev_get_platdata(&pdev->dev); + if (!pdata) { + dev_err(&pdev->dev, "platform data missing\n"); + return -EINVAL; + } + + info->gpmc_cs = pdata->cs; + info->reg = pdata->reg; + info->ecc_opt = pdata->ecc_opt; + if (pdata->dev_ready) + dev_info(&pdev->dev, "pdata->dev_ready is deprecated\n"); + + info->xfer_type = pdata->xfer_type; + info->devsize = pdata->devsize; + info->elm_of_node = pdata->elm_of_node; + info->flash_bbt = pdata->flash_bbt; + } + platform_set_drvdata(pdev, info); + info->ops = gpmc_omap_get_nand_ops(&info->reg, info->gpmc_cs); + if (!info->ops) { + dev_err(&pdev->dev, "Failed to get GPMC->NAND interface\n"); + return -ENODEV; + } - info->pdev = pdev; - info->gpmc_cs = pdata->cs; - info->reg = pdata->reg; - info->of_node = pdata->of_node; - info->ecc_opt = pdata->ecc_opt; nand_chip = &info->nand; mtd = nand_to_mtd(nand_chip); mtd->dev.parent = &pdev->dev; nand_chip->ecc.priv = NULL; - nand_set_flash_node(nand_chip, pdata->of_node); + nand_set_flash_node(nand_chip, dev->of_node); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); nand_chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res); @@ -1688,6 +1868,13 @@ static int omap_nand_probe(struct platform_device *pdev) nand_chip->IO_ADDR_W = nand_chip->IO_ADDR_R; nand_chip->cmd_ctrl = omap_hwcontrol; + info->ready_gpiod = devm_gpiod_get_optional(&pdev->dev, "rb", + GPIOD_IN); + if (IS_ERR(info->ready_gpiod)) { + dev_err(dev, "failed to get ready gpio\n"); + return PTR_ERR(info->ready_gpiod); + } + /* * If RDY/BSY line is connected to OMAP then use the omap ready * function and the generic nand_wait function which reads the status @@ -1695,7 +1882,7 @@ static int omap_nand_probe(struct platform_device *pdev) * chip delay which is slightly more than tR (AC Timing) of the NAND * device and read status register until you get a failure or success */ - if (pdata->dev_ready) { + if (info->ready_gpiod) { nand_chip->dev_ready = omap_dev_ready; nand_chip->chip_delay = 0; } else { @@ -1703,21 +1890,25 @@ static int omap_nand_probe(struct platform_device *pdev) nand_chip->chip_delay = 50; } - if (pdata->flash_bbt) - nand_chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB; - else - nand_chip->options |= NAND_SKIP_BBTSCAN; + if (info->flash_bbt) + nand_chip->bbt_options |= NAND_BBT_USE_FLASH; /* scan NAND device connected to chip controller */ - nand_chip->options |= pdata->devsize & 
NAND_BUSWIDTH_16; + nand_chip->options |= info->devsize & NAND_BUSWIDTH_16; if (nand_scan_ident(mtd, 1, NULL)) { - dev_err(&info->pdev->dev, "scan failed, may be bus-width mismatch\n"); + dev_err(&info->pdev->dev, + "scan failed, may be bus-width mismatch\n"); err = -ENXIO; goto return_error; } + if (nand_chip->bbt_options & NAND_BBT_USE_FLASH) + nand_chip->bbt_options |= NAND_BBT_NO_OOB; + else + nand_chip->options |= NAND_SKIP_BBTSCAN; + /* re-populate low-level callbacks based on xfer modes */ - switch (pdata->xfer_type) { + switch (info->xfer_type) { case NAND_OMAP_PREFETCH_POLLED: nand_chip->read_buf = omap_read_buf_pref; nand_chip->write_buf = omap_write_buf_pref; @@ -1797,7 +1988,7 @@ static int omap_nand_probe(struct platform_device *pdev) default: dev_err(&pdev->dev, - "xfer_type(%d) not supported!\n", pdata->xfer_type); + "xfer_type(%d) not supported!\n", info->xfer_type); err = -EINVAL; goto return_error; } @@ -1809,16 +2000,15 @@ static int omap_nand_probe(struct platform_device *pdev) /* * Bail out earlier to let NAND_ECC_SOFT code create its own - * ecclayout instead of using ours. + * ooblayout instead of using ours. */ if (info->ecc_opt == OMAP_ECC_HAM1_CODE_SW) { nand_chip->ecc.mode = NAND_ECC_SOFT; + nand_chip->ecc.algo = NAND_ECC_HAMMING; goto scan_tail; } /* populate MTD interface based on ECC scheme */ - ecclayout = &info->oobinfo; - nand_chip->ecc.layout = ecclayout; switch (info->ecc_opt) { case OMAP_ECC_HAM1_CODE_HW: pr_info("nand: using OMAP_ECC_HAM1_CODE_HW\n"); @@ -1829,19 +2019,12 @@ static int omap_nand_probe(struct platform_device *pdev) nand_chip->ecc.calculate = omap_calculate_ecc; nand_chip->ecc.hwctl = omap_enable_hwecc; nand_chip->ecc.correct = omap_correct_data; - /* define ECC layout */ - ecclayout->eccbytes = nand_chip->ecc.bytes * - (mtd->writesize / - nand_chip->ecc.size); - if (nand_chip->options & NAND_BUSWIDTH_16) - oob_index = BADBLOCK_MARKER_LENGTH; - else - oob_index = 1; - for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) - ecclayout->eccpos[i] = oob_index; - /* no reserved-marker in ecclayout for this ecc-scheme */ - ecclayout->oobfree->offset = - ecclayout->eccpos[ecclayout->eccbytes - 1] + 1; + mtd_set_ooblayout(mtd, &omap_ooblayout_ops); + oobbytes_per_step = nand_chip->ecc.bytes; + + if (!(nand_chip->options & NAND_BUSWIDTH_16)) + min_oobbytes = 1; + break; case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: @@ -1853,19 +2036,9 @@ static int omap_nand_probe(struct platform_device *pdev) nand_chip->ecc.hwctl = omap_enable_hwecc_bch; nand_chip->ecc.correct = nand_bch_correct_data; nand_chip->ecc.calculate = omap_calculate_ecc_bch; - /* define ECC layout */ - ecclayout->eccbytes = nand_chip->ecc.bytes * - (mtd->writesize / - nand_chip->ecc.size); - oob_index = BADBLOCK_MARKER_LENGTH; - for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) { - ecclayout->eccpos[i] = oob_index; - if (((i + 1) % nand_chip->ecc.bytes) == 0) - oob_index++; - } - /* include reserved-marker in ecclayout->oobfree calculation */ - ecclayout->oobfree->offset = 1 + - ecclayout->eccpos[ecclayout->eccbytes - 1] + 1; + mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops); + /* Reserve one byte for the OMAP marker */ + oobbytes_per_step = nand_chip->ecc.bytes + 1; /* software bch library is used for locating errors */ nand_chip->ecc.priv = nand_bch_init(mtd); if (!nand_chip->ecc.priv) { @@ -1887,16 +2060,8 @@ static int omap_nand_probe(struct platform_device *pdev) nand_chip->ecc.calculate = omap_calculate_ecc_bch; nand_chip->ecc.read_page = omap_read_page_bch; 
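The OMAP_ECC_HAM1_CODE_HW and BCHx cases above are representative of the whole series: a statically filled struct nand_ecclayout (an eccpos[] array plus fixed oobfree entries) is replaced by mtd_ooblayout_ops callbacks that compute each ECC and free OOB region on demand, one section at a time. Below is a minimal sketch of that callback pattern under simplified assumptions; the example_ names and the 2-byte bad-block-marker reservation are illustrative, not part of this patch:

#include <linux/errno.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>

static int example_ooblayout_ecc(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* One ECC section per step, packed after a 2-byte BBM. */
	if (section >= chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = 2 + section * chip->ecc.bytes;
	oobregion->length = chip->ecc.bytes;

	return 0;
}

static int example_ooblayout_free(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int ecc_end = 2 + chip->ecc.steps * chip->ecc.bytes;

	/* Everything after the BBM and ECC bytes is free OOB space. */
	if (section)
		return -ERANGE;

	oobregion->offset = ecc_end;
	oobregion->length = mtd->oobsize - ecc_end;

	return 0;
}

static const struct mtd_ooblayout_ops example_ooblayout_ops = {
	.ecc = example_ooblayout_ecc,
	.free = example_ooblayout_free,
};

A driver registers the ops once in probe with mtd_set_ooblayout(mtd, &example_ooblayout_ops); the MTD core then derives mtd->oobavail and the ECC byte positions from the callbacks instead of from a fixed table, which is what lets this patch delete the per-scheme eccpos loops.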
nand_chip->ecc.write_page = omap_write_page_bch; - /* define ECC layout */ - ecclayout->eccbytes = nand_chip->ecc.bytes * - (mtd->writesize / - nand_chip->ecc.size); - oob_index = BADBLOCK_MARKER_LENGTH; - for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) - ecclayout->eccpos[i] = oob_index; - /* reserved marker already included in ecclayout->eccbytes */ - ecclayout->oobfree->offset = - ecclayout->eccpos[ecclayout->eccbytes - 1] + 1; + mtd_set_ooblayout(mtd, &omap_ooblayout_ops); + oobbytes_per_step = nand_chip->ecc.bytes; err = elm_config(info->elm_dev, BCH4_ECC, mtd->writesize / nand_chip->ecc.size, @@ -1914,19 +2079,9 @@ static int omap_nand_probe(struct platform_device *pdev) nand_chip->ecc.hwctl = omap_enable_hwecc_bch; nand_chip->ecc.correct = nand_bch_correct_data; nand_chip->ecc.calculate = omap_calculate_ecc_bch; - /* define ECC layout */ - ecclayout->eccbytes = nand_chip->ecc.bytes * - (mtd->writesize / - nand_chip->ecc.size); - oob_index = BADBLOCK_MARKER_LENGTH; - for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) { - ecclayout->eccpos[i] = oob_index; - if (((i + 1) % nand_chip->ecc.bytes) == 0) - oob_index++; - } - /* include reserved-marker in ecclayout->oobfree calculation */ - ecclayout->oobfree->offset = 1 + - ecclayout->eccpos[ecclayout->eccbytes - 1] + 1; + mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops); + /* Reserve one byte for the OMAP marker */ + oobbytes_per_step = nand_chip->ecc.bytes + 1; /* software bch library is used for locating errors */ nand_chip->ecc.priv = nand_bch_init(mtd); if (!nand_chip->ecc.priv) { @@ -1948,6 +2103,8 @@ static int omap_nand_probe(struct platform_device *pdev) nand_chip->ecc.calculate = omap_calculate_ecc_bch; nand_chip->ecc.read_page = omap_read_page_bch; nand_chip->ecc.write_page = omap_write_page_bch; + mtd_set_ooblayout(mtd, &omap_ooblayout_ops); + oobbytes_per_step = nand_chip->ecc.bytes; err = elm_config(info->elm_dev, BCH8_ECC, mtd->writesize / nand_chip->ecc.size, @@ -1955,16 +2112,6 @@ static int omap_nand_probe(struct platform_device *pdev) if (err < 0) goto return_error; - /* define ECC layout */ - ecclayout->eccbytes = nand_chip->ecc.bytes * - (mtd->writesize / - nand_chip->ecc.size); - oob_index = BADBLOCK_MARKER_LENGTH; - for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) - ecclayout->eccpos[i] = oob_index; - /* reserved marker already included in ecclayout->eccbytes */ - ecclayout->oobfree->offset = - ecclayout->eccpos[ecclayout->eccbytes - 1] + 1; break; case OMAP_ECC_BCH16_CODE_HW: @@ -1978,6 +2125,8 @@ static int omap_nand_probe(struct platform_device *pdev) nand_chip->ecc.calculate = omap_calculate_ecc_bch; nand_chip->ecc.read_page = omap_read_page_bch; nand_chip->ecc.write_page = omap_write_page_bch; + mtd_set_ooblayout(mtd, &omap_ooblayout_ops); + oobbytes_per_step = nand_chip->ecc.bytes; err = elm_config(info->elm_dev, BCH16_ECC, mtd->writesize / nand_chip->ecc.size, @@ -1985,16 +2134,6 @@ static int omap_nand_probe(struct platform_device *pdev) if (err < 0) goto return_error; - /* define ECC layout */ - ecclayout->eccbytes = nand_chip->ecc.bytes * - (mtd->writesize / - nand_chip->ecc.size); - oob_index = BADBLOCK_MARKER_LENGTH; - for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) - ecclayout->eccpos[i] = oob_index; - /* reserved marker already included in ecclayout->eccbytes */ - ecclayout->oobfree->offset = - ecclayout->eccpos[ecclayout->eccbytes - 1] + 1; break; default: dev_err(&info->pdev->dev, "invalid or unsupported ECC scheme\n"); @@ -2002,13 +2141,13 @@ static int omap_nand_probe(struct 
platform_device *pdev) goto return_error; } - /* all OOB bytes from oobfree->offset till end off OOB are free */ - ecclayout->oobfree->length = mtd->oobsize - ecclayout->oobfree->offset; /* check if NAND device's OOB is enough to store ECC signatures */ - if (mtd->oobsize < (ecclayout->eccbytes + BADBLOCK_MARKER_LENGTH)) { + min_oobbytes += (oobbytes_per_step * + (mtd->writesize / nand_chip->ecc.size)); + if (mtd->oobsize < min_oobbytes) { dev_err(&info->pdev->dev, "not enough OOB bytes required = %d, available=%d\n", - ecclayout->eccbytes, mtd->oobsize); + min_oobbytes, mtd->oobsize); err = -EINVAL; goto return_error; } @@ -2020,7 +2159,10 @@ scan_tail: goto return_error; } - mtd_device_register(mtd, pdata->parts, pdata->nr_parts); + if (dev->of_node) + mtd_device_register(mtd, NULL, 0); + else + mtd_device_register(mtd, pdata->parts, pdata->nr_parts); platform_set_drvdata(pdev, mtd); @@ -2051,11 +2193,17 @@ static int omap_nand_remove(struct platform_device *pdev) return 0; } +static const struct of_device_id omap_nand_ids[] = { + { .compatible = "ti,omap2-nand", }, + {}, +}; + static struct platform_driver omap_nand_driver = { .probe = omap_nand_probe, .remove = omap_nand_remove, .driver = { .name = DRIVER_NAME, + .of_match_table = of_match_ptr(omap_nand_ids), }, }; diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c index d4614bfbfed6..40a7c4a2cf0d 100644 --- a/drivers/mtd/nand/orion_nand.c +++ b/drivers/mtd/nand/orion_nand.c @@ -130,6 +130,7 @@ static int __init orion_nand_probe(struct platform_device *pdev) nc->cmd_ctrl = orion_nand_cmd_ctrl; nc->read_buf = orion_nand_read_buf; nc->ecc.mode = NAND_ECC_SOFT; + nc->ecc.algo = NAND_ECC_HAMMING; if (board->chip_delay) nc->chip_delay = board->chip_delay; diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c index 3ab53ca53cca..5de7591b0510 100644 --- a/drivers/mtd/nand/pasemi_nand.c +++ b/drivers/mtd/nand/pasemi_nand.c @@ -92,8 +92,9 @@ int pasemi_device_ready(struct mtd_info *mtd) static int pasemi_nand_probe(struct platform_device *ofdev) { + struct device *dev = &ofdev->dev; struct pci_dev *pdev; - struct device_node *np = ofdev->dev.of_node; + struct device_node *np = dev->of_node; struct resource res; struct nand_chip *chip; int err = 0; @@ -107,13 +108,11 @@ static int pasemi_nand_probe(struct platform_device *ofdev) if (pasemi_nand_mtd) return -ENODEV; - pr_debug("pasemi_nand at %pR\n", &res); + dev_dbg(dev, "pasemi_nand at %pR\n", &res); /* Allocate memory for MTD device structure and private data */ chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL); if (!chip) { - printk(KERN_WARNING - "Unable to allocate PASEMI NAND MTD device structure\n"); err = -ENOMEM; goto out; } @@ -121,7 +120,7 @@ static int pasemi_nand_probe(struct platform_device *ofdev) pasemi_nand_mtd = nand_to_mtd(chip); /* Link the private data with the MTD structure */ - pasemi_nand_mtd->dev.parent = &ofdev->dev; + pasemi_nand_mtd->dev.parent = dev; chip->IO_ADDR_R = of_iomap(np, 0); chip->IO_ADDR_W = chip->IO_ADDR_R; @@ -151,6 +150,7 @@ static int pasemi_nand_probe(struct platform_device *ofdev) chip->write_buf = pasemi_write_buf; chip->chip_delay = 0; chip->ecc.mode = NAND_ECC_SOFT; + chip->ecc.algo = NAND_ECC_HAMMING; /* Enable the following for a flash based bad block table */ chip->bbt_options = NAND_BBT_USE_FLASH; @@ -162,13 +162,13 @@ static int pasemi_nand_probe(struct platform_device *ofdev) } if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) { - printk(KERN_ERR "pasemi_nand: Unable to register MTD 
device\n"); + dev_err(dev, "Unable to register MTD device\n"); err = -ENODEV; goto out_lpc; } - printk(KERN_INFO "PA Semi NAND flash at %08llx, control at I/O %x\n", - res.start, lpcctl); + dev_info(dev, "PA Semi NAND flash at %pR, control at I/O %x\n", &res, + lpcctl); return 0; diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c index e4e50da30444..415a53a0deeb 100644 --- a/drivers/mtd/nand/plat_nand.c +++ b/drivers/mtd/nand/plat_nand.c @@ -74,6 +74,7 @@ static int plat_nand_probe(struct platform_device *pdev) data->chip.ecc.hwctl = pdata->ctrl.hwcontrol; data->chip.ecc.mode = NAND_ECC_SOFT; + data->chip.ecc.algo = NAND_ECC_HAMMING; platform_set_drvdata(pdev, data); diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index d6508856da99..436dd6dc11f4 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c @@ -29,7 +29,6 @@ #include <linux/slab.h> #include <linux/of.h> #include <linux/of_device.h> -#include <linux/of_mtd.h> #include <linux/platform_data/mtd-nand-pxa3xx.h> #define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200) @@ -324,6 +323,62 @@ static struct pxa3xx_nand_flash builtin_flash_types[] = { { 0xba20, 16, 16, &timing[3] }, }; +static int pxa3xx_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct pxa3xx_nand_host *host = nand_get_controller_data(chip); + struct pxa3xx_nand_info *info = host->info_data; + int nchunks = mtd->writesize / info->chunk_size; + + if (section >= nchunks) + return -ERANGE; + + oobregion->offset = ((info->ecc_size + info->spare_size) * section) + + info->spare_size; + oobregion->length = info->ecc_size; + + return 0; +} + +static int pxa3xx_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct pxa3xx_nand_host *host = nand_get_controller_data(chip); + struct pxa3xx_nand_info *info = host->info_data; + int nchunks = mtd->writesize / info->chunk_size; + + if (section >= nchunks) + return -ERANGE; + + if (!info->spare_size) + return 0; + + oobregion->offset = section * (info->ecc_size + info->spare_size); + oobregion->length = info->spare_size; + if (!section) { + /* + * Bootrom looks in bytes 0 & 5 for bad blocks for the + * 4KB page / 4bit BCH combination. 
+ */ + if (mtd->writesize == 4096 && info->chunk_size == 2048) { + oobregion->offset += 6; + oobregion->length -= 6; + } else { + oobregion->offset += 2; + oobregion->length -= 2; + } + } + + return 0; +} + +static const struct mtd_ooblayout_ops pxa3xx_ooblayout_ops = { + .ecc = pxa3xx_ooblayout_ecc, + .free = pxa3xx_ooblayout_free, +}; + static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' }; static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' }; @@ -347,41 +402,6 @@ static struct nand_bbt_descr bbt_mirror_descr = { .pattern = bbt_mirror_pattern }; -static struct nand_ecclayout ecc_layout_2KB_bch4bit = { - .eccbytes = 32, - .eccpos = { - 32, 33, 34, 35, 36, 37, 38, 39, - 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, - 56, 57, 58, 59, 60, 61, 62, 63}, - .oobfree = { {2, 30} } -}; - -static struct nand_ecclayout ecc_layout_4KB_bch4bit = { - .eccbytes = 64, - .eccpos = { - 32, 33, 34, 35, 36, 37, 38, 39, - 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, - 56, 57, 58, 59, 60, 61, 62, 63, - 96, 97, 98, 99, 100, 101, 102, 103, - 104, 105, 106, 107, 108, 109, 110, 111, - 112, 113, 114, 115, 116, 117, 118, 119, - 120, 121, 122, 123, 124, 125, 126, 127}, - /* Bootrom looks in bytes 0 & 5 for bad blocks */ - .oobfree = { {6, 26}, { 64, 32} } -}; - -static struct nand_ecclayout ecc_layout_4KB_bch8bit = { - .eccbytes = 128, - .eccpos = { - 32, 33, 34, 35, 36, 37, 38, 39, - 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, - 56, 57, 58, 59, 60, 61, 62, 63}, - .oobfree = { } -}; - #define NDTR0_tCH(c) (min((c), 7) << 19) #define NDTR0_tCS(c) (min((c), 7) << 16) #define NDTR0_tWH(c) (min((c), 7) << 11) @@ -1546,9 +1566,12 @@ static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info) } static int pxa_ecc_init(struct pxa3xx_nand_info *info, - struct nand_ecc_ctrl *ecc, + struct mtd_info *mtd, int strength, int ecc_stepsize, int page_size) { + struct nand_chip *chip = mtd_to_nand(mtd); + struct nand_ecc_ctrl *ecc = &chip->ecc; + if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) { info->nfullchunks = 1; info->ntotalchunks = 1; @@ -1582,7 +1605,7 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info, info->ecc_size = 32; ecc->mode = NAND_ECC_HW; ecc->size = info->chunk_size; - ecc->layout = &ecc_layout_2KB_bch4bit; + mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops); ecc->strength = 16; } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) { @@ -1594,7 +1617,7 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info, info->ecc_size = 32; ecc->mode = NAND_ECC_HW; ecc->size = info->chunk_size; - ecc->layout = &ecc_layout_4KB_bch4bit; + mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops); ecc->strength = 16; /* @@ -1612,7 +1635,7 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info, info->ecc_size = 32; ecc->mode = NAND_ECC_HW; ecc->size = info->chunk_size; - ecc->layout = &ecc_layout_4KB_bch8bit; + mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops); ecc->strength = 16; } else { dev_err(&info->pdev->dev, @@ -1651,6 +1674,12 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd) if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) nand_writel(info, NDECCCTRL, 0x0); + if (pdata->flash_bbt) + chip->bbt_options |= NAND_BBT_USE_FLASH; + + chip->ecc.strength = pdata->ecc_strength; + chip->ecc.size = pdata->ecc_step_size; + if (nand_scan_ident(mtd, 1, NULL)) return -ENODEV; @@ -1663,13 +1692,12 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd) } } - if (pdata->flash_bbt) { + if (chip->bbt_options & NAND_BBT_USE_FLASH) { /* * 
We'll use a bad block table stored in-flash and don't * allow writing the bad block marker to the flash. */ - chip->bbt_options |= NAND_BBT_USE_FLASH | - NAND_BBT_NO_OOB_BBM; + chip->bbt_options |= NAND_BBT_NO_OOB_BBM; chip->bbt_td = &bbt_main_descr; chip->bbt_md = &bbt_mirror_descr; } @@ -1689,10 +1717,9 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd) } } - if (pdata->ecc_strength && pdata->ecc_step_size) { - ecc_strength = pdata->ecc_strength; - ecc_step = pdata->ecc_step_size; - } else { + ecc_strength = chip->ecc.strength; + ecc_step = chip->ecc.size; + if (!ecc_strength || !ecc_step) { ecc_strength = chip->ecc_strength_ds; ecc_step = chip->ecc_step_ds; } @@ -1703,7 +1730,7 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd) ecc_step = 512; } - ret = pxa_ecc_init(info, &chip->ecc, ecc_strength, + ret = pxa_ecc_init(info, mtd, ecc_strength, ecc_step, mtd->writesize); if (ret) return ret; @@ -1903,15 +1930,6 @@ static int pxa3xx_nand_probe_dt(struct platform_device *pdev) if (of_get_property(np, "marvell,nand-keep-config", NULL)) pdata->keep_config = 1; of_property_read_u32(np, "num-cs", &pdata->num_cs); - pdata->flash_bbt = of_get_nand_on_flash_bbt(np); - - pdata->ecc_strength = of_get_nand_ecc_strength(np); - if (pdata->ecc_strength < 0) - pdata->ecc_strength = 0; - - pdata->ecc_step_size = of_get_nand_ecc_step_size(np); - if (pdata->ecc_step_size < 0) - pdata->ecc_step_size = 0; pdev->dev.platform_data = pdata; diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c index f550a57e6eea..de7d28e62d4e 100644 --- a/drivers/mtd/nand/qcom_nandc.c +++ b/drivers/mtd/nand/qcom_nandc.c @@ -21,7 +21,6 @@ #include <linux/mtd/partitions.h> #include <linux/of.h> #include <linux/of_device.h> -#include <linux/of_mtd.h> #include <linux/delay.h> /* NANDc reg offsets */ @@ -1437,7 +1436,6 @@ static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); struct nand_ecc_ctrl *ecc = &chip->ecc; u8 *oob = chip->oob_poi; - int free_boff; int data_size, oob_size; int ret, status = 0; @@ -1451,12 +1449,11 @@ static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, /* calculate the data and oob size for the last codeword/step */ data_size = ecc->size - ((ecc->steps - 1) << 2); - oob_size = ecc->steps << 2; - - free_boff = ecc->layout->oobfree[0].offset; + oob_size = mtd->oobavail; /* override new oob content to last codeword */ - memcpy(nandc->data_buffer + data_size, oob + free_boff, oob_size); + mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob, + 0, mtd->oobavail); set_address(host, host->cw_size * (ecc->steps - 1), page); update_rw_regs(host, 1, false); @@ -1710,61 +1707,52 @@ static void qcom_nandc_select_chip(struct mtd_info *mtd, int chipnr) * This layout is read as is when ECC is disabled. When ECC is enabled, the * inaccessible Bad Block byte(s) are ignored when we write to a page/oob, * and assumed as 0xffs when we read a page/oob. The ECC, unused and - * dummy/real bad block bytes are grouped as ecc bytes in nand_ecclayout (i.e, - * ecc->bytes is the sum of the three). + * dummy/real bad block bytes are grouped as ecc bytes (i.e, ecc->bytes is + * the sum of the three). 
*/ - -static struct nand_ecclayout * -qcom_nand_create_layout(struct qcom_nand_host *host) +static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) { - struct nand_chip *chip = &host->chip; - struct mtd_info *mtd = nand_to_mtd(chip); - struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + struct nand_chip *chip = mtd_to_nand(mtd); + struct qcom_nand_host *host = to_qcom_nand_host(chip); struct nand_ecc_ctrl *ecc = &chip->ecc; - struct nand_ecclayout *layout; - int i, j, steps, pos = 0, shift = 0; - layout = devm_kzalloc(nandc->dev, sizeof(*layout), GFP_KERNEL); - if (!layout) - return NULL; - - steps = mtd->writesize / ecc->size; - layout->eccbytes = steps * ecc->bytes; + if (section > 1) + return -ERANGE; - layout->oobfree[0].offset = (steps - 1) * ecc->bytes + host->bbm_size; - layout->oobfree[0].length = steps << 2; - - /* - * the oob bytes in the first n - 1 codewords are all grouped together - * in the format: - * DUMMY_BBM + UNUSED + ECC - */ - for (i = 0; i < steps - 1; i++) { - for (j = 0; j < ecc->bytes; j++) - layout->eccpos[pos++] = i * ecc->bytes + j; + if (!section) { + oobregion->length = (ecc->bytes * (ecc->steps - 1)) + + host->bbm_size; + oobregion->offset = 0; + } else { + oobregion->length = host->ecc_bytes_hw + host->spare_bytes; + oobregion->offset = mtd->oobsize - oobregion->length; } - /* - * the oob bytes in the last codeword are grouped in the format: - * BBM + FREE OOB + UNUSED + ECC - */ + return 0; +} - /* fill up the bbm positions */ - for (j = 0; j < host->bbm_size; j++) - layout->eccpos[pos++] = i * ecc->bytes + j; +static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct qcom_nand_host *host = to_qcom_nand_host(chip); + struct nand_ecc_ctrl *ecc = &chip->ecc; - /* - * fill up the ecc and reserved positions, their indices are offseted - * by the free oob region - */ - shift = layout->oobfree[0].length + host->bbm_size; + if (section) + return -ERANGE; - for (j = 0; j < (host->ecc_bytes_hw + host->spare_bytes); j++) - layout->eccpos[pos++] = i * ecc->bytes + shift + j; + oobregion->length = ecc->steps * 4; + oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size; - return layout; + return 0; } +static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = { + .ecc = qcom_nand_ooblayout_ecc, + .free = qcom_nand_ooblayout_free, +}; + static int qcom_nand_host_setup(struct qcom_nand_host *host) { struct nand_chip *chip = &host->chip; @@ -1851,9 +1839,7 @@ static int qcom_nand_host_setup(struct qcom_nand_host *host) ecc->mode = NAND_ECC_HW; - ecc->layout = qcom_nand_create_layout(host); - if (!ecc->layout) - return -ENOMEM; + mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops); cwperpage = mtd->writesize / ecc->size; diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c index 9c9397b54b2c..d9309cf0ce2e 100644 --- a/drivers/mtd/nand/s3c2410.c +++ b/drivers/mtd/nand/s3c2410.c @@ -84,11 +84,33 @@ /* new oob placement block for use with hardware ecc generation */ +static int s3c2410_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + if (section) + return -ERANGE; + + oobregion->offset = 0; + oobregion->length = 3; + + return 0; +} + +static int s3c2410_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + if (section) + return -ERANGE; + + oobregion->offset = 8; + oobregion->length = 8; + + 
return 0; +} -static struct nand_ecclayout nand_hw_eccoob = { - .eccbytes = 3, - .eccpos = {0, 1, 2}, - .oobfree = {{8, 8}} +static const struct mtd_ooblayout_ops s3c2410_ooblayout_ops = { + .ecc = s3c2410_ooblayout_ecc, + .free = s3c2410_ooblayout_free, }; /* controller and mtd information */ @@ -542,7 +564,8 @@ static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat, diff0 |= (diff1 << 8); diff0 |= (diff2 << 16); - if ((diff0 & ~(1<<fls(diff0))) == 0) + /* equal to "(diff0 & ~(1 << __ffs(diff0)))" */ + if ((diff0 & (diff0 - 1)) == 0) return 1; return -1; @@ -859,6 +882,7 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info, } #else chip->ecc.mode = NAND_ECC_SOFT; + chip->ecc.algo = NAND_ECC_HAMMING; #endif if (set->disable_ecc) @@ -919,7 +943,7 @@ static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info, } else { chip->ecc.size = 512; chip->ecc.bytes = 3; - chip->ecc.layout = &nand_hw_eccoob; + mtd_set_ooblayout(nand_to_mtd(chip), &s3c2410_ooblayout_ops); } } diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c index 4814402902f9..6fa3bcd59769 100644 --- a/drivers/mtd/nand/sh_flctl.c +++ b/drivers/mtd/nand/sh_flctl.c @@ -31,7 +31,6 @@ #include <linux/io.h> #include <linux/of.h> #include <linux/of_device.h> -#include <linux/of_mtd.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/sh_dma.h> @@ -43,26 +42,73 @@ #include <linux/mtd/partitions.h> #include <linux/mtd/sh_flctl.h> -static struct nand_ecclayout flctl_4secc_oob_16 = { - .eccbytes = 10, - .eccpos = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, - .oobfree = { - {.offset = 12, - . length = 4} }, +static int flctl_4secc_ooblayout_sp_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + + if (section) + return -ERANGE; + + oobregion->offset = 0; + oobregion->length = chip->ecc.bytes; + + return 0; +} + +static int flctl_4secc_ooblayout_sp_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + if (section) + return -ERANGE; + + oobregion->offset = 12; + oobregion->length = 4; + + return 0; +} + +static const struct mtd_ooblayout_ops flctl_4secc_oob_smallpage_ops = { + .ecc = flctl_4secc_ooblayout_sp_ecc, + .free = flctl_4secc_ooblayout_sp_free, }; -static struct nand_ecclayout flctl_4secc_oob_64 = { - .eccbytes = 4 * 10, - .eccpos = { - 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 54, 55, 56, 57, 58, 59, 60, 61, 62, 63 }, - .oobfree = { - {.offset = 2, .length = 4}, - {.offset = 16, .length = 6}, - {.offset = 32, .length = 6}, - {.offset = 48, .length = 6} }, +static int flctl_4secc_ooblayout_lp_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + + if (section >= chip->ecc.steps) + return -ERANGE; + + oobregion->offset = (section * 16) + 6; + oobregion->length = chip->ecc.bytes; + + return 0; +} + +static int flctl_4secc_ooblayout_lp_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + + if (section >= chip->ecc.steps) + return -ERANGE; + + oobregion->offset = section * 16; + oobregion->length = 6; + + if (!section) { + oobregion->offset += 2; + oobregion->length -= 2; + } + + return 0; +} + +static const struct mtd_ooblayout_ops flctl_4secc_oob_largepage_ops = { + .ecc = flctl_4secc_ooblayout_lp_ecc, + .free = flctl_4secc_ooblayout_lp_free, }; 
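Callers never dereference mtd_ooblayout_ops directly; the MTD core walks the sections through mtd_ooblayout_ecc() and mtd_ooblayout_free() until a callback returns -ERANGE, which is why every implementation above rejects out-of-range sections. The mtd_ooblayout_count_eccbytes() and mtd_ooblayout_get/set_eccbytes() helpers used elsewhere in this diff are thin wrappers around the same iteration. A short sketch of that consumer side, assuming only the helpers visible in this patch:

#include <linux/errno.h>
#include <linux/mtd/mtd.h>
#include <linux/printk.h>

/* Walk and print every ECC region exposed by an mtd_ooblayout_ops. */
static int example_dump_ecc_regions(struct mtd_info *mtd)
{
	struct mtd_oob_region region;
	int section = 0, ret;

	while (!(ret = mtd_ooblayout_ecc(mtd, section, &region))) {
		pr_info("ecc region %d: offset %u, length %u\n",
			section, region.offset, region.length);
		section++;
	}

	/* -ERANGE simply marks the end of the section list. */
	return ret == -ERANGE ? 0 : ret;
}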
static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; @@ -987,10 +1033,10 @@ static int flctl_chip_init_tail(struct mtd_info *mtd) if (flctl->hwecc) { if (mtd->writesize == 512) { - chip->ecc.layout = &flctl_4secc_oob_16; + mtd_set_ooblayout(mtd, &flctl_4secc_oob_smallpage_ops); chip->badblock_pattern = &flctl_4secc_smallpage; } else { - chip->ecc.layout = &flctl_4secc_oob_64; + mtd_set_ooblayout(mtd, &flctl_4secc_oob_largepage_ops); chip->badblock_pattern = &flctl_4secc_largepage; } @@ -1005,6 +1051,7 @@ static int flctl_chip_init_tail(struct mtd_info *mtd) flctl->flcmncr_base |= _4ECCEN; } else { chip->ecc.mode = NAND_ECC_SOFT; + chip->ecc.algo = NAND_ECC_HAMMING; } return 0; @@ -1044,8 +1091,6 @@ static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev) const struct of_device_id *match; struct flctl_soc_config *config; struct sh_flctl_platform_data *pdata; - struct device_node *dn = dev->of_node; - int ret; match = of_match_device(of_flctl_match, dev); if (match) @@ -1065,15 +1110,6 @@ static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev) pdata->has_hwecc = config->has_hwecc; pdata->use_holden = config->use_holden; - /* parse user defined options */ - ret = of_get_nand_bus_width(dn); - if (ret == 16) - pdata->flcmncr_val |= SEL_16BIT; - else if (ret != 8) { - dev_err(dev, "%s: invalid bus width\n", __func__); - return NULL; - } - return pdata; } @@ -1136,15 +1172,14 @@ static int flctl_probe(struct platform_device *pdev) nand->chip_delay = 20; nand->read_byte = flctl_read_byte; + nand->read_word = flctl_read_word; nand->write_buf = flctl_write_buf; nand->read_buf = flctl_read_buf; nand->select_chip = flctl_select_chip; nand->cmdfunc = flctl_cmdfunc; - if (pdata->flcmncr_val & SEL_16BIT) { + if (pdata->flcmncr_val & SEL_16BIT) nand->options |= NAND_BUSWIDTH_16; - nand->read_word = flctl_read_word; - } pm_runtime_enable(&pdev->dev); pm_runtime_resume(&pdev->dev); @@ -1155,6 +1190,16 @@ static int flctl_probe(struct platform_device *pdev) if (ret) goto err_chip; + if (nand->options & NAND_BUSWIDTH_16) { + /* + * NAND_BUSWIDTH_16 may have been set by nand_scan_ident(). + * Add the SEL_16BIT flag in pdata->flcmncr_val and re-assign + * flctl->flcmncr_base to pdata->flcmncr_val. 
+ */ + pdata->flcmncr_val |= SEL_16BIT; + flctl->flcmncr_base = pdata->flcmncr_val; + } + ret = flctl_chip_init_tail(flctl_mtd); if (ret) goto err_chip; diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c index b7d1b55a160b..064ca1757589 100644 --- a/drivers/mtd/nand/sharpsl.c +++ b/drivers/mtd/nand/sharpsl.c @@ -148,6 +148,7 @@ static int sharpsl_nand_probe(struct platform_device *pdev) /* Link the private data with the MTD structure */ mtd = nand_to_mtd(this); mtd->dev.parent = &pdev->dev; + mtd_set_ooblayout(mtd, data->ecc_layout); platform_set_drvdata(pdev, sharpsl); @@ -170,7 +171,6 @@ static int sharpsl_nand_probe(struct platform_device *pdev) this->ecc.bytes = 3; this->ecc.strength = 1; this->badblock_pattern = data->badblock_pattern; - this->ecc.layout = data->ecc_layout; this->ecc.hwctl = sharpsl_nand_enable_hwecc; this->ecc.calculate = sharpsl_nand_calculate_ecc; this->ecc.correct = nand_correct_data; diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c index c514740f9a83..5939dff253c2 100644 --- a/drivers/mtd/nand/sm_common.c +++ b/drivers/mtd/nand/sm_common.c @@ -12,14 +12,47 @@ #include <linux/sizes.h> #include "sm_common.h" -static struct nand_ecclayout nand_oob_sm = { - .eccbytes = 6, - .eccpos = {8, 9, 10, 13, 14, 15}, - .oobfree = { - {.offset = 0 , .length = 4}, /* reserved */ - {.offset = 6 , .length = 2}, /* LBA1 */ - {.offset = 11, .length = 2} /* LBA2 */ +static int oob_sm_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + if (section > 1) + return -ERANGE; + + oobregion->length = 3; + oobregion->offset = ((section + 1) * 8) - 3; + + return 0; +} + +static int oob_sm_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + switch (section) { + case 0: + /* reserved */ + oobregion->offset = 0; + oobregion->length = 4; + break; + case 1: + /* LBA1 */ + oobregion->offset = 6; + oobregion->length = 2; + break; + case 2: + /* LBA2 */ + oobregion->offset = 11; + oobregion->length = 2; + break; + default: + return -ERANGE; } + + return 0; +} + +static const struct mtd_ooblayout_ops oob_sm_ops = { + .ecc = oob_sm_ooblayout_ecc, + .free = oob_sm_ooblayout_free, }; /* NOTE: This layout is is not compatabable with SmartMedia, */ @@ -28,15 +61,43 @@ static struct nand_ecclayout nand_oob_sm = { /* If you use smftl, it will bypass this and work correctly */ /* If you not, then you break SmartMedia compliance anyway */ -static struct nand_ecclayout nand_oob_sm_small = { - .eccbytes = 3, - .eccpos = {0, 1, 2}, - .oobfree = { - {.offset = 3 , .length = 2}, /* reserved */ - {.offset = 6 , .length = 2}, /* LBA1 */ +static int oob_sm_small_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + if (section) + return -ERANGE; + + oobregion->length = 3; + oobregion->offset = 0; + + return 0; +} + +static int oob_sm_small_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + switch (section) { + case 0: + /* reserved */ + oobregion->offset = 3; + oobregion->length = 2; + break; + case 1: + /* LBA1 */ + oobregion->offset = 6; + oobregion->length = 2; + break; + default: + return -ERANGE; } -}; + return 0; +} + +static const struct mtd_ooblayout_ops oob_sm_small_ops = { + .ecc = oob_sm_small_ooblayout_ecc, + .free = oob_sm_small_ooblayout_free, +}; static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs) { @@ -121,9 +182,9 @@ int sm_register_device(struct mtd_info *mtd, int smartmedia) /* ECC layout */ if 
(mtd->writesize == SM_SECTOR_SIZE) - chip->ecc.layout = &nand_oob_sm; + mtd_set_ooblayout(mtd, &oob_sm_ops); else if (mtd->writesize == SM_SMALL_PAGE) - chip->ecc.layout = &nand_oob_sm_small; + mtd_set_ooblayout(mtd, &oob_sm_small_ops); else return -ENODEV; diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c index e3305f9dd6fb..888fd314c62a 100644 --- a/drivers/mtd/nand/socrates_nand.c +++ b/drivers/mtd/nand/socrates_nand.c @@ -180,6 +180,7 @@ static int socrates_nand_probe(struct platform_device *ofdev) nand_chip->dev_ready = socrates_nand_device_ready; nand_chip->ecc.mode = NAND_ECC_SOFT; /* enable ECC */ + nand_chip->ecc.algo = NAND_ECC_HAMMING; /* TODO: I have no idea what real delay is. */ nand_chip->chip_delay = 20; /* 20us command delay time */ diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c index 1c03eee44f3d..a83a690688b4 100644 --- a/drivers/mtd/nand/sunxi_nand.c +++ b/drivers/mtd/nand/sunxi_nand.c @@ -30,7 +30,6 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_gpio.h> -#include <linux/of_mtd.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> @@ -39,7 +38,7 @@ #include <linux/dmaengine.h> #include <linux/gpio.h> #include <linux/interrupt.h> -#include <linux/io.h> +#include <linux/iopoll.h> #define NFC_REG_CTL 0x0000 #define NFC_REG_ST 0x0004 @@ -155,7 +154,7 @@ /* define bit use in NFC_ECC_ST */ #define NFC_ECC_ERR(x) BIT(x) #define NFC_ECC_PAT_FOUND(x) BIT(x + 16) -#define NFC_ECC_ERR_CNT(b, x) (((x) >> ((b) * 8)) & 0xff) +#define NFC_ECC_ERR_CNT(b, x) (((x) >> (((b) % 4) * 8)) & 0xff) #define NFC_DEFAULT_TIMEOUT_MS 1000 @@ -212,12 +211,9 @@ struct sunxi_nand_chip_sel { * sunxi HW ECC infos: stores information related to HW ECC support * * @mode: the sunxi ECC mode field deduced from ECC requirements - * @layout: the OOB layout depending on the ECC requirements and the - * selected ECC mode */ struct sunxi_nand_hw_ecc { int mode; - struct nand_ecclayout layout; }; /* @@ -239,6 +235,10 @@ struct sunxi_nand_chip { u32 timing_cfg; u32 timing_ctl; int selected; + int addr_cycles; + u32 addr[2]; + int cmd_cycles; + u8 cmd[2]; int nsels; struct sunxi_nand_chip_sel sels[0]; }; @@ -298,54 +298,71 @@ static irqreturn_t sunxi_nfc_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static int sunxi_nfc_wait_int(struct sunxi_nfc *nfc, u32 flags, - unsigned int timeout_ms) +static int sunxi_nfc_wait_events(struct sunxi_nfc *nfc, u32 events, + bool use_polling, unsigned int timeout_ms) { - init_completion(&nfc->complete); + int ret; - writel(flags, nfc->regs + NFC_REG_INT); + if (events & ~NFC_INT_MASK) + return -EINVAL; if (!timeout_ms) timeout_ms = NFC_DEFAULT_TIMEOUT_MS; - if (!wait_for_completion_timeout(&nfc->complete, - msecs_to_jiffies(timeout_ms))) { - dev_err(nfc->dev, "wait interrupt timedout\n"); - return -ETIMEDOUT; + if (!use_polling) { + init_completion(&nfc->complete); + + writel(events, nfc->regs + NFC_REG_INT); + + ret = wait_for_completion_timeout(&nfc->complete, + msecs_to_jiffies(timeout_ms)); + + writel(0, nfc->regs + NFC_REG_INT); + } else { + u32 status; + + ret = readl_poll_timeout(nfc->regs + NFC_REG_ST, status, + (status & events) == events, 1, + timeout_ms * 1000); } - return 0; + writel(events & NFC_INT_MASK, nfc->regs + NFC_REG_ST); + + if (ret) + dev_err(nfc->dev, "wait interrupt timedout\n"); + + return ret; } static int sunxi_nfc_wait_cmd_fifo_empty(struct sunxi_nfc *nfc) { - unsigned long timeout = jiffies + - 
msecs_to_jiffies(NFC_DEFAULT_TIMEOUT_MS); + u32 status; + int ret; - do { - if (!(readl(nfc->regs + NFC_REG_ST) & NFC_CMD_FIFO_STATUS)) - return 0; - } while (time_before(jiffies, timeout)); + ret = readl_poll_timeout(nfc->regs + NFC_REG_ST, status, + !(status & NFC_CMD_FIFO_STATUS), 1, + NFC_DEFAULT_TIMEOUT_MS * 1000); + if (ret) + dev_err(nfc->dev, "wait for empty cmd FIFO timedout\n"); - dev_err(nfc->dev, "wait for empty cmd FIFO timedout\n"); - return -ETIMEDOUT; + return ret; } static int sunxi_nfc_rst(struct sunxi_nfc *nfc) { - unsigned long timeout = jiffies + - msecs_to_jiffies(NFC_DEFAULT_TIMEOUT_MS); + u32 ctl; + int ret; writel(0, nfc->regs + NFC_REG_ECC_CTL); writel(NFC_RESET, nfc->regs + NFC_REG_CTL); - do { - if (!(readl(nfc->regs + NFC_REG_CTL) & NFC_RESET)) - return 0; - } while (time_before(jiffies, timeout)); + ret = readl_poll_timeout(nfc->regs + NFC_REG_CTL, ctl, + !(ctl & NFC_RESET), 1, + NFC_DEFAULT_TIMEOUT_MS * 1000); + if (ret) + dev_err(nfc->dev, "wait for NAND controller reset timedout\n"); - dev_err(nfc->dev, "wait for NAND controller reset timedout\n"); - return -ETIMEDOUT; + return ret; } static int sunxi_nfc_dev_ready(struct mtd_info *mtd) @@ -354,7 +371,6 @@ static int sunxi_nfc_dev_ready(struct mtd_info *mtd) struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller); struct sunxi_nand_rb *rb; - unsigned long timeo = (sunxi_nand->nand.state == FL_ERASING ? 400 : 20); int ret; if (sunxi_nand->selected < 0) @@ -366,12 +382,6 @@ static int sunxi_nfc_dev_ready(struct mtd_info *mtd) case RB_NATIVE: ret = !!(readl(nfc->regs + NFC_REG_ST) & NFC_RB_STATE(rb->info.nativeid)); - if (ret) - break; - - sunxi_nfc_wait_int(nfc, NFC_RB_B2R, timeo); - ret = !!(readl(nfc->regs + NFC_REG_ST) & - NFC_RB_STATE(rb->info.nativeid)); break; case RB_GPIO: ret = gpio_get_value(rb->info.gpio); @@ -407,7 +417,7 @@ static void sunxi_nfc_select_chip(struct mtd_info *mtd, int chip) sel = &sunxi_nand->sels[chip]; ctl |= NFC_CE_SEL(sel->cs) | NFC_EN | - NFC_PAGE_SHIFT(nand->page_shift - 10); + NFC_PAGE_SHIFT(nand->page_shift); if (sel->rb.type == RB_NONE) { nand->dev_ready = NULL; } else { @@ -452,7 +462,7 @@ static void sunxi_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD; writel(tmp, nfc->regs + NFC_REG_CMD); - ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0); + ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0); if (ret) break; @@ -487,7 +497,7 @@ static void sunxi_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, NFC_ACCESS_DIR; writel(tmp, nfc->regs + NFC_REG_CMD); - ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0); + ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0); if (ret) break; @@ -511,32 +521,54 @@ static void sunxi_nfc_cmd_ctrl(struct mtd_info *mtd, int dat, struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller); int ret; - u32 tmp; ret = sunxi_nfc_wait_cmd_fifo_empty(nfc); if (ret) return; - if (ctrl & NAND_CTRL_CHANGE) { - tmp = readl(nfc->regs + NFC_REG_CTL); - if (ctrl & NAND_NCE) - tmp |= NFC_CE_CTL; - else - tmp &= ~NFC_CE_CTL; - writel(tmp, nfc->regs + NFC_REG_CTL); - } + if (dat == NAND_CMD_NONE && (ctrl & NAND_NCE) && + !(ctrl & (NAND_CLE | NAND_ALE))) { + u32 cmd = 0; - if (dat == NAND_CMD_NONE) - return; + if (!sunxi_nand->addr_cycles && !sunxi_nand->cmd_cycles) + return; - if (ctrl & NAND_CLE) { - writel(NFC_SEND_CMD1 | dat, nfc->regs + NFC_REG_CMD); 
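The sunxi_nfc_wait_cmd_fifo_empty() and sunxi_nfc_rst() changes above follow a common cleanup: an open-coded jiffies deadline loop is collapsed into a single readl_poll_timeout() call from the newly included <linux/iopoll.h>. A stand-alone sketch of the same conversion, with hypothetical EXAMPLE_* register definitions:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/printk.h>

#define EXAMPLE_STATUS_REG	0x04
#define EXAMPLE_BUSY		BIT(0)

static int example_wait_ready(void __iomem *regs, unsigned int timeout_ms)
{
	u32 status;
	int ret;

	/* Re-read the register every 1us until BUSY clears or we time out. */
	ret = readl_poll_timeout(regs + EXAMPLE_STATUS_REG, status,
				 !(status & EXAMPLE_BUSY), 1,
				 timeout_ms * 1000);
	if (ret)
		pr_err("timed out waiting for controller idle\n");

	return ret;
}

readl_poll_timeout() may sleep between reads, so it is only valid in process context; the readl_poll_timeout_atomic() variant spins instead and is the one to use on atomic paths.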
- } else { - writel(dat, nfc->regs + NFC_REG_ADDR_LOW); - writel(NFC_SEND_ADR, nfc->regs + NFC_REG_CMD); + if (sunxi_nand->cmd_cycles--) + cmd |= NFC_SEND_CMD1 | sunxi_nand->cmd[0]; + + if (sunxi_nand->cmd_cycles--) { + cmd |= NFC_SEND_CMD2; + writel(sunxi_nand->cmd[1], + nfc->regs + NFC_REG_RCMD_SET); + } + + sunxi_nand->cmd_cycles = 0; + + if (sunxi_nand->addr_cycles) { + cmd |= NFC_SEND_ADR | + NFC_ADR_NUM(sunxi_nand->addr_cycles); + writel(sunxi_nand->addr[0], + nfc->regs + NFC_REG_ADDR_LOW); + } + + if (sunxi_nand->addr_cycles > 4) + writel(sunxi_nand->addr[1], + nfc->regs + NFC_REG_ADDR_HIGH); + + writel(cmd, nfc->regs + NFC_REG_CMD); + sunxi_nand->addr[0] = 0; + sunxi_nand->addr[1] = 0; + sunxi_nand->addr_cycles = 0; + sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0); } - sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0); + if (ctrl & NAND_CLE) { + sunxi_nand->cmd[sunxi_nand->cmd_cycles++] = dat; + } else if (ctrl & NAND_ALE) { + sunxi_nand->addr[sunxi_nand->addr_cycles / 4] |= + dat << ((sunxi_nand->addr_cycles % 4) * 8); + sunxi_nand->addr_cycles++; + } } /* These seed values have been extracted from Allwinner's BSP */ @@ -717,7 +749,8 @@ static void sunxi_nfc_hw_ecc_enable(struct mtd_info *mtd) ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL); ecc_ctl &= ~(NFC_ECC_MODE_MSK | NFC_ECC_PIPELINE | NFC_ECC_BLOCK_SIZE_MSK); - ecc_ctl |= NFC_ECC_EN | NFC_ECC_MODE(data->mode) | NFC_ECC_EXCEPTION; + ecc_ctl |= NFC_ECC_EN | NFC_ECC_MODE(data->mode) | NFC_ECC_EXCEPTION | + NFC_ECC_PIPELINE; writel(ecc_ctl, nfc->regs + NFC_REG_ECC_CTL); } @@ -739,18 +772,106 @@ static inline void sunxi_nfc_user_data_to_buf(u32 user_data, u8 *buf) buf[3] = user_data >> 24; } +static inline u32 sunxi_nfc_buf_to_user_data(const u8 *buf) +{ + return buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24); +} + +static void sunxi_nfc_hw_ecc_get_prot_oob_bytes(struct mtd_info *mtd, u8 *oob, + int step, bool bbm, int page) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); + + sunxi_nfc_user_data_to_buf(readl(nfc->regs + NFC_REG_USER_DATA(step)), + oob); + + /* De-randomize the Bad Block Marker. */ + if (bbm && (nand->options & NAND_NEED_SCRAMBLING)) + sunxi_nfc_randomize_bbm(mtd, page, oob); +} + +static void sunxi_nfc_hw_ecc_set_prot_oob_bytes(struct mtd_info *mtd, + const u8 *oob, int step, + bool bbm, int page) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); + u8 user_data[4]; + + /* Randomize the Bad Block Marker. 
*/ + if (bbm && (nand->options & NAND_NEED_SCRAMBLING)) { + memcpy(user_data, oob, sizeof(user_data)); + sunxi_nfc_randomize_bbm(mtd, page, user_data); + oob = user_data; + } + + writel(sunxi_nfc_buf_to_user_data(oob), + nfc->regs + NFC_REG_USER_DATA(step)); +} + +static void sunxi_nfc_hw_ecc_update_stats(struct mtd_info *mtd, + unsigned int *max_bitflips, int ret) +{ + if (ret < 0) { + mtd->ecc_stats.failed++; + } else { + mtd->ecc_stats.corrected += ret; + *max_bitflips = max_t(unsigned int, *max_bitflips, ret); + } +} + +static int sunxi_nfc_hw_ecc_correct(struct mtd_info *mtd, u8 *data, u8 *oob, + int step, bool *erased) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); + struct nand_ecc_ctrl *ecc = &nand->ecc; + u32 status, tmp; + + *erased = false; + + status = readl(nfc->regs + NFC_REG_ECC_ST); + + if (status & NFC_ECC_ERR(step)) + return -EBADMSG; + + if (status & NFC_ECC_PAT_FOUND(step)) { + u8 pattern; + + if (unlikely(!(readl(nfc->regs + NFC_REG_PAT_ID) & 0x1))) { + pattern = 0x0; + } else { + pattern = 0xff; + *erased = true; + } + + if (data) + memset(data, pattern, ecc->size); + + if (oob) + memset(oob, pattern, ecc->bytes + 4); + + return 0; + } + + tmp = readl(nfc->regs + NFC_REG_ECC_ERR_CNT(step)); + + return NFC_ECC_ERR_CNT(step, tmp); +} + static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd, u8 *data, int data_off, u8 *oob, int oob_off, int *cur_off, unsigned int *max_bitflips, - bool bbm, int page) + bool bbm, bool oob_required, int page) { struct nand_chip *nand = mtd_to_nand(mtd); struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); struct nand_ecc_ctrl *ecc = &nand->ecc; int raw_mode = 0; - u32 status; + bool erased; int ret; if (*cur_off != data_off) @@ -769,34 +890,19 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd, writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP, nfc->regs + NFC_REG_CMD); - ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0); + ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0); sunxi_nfc_randomizer_disable(mtd); if (ret) return ret; *cur_off = oob_off + ecc->bytes + 4; - status = readl(nfc->regs + NFC_REG_ECC_ST); - if (status & NFC_ECC_PAT_FOUND(0)) { - u8 pattern = 0xff; - - if (unlikely(!(readl(nfc->regs + NFC_REG_PAT_ID) & 0x1))) - pattern = 0x0; - - memset(data, pattern, ecc->size); - memset(oob, pattern, ecc->bytes + 4); - + ret = sunxi_nfc_hw_ecc_correct(mtd, data, oob_required ? oob : NULL, 0, + &erased); + if (erased) return 1; - } - - ret = NFC_ECC_ERR_CNT(0, readl(nfc->regs + NFC_REG_ECC_ERR_CNT(0))); - - memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size); - - nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1); - sunxi_nfc_randomizer_read_buf(mtd, oob, ecc->bytes + 4, true, page); - if (status & NFC_ECC_ERR(0)) { + if (ret < 0) { /* * Re-read the data with the randomizer disabled to identify * bitflips in erased pages. 
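The refactored sunxi_nfc_hw_ecc_correct() above returns -EBADMSG for chunks the ECC engine cannot fix, and its caller then re-reads the raw data to tell genuinely corrupt pages apart from erased (all-0xff) pages that merely contain a few bitflips. The classification is done by the core helper nand_check_erased_ecc_chunk(); a condensed sketch of the fallback, assuming the same 4 protected user-data bytes per chunk as this controller (the example_ function name is illustrative):

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>

static int example_handle_uncorrectable(struct mtd_info *mtd,
					struct nand_chip *chip,
					u8 *data, u8 *oob)
{
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ret;

	/*
	 * Returns the number of bitflips if the chunk matches an erased
	 * page within ecc->strength flipped bits, -EBADMSG otherwise.
	 */
	ret = nand_check_erased_ecc_chunk(data, ecc->size,
					  oob, ecc->bytes + 4,
					  NULL, 0, ecc->strength);
	if (ret < 0)
		mtd->ecc_stats.failed++;
	else
		mtd->ecc_stats.corrected += ret;

	return ret;
}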
@@ -804,35 +910,34 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd, if (nand->options & NAND_NEED_SCRAMBLING) { nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1); nand->read_buf(mtd, data, ecc->size); - nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1); - nand->read_buf(mtd, oob, ecc->bytes + 4); + } else { + memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, + ecc->size); } + nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1); + nand->read_buf(mtd, oob, ecc->bytes + 4); + ret = nand_check_erased_ecc_chunk(data, ecc->size, oob, ecc->bytes + 4, NULL, 0, ecc->strength); if (ret >= 0) raw_mode = 1; } else { - /* - * The engine protects 4 bytes of OOB data per chunk. - * Retrieve the corrected OOB bytes. - */ - sunxi_nfc_user_data_to_buf(readl(nfc->regs + NFC_REG_USER_DATA(0)), - oob); + memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size); - /* De-randomize the Bad Block Marker. */ - if (bbm && nand->options & NAND_NEED_SCRAMBLING) - sunxi_nfc_randomize_bbm(mtd, page, oob); - } + if (oob_required) { + nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1); + sunxi_nfc_randomizer_read_buf(mtd, oob, ecc->bytes + 4, + true, page); - if (ret < 0) { - mtd->ecc_stats.failed++; - } else { - mtd->ecc_stats.corrected += ret; - *max_bitflips = max_t(unsigned int, *max_bitflips, ret); + sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, 0, + bbm, page); + } } + sunxi_nfc_hw_ecc_update_stats(mtd, max_bitflips, ret); + return raw_mode; } @@ -848,7 +953,7 @@ static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd, if (len <= 0) return; - if (*cur_off != offset) + if (!cur_off || *cur_off != offset) nand->cmdfunc(mtd, NAND_CMD_RNDOUT, offset + mtd->writesize, -1); @@ -858,12 +963,8 @@ static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd, sunxi_nfc_randomizer_read_buf(mtd, oob + offset, len, false, page); - *cur_off = mtd->oobsize + mtd->writesize; -} - -static inline u32 sunxi_nfc_buf_to_user_data(const u8 *buf) -{ - return buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24); + if (cur_off) + *cur_off = mtd->oobsize + mtd->writesize; } static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd, @@ -882,19 +983,6 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd, sunxi_nfc_randomizer_write_buf(mtd, data, ecc->size, false, page); - /* Fill OOB data in */ - if ((nand->options & NAND_NEED_SCRAMBLING) && bbm) { - u8 user_data[4]; - - memcpy(user_data, oob, 4); - sunxi_nfc_randomize_bbm(mtd, page, user_data); - writel(sunxi_nfc_buf_to_user_data(user_data), - nfc->regs + NFC_REG_USER_DATA(0)); - } else { - writel(sunxi_nfc_buf_to_user_data(oob), - nfc->regs + NFC_REG_USER_DATA(0)); - } - if (data_off + ecc->size != oob_off) nand->cmdfunc(mtd, NAND_CMD_RNDIN, oob_off, -1); @@ -903,11 +991,13 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd, return ret; sunxi_nfc_randomizer_enable(mtd); + sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, 0, bbm, page); + writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ACCESS_DIR | NFC_ECC_OP, nfc->regs + NFC_REG_CMD); - ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0); + ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0); sunxi_nfc_randomizer_disable(mtd); if (ret) return ret; @@ -929,13 +1019,14 @@ static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd, if (len <= 0) return; - if (*cur_off != offset) + if (!cur_off || *cur_off != offset) nand->cmdfunc(mtd, NAND_CMD_RNDIN, offset + mtd->writesize, -1); sunxi_nfc_randomizer_write_buf(mtd, oob + offset, len, false, page); - *cur_off = mtd->oobsize + 
mtd->writesize; + if (cur_off) + *cur_off = mtd->oobsize + mtd->writesize; } static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd, @@ -958,7 +1049,7 @@ static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd, ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob, oob_off + mtd->writesize, &cur_off, &max_bitflips, - !i, page); + !i, oob_required, page); if (ret < 0) return ret; else if (ret) @@ -974,6 +1065,39 @@ static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd, return max_bitflips; } +static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd, + struct nand_chip *chip, + u32 data_offs, u32 readlen, + u8 *bufpoi, int page) +{ + struct nand_ecc_ctrl *ecc = &chip->ecc; + int ret, i, cur_off = 0; + unsigned int max_bitflips = 0; + + sunxi_nfc_hw_ecc_enable(mtd); + + chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); + for (i = data_offs / ecc->size; + i < DIV_ROUND_UP(data_offs + readlen, ecc->size); i++) { + int data_off = i * ecc->size; + int oob_off = i * (ecc->bytes + 4); + u8 *data = bufpoi + data_off; + u8 *oob = chip->oob_poi + oob_off; + + ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, + oob, + oob_off + mtd->writesize, + &cur_off, &max_bitflips, !i, + false, page); + if (ret < 0) + return ret; + } + + sunxi_nfc_hw_ecc_disable(mtd); + + return max_bitflips; +} + static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf, int oob_required, @@ -1026,7 +1150,9 @@ static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd, ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob, oob_off, &cur_off, - &max_bitflips, !i, page); + &max_bitflips, !i, + oob_required, + page); if (ret < 0) return ret; else if (ret) @@ -1074,6 +1200,40 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd, return 0; } +static int sunxi_nfc_hw_common_ecc_read_oob(struct mtd_info *mtd, + struct nand_chip *chip, + int page) +{ + chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); + + chip->pagebuf = -1; + + return chip->ecc.read_page(mtd, chip, chip->buffers->databuf, 1, page); +} + +static int sunxi_nfc_hw_common_ecc_write_oob(struct mtd_info *mtd, + struct nand_chip *chip, + int page) +{ + int ret, status; + + chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page); + + chip->pagebuf = -1; + + memset(chip->buffers->databuf, 0xff, mtd->writesize); + ret = chip->ecc.write_page(mtd, chip, chip->buffers->databuf, 1, page); + if (ret) + return ret; + + /* Send command to program the OOB data */ + chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); + + status = chip->waitfunc(mtd, chip); + + return status & NAND_STATUS_FAIL ? 
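
sunxi_nfc_hw_ecc_read_subpage() above only runs the ECC pipeline for the steps overlapping the requested byte range. A worked example of the chunk-index arithmetic, assuming ecc->size = 1024:

        /* a 100-byte read at offset 2000 touches ECC steps 1 and 2 */
        int i, first = data_offs / ecc->size;                    /* 2000 / 1024 = 1 */
        int last = DIV_ROUND_UP(data_offs + readlen, ecc->size); /* 2100 -> 3, exclusive */

        for (i = first; i < last; i++)
                ; /* decode ECC step i only */
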
-EIO : 0; +} + static const s32 tWB_lut[] = {6, 12, 16, 20}; static const s32 tRHW_lut[] = {4, 8, 12, 20}; @@ -1101,6 +1261,7 @@ static int sunxi_nand_chip_set_timings(struct sunxi_nand_chip *chip, struct sunxi_nfc *nfc = to_sunxi_nfc(chip->nand.controller); u32 min_clk_period = 0; s32 tWB, tADL, tWHR, tRHW, tCAD; + long real_clk_rate; /* T1 <=> tCLS */ if (timings->tCLS_min > min_clk_period) @@ -1163,6 +1324,18 @@ static int sunxi_nand_chip_set_timings(struct sunxi_nand_chip *chip, min_clk_period = DIV_ROUND_UP(timings->tWC_min, 2); /* T16 - T19 + tCAD */ + if (timings->tWB_max > (min_clk_period * 20)) + min_clk_period = DIV_ROUND_UP(timings->tWB_max, 20); + + if (timings->tADL_min > (min_clk_period * 32)) + min_clk_period = DIV_ROUND_UP(timings->tADL_min, 32); + + if (timings->tWHR_min > (min_clk_period * 32)) + min_clk_period = DIV_ROUND_UP(timings->tWHR_min, 32); + + if (timings->tRHW_min > (min_clk_period * 20)) + min_clk_period = DIV_ROUND_UP(timings->tRHW_min, 20); + tWB = sunxi_nand_lookup_timing(tWB_lut, timings->tWB_max, min_clk_period); if (tWB < 0) { @@ -1198,23 +1371,26 @@ static int sunxi_nand_chip_set_timings(struct sunxi_nand_chip *chip, /* TODO: A83 has some more bits for CDQSS, CS, CLHZ, CCS, WC */ chip->timing_cfg = NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD); - /* - * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data - * output cycle timings shall be used if the host drives tRC less than - * 30 ns. - */ - chip->timing_ctl = (timings->tRC_min < 30000) ? NFC_TIMING_CTL_EDO : 0; - /* Convert min_clk_period from picoseconds to nanoseconds */ min_clk_period = DIV_ROUND_UP(min_clk_period, 1000); /* - * Convert min_clk_period into a clk frequency, then get the - * appropriate rate for the NAND controller IP given this formula - * (specified in the datasheet): - * nand clk_rate = 2 * min_clk_rate + * Unlike what is stated in Allwinner datasheet, the clk_rate should + * be set to (1 / min_clk_period), and not (2 / min_clk_period). + * This new formula was verified with a scope and validated by + * Allwinner engineers. */ - chip->clk_rate = (2 * NSEC_PER_SEC) / min_clk_period; + chip->clk_rate = NSEC_PER_SEC / min_clk_period; + real_clk_rate = clk_round_rate(nfc->mod_clk, chip->clk_rate); + + /* + * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data + * output cycle timings shall be used if the host drives tRC less than + * 30 ns. + */ + min_clk_period = NSEC_PER_SEC / real_clk_rate; + chip->timing_ctl = ((min_clk_period * 2) < 30) ? + NFC_TIMING_CTL_EDO : 0; return 0; } @@ -1257,6 +1433,57 @@ static int sunxi_nand_chip_init_timings(struct sunxi_nand_chip *chip, return sunxi_nand_chip_set_timings(chip, timings); } +static int sunxi_nand_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + struct nand_ecc_ctrl *ecc = &nand->ecc; + + if (section >= ecc->steps) + return -ERANGE; + + oobregion->offset = section * (ecc->bytes + 4) + 4; + oobregion->length = ecc->bytes; + + return 0; +} + +static int sunxi_nand_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + struct nand_ecc_ctrl *ecc = &nand->ecc; + + if (section > ecc->steps) + return -ERANGE; + + /* + * The first 2 bytes are used for BB markers, hence we + * only have 2 bytes available in the first user data + * section. 
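
The timing rework above no longer trusts the datasheet's clk_rate = 2 / min_clk_period formula, and it derives the EDO decision from the rate the clock framework will actually grant rather than from the raw tRC_min. The same logic, annotated:

        /* min_clk_period was accumulated in ps; convert to ns */
        min_clk_period = DIV_ROUND_UP(min_clk_period, 1000);
        chip->clk_rate = NSEC_PER_SEC / min_clk_period;

        /* the granted rate may be lower than the requested one */
        real_clk_rate = clk_round_rate(nfc->mod_clk, chip->clk_rate);

        /* tRC spans two clock periods; ONFI 3.1 wants EDO below 30 ns */
        min_clk_period = NSEC_PER_SEC / real_clk_rate;
        chip->timing_ctl = (min_clk_period * 2 < 30) ? NFC_TIMING_CTL_EDO : 0;
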
+ */ + if (!section && ecc->mode == NAND_ECC_HW) { + oobregion->offset = 2; + oobregion->length = 2; + + return 0; + } + + oobregion->offset = section * (ecc->bytes + 4); + + if (section < ecc->steps) + oobregion->length = 4; + else + oobregion->offset = mtd->oobsize - oobregion->offset; + + return 0; +} + +static const struct mtd_ooblayout_ops sunxi_nand_ooblayout_ops = { + .ecc = sunxi_nand_ooblayout_ecc, + .free = sunxi_nand_ooblayout_free, +}; + static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc, struct device_node *np) @@ -1266,7 +1493,6 @@ static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd, struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller); struct sunxi_nand_hw_ecc *data; - struct nand_ecclayout *layout; int nsectors; int ret; int i; @@ -1295,7 +1521,6 @@ static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd, /* HW ECC always work with even numbers of ECC bytes */ ecc->bytes = ALIGN(ecc->bytes, 2); - layout = &data->layout; nsectors = mtd->writesize / ecc->size; if (mtd->oobsize < ((ecc->bytes + 4) * nsectors)) { @@ -1303,9 +1528,9 @@ static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd, goto err; } - layout->eccbytes = (ecc->bytes * nsectors); - - ecc->layout = layout; + ecc->read_oob = sunxi_nfc_hw_common_ecc_read_oob; + ecc->write_oob = sunxi_nfc_hw_common_ecc_write_oob; + mtd_set_ooblayout(mtd, &sunxi_nand_ooblayout_ops); ecc->priv = data; return 0; @@ -1325,9 +1550,6 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc, struct device_node *np) { - struct nand_ecclayout *layout; - int nsectors; - int i, j; int ret; ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np); @@ -1336,40 +1558,9 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct mtd_info *mtd, ecc->read_page = sunxi_nfc_hw_ecc_read_page; ecc->write_page = sunxi_nfc_hw_ecc_write_page; - layout = ecc->layout; - nsectors = mtd->writesize / ecc->size; - - for (i = 0; i < nsectors; i++) { - if (i) { - layout->oobfree[i].offset = - layout->oobfree[i - 1].offset + - layout->oobfree[i - 1].length + - ecc->bytes; - layout->oobfree[i].length = 4; - } else { - /* - * The first 2 bytes are used for BB markers, hence we - * only have 2 bytes available in the first user data - * section. 
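
With mtd_set_ooblayout() in place, nothing stores an eccpos[] table any more: consumers ask for ECC and free regions one section at a time until the callbacks return -ERANGE. A sketch of how the free space is enumerated (this is essentially what mtd_ooblayout_count_freebytes() does to derive mtd->oobavail):

        struct mtd_oob_region region;
        int section = 0, freebytes = 0;

        while (!mtd_ooblayout_free(mtd, section++, &region))
                freebytes += region.length;
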
- */ - layout->oobfree[i].length = 2; - layout->oobfree[i].offset = 2; - } - - for (j = 0; j < ecc->bytes; j++) - layout->eccpos[(ecc->bytes * i) + j] = - layout->oobfree[i].offset + - layout->oobfree[i].length + j; - } - - if (mtd->oobsize > (ecc->bytes + 4) * nsectors) { - layout->oobfree[nsectors].offset = - layout->oobfree[nsectors - 1].offset + - layout->oobfree[nsectors - 1].length + - ecc->bytes; - layout->oobfree[nsectors].length = mtd->oobsize - - ((ecc->bytes + 4) * nsectors); - } + ecc->read_oob_raw = nand_read_oob_std; + ecc->write_oob_raw = nand_write_oob_std; + ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage; return 0; } @@ -1378,9 +1569,6 @@ static int sunxi_nand_hw_syndrome_ecc_ctrl_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc, struct device_node *np) { - struct nand_ecclayout *layout; - int nsectors; - int i; int ret; ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np); @@ -1390,15 +1578,8 @@ static int sunxi_nand_hw_syndrome_ecc_ctrl_init(struct mtd_info *mtd, ecc->prepad = 4; ecc->read_page = sunxi_nfc_hw_syndrome_ecc_read_page; ecc->write_page = sunxi_nfc_hw_syndrome_ecc_write_page; - - layout = ecc->layout; - nsectors = mtd->writesize / ecc->size; - - for (i = 0; i < (ecc->bytes * nsectors); i++) - layout->eccpos[i] = i; - - layout->oobfree[0].length = mtd->oobsize - i; - layout->oobfree[0].offset = i; + ecc->read_oob_raw = nand_read_oob_syndrome; + ecc->write_oob_raw = nand_write_oob_syndrome; return 0; } @@ -1411,7 +1592,6 @@ static void sunxi_nand_ecc_cleanup(struct nand_ecc_ctrl *ecc) sunxi_nand_hw_common_ecc_ctrl_cleanup(ecc); break; case NAND_ECC_NONE: - kfree(ecc->layout); default: break; } @@ -1432,8 +1612,6 @@ static int sunxi_nand_ecc_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc, return -EINVAL; switch (ecc->mode) { - case NAND_ECC_SOFT_BCH: - break; case NAND_ECC_HW: ret = sunxi_nand_hw_ecc_ctrl_init(mtd, ecc, np); if (ret) @@ -1445,10 +1623,6 @@ static int sunxi_nand_ecc_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc, return ret; break; case NAND_ECC_NONE: - ecc->layout = kzalloc(sizeof(*ecc->layout), GFP_KERNEL); - if (!ecc->layout) - return -ENOMEM; - ecc->layout->oobfree[0].length = mtd->oobsize; case NAND_ECC_SOFT: break; default: @@ -1536,21 +1710,6 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc, } } - timings = onfi_async_timing_mode_to_sdr_timings(0); - if (IS_ERR(timings)) { - ret = PTR_ERR(timings); - dev_err(dev, - "could not retrieve timings for ONFI mode 0: %d\n", - ret); - return ret; - } - - ret = sunxi_nand_chip_set_timings(chip, timings); - if (ret) { - dev_err(dev, "could not configure chip timings: %d\n", ret); - return ret; - } - nand = &chip->nand; /* Default tR value specified in the ONFI spec (chapter 4.15.1) */ nand->chip_delay = 200; @@ -1570,6 +1729,21 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc, mtd = nand_to_mtd(nand); mtd->dev.parent = dev; + timings = onfi_async_timing_mode_to_sdr_timings(0); + if (IS_ERR(timings)) { + ret = PTR_ERR(timings); + dev_err(dev, + "could not retrieve timings for ONFI mode 0: %d\n", + ret); + return ret; + } + + ret = sunxi_nand_chip_set_timings(chip, timings); + if (ret) { + dev_err(dev, "could not configure chip timings: %d\n", ret); + return ret; + } + ret = nand_scan_ident(mtd, nsels, NULL); if (ret) return ret; @@ -1580,6 +1754,8 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc, if (nand->options & NAND_NEED_SCRAMBLING) nand->options |= NAND_NO_SUBPAGE_WRITE; + nand->options |= 
NAND_SUBPAGE_READ; + ret = sunxi_nand_chip_init_timings(chip, np); if (ret) { dev_err(dev, "could not configure chip timings: %d\n", ret); @@ -1728,6 +1904,8 @@ static int sunxi_nfc_remove(struct platform_device *pdev) struct sunxi_nfc *nfc = platform_get_drvdata(pdev); sunxi_nand_chips_cleanup(nfc); + clk_disable_unprepare(nfc->mod_clk); + clk_disable_unprepare(nfc->ahb_clk); return 0; } diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c index 293feb19b0b1..3ad514c44dcb 100644 --- a/drivers/mtd/nand/vf610_nfc.c +++ b/drivers/mtd/nand/vf610_nfc.c @@ -33,7 +33,6 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> -#include <linux/of_mtd.h> #include <linux/of_device.h> #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> @@ -175,34 +174,6 @@ static inline struct vf610_nfc *mtd_to_nfc(struct mtd_info *mtd) return container_of(mtd_to_nand(mtd), struct vf610_nfc, chip); } -static struct nand_ecclayout vf610_nfc_ecc45 = { - .eccbytes = 45, - .eccpos = {19, 20, 21, 22, 23, - 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, - 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, - 56, 57, 58, 59, 60, 61, 62, 63}, - .oobfree = { - {.offset = 2, - .length = 17} } -}; - -static struct nand_ecclayout vf610_nfc_ecc60 = { - .eccbytes = 60, - .eccpos = { 4, 5, 6, 7, 8, 9, 10, 11, - 12, 13, 14, 15, 16, 17, 18, 19, - 20, 21, 22, 23, 24, 25, 26, 27, - 28, 29, 30, 31, 32, 33, 34, 35, - 36, 37, 38, 39, 40, 41, 42, 43, - 44, 45, 46, 47, 48, 49, 50, 51, - 52, 53, 54, 55, 56, 57, 58, 59, - 60, 61, 62, 63 }, - .oobfree = { - {.offset = 2, - .length = 2} } -}; - static inline u32 vf610_nfc_read(struct vf610_nfc *nfc, uint reg) { return readl(nfc->regs + reg); @@ -781,14 +752,16 @@ static int vf610_nfc_probe(struct platform_device *pdev) if (mtd->oobsize > 64) mtd->oobsize = 64; + /* + * mtd->ecclayout is not specified here because we're using the + * default large page ECC layout defined in NAND core. 
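
The removed vf610 tables are regular enough that the NAND core's default large-page layout can describe them. For comparison, the 45-byte table (ECC at offsets 19-63, free bytes at 2-18) collapses to a single-section callback; vf610_ooblayout_ecc45 below is a hypothetical name used only to show the equivalence:

        static int vf610_ooblayout_ecc45(struct mtd_info *mtd, int section,
                                         struct mtd_oob_region *oobregion)
        {
                if (section)
                        return -ERANGE;

                oobregion->offset = 19; /* old eccpos: 19..63 */
                oobregion->length = 45;
                return 0;
        }
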
+ */ if (chip->ecc.strength == 32) { nfc->ecc_mode = ECC_60_BYTE; chip->ecc.bytes = 60; - chip->ecc.layout = &vf610_nfc_ecc60; } else if (chip->ecc.strength == 24) { nfc->ecc_mode = ECC_45_BYTE; chip->ecc.bytes = 45; - chip->ecc.layout = &vf610_nfc_ecc45; } else { dev_err(nfc->dev, "Unsupported ECC strength\n"); err = -ENXIO; diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c index af28bb3ae7cf..a4b029a417f0 100644 --- a/drivers/mtd/onenand/onenand_base.c +++ b/drivers/mtd/onenand/onenand_base.c @@ -68,21 +68,33 @@ MODULE_PARM_DESC(otp, "Corresponding behaviour of OneNAND in OTP" * flexonenand_oob_128 - oob info for Flex-Onenand with 4KB page * For now, we expose only 64 out of 80 ecc bytes */ -static struct nand_ecclayout flexonenand_oob_128 = { - .eccbytes = 64, - .eccpos = { - 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, - 102, 103, 104, 105 - }, - .oobfree = { - {2, 4}, {18, 4}, {34, 4}, {50, 4}, - {66, 4}, {82, 4}, {98, 4}, {114, 4} - } +static int flexonenand_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + if (section > 7) + return -ERANGE; + + oobregion->offset = (section * 16) + 6; + oobregion->length = 10; + + return 0; +} + +static int flexonenand_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + if (section > 7) + return -ERANGE; + + oobregion->offset = (section * 16) + 2; + oobregion->length = 4; + + return 0; +} + +static const struct mtd_ooblayout_ops flexonenand_ooblayout_ops = { + .ecc = flexonenand_ooblayout_ecc, + .free = flexonenand_ooblayout_free, }; /* @@ -91,56 +103,77 @@ static struct nand_ecclayout flexonenand_oob_128 = { * Based on specification: * 4Gb M-die OneNAND Flash (KFM4G16Q4M, KFN8G16Q4M). Rev. 1.3, Apr. 
2010 * - * For eccpos we expose only 64 bytes out of 72 (see struct nand_ecclayout) - * - * oobfree uses the spare area fields marked as - * "Managed by internal ECC logic for Logical Sector Number area" */ -static struct nand_ecclayout onenand_oob_128 = { - .eccbytes = 64, - .eccpos = { - 7, 8, 9, 10, 11, 12, 13, 14, 15, - 23, 24, 25, 26, 27, 28, 29, 30, 31, - 39, 40, 41, 42, 43, 44, 45, 46, 47, - 55, 56, 57, 58, 59, 60, 61, 62, 63, - 71, 72, 73, 74, 75, 76, 77, 78, 79, - 87, 88, 89, 90, 91, 92, 93, 94, 95, - 103, 104, 105, 106, 107, 108, 109, 110, 111, - 119 - }, - .oobfree = { - {2, 3}, {18, 3}, {34, 3}, {50, 3}, - {66, 3}, {82, 3}, {98, 3}, {114, 3} - } +static int onenand_ooblayout_128_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + if (section > 7) + return -ERANGE; + + oobregion->offset = (section * 16) + 7; + oobregion->length = 9; + + return 0; +} + +static int onenand_ooblayout_128_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + if (section >= 8) + return -ERANGE; + + /* + * free bytes are using the spare area fields marked as + * "Managed by internal ECC logic for Logical Sector Number area" + */ + oobregion->offset = (section * 16) + 2; + oobregion->length = 3; + + return 0; +} + +static const struct mtd_ooblayout_ops onenand_oob_128_ooblayout_ops = { + .ecc = onenand_ooblayout_128_ecc, + .free = onenand_ooblayout_128_free, }; /** - * onenand_oob_64 - oob info for large (2KB) page + * onenand_oob_32_64 - oob info for large (2KB) page */ -static struct nand_ecclayout onenand_oob_64 = { - .eccbytes = 20, - .eccpos = { - 8, 9, 10, 11, 12, - 24, 25, 26, 27, 28, - 40, 41, 42, 43, 44, - 56, 57, 58, 59, 60, - }, - .oobfree = { - {2, 3}, {14, 2}, {18, 3}, {30, 2}, - {34, 3}, {46, 2}, {50, 3}, {62, 2} +static int onenand_ooblayout_32_64_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + if (section > 3) + return -ERANGE; + + oobregion->offset = (section * 16) + 8; + oobregion->length = 5; + + return 0; +} + +static int onenand_ooblayout_32_64_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + int sections = (mtd->oobsize / 32) * 2; + + if (section >= sections) + return -ERANGE; + + if (section & 1) { + oobregion->offset = ((section - 1) * 16) + 14; + oobregion->length = 2; + } else { + oobregion->offset = (section * 16) + 2; + oobregion->length = 3; } -}; -/** - * onenand_oob_32 - oob info for middle (1KB) page - */ -static struct nand_ecclayout onenand_oob_32 = { - .eccbytes = 10, - .eccpos = { - 8, 9, 10, 11, 12, - 24, 25, 26, 27, 28, - }, - .oobfree = { {2, 3}, {14, 2}, {18, 3}, {30, 2} } + return 0; +} + +static const struct mtd_ooblayout_ops onenand_oob_32_64_ooblayout_ops = { + .ecc = onenand_ooblayout_32_64_ecc, + .free = onenand_ooblayout_32_64_free, }; static const unsigned char ffchars[] = { @@ -1024,34 +1057,15 @@ static int onenand_transfer_auto_oob(struct mtd_info *mtd, uint8_t *buf, int col int thislen) { struct onenand_chip *this = mtd->priv; - struct nand_oobfree *free; - int readcol = column; - int readend = column + thislen; - int lastgap = 0; - unsigned int i; - uint8_t *oob_buf = this->oob_buf; - - free = this->ecclayout->oobfree; - for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) { - if (readcol >= lastgap) - readcol += free->offset - lastgap; - if (readend >= lastgap) - readend += free->offset - lastgap; - lastgap = free->offset + free->length; - } - this->read_bufferram(mtd, ONENAND_SPARERAM, oob_buf, 0, mtd->oobsize); - 
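
onenand_transfer_auto_oob() used to walk the oobfree entries by hand to map a logical free-byte column onto physical OOB offsets; that loop and its twin in onenand_fill_auto_oob() each collapse into one core helper. The resulting read/write pair, in isolation:

        /* read: gather 'thislen' free bytes starting at logical 'column' */
        ret = mtd_ooblayout_get_databytes(mtd, buf, this->oob_buf,
                                          column, thislen);

        /* write: scatter the caller's bytes back into the free regions */
        ret = mtd_ooblayout_set_databytes(mtd, buf, oob_buf,
                                          column, thislen);
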
free = this->ecclayout->oobfree; - for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) { - int free_end = free->offset + free->length; - if (free->offset < readend && free_end > readcol) { - int st = max_t(int,free->offset,readcol); - int ed = min_t(int,free_end,readend); - int n = ed - st; - memcpy(buf, oob_buf + st, n); - buf += n; - } else if (column == 0) - break; - } + int ret; + + this->read_bufferram(mtd, ONENAND_SPARERAM, this->oob_buf, 0, + mtd->oobsize); + ret = mtd_ooblayout_get_databytes(mtd, buf, this->oob_buf, + column, thislen); + if (ret) + return ret; + return 0; } @@ -1808,34 +1822,7 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len, static int onenand_fill_auto_oob(struct mtd_info *mtd, u_char *oob_buf, const u_char *buf, int column, int thislen) { - struct onenand_chip *this = mtd->priv; - struct nand_oobfree *free; - int writecol = column; - int writeend = column + thislen; - int lastgap = 0; - unsigned int i; - - free = this->ecclayout->oobfree; - for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) { - if (writecol >= lastgap) - writecol += free->offset - lastgap; - if (writeend >= lastgap) - writeend += free->offset - lastgap; - lastgap = free->offset + free->length; - } - free = this->ecclayout->oobfree; - for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) { - int free_end = free->offset + free->length; - if (free->offset < writeend && free_end > writecol) { - int st = max_t(int,free->offset,writecol); - int ed = min_t(int,free_end,writeend); - int n = ed - st; - memcpy(oob_buf + st, buf, n); - buf += n; - } else if (column == 0) - break; - } - return 0; + return mtd_ooblayout_set_databytes(mtd, buf, oob_buf, column, thislen); } /** @@ -4003,22 +3990,22 @@ int onenand_scan(struct mtd_info *mtd, int maxchips) switch (mtd->oobsize) { case 128: if (FLEXONENAND(this)) { - this->ecclayout = &flexonenand_oob_128; + mtd_set_ooblayout(mtd, &flexonenand_ooblayout_ops); mtd->subpage_sft = 0; } else { - this->ecclayout = &onenand_oob_128; + mtd_set_ooblayout(mtd, &onenand_oob_128_ooblayout_ops); mtd->subpage_sft = 2; } if (ONENAND_IS_NOP_1(this)) mtd->subpage_sft = 0; break; case 64: - this->ecclayout = &onenand_oob_64; + mtd_set_ooblayout(mtd, &onenand_oob_32_64_ooblayout_ops); mtd->subpage_sft = 2; break; case 32: - this->ecclayout = &onenand_oob_32; + mtd_set_ooblayout(mtd, &onenand_oob_32_64_ooblayout_ops); mtd->subpage_sft = 1; break; @@ -4027,7 +4014,7 @@ int onenand_scan(struct mtd_info *mtd, int maxchips) __func__, mtd->oobsize); mtd->subpage_sft = 0; /* To prevent kernel oops */ - this->ecclayout = &onenand_oob_32; + mtd_set_ooblayout(mtd, &onenand_oob_32_64_ooblayout_ops); break; } @@ -4037,12 +4024,12 @@ int onenand_scan(struct mtd_info *mtd, int maxchips) * The number of bytes available for a client to place data into * the out of band area */ - mtd->oobavail = 0; - for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && - this->ecclayout->oobfree[i].length; i++) - mtd->oobavail += this->ecclayout->oobfree[i].length; + ret = mtd_ooblayout_count_freebytes(mtd); + if (ret < 0) + ret = 0; + + mtd->oobavail = ret; - mtd->ecclayout = this->ecclayout; mtd->ecc_strength = 1; /* Fill in remaining MTD driver data */ diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index 157841dc3e99..c52e45594bfd 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -832,6 +832,7 @@ static const struct flash_info spi_nor_ids[] = { /* GigaDevice */ { "gd25q32", 
INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) }, { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) }, + { "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256, SECT_4K) }, /* Intel/Numonyx -- xxxs33b */ diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index a7d1febf667a..16baeb51b2bd 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -149,6 +149,8 @@ static struct device_attribute dev_bgt_enabled = __ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL); static struct device_attribute dev_mtd_num = __ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL); +static struct device_attribute dev_ro_mode = + __ATTR(ro_mode, S_IRUGO, dev_attribute_show, NULL); /** * ubi_volume_notify - send a volume change notification. @@ -385,6 +387,8 @@ static ssize_t dev_attribute_show(struct device *dev, ret = sprintf(buf, "%d\n", ubi->thread_enabled); else if (attr == &dev_mtd_num) ret = sprintf(buf, "%d\n", ubi->mtd->index); + else if (attr == &dev_ro_mode) + ret = sprintf(buf, "%d\n", ubi->ro_mode); else ret = -EINVAL; @@ -404,6 +408,7 @@ static struct attribute *ubi_dev_attrs[] = { &dev_min_io_size.attr, &dev_bgt_enabled.attr, &dev_mtd_num.attr, + &dev_ro_mode.attr, NULL }; ATTRIBUTE_GROUPS(ubi_dev); diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c index c4cb15a3098c..f101a4985a7c 100644 --- a/drivers/mtd/ubi/debug.c +++ b/drivers/mtd/ubi/debug.c @@ -352,7 +352,8 @@ static ssize_t dfs_file_write(struct file *file, const char __user *user_buf, } else if (dent == d->dfs_emulate_power_cut) { if (kstrtoint(buf, 0, &val) != 0) count = -EINVAL; - d->emulate_power_cut = val; + else + d->emulate_power_cut = val; goto out; } diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index 5b9834cf2820..5780dd1ba79d 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c @@ -426,8 +426,25 @@ retry: pnum, vol_id, lnum); err = -EBADMSG; } else { - err = -EINVAL; - ubi_ro_mode(ubi); + /* + * Ending up here in the non-Fastmap case + * is a clear bug as the VID header had to + * be present at scan time to have it referenced. + * With fastmap the story is more complicated. + * Fastmap has the mapping info without the need + * of a full scan. So the LEB could have been + * unmapped, Fastmap cannot know this and keeps + * the LEB referenced. + * This is valid and works as the layer above UBI + * has to do bookkeeping about used/referenced + * LEBs in any case. + */ + if (ubi->fast_attach) { + err = -EBADMSG; + } else { + err = -EINVAL; + ubi_ro_mode(ubi); + } } } goto out_free; @@ -1202,32 +1219,6 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, } cond_resched(); - - /* - * We've written the data and are going to read it back to make - * sure it was written correctly. 
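
The ubi debug.c hunk above fixes a classic parse-then-assign bug: emulate_power_cut was updated even when kstrtoint() failed, so a malformed write stored a stale or uninitialized value. The safe shape of the pattern:

        int val;

        if (kstrtoint(buf, 0, &val) != 0)
                return -EINVAL;         /* leave the state untouched */

        d->emulate_power_cut = val;
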
- */ - memset(ubi->peb_buf, 0xFF, aldata_size); - err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size); - if (err) { - if (err != UBI_IO_BITFLIPS) { - ubi_warn(ubi, "error %d while reading data back from PEB %d", - err, to); - if (is_error_sane(err)) - err = MOVE_TARGET_RD_ERR; - } else - err = MOVE_TARGET_BITFLIPS; - goto out_unlock_buf; - } - - cond_resched(); - - if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) { - ubi_warn(ubi, "read data back from PEB %d and it is different", - to); - err = -EINVAL; - goto out_unlock_buf; - } } ubi_assert(vol->eba_tbl[lnum] == from); diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c index 263b439e21a8..990898b9dc72 100644 --- a/drivers/mtd/ubi/fastmap.c +++ b/drivers/mtd/ubi/fastmap.c @@ -1058,6 +1058,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, ubi_msg(ubi, "fastmap WL pool size: %d", ubi->fm_wl_pool.max_size); ubi->fm_disabled = 0; + ubi->fast_attach = 1; ubi_free_vid_hdr(ubi, vh); kfree(ech); diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c index 437757c89b9e..348dbbcbedc8 100644 --- a/drivers/mtd/ubi/kapi.c +++ b/drivers/mtd/ubi/kapi.c @@ -705,7 +705,7 @@ int ubi_leb_map(struct ubi_volume_desc *desc, int lnum) struct ubi_volume *vol = desc->vol; struct ubi_device *ubi = vol->ubi; - dbg_gen("unmap LEB %d:%d", vol->vol_id, lnum); + dbg_gen("map LEB %d:%d", vol->vol_id, lnum); if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) return -EROFS; diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index dadc6a9d5755..61d4e99755a4 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h @@ -466,6 +466,7 @@ struct ubi_debug_info { * @fm_eba_sem: allows ubi_update_fastmap() to block EBA table changes * @fm_work: fastmap work queue * @fm_work_scheduled: non-zero if fastmap work was scheduled + * @fast_attach: non-zero if UBI was attached by fastmap * * @used: RB-tree of used physical eraseblocks * @erroneous: RB-tree of erroneous used physical eraseblocks @@ -574,6 +575,7 @@ struct ubi_device { size_t fm_size; struct work_struct fm_work; int fm_work_scheduled; + int fast_attach; /* Wear-leveling sub-system's stuff */ struct rb_root used; diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c index 1ae17bb9b889..10059dfdc1b6 100644 --- a/drivers/mtd/ubi/vmt.c +++ b/drivers/mtd/ubi/vmt.c @@ -405,7 +405,7 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl) if (!no_vtbl) self_check_volumes(ubi); - return err; + return 0; out_err: ubi_err(ubi, "cannot remove volume %d, error %d", vol_id, err); diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 17ec948ac40e..959c7b12e0b1 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -1534,6 +1534,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) INIT_LIST_HEAD(&ubi->pq[i]); ubi->pq_head = 0; + ubi->free_count = 0; list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) { cond_resched(); @@ -1552,7 +1553,6 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) found_pebs++; } - ubi->free_count = 0; list_for_each_entry(aeb, &ai->free, u.list) { cond_resched(); diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c index 16419f550eff..058460bdd5a6 100644 --- a/drivers/net/ethernet/arc/emac_mdio.c +++ b/drivers/net/ethernet/arc/emac_mdio.c @@ -141,7 +141,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv) priv->bus = bus; bus->priv = priv; bus->parent = priv->dev; - bus->name = "Synopsys MII Bus", + 
bus->name = "Synopsys MII Bus"; bus->read = &arc_mdio_read; bus->write = &arc_mdio_write; bus->reset = &arc_mdio_reset; diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h index 8fc93c5f6abc..d02c4240b7df 100644 --- a/drivers/net/ethernet/atheros/alx/alx.h +++ b/drivers/net/ethernet/atheros/alx/alx.h @@ -96,6 +96,10 @@ struct alx_priv { unsigned int rx_ringsz; unsigned int rxbuf_size; + struct page *rx_page; + unsigned int rx_page_offset; + unsigned int rx_frag_size; + struct napi_struct napi; struct alx_tx_queue txq; struct alx_rx_queue rxq; diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 9fe8b5e310d1..c98acdc0d14f 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -70,6 +70,35 @@ static void alx_free_txbuf(struct alx_priv *alx, int entry) } } +static struct sk_buff *alx_alloc_skb(struct alx_priv *alx, gfp_t gfp) +{ + struct sk_buff *skb; + struct page *page; + + if (alx->rx_frag_size > PAGE_SIZE) + return __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp); + + page = alx->rx_page; + if (!page) { + alx->rx_page = page = alloc_page(gfp); + if (unlikely(!page)) + return NULL; + alx->rx_page_offset = 0; + } + + skb = build_skb(page_address(page) + alx->rx_page_offset, + alx->rx_frag_size); + if (likely(skb)) { + alx->rx_page_offset += alx->rx_frag_size; + if (alx->rx_page_offset >= PAGE_SIZE) + alx->rx_page = NULL; + else + get_page(page); + } + return skb; +} + + static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp) { struct alx_rx_queue *rxq = &alx->rxq; @@ -86,7 +115,7 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp) while (!cur_buf->skb && next != rxq->read_idx) { struct alx_rfd *rfd = &rxq->rfd[cur]; - skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp); + skb = alx_alloc_skb(alx, gfp); if (!skb) break; dma = dma_map_single(&alx->hw.pdev->dev, @@ -124,6 +153,7 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp) alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur); } + return count; } @@ -592,6 +622,11 @@ static void alx_free_rings(struct alx_priv *alx) kfree(alx->txq.bufs); kfree(alx->rxq.bufs); + if (alx->rx_page) { + put_page(alx->rx_page); + alx->rx_page = NULL; + } + dma_free_coherent(&alx->hw.pdev->dev, alx->descmem.size, alx->descmem.virt, @@ -646,6 +681,7 @@ static int alx_request_irq(struct alx_priv *alx) alx->dev->name, alx); if (!err) goto out; + /* fall back to legacy interrupt */ pci_disable_msi(alx->hw.pdev); } @@ -689,6 +725,7 @@ static int alx_init_sw(struct alx_priv *alx) struct pci_dev *pdev = alx->hw.pdev; struct alx_hw *hw = &alx->hw; int err; + unsigned int head_size; err = alx_identify_hw(alx); if (err) { @@ -704,7 +741,12 @@ static int alx_init_sw(struct alx_priv *alx) hw->smb_timer = 400; hw->mtu = alx->dev->mtu; + alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu); + head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + alx->rx_frag_size = roundup_pow_of_two(head_size); + alx->tx_ringsz = 256; alx->rx_ringsz = 512; hw->imt = 200; @@ -806,6 +848,7 @@ static int alx_change_mtu(struct net_device *netdev, int mtu) { struct alx_priv *alx = netdev_priv(netdev); int max_frame = ALX_MAX_FRAME_LEN(mtu); + unsigned int head_size; if ((max_frame < ALX_MIN_FRAME_SIZE) || (max_frame > ALX_MAX_FRAME_SIZE)) @@ -817,6 +860,9 @@ static int alx_change_mtu(struct net_device *netdev, int mtu) netdev->mtu = mtu; alx->hw.mtu = mtu; alx->rxbuf_size = 
max(max_frame, ALX_DEF_RXBUF_SIZE); + head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + alx->rx_frag_size = roundup_pow_of_two(head_size); netdev_update_features(netdev); if (netif_running(netdev)) alx_reinit(alx); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 0a5b770cefaa..c5fe915870ad 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -13941,14 +13941,14 @@ static int bnx2x_init_one(struct pci_dev *pdev, bp->doorbells = bnx2x_vf_doorbells(bp); rc = bnx2x_vf_pci_alloc(bp); if (rc) - goto init_one_exit; + goto init_one_freemem; } else { doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT); if (doorbell_size > pci_resource_len(pdev, 2)) { dev_err(&bp->pdev->dev, "Cannot map doorbells, bar size too small, aborting\n"); rc = -ENOMEM; - goto init_one_exit; + goto init_one_freemem; } bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), doorbell_size); @@ -13957,19 +13957,19 @@ static int bnx2x_init_one(struct pci_dev *pdev, dev_err(&bp->pdev->dev, "Cannot map doorbell space, aborting\n"); rc = -ENOMEM; - goto init_one_exit; + goto init_one_freemem; } if (IS_VF(bp)) { rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count); if (rc) - goto init_one_exit; + goto init_one_freemem; } /* Enable SRIOV if capability found in configuration space */ rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS); if (rc) - goto init_one_exit; + goto init_one_freemem; /* calc qm_cid_count */ bp->qm_cid_count = bnx2x_set_qm_cid_count(bp); @@ -13988,7 +13988,7 @@ static int bnx2x_init_one(struct pci_dev *pdev, rc = bnx2x_set_int_mode(bp); if (rc) { dev_err(&pdev->dev, "Cannot set interrupts\n"); - goto init_one_exit; + goto init_one_freemem; } BNX2X_DEV_INFO("set interrupts successfully\n"); @@ -13996,7 +13996,7 @@ static int bnx2x_init_one(struct pci_dev *pdev, rc = register_netdev(dev); if (rc) { dev_err(&pdev->dev, "Cannot register net device\n"); - goto init_one_exit; + goto init_one_freemem; } BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name); @@ -14029,6 +14029,9 @@ static int bnx2x_init_one(struct pci_dev *pdev, return 0; +init_one_freemem: + bnx2x_free_mem_bp(bp); + init_one_exit: bnx2x_disable_pcie_error_reporting(bp); diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index 085f9125cf42..06f031715b57 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c @@ -205,8 +205,10 @@ static int nps_enet_poll(struct napi_struct *napi, int budget) * re-adding ourselves to the poll list. */ - if (priv->tx_skb && !tx_ctrl_ct) + if (priv->tx_skb && !tx_ctrl_ct) { + nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0); napi_reschedule(napi); + } } return work_done; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index ca2cccc594fd..3c0255e98535 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1197,10 +1197,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE); bdp->cbd_bufaddr = cpu_to_fec32(0); - if (!skb) { - bdp = fec_enet_get_nextdesc(bdp, &txq->bd); - continue; - } + if (!skb) + goto skb_done; /* Check for errors. 
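
The alx hunks above switch receive buffers from __netdev_alloc_skb() to build_skb() on page fragments, so each fragment must cover the frame plus skb overhead and be a power of two to tile pages cleanly. The sizing, annotated:

        /* frame + NET_SKB_PAD headroom, plus the skb_shared_info tail */
        head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        alx->rx_frag_size = roundup_pow_of_two(head_size);

        /* alx_alloc_skb() falls back to __netdev_alloc_skb() whenever
         * rx_frag_size exceeds PAGE_SIZE */
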
*/ if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | @@ -1239,7 +1237,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) /* Free the sk buffer associated with this last transmit */ dev_kfree_skb_any(skb); - +skb_done: /* Make sure the update to bdp and tx_skbuff are performed * before dirty_tx */ diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c index bcb9dccada4d..1de2e1e51c2b 100644 --- a/drivers/net/ethernet/freescale/fman/fman.c +++ b/drivers/net/ethernet/freescale/fman/fman.c @@ -615,7 +615,7 @@ struct fman { struct fman_cfg *cfg; struct muram_info *muram; /* cam section in muram */ - int cam_offset; + unsigned long cam_offset; size_t cam_size; /* Fifo in MURAM */ int fifo_offset; diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.c b/drivers/net/ethernet/freescale/fman/fman_muram.c index 4eb0e9ac7182..47394c45b6e8 100644 --- a/drivers/net/ethernet/freescale/fman/fman_muram.c +++ b/drivers/net/ethernet/freescale/fman/fman_muram.c @@ -129,7 +129,7 @@ unsigned long fman_muram_offset_to_vbase(struct muram_info *muram, * * Return: address of the allocated memory; NULL otherwise. */ -int fman_muram_alloc(struct muram_info *muram, size_t size) +unsigned long fman_muram_alloc(struct muram_info *muram, size_t size) { unsigned long vaddr; @@ -150,7 +150,7 @@ int fman_muram_alloc(struct muram_info *muram, size_t size) * * Free an allocated memory from FM-MURAM partition. */ -void fman_muram_free_mem(struct muram_info *muram, u32 offset, size_t size) +void fman_muram_free_mem(struct muram_info *muram, unsigned long offset, size_t size) { unsigned long addr = fman_muram_offset_to_vbase(muram, offset); diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.h b/drivers/net/ethernet/freescale/fman/fman_muram.h index dbf0af9e5bb5..889649ad8931 100644 --- a/drivers/net/ethernet/freescale/fman/fman_muram.h +++ b/drivers/net/ethernet/freescale/fman/fman_muram.h @@ -44,8 +44,8 @@ struct muram_info *fman_muram_init(phys_addr_t base, size_t size); unsigned long fman_muram_offset_to_vbase(struct muram_info *muram, unsigned long offset); -int fman_muram_alloc(struct muram_info *muram, size_t size); +unsigned long fman_muram_alloc(struct muram_info *muram, size_t size); -void fman_muram_free_mem(struct muram_info *muram, u32 offset, size_t size); +void fman_muram_free_mem(struct muram_info *muram, unsigned long offset, size_t size); #endif /* __FM_MURAM_EXT */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 3d746c887873..67a648c7d3a9 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -46,7 +46,6 @@ static u32 hns_nic_get_link(struct net_device *net_dev) u32 link_stat = priv->link; struct hnae_handle *h; - assert(priv && priv->ae_handle); h = priv->ae_handle; if (priv->phy) { @@ -646,8 +645,6 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev, { struct hns_nic_priv *priv = netdev_priv(net_dev); - assert(priv); - strncpy(drvinfo->version, HNAE_DRIVER_VERSION, sizeof(drvinfo->version)); drvinfo->version[sizeof(drvinfo->version) - 1] = '\0'; @@ -720,8 +717,6 @@ static int hns_set_pauseparam(struct net_device *net_dev, struct hnae_handle *h; struct hnae_ae_ops *ops; - assert(priv || priv->ae_handle); - h = priv->ae_handle; ops = h->dev->ops; @@ -780,8 +775,6 @@ static int hns_set_coalesce(struct net_device *net_dev, struct hnae_ae_ops *ops; int ret; - assert(priv || priv->ae_handle); - ops = 
priv->ae_handle->dev->ops; if (ec->tx_coalesce_usecs != ec->rx_coalesce_usecs) @@ -1111,8 +1104,6 @@ void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd, struct hns_nic_priv *priv = netdev_priv(net_dev); struct hnae_ae_ops *ops; - assert(priv || priv->ae_handle); - ops = priv->ae_handle->dev->ops; cmd->version = HNS_CHIP_VERSION; @@ -1135,8 +1126,6 @@ static int hns_get_regs_len(struct net_device *net_dev) struct hns_nic_priv *priv = netdev_priv(net_dev); struct hnae_ae_ops *ops; - assert(priv || priv->ae_handle); - ops = priv->ae_handle->dev->ops; if (!ops->get_regs_len) { netdev_err(net_dev, "ops->get_regs_len is null!\n"); diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c index 01fccec632ec..466939f8f0cf 100644 --- a/drivers/net/ethernet/marvell/mvneta_bm.c +++ b/drivers/net/ethernet/marvell/mvneta_bm.c @@ -189,6 +189,7 @@ struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id, SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); hwbm_pool->construct = mvneta_bm_construct; hwbm_pool->priv = new_pool; + spin_lock_init(&hwbm_pool->lock); /* Create new pool */ err = mvneta_bm_pool_create(priv, new_pool); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index c761194bb323..fc95affaf76b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -362,7 +362,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev, for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it)) if (bitmap_iterator_test(&it)) - data[index++] = ((unsigned long *)&priv->stats)[i]; + data[index++] = ((unsigned long *)&dev->stats)[i]; for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it)) if (bitmap_iterator_test(&it)) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 92e0624f4cf0..19ceced6736c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1296,15 +1296,16 @@ static void mlx4_en_tx_timeout(struct net_device *dev) } -static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev) +static struct rtnl_link_stats64 * +mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mlx4_en_priv *priv = netdev_priv(dev); spin_lock_bh(&priv->stats_lock); - memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats)); + netdev_stats_to_stats64(stats, &dev->stats); spin_unlock_bh(&priv->stats_lock); - return &priv->ret_stats; + return stats; } static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) @@ -1876,7 +1877,6 @@ static void mlx4_en_clear_stats(struct net_device *dev) if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) en_dbg(HW, priv, "Failed dumping statistics\n"); - memset(&priv->stats, 0, sizeof(priv->stats)); memset(&priv->pstats, 0, sizeof(priv->pstats)); memset(&priv->pkstats, 0, sizeof(priv->pkstats)); memset(&priv->port_stats, 0, sizeof(priv->port_stats)); @@ -1892,6 +1892,11 @@ static void mlx4_en_clear_stats(struct net_device *dev) priv->tx_ring[i]->bytes = 0; priv->tx_ring[i]->packets = 0; priv->tx_ring[i]->tx_csum = 0; + priv->tx_ring[i]->tx_dropped = 0; + priv->tx_ring[i]->queue_stopped = 0; + priv->tx_ring[i]->wake_queue = 0; + priv->tx_ring[i]->tso_packets = 0; + priv->tx_ring[i]->xmit_more = 0; } for (i = 0; i < priv->rx_ring_num; i++) { priv->rx_ring[i]->bytes = 0; @@ -2482,7 +2487,7 @@ static const struct 
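
mlx4_en drops its private net_device_stats copies in favour of the core dev->stats plus the 64-bit ndo_get_stats64 hook. The minimal shape of such a handler, with foo_get_stats64 as a placeholder name:

        static struct rtnl_link_stats64 *
        foo_get_stats64(struct net_device *dev,
                        struct rtnl_link_stats64 *stats)
        {
                /* widen the legacy unsigned long counters to u64 */
                netdev_stats_to_stats64(stats, &dev->stats);
                return stats;
        }
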
net_device_ops mlx4_netdev_ops = { .ndo_stop = mlx4_en_close, .ndo_start_xmit = mlx4_en_xmit, .ndo_select_queue = mlx4_en_select_queue, - .ndo_get_stats = mlx4_en_get_stats, + .ndo_get_stats64 = mlx4_en_get_stats64, .ndo_set_rx_mode = mlx4_en_set_rx_mode, .ndo_set_mac_address = mlx4_en_set_mac, .ndo_validate_addr = eth_validate_addr, @@ -2514,7 +2519,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = { .ndo_stop = mlx4_en_close, .ndo_start_xmit = mlx4_en_xmit, .ndo_select_queue = mlx4_en_select_queue, - .ndo_get_stats = mlx4_en_get_stats, + .ndo_get_stats64 = mlx4_en_get_stats64, .ndo_set_rx_mode = mlx4_en_set_rx_mode, .ndo_set_mac_address = mlx4_en_set_mac, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c index 20b6c2e678b8..5aa8b751f417 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c @@ -152,8 +152,9 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) struct mlx4_counter tmp_counter_stats; struct mlx4_en_stat_out_mbox *mlx4_en_stats; struct mlx4_en_stat_out_flow_control_mbox *flowstats; - struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]); - struct net_device_stats *stats = &priv->stats; + struct net_device *dev = mdev->pndev[port]; + struct mlx4_en_priv *priv = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; struct mlx4_cmd_mailbox *mailbox; u64 in_mod = reset << 8 | port; int err; @@ -188,6 +189,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) } stats->tx_packets = 0; stats->tx_bytes = 0; + stats->tx_dropped = 0; priv->port_stats.tx_chksum_offload = 0; priv->port_stats.queue_stopped = 0; priv->port_stats.wake_queue = 0; @@ -199,6 +201,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) stats->tx_packets += ring->packets; stats->tx_bytes += ring->bytes; + stats->tx_dropped += ring->tx_dropped; priv->port_stats.tx_chksum_offload += ring->tx_csum; priv->port_stats.queue_stopped += ring->queue_stopped; priv->port_stats.wake_queue += ring->wake_queue; @@ -237,21 +240,12 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) stats->multicast = en_stats_adder(&mlx4_en_stats->MCAST_prio_0, &mlx4_en_stats->MCAST_prio_1, NUM_PRIORITIES); - stats->collisions = 0; stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) + sw_rx_dropped; stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength); - stats->rx_over_errors = 0; stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC); - stats->rx_frame_errors = 0; stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); - stats->rx_missed_errors = 0; - stats->tx_aborted_errors = 0; - stats->tx_carrier_errors = 0; - stats->tx_fifo_errors = 0; - stats->tx_heartbeat_errors = 0; - stats->tx_window_errors = 0; - stats->tx_dropped = be32_to_cpu(mlx4_en_stats->TDROP); + stats->tx_dropped += be32_to_cpu(mlx4_en_stats->TDROP); /* RX stats */ priv->pkstats.rx_multicast_packets = stats->multicast; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index f6e61570cb2c..76aa4d27183c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -726,12 +726,12 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) bool inline_ok; u32 ring_cons; - if (!priv->port_up) - goto tx_drop; - tx_ind = skb_get_queue_mapping(skb); ring = priv->tx_ring[tx_ind]; + if 
(!priv->port_up) + goto tx_drop; + /* fetch ring->cons far ahead before needing it to avoid stall */ ring_cons = ACCESS_ONCE(ring->cons); @@ -1030,7 +1030,7 @@ tx_drop_unmap: tx_drop: dev_kfree_skb_any(skb); - priv->stats.tx_dropped++; + ring->tx_dropped++; return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index cc84e09f324a..467d47ed2c39 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -270,6 +270,7 @@ struct mlx4_en_tx_ring { unsigned long tx_csum; unsigned long tso_packets; unsigned long xmit_more; + unsigned int tx_dropped; struct mlx4_bf bf; unsigned long queue_stopped; @@ -482,8 +483,6 @@ struct mlx4_en_priv { struct mlx4_en_port_profile *prof; struct net_device *dev; unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; - struct net_device_stats stats; - struct net_device_stats ret_stats; struct mlx4_en_port_state port_state; spinlock_t stats_lock; struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES]; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index cbf58e1f9333..21ec1c2df2c7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -192,9 +192,10 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct dcbx_app_priority_entry *p_tbl, u32 pri_tc_tbl, int count, bool dcbx_enabled) { - u8 tc, priority, priority_map; + u8 tc, priority_map; enum dcbx_protocol_type type; u16 protocol_id; + int priority; bool enable; int i; @@ -221,7 +222,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, * indication, but we only got here if there was an * app tlv for the protocol, so dcbx must be enabled. */ - enable = !!(type == DCBX_PROTOCOL_ETH); + enable = !(type == DCBX_PROTOCOL_ETH); qed_dcbx_update_app_info(p_data, p_hwfn, enable, true, priority, tc, type); diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 089016f46f26..2d89e8c16b32 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -155,12 +155,14 @@ void qed_resc_free(struct qed_dev *cdev) } } -static int qed_init_qm_info(struct qed_hwfn *p_hwfn) +static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable) { u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0; struct qed_qm_info *qm_info = &p_hwfn->qm_info; struct init_qm_port_params *p_qm_port; u16 num_pqs, multi_cos_tcs = 1; + u8 pf_wfq = qm_info->pf_wfq; + u32 pf_rl = qm_info->pf_rl; u16 num_vfs = 0; #ifdef CONFIG_QED_SRIOV @@ -182,23 +184,28 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn) /* PQs will be arranged as follows: First per-TC PQ then pure-LB quete. */ - qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) * - num_pqs, GFP_KERNEL); + qm_info->qm_pq_params = kcalloc(num_pqs, + sizeof(struct init_qm_pq_params), + b_sleepable ? GFP_KERNEL : GFP_ATOMIC); if (!qm_info->qm_pq_params) goto alloc_err; - qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) * - num_vports, GFP_KERNEL); + qm_info->qm_vport_params = kcalloc(num_vports, + sizeof(struct init_qm_vport_params), + b_sleepable ? GFP_KERNEL + : GFP_ATOMIC); if (!qm_info->qm_vport_params) goto alloc_err; - qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) * - MAX_NUM_PORTS, GFP_KERNEL); + qm_info->qm_port_params = kcalloc(MAX_NUM_PORTS, + sizeof(struct init_qm_port_params), + b_sleepable ? 
GFP_KERNEL + : GFP_ATOMIC); if (!qm_info->qm_port_params) goto alloc_err; - qm_info->wfq_data = kcalloc(num_vports, sizeof(*qm_info->wfq_data), - GFP_KERNEL); + qm_info->wfq_data = kcalloc(num_vports, sizeof(struct qed_wfq_data), + b_sleepable ? GFP_KERNEL : GFP_ATOMIC); if (!qm_info->wfq_data) goto alloc_err; @@ -264,10 +271,10 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn) for (i = 0; i < qm_info->num_vports; i++) qm_info->qm_vport_params[i].vport_wfq = 1; - qm_info->pf_wfq = 0; - qm_info->pf_rl = 0; qm_info->vport_rl_en = 1; qm_info->vport_wfq_en = 1; + qm_info->pf_rl = pf_rl; + qm_info->pf_wfq = pf_wfq; return 0; @@ -299,7 +306,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) qed_qm_info_free(p_hwfn); /* initialize qed's qm data structure */ - rc = qed_init_qm_info(p_hwfn); + rc = qed_init_qm_info(p_hwfn, false); if (rc) return rc; @@ -388,7 +395,7 @@ int qed_resc_alloc(struct qed_dev *cdev) goto alloc_err; /* Prepare and process QM requirements */ - rc = qed_init_qm_info(p_hwfn); + rc = qed_init_qm_info(p_hwfn, true); if (rc) goto alloc_err; @@ -581,7 +588,14 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn) hw_mode |= 1 << MODE_ASIC; + if (p_hwfn->cdev->num_hwfns > 1) + hw_mode |= 1 << MODE_100G; + p_hwfn->hw_info.hw_mode = hw_mode; + + DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP), + "Configuring function for hw_mode: 0x%08x\n", + p_hwfn->hw_info.hw_mode); } /* Init run time data for all PFs on an engine. */ @@ -821,6 +835,11 @@ int qed_hw_init(struct qed_dev *cdev, u32 load_code, param; int rc, mfw_rc, i; + if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { + DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n"); + return -EINVAL; + } + if (IS_PF(cdev)) { rc = qed_init_fw_data(cdev, bin_fw_data); if (rc != 0) @@ -2086,6 +2105,13 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate) { int i; + if (cdev->num_hwfns > 1) { + DP_VERBOSE(cdev, + NETIF_MSG_LINK, + "WFQ configuration is not supported for this device\n"); + return; + } + for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 8b22f87033ce..753064679bde 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -413,15 +413,17 @@ static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode) /* Fallthrough */ case QED_INT_MODE_MSI: - rc = pci_enable_msi(cdev->pdev); - if (!rc) { - int_params->out.int_mode = QED_INT_MODE_MSI; - goto out; - } + if (cdev->num_hwfns == 1) { + rc = pci_enable_msi(cdev->pdev); + if (!rc) { + int_params->out.int_mode = QED_INT_MODE_MSI; + goto out; + } - DP_NOTICE(cdev, "Failed to enable MSI\n"); - if (force_mode) - goto out; + DP_NOTICE(cdev, "Failed to enable MSI\n"); + if (force_mode) + goto out; + } /* Fallthrough */ case QED_INT_MODE_INTA: diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 1bc75358cbc4..ad3cae3b7243 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -230,7 +230,10 @@ static int qede_get_sset_count(struct net_device *dev, int stringset) case ETH_SS_PRIV_FLAGS: return QEDE_PRI_FLAG_LEN; case ETH_SS_TEST: - return QEDE_ETHTOOL_TEST_MAX; + if (!IS_VF(edev)) + return QEDE_ETHTOOL_TEST_MAX; + else + return 0; default: DP_VERBOSE(edev, QED_MSG_DEBUG, "Unsupported stringset 0x%08x\n", 
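
qed_init_qm_info() is now reachable from both process context (first allocation) and a non-sleepable reconfiguration path, so every allocation derives its GFP flags from the new b_sleepable argument. The pattern in isolation, with qm_zalloc as a hypothetical wrapper:

        static void *qm_zalloc(size_t n, size_t size, bool b_sleepable)
        {
                /* GFP_KERNEL may sleep to reclaim memory; atomic callers
                 * must use GFP_ATOMIC and tolerate a higher failure rate */
                return kcalloc(n, size,
                               b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
        }
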
stringset); diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 337e839ca586..5d00d1404bfc 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1824,7 +1824,7 @@ static int qede_set_vf_rate(struct net_device *dev, int vfidx, { struct qede_dev *edev = netdev_priv(dev); - return edev->ops->iov->set_rate(edev->cdev, vfidx, max_tx_rate, + return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate, max_tx_rate); } @@ -2091,6 +2091,29 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev) edev->accept_any_vlan = false; } +int qede_set_features(struct net_device *dev, netdev_features_t features) +{ + struct qede_dev *edev = netdev_priv(dev); + netdev_features_t changes = features ^ dev->features; + bool need_reload = false; + + /* No action needed if hardware GRO is disabled during driver load */ + if (changes & NETIF_F_GRO) { + if (dev->features & NETIF_F_GRO) + need_reload = !edev->gro_disable; + else + need_reload = edev->gro_disable; + } + + if (need_reload && netif_running(edev->ndev)) { + dev->features = features; + qede_reload(edev, NULL, NULL); + return 1; + } + + return 0; +} + #ifdef CONFIG_QEDE_VXLAN static void qede_add_vxlan_port(struct net_device *dev, sa_family_t sa_family, __be16 port) @@ -2175,6 +2198,7 @@ static const struct net_device_ops qede_netdev_ops = { #endif .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, + .ndo_set_features = qede_set_features, .ndo_get_stats64 = qede_get_stats64, #ifdef CONFIG_QED_SRIOV .ndo_set_vf_link_state = qede_set_vf_link_state, diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 83d72106471c..fd5d1c93b55b 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -4846,7 +4846,6 @@ static void ql_eeh_close(struct net_device *ndev) } /* Disabling the timer */ - del_timer_sync(&qdev->timer); ql_cancel_all_work_sync(qdev); for (i = 0; i < qdev->rss_ring_count; i++) @@ -4873,6 +4872,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev, return PCI_ERS_RESULT_CAN_RECOVER; case pci_channel_io_frozen: netif_device_detach(ndev); + del_timer_sync(&qdev->timer); if (netif_running(ndev)) ql_eeh_close(ndev); pci_disable_device(pdev); @@ -4880,6 +4880,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev, case pci_channel_io_perm_failure: dev_err(&pdev->dev, "%s: pci_channel_io_perm_failure.\n", __func__); + del_timer_sync(&qdev->timer); ql_eeh_close(ndev); set_bit(QL_EEH_FATAL, &qdev->flags); return PCI_ERS_RESULT_DISCONNECT; diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 1681084cc96f..1f309127457d 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -619,6 +619,17 @@ fail: return rc; } +static void efx_ef10_forget_old_piobufs(struct efx_nic *efx) +{ + struct efx_channel *channel; + struct efx_tx_queue *tx_queue; + + /* All our existing PIO buffers went away */ + efx_for_each_channel(channel, efx) + efx_for_each_channel_tx_queue(tx_queue, channel) + tx_queue->piobuf = NULL; +} + #else /* !EFX_USE_PIO */ static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) @@ -635,6 +646,10 @@ static void efx_ef10_free_piobufs(struct efx_nic *efx) { } +static void efx_ef10_forget_old_piobufs(struct efx_nic *efx) +{ +} + #endif /* EFX_USE_PIO */ static void 
efx_ef10_remove(struct efx_nic *efx) @@ -1018,6 +1033,7 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx) nic_data->must_realloc_vis = true; nic_data->must_restore_filters = true; nic_data->must_restore_piobufs = true; + efx_ef10_forget_old_piobufs(efx); nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; /* Driver-created vswitches and vports must be re-created */ diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 0705ec869487..097f363f1630 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -1726,14 +1726,33 @@ static int efx_probe_filters(struct efx_nic *efx) #ifdef CONFIG_RFS_ACCEL if (efx->type->offload_features & NETIF_F_NTUPLE) { - efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters, - sizeof(*efx->rps_flow_id), - GFP_KERNEL); - if (!efx->rps_flow_id) { + struct efx_channel *channel; + int i, success = 1; + + efx_for_each_channel(channel, efx) { + channel->rps_flow_id = + kcalloc(efx->type->max_rx_ip_filters, + sizeof(*channel->rps_flow_id), + GFP_KERNEL); + if (!channel->rps_flow_id) + success = 0; + else + for (i = 0; + i < efx->type->max_rx_ip_filters; + ++i) + channel->rps_flow_id[i] = + RPS_FLOW_ID_INVALID; + } + + if (!success) { + efx_for_each_channel(channel, efx) + kfree(channel->rps_flow_id); efx->type->filter_table_remove(efx); rc = -ENOMEM; goto out_unlock; } + + efx->rps_expire_index = efx->rps_expire_channel = 0; } #endif out_unlock: @@ -1744,7 +1763,10 @@ out_unlock: static void efx_remove_filters(struct efx_nic *efx) { #ifdef CONFIG_RFS_ACCEL - kfree(efx->rps_flow_id); + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + kfree(channel->rps_flow_id); #endif down_write(&efx->filter_sem); efx->type->filter_table_remove(efx); diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 38c422321cda..d13ddf9703ff 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -403,6 +403,8 @@ enum efx_sync_events_state { * @event_test_cpu: Last CPU to handle interrupt or test event for this channel * @irq_count: Number of IRQs since last adaptive moderation decision * @irq_mod_score: IRQ moderation score + * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS, + * indexed by filter ID * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors @@ -446,6 +448,8 @@ struct efx_channel { unsigned int irq_mod_score; #ifdef CONFIG_RFS_ACCEL unsigned int rfs_filters_added; +#define RPS_FLOW_ID_INVALID 0xFFFFFFFF + u32 *rps_flow_id; #endif unsigned n_rx_tobe_disc; @@ -889,9 +893,9 @@ struct vfdi_status; * @filter_sem: Filter table rw_semaphore, for freeing the table * @filter_lock: Filter table lock, for mere content changes * @filter_state: Architecture-dependent filter table state - * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS, - * indexed by filter ID - * @rps_expire_index: Next index to check for expiry in @rps_flow_id + * @rps_expire_channel: Next channel to check for expiry + * @rps_expire_index: Next index to check for expiry in + * @rps_expire_channel's @rps_flow_id * @active_queues: Count of RX and TX queues that haven't been flushed and drained. * @rxq_flush_pending: Count of number of receive queues that need to be flushed. * Decremented when the efx_flush_rx_queue() is called. 
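The efx_probe_filters() hunk above moves the sfc driver's RFS flow-ID table from one NIC-wide array to one array per channel, pre-filling every slot with RPS_FLOW_ID_INVALID so the expiry scan can skip slots that never held a filter. A minimal userspace sketch of that allocate-or-roll-back pattern follows; the names are hypothetical and calloc() stands in for kcalloc(..., GFP_KERNEL):

```c
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

#define FLOW_ID_INVALID 0xFFFFFFFFu	/* mirrors RPS_FLOW_ID_INVALID */

struct channel {
	uint32_t *flow_id;	/* one slot per hardware filter */
};

/* Give every channel its own flow-ID table; roll back on failure. */
static int alloc_flow_tables(struct channel *ch, int n_channels, int n_filters)
{
	int c, i;

	for (c = 0; c < n_channels; c++) {
		ch[c].flow_id = calloc(n_filters, sizeof(uint32_t));
		if (!ch[c].flow_id)
			goto fail;
		/* No filter inserted yet: mark every slot invalid so an
		 * expiry scan knows to skip it. */
		for (i = 0; i < n_filters; i++)
			ch[c].flow_id[i] = FLOW_ID_INVALID;
	}
	return 0;

fail:
	while (c--)
		free(ch[c].flow_id);
	return -ENOMEM;
}

int main(void)
{
	struct channel ch[4] = { { 0 } };
	int c, rc = alloc_flow_tables(ch, 4, 8);

	for (c = 0; !rc && c < 4; c++)
		free(ch[c].flow_id);
	return rc ? 1 : 0;
}
```

The kernel version's failure path simply kfree()s every channel's table in one sweep, since kfree(NULL) is a no-op; the explicit roll-back loop here is the userspace equivalent.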
@@ -1035,7 +1039,7 @@ struct efx_nic { spinlock_t filter_lock; void *filter_state; #ifdef CONFIG_RFS_ACCEL - u32 *rps_flow_id; + unsigned int rps_expire_channel; unsigned int rps_expire_index; #endif diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 8956995b2fe7..02b0b5272c14 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -842,33 +842,18 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, struct efx_nic *efx = netdev_priv(net_dev); struct efx_channel *channel; struct efx_filter_spec spec; - const __be16 *ports; - __be16 ether_type; - int nhoff; + struct flow_keys fk; int rc; - /* The core RPS/RFS code has already parsed and validated - * VLAN, IP and transport headers. We assume they are in the - * header area. - */ - - if (skb->protocol == htons(ETH_P_8021Q)) { - const struct vlan_hdr *vh = - (const struct vlan_hdr *)skb->data; + if (flow_id == RPS_FLOW_ID_INVALID) + return -EINVAL; - /* We can't filter on the IP 5-tuple and the vlan - * together, so just strip the vlan header and filter - * on the IP part. - */ - EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh)); - ether_type = vh->h_vlan_encapsulated_proto; - nhoff = sizeof(struct vlan_hdr); - } else { - ether_type = skb->protocol; - nhoff = 0; - } + if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) + return -EPROTONOSUPPORT; - if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6)) + if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) + return -EPROTONOSUPPORT; + if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) return -EPROTONOSUPPORT; efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, @@ -878,56 +863,41 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT; - spec.ether_type = ether_type; - - if (ether_type == htons(ETH_P_IP)) { - const struct iphdr *ip = - (const struct iphdr *)(skb->data + nhoff); - - EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip)); - if (ip_is_fragment(ip)) - return -EPROTONOSUPPORT; - spec.ip_proto = ip->protocol; - spec.rem_host[0] = ip->saddr; - spec.loc_host[0] = ip->daddr; - EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4); - ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl); + spec.ether_type = fk.basic.n_proto; + spec.ip_proto = fk.basic.ip_proto; + + if (fk.basic.n_proto == htons(ETH_P_IP)) { + spec.rem_host[0] = fk.addrs.v4addrs.src; + spec.loc_host[0] = fk.addrs.v4addrs.dst; } else { - const struct ipv6hdr *ip6 = - (const struct ipv6hdr *)(skb->data + nhoff); - - EFX_BUG_ON_PARANOID(skb_headlen(skb) < - nhoff + sizeof(*ip6) + 4); - spec.ip_proto = ip6->nexthdr; - memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr)); - memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr)); - ports = (const __be16 *)(ip6 + 1); + memcpy(spec.rem_host, &fk.addrs.v6addrs.src, sizeof(struct in6_addr)); + memcpy(spec.loc_host, &fk.addrs.v6addrs.dst, sizeof(struct in6_addr)); } - spec.rem_port = ports[0]; - spec.loc_port = ports[1]; + spec.rem_port = fk.ports.src; + spec.loc_port = fk.ports.dst; rc = efx->type->filter_rfs_insert(efx, &spec); if (rc < 0) return rc; /* Remember this so we can check whether to expire the filter later */ - efx->rps_flow_id[rc] = flow_id; - channel = efx_get_channel(efx, skb_get_rx_queue(skb)); + channel = efx_get_channel(efx, rxq_index); + 
channel->rps_flow_id[rc] = flow_id; ++channel->rfs_filters_added; - if (ether_type == htons(ETH_P_IP)) + if (spec.ether_type == htons(ETH_P_IP)) netif_info(efx, rx_status, efx->net_dev, "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n", (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", - spec.rem_host, ntohs(ports[0]), spec.loc_host, - ntohs(ports[1]), rxq_index, flow_id, rc); + spec.rem_host, ntohs(spec.rem_port), spec.loc_host, + ntohs(spec.loc_port), rxq_index, flow_id, rc); else netif_info(efx, rx_status, efx->net_dev, "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n", (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", - spec.rem_host, ntohs(ports[0]), spec.loc_host, - ntohs(ports[1]), rxq_index, flow_id, rc); + spec.rem_host, ntohs(spec.rem_port), spec.loc_host, + ntohs(spec.loc_port), rxq_index, flow_id, rc); return rc; } @@ -935,24 +905,34 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota) { bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index); - unsigned int index, size; + unsigned int channel_idx, index, size; u32 flow_id; if (!spin_trylock_bh(&efx->filter_lock)) return false; expire_one = efx->type->filter_rfs_expire_one; + channel_idx = efx->rps_expire_channel; index = efx->rps_expire_index; size = efx->type->max_rx_ip_filters; while (quota--) { - flow_id = efx->rps_flow_id[index]; - if (expire_one(efx, flow_id, index)) + struct efx_channel *channel = efx_get_channel(efx, channel_idx); + flow_id = channel->rps_flow_id[index]; + + if (flow_id != RPS_FLOW_ID_INVALID && + expire_one(efx, flow_id, index)) { netif_info(efx, rx_status, efx->net_dev, - "expired filter %d [flow %u]\n", - index, flow_id); - if (++index == size) + "expired filter %d [queue %u flow %u]\n", + index, channel_idx, flow_id); + channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID; + } + if (++index == size) { + if (++channel_idx == efx->n_channels) + channel_idx = 0; index = 0; + } } + efx->rps_expire_channel = channel_idx; efx->rps_expire_index = index; spin_unlock_bh(&efx->filter_lock); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index 3f83c369f56c..ec295851812b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -297,7 +297,7 @@ int stmmac_mdio_register(struct net_device *ndev) return -ENOMEM; if (mdio_bus_data->irqs) - memcpy(new_bus->irq, mdio_bus_data, sizeof(new_bus->irq)); + memcpy(new_bus->irq, mdio_bus_data->irqs, sizeof(new_bus->irq)); #ifdef CONFIG_OF if (priv->device->of_node) diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index a0f64cba86ba..2ace126533cd 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -990,7 +990,7 @@ static void team_port_disable(struct team *team, #define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ NETIF_F_RXCSUM | NETIF_F_ALL_TSO) -static void __team_compute_features(struct team *team) +static void ___team_compute_features(struct team *team) { struct team_port *port; u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL; @@ -1021,15 +1021,20 @@ static void __team_compute_features(struct team *team) team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM)) team->dev->priv_flags |= IFF_XMIT_DST_RELEASE; +} +static void __team_compute_features(struct team *team) +{ + 
___team_compute_features(team); netdev_change_features(team->dev); } static void team_compute_features(struct team *team) { mutex_lock(&team->lock); - __team_compute_features(team); + ___team_compute_features(team); mutex_unlock(&team->lock); + netdev_change_features(team->dev); } static int team_port_enter(struct team *team, struct team_port *port) diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index 36cd7f016a8d..9bbe0161a2f4 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -473,7 +473,7 @@ static void read_bulk_callback(struct urb *urb) goto goon; } - if (!count || count < 4) + if (count < 4) goto goon; rx_status = buf[count - 2]; diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index d9d2806a47b1..dc989a8b5afb 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -61,6 +61,8 @@ #define SUSPEND_ALLMODES (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \ SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3) +#define CARRIER_CHECK_DELAY (2 * HZ) + struct smsc95xx_priv { u32 mac_cr; u32 hash_hi; @@ -69,6 +71,9 @@ struct smsc95xx_priv { spinlock_t mac_cr_lock; u8 features; u8 suspend_flags; + bool link_ok; + struct delayed_work carrier_check; + struct usbnet *dev; }; static bool turbo_mode = true; @@ -624,6 +629,44 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb) intdata); } +static void set_carrier(struct usbnet *dev, bool link) +{ + struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); + + if (pdata->link_ok == link) + return; + + pdata->link_ok = link; + + if (link) + usbnet_link_change(dev, 1, 0); + else + usbnet_link_change(dev, 0, 0); +} + +static void check_carrier(struct work_struct *work) +{ + struct smsc95xx_priv *pdata = container_of(work, struct smsc95xx_priv, + carrier_check.work); + struct usbnet *dev = pdata->dev; + int ret; + + if (pdata->suspend_flags != 0) + return; + + ret = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMSR); + if (ret < 0) { + netdev_warn(dev->net, "Failed to read MII_BMSR\n"); + return; + } + if (ret & BMSR_LSTATUS) + set_carrier(dev, 1); + else + set_carrier(dev, 0); + + schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY); +} + /* Enable or disable Tx & Rx checksum offload engines */ static int smsc95xx_set_features(struct net_device *netdev, netdev_features_t features) @@ -1165,13 +1208,20 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) dev->net->flags |= IFF_MULTICAST; dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM; dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; + + pdata->dev = dev; + INIT_DELAYED_WORK(&pdata->carrier_check, check_carrier); + schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY); + return 0; } static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf) { struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); + if (pdata) { + cancel_delayed_work(&pdata->carrier_check); netif_dbg(dev, ifdown, dev->net, "free pdata\n"); kfree(pdata); pdata = NULL; @@ -1695,6 +1745,7 @@ static int smsc95xx_resume(struct usb_interface *intf) /* do this first to ensure it's cleared even in error case */ pdata->suspend_flags = 0; + schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY); if (suspend_flags & SUSPEND_ALLMODES) { /* clear wake-up sources */ diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 49d84e540343..e0638e556fe7 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1925,24 +1925,11 @@ 
static int virtnet_probe(struct virtio_device *vdev) virtio_device_ready(vdev); - /* Last of all, set up some receive buffers. */ - for (i = 0; i < vi->curr_queue_pairs; i++) { - try_fill_recv(vi, &vi->rq[i], GFP_KERNEL); - - /* If we didn't even get one input buffer, we're useless. */ - if (vi->rq[i].vq->num_free == - virtqueue_get_vring_size(vi->rq[i].vq)) { - free_unused_bufs(vi); - err = -ENOMEM; - goto free_recv_bufs; - } - } - vi->nb.notifier_call = &virtnet_cpu_callback; err = register_hotcpu_notifier(&vi->nb); if (err) { pr_debug("virtio_net: registering cpu notifier failed\n"); - goto free_recv_bufs; + goto free_unregister_netdev; } /* Assume link up if device can't report link status, @@ -1960,10 +1947,9 @@ static int virtnet_probe(struct virtio_device *vdev) return 0; -free_recv_bufs: +free_unregister_netdev: vi->vdev->config->reset(vdev); - free_receive_bufs(vi); unregister_netdev(dev); free_vqs: cancel_delayed_work_sync(&vi->refill); diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 8ff30c3bdfce..f999db2f97b4 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -3086,6 +3086,9 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev, if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL; + if (tb[IFLA_MTU]) + conf.mtu = nla_get_u32(tb[IFLA_MTU]); + err = vxlan_dev_configure(src_net, dev, &conf); switch (err) { case -ENODEV: diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c index 020ac1a4b408..cea9443c22a6 100644 --- a/drivers/net/wireless/ti/wlcore/spi.c +++ b/drivers/net/wireless/ti/wlcore/spi.c @@ -382,7 +382,7 @@ static int wlcore_probe_of(struct spi_device *spi, struct wl12xx_spi_glue *glue, ret = of_property_read_u32(dt_node, "ref-clock-frequency", &pdev_data->ref_clock_freq); - if (IS_ERR_VALUE(ret)) { + if (ret) { dev_err(glue->dev, "can't get reference clock frequency (%d)\n", ret); return ret; @@ -425,7 +425,7 @@ static int wl1271_probe(struct spi_device *spi) } ret = wlcore_probe_of(spi, glue, &pdev_data); - if (IS_ERR_VALUE(ret)) { + if (ret) { dev_err(glue->dev, "can't get device tree parameters (%d)\n", ret); return ret; diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 042baec56931..608fc4464574 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -164,14 +164,22 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector, } static long pmem_direct_access(struct block_device *bdev, sector_t sector, - void __pmem **kaddr, pfn_t *pfn) + void __pmem **kaddr, pfn_t *pfn, long size) { struct pmem_device *pmem = bdev->bd_queue->queuedata; resource_size_t offset = sector * 512 + pmem->data_offset; + if (unlikely(is_bad_pmem(&pmem->bb, sector, size))) + return -EIO; *kaddr = pmem->virt_addr + offset; *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags); + /* + * If badblocks are present, limit known good range to the + * requested range. 
+ */ + if (unlikely(pmem->bb.count)) + return size; return pmem->size - pmem->pfn_pad - offset; } diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 2de248bd462b..1a51584a382b 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -95,6 +95,15 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, break; } break; + case NVME_CTRL_DEAD: + switch (old_state) { + case NVME_CTRL_DELETING: + changed = true; + /* FALLTHRU */ + default: + break; + } + break; default: break; } @@ -720,10 +729,14 @@ static void nvme_init_integrity(struct nvme_ns *ns) switch (ns->pi_type) { case NVME_NS_DPS_PI_TYPE3: integrity.profile = &t10_pi_type3_crc; + integrity.tag_size = sizeof(u16) + sizeof(u32); + integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; break; case NVME_NS_DPS_PI_TYPE1: case NVME_NS_DPS_PI_TYPE2: integrity.profile = &t10_pi_type1_crc; + integrity.tag_size = sizeof(u16); + integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; break; default: integrity.profile = NULL; @@ -1212,6 +1225,9 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd, return ctrl->ops->reset_ctrl(ctrl); case NVME_IOCTL_SUBSYS_RESET: return nvme_reset_subsystem(ctrl); + case NVME_IOCTL_RESCAN: + nvme_queue_scan(ctrl); + return 0; default: return -ENOTTY; } @@ -1239,6 +1255,17 @@ static ssize_t nvme_sysfs_reset(struct device *dev, } static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset); +static ssize_t nvme_sysfs_rescan(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + nvme_queue_scan(ctrl); + return count; +} +static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan); + static ssize_t wwid_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1342,6 +1369,7 @@ nvme_show_int_function(cntlid); static struct attribute *nvme_dev_attrs[] = { &dev_attr_reset_controller.attr, + &dev_attr_rescan_controller.attr, &dev_attr_model.attr, &dev_attr_serial.attr, &dev_attr_firmware_rev.attr, @@ -1580,6 +1608,15 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl) { struct nvme_ns *ns, *next; + /* + * The dead state indicates the controller was not gracefully + * disconnected. In that case, we won't be able to flush any data while + * removing the namespaces' disks; fail all the queues now to avoid + * potentially having to clean up the failed sync later.
+ */ + if (ctrl->state == NVME_CTRL_DEAD) + nvme_kill_queues(ctrl); + mutex_lock(&ctrl->namespaces_mutex); list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) nvme_ns_remove(ns); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 114b92873894..1daa0482de0e 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -72,6 +72,7 @@ enum nvme_ctrl_state { NVME_CTRL_LIVE, NVME_CTRL_RESETTING, NVME_CTRL_DELETING, + NVME_CTRL_DEAD, }; struct nvme_ctrl { diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 0f093f14d348..78dca3193ca4 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1394,7 +1394,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) struct pci_dev *pdev = to_pci_dev(dev->dev); int result, i, vecs, nr_io_queues, size; - nr_io_queues = num_possible_cpus(); + nr_io_queues = num_online_cpus(); result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); if (result < 0) return result; @@ -1551,12 +1551,12 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) static void nvme_disable_io_queues(struct nvme_dev *dev) { - int pass; + int pass, queues = dev->online_queues - 1; unsigned long timeout; u8 opcode = nvme_admin_delete_sq; for (pass = 0; pass < 2; pass++) { - int sent = 0, i = dev->queue_count - 1; + int sent = 0, i = queues; reinit_completion(&dev->ioq_wait); retry: @@ -1857,7 +1857,7 @@ static void nvme_remove_dead_ctrl_work(struct work_struct *work) nvme_kill_queues(&dev->ctrl); if (pci_get_drvdata(pdev)) - pci_stop_and_remove_bus_device_locked(pdev); + device_release_driver(&pdev->dev); nvme_put_ctrl(&dev->ctrl); } @@ -2017,6 +2017,10 @@ static void nvme_remove(struct pci_dev *pdev) nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); pci_set_drvdata(pdev, NULL); + + if (!pci_device_is_present(pdev)) + nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); + flush_work(&dev->reset_work); nvme_uninit_ctrl(&dev->ctrl); nvme_dev_disable(dev, true); @@ -2060,14 +2064,17 @@ static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev, * shutdown the controller to quiesce. The controller will be restarted * after the slot reset through driver's slot_reset callback. 
*/ - dev_warn(dev->ctrl.device, "error detected: state:%d\n", state); switch (state) { case pci_channel_io_normal: return PCI_ERS_RESULT_CAN_RECOVER; case pci_channel_io_frozen: + dev_warn(dev->ctrl.device, + "frozen state error detected, reset controller\n"); nvme_dev_disable(dev, false); return PCI_ERS_RESULT_NEED_RESET; case pci_channel_io_perm_failure: + dev_warn(dev->ctrl.device, + "failure state error detected, request disconnect\n"); return PCI_ERS_RESULT_DISCONNECT; } return PCI_ERS_RESULT_NEED_RESET; @@ -2102,6 +2109,12 @@ static const struct pci_device_id nvme_id_table[] = { { PCI_VDEVICE(INTEL, 0x0953), .driver_data = NVME_QUIRK_STRIPE_SIZE | NVME_QUIRK_DISCARD_ZEROES, }, + { PCI_VDEVICE(INTEL, 0x0a53), + .driver_data = NVME_QUIRK_STRIPE_SIZE | + NVME_QUIRK_DISCARD_ZEROES, }, + { PCI_VDEVICE(INTEL, 0x0a54), + .driver_data = NVME_QUIRK_STRIPE_SIZE | + NVME_QUIRK_DISCARD_ZEROES, }, { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ .driver_data = NVME_QUIRK_IDENTIFY_CNS, }, { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index bb4ea123547f..965911d9b36a 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -113,7 +113,7 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj, rc = nvmem_reg_read(nvmem, pos, buf, count); - if (IS_ERR_VALUE(rc)) + if (rc) return rc; return count; @@ -147,7 +147,7 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj, rc = nvmem_reg_write(nvmem, pos, buf, count); - if (IS_ERR_VALUE(rc)) + if (rc) return rc; return count; @@ -366,7 +366,7 @@ static int nvmem_add_cells(struct nvmem_device *nvmem, } rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]); - if (IS_ERR_VALUE(rval)) { + if (rval) { kfree(cells[i]); goto err; } @@ -963,7 +963,7 @@ static int __nvmem_cell_read(struct nvmem_device *nvmem, rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes); - if (IS_ERR_VALUE(rc)) + if (rc) return rc; /* shift bits in-place */ @@ -998,7 +998,7 @@ void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len) return ERR_PTR(-ENOMEM); rc = __nvmem_cell_read(nvmem, cell, buf, len); - if (IS_ERR_VALUE(rc)) { + if (rc) { kfree(buf); return ERR_PTR(rc); } @@ -1083,7 +1083,7 @@ int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len) if (cell->bit_offset || cell->nbits) kfree(buf); - if (IS_ERR_VALUE(rc)) + if (rc) return rc; return len; @@ -1111,11 +1111,11 @@ ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem, return -EINVAL; rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell); - if (IS_ERR_VALUE(rc)) + if (rc) return rc; rc = __nvmem_cell_read(nvmem, &cell, buf, &len); - if (IS_ERR_VALUE(rc)) + if (rc) return rc; return len; @@ -1141,7 +1141,7 @@ int nvmem_device_cell_write(struct nvmem_device *nvmem, return -EINVAL; rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell); - if (IS_ERR_VALUE(rc)) + if (rc) return rc; return nvmem_cell_write(&cell, buf, cell.bytes); @@ -1170,7 +1170,7 @@ int nvmem_device_read(struct nvmem_device *nvmem, rc = nvmem_reg_read(nvmem, offset, buf, bytes); - if (IS_ERR_VALUE(rc)) + if (rc) return rc; return bytes; @@ -1198,7 +1198,7 @@ int nvmem_device_write(struct nvmem_device *nvmem, rc = nvmem_reg_write(nvmem, offset, buf, bytes); - if (IS_ERR_VALUE(rc)) + if (rc) return rc; diff --git a/drivers/of/Makefile b/drivers/of/Makefile index bee3fa96b981..d7efd9d458aa 100644 --- a/drivers/of/Makefile +++ b/drivers/of/Makefile @@ -10,7 +10,6 @@ obj-$(CONFIG_OF_UNITTEST) += 
unittest.o obj-$(CONFIG_OF_MDIO) += of_mdio.o obj-$(CONFIG_OF_PCI) += of_pci.o obj-$(CONFIG_OF_PCI_IRQ) += of_pci_irq.o -obj-$(CONFIG_OF_MTD) += of_mtd.o obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o obj-$(CONFIG_OF_RESOLVE) += resolver.o obj-$(CONFIG_OF_OVERLAY) += overlay.o diff --git a/drivers/of/of_mtd.c b/drivers/of/of_mtd.c deleted file mode 100644 index b7361ed70537..000000000000 --- a/drivers/of/of_mtd.c +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Copyright 2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com> - * - * OF helpers for mtd. - * - * This file is released under the GPLv2 - * - */ -#include <linux/kernel.h> -#include <linux/of_mtd.h> -#include <linux/mtd/nand.h> -#include <linux/export.h> - -/** - * It maps 'enum nand_ecc_modes_t' found in include/linux/mtd/nand.h - * into the device tree binding of 'nand-ecc', so that MTD - * device driver can get nand ecc from device tree. - */ -static const char *nand_ecc_modes[] = { - [NAND_ECC_NONE] = "none", - [NAND_ECC_SOFT] = "soft", - [NAND_ECC_HW] = "hw", - [NAND_ECC_HW_SYNDROME] = "hw_syndrome", - [NAND_ECC_HW_OOB_FIRST] = "hw_oob_first", - [NAND_ECC_SOFT_BCH] = "soft_bch", -}; - -/** - * of_get_nand_ecc_mode - Get nand ecc mode for given device_node - * @np: Pointer to the given device_node - * - * The function gets ecc mode string from property 'nand-ecc-mode', - * and return its index in nand_ecc_modes table, or errno in error case. - */ -int of_get_nand_ecc_mode(struct device_node *np) -{ - const char *pm; - int err, i; - - err = of_property_read_string(np, "nand-ecc-mode", &pm); - if (err < 0) - return err; - - for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++) - if (!strcasecmp(pm, nand_ecc_modes[i])) - return i; - - return -ENODEV; -} -EXPORT_SYMBOL_GPL(of_get_nand_ecc_mode); - -/** - * of_get_nand_ecc_step_size - Get ECC step size associated to - * the required ECC strength (see below). - * @np: Pointer to the given device_node - * - * return the ECC step size, or errno in error case. - */ -int of_get_nand_ecc_step_size(struct device_node *np) -{ - int ret; - u32 val; - - ret = of_property_read_u32(np, "nand-ecc-step-size", &val); - return ret ? ret : val; -} -EXPORT_SYMBOL_GPL(of_get_nand_ecc_step_size); - -/** - * of_get_nand_ecc_strength - Get required ECC strength over the - * correspnding step size as defined by 'nand-ecc-size' - * @np: Pointer to the given device_node - * - * return the ECC strength, or errno in error case. - */ -int of_get_nand_ecc_strength(struct device_node *np) -{ - int ret; - u32 val; - - ret = of_property_read_u32(np, "nand-ecc-strength", &val); - return ret ? ret : val; -} -EXPORT_SYMBOL_GPL(of_get_nand_ecc_strength); - -/** - * of_get_nand_bus_width - Get nand bus witdh for given device_node - * @np: Pointer to the given device_node - * - * return bus width option, or errno in error case. 
- */ -int of_get_nand_bus_width(struct device_node *np) -{ - u32 val; - - if (of_property_read_u32(np, "nand-bus-width", &val)) - return 8; - - switch(val) { - case 8: - case 16: - return val; - default: - return -EIO; - } -} -EXPORT_SYMBOL_GPL(of_get_nand_bus_width); - -/** - * of_get_nand_on_flash_bbt - Get nand on flash bbt for given device_node - * @np: Pointer to the given device_node - * - * return true if present false other wise - */ -bool of_get_nand_on_flash_bbt(struct device_node *np) -{ - return of_property_read_bool(np, "nand-on-flash-bbt"); -} -EXPORT_SYMBOL_GPL(of_get_nand_on_flash_bbt); diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index f2d01d4d9364..1b8304e1efaa 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -950,17 +950,14 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu) /* For SPIs, we need to track the affinity per IRQ */ if (using_spi) { - if (i >= pdev->num_resources) { - of_node_put(dn); + if (i >= pdev->num_resources) break; - } irqs[i] = cpu; } /* Keep track of the CPUs containing this PMU type */ cpumask_set_cpu(cpu, &pmu->supported_cpus); - of_node_put(dn); i++; } while (1); @@ -995,9 +992,6 @@ int arm_pmu_device_probe(struct platform_device *pdev, armpmu_init(pmu); - if (!__oprofile_cpu_pmu) - __oprofile_cpu_pmu = pmu; - pmu->plat_device = pdev; if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) { @@ -1033,6 +1027,9 @@ int arm_pmu_device_probe(struct platform_device *pdev, if (ret) goto out_destroy; + if (!__oprofile_cpu_pmu) + __oprofile_cpu_pmu = pmu; + pr_info("enabled with %s PMU driver, %d counters available\n", pmu->name, pmu->num_events); @@ -1043,6 +1040,7 @@ out_destroy: out_free: pr_info("%s: failed to register PMU devices!\n", of_node_full_name(node)); + kfree(pmu->irq_affinity); kfree(pmu); return ret; } diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c index 55182fc58c6a..677a811b3a6f 100644 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c @@ -153,8 +153,10 @@ struct byt_community { .name = (n), \ .pins = (p), \ .npins = ARRAY_SIZE((p)), \ - .has_simple_funcs = 1, \ - .simple_funcs = (f), \ + .has_simple_funcs = 1, \ + { \ + .simple_funcs = (f), \ + }, \ .nfuncs = ARRAY_SIZE((f)), \ } #define PIN_GROUP_MIXED(n, p, f) \ @@ -163,7 +165,9 @@ struct byt_community { .pins = (p), \ .npins = ARRAY_SIZE((p)), \ .has_simple_funcs = 0, \ - .mixed_funcs = (f), \ + { \ + .mixed_funcs = (f), \ + }, \ .nfuncs = ARRAY_SIZE((f)), \ } diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c index 207b13b618cf..a607655d7830 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c @@ -1256,9 +1256,10 @@ static void mtk_eint_irq_handler(struct irq_desc *desc) const struct mtk_desc_pin *pin; chained_irq_enter(chip, desc); - for (eint_num = 0; eint_num < pctl->devdata->ap_num; eint_num += 32) { + for (eint_num = 0; + eint_num < pctl->devdata->ap_num; + eint_num += 32, reg += 4) { status = readl(reg); - reg += 4; while (status) { offset = __ffs(status); index = eint_num + offset; diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c index ccbfc325c778..38faceff2f08 100644 --- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c +++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c @@ -854,7 +854,7 @@ static int nmk_gpio_get_dir(struct gpio_chip *chip, unsigned offset) clk_enable(nmk_chip->clk); - 
dir = !!(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset)); + dir = !(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset)); clk_disable(nmk_chip->clk); diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig index d03df4a60d05..76bdae1a93bb 100644 --- a/drivers/platform/chrome/Kconfig +++ b/drivers/platform/chrome/Kconfig @@ -64,4 +64,14 @@ config CROS_EC_PROTO help ChromeOS EC communication protocol helpers. +config CROS_KBD_LED_BACKLIGHT + tristate "Backlight LED support for Chrome OS keyboards" + depends on LEDS_CLASS && ACPI + help + This option enables support for the keyboard backlight LEDs on + select Chrome OS systems. + + To compile this driver as a module, choose M here: the + module will be called cros_kbd_led_backlight. + endif # CHROMEOS_PLATFORMS diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile index bc498bda8211..4f3462783a3c 100644 --- a/drivers/platform/chrome/Makefile +++ b/drivers/platform/chrome/Makefile @@ -1,8 +1,9 @@ -obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o -obj-$(CONFIG_CHROMEOS_PSTORE) += chromeos_pstore.o -cros_ec_devs-objs := cros_ec_dev.o cros_ec_sysfs.o \ - cros_ec_lightbar.o cros_ec_vbc.o -obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_devs.o -obj-$(CONFIG_CROS_EC_LPC) += cros_ec_lpc.o -obj-$(CONFIG_CROS_EC_PROTO) += cros_ec_proto.o +obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o +obj-$(CONFIG_CHROMEOS_PSTORE) += chromeos_pstore.o +cros_ec_devs-objs := cros_ec_dev.o cros_ec_sysfs.o \ + cros_ec_lightbar.o cros_ec_vbc.o +obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_devs.o +obj-$(CONFIG_CROS_EC_LPC) += cros_ec_lpc.o +obj-$(CONFIG_CROS_EC_PROTO) += cros_ec_proto.o +obj-$(CONFIG_CROS_KBD_LED_BACKLIGHT) += cros_kbd_led_backlight.o diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c index 2b441e9ae593..e8a44a9bc916 100644 --- a/drivers/platform/chrome/chromeos_laptop.c +++ b/drivers/platform/chrome/chromeos_laptop.c @@ -34,6 +34,7 @@ #define ATMEL_TS_I2C_ADDR 0x4a #define ATMEL_TS_I2C_BL_ADDR 0x26 #define CYAPA_TP_I2C_ADDR 0x67 +#define ELAN_TP_I2C_ADDR 0x15 #define ISL_ALS_I2C_ADDR 0x44 #define TAOS_ALS_I2C_ADDR 0x29 @@ -73,7 +74,7 @@ struct i2c_peripheral { int tries; }; -#define MAX_I2C_PERIPHERALS 3 +#define MAX_I2C_PERIPHERALS 4 struct chromeos_laptop { struct i2c_peripheral i2c_peripherals[MAX_I2C_PERIPHERALS]; @@ -86,6 +87,11 @@ static struct i2c_board_info cyapa_device = { .flags = I2C_CLIENT_WAKE, }; +static struct i2c_board_info elantech_device = { + I2C_BOARD_INFO("elan_i2c", ELAN_TP_I2C_ADDR), + .flags = I2C_CLIENT_WAKE, +}; + static struct i2c_board_info isl_als_device = { I2C_BOARD_INFO("isl29018", ISL_ALS_I2C_ADDR), }; @@ -306,6 +312,16 @@ static int setup_atmel_224s_tp(enum i2c_adapter_type type) return (!tp) ? -EAGAIN : 0; } +static int setup_elantech_tp(enum i2c_adapter_type type) +{ + if (tp) + return 0; + + /* add elantech touchpad */ + tp = add_i2c_device("trackpad", type, &elantech_device); + return (!tp) ? -EAGAIN : 0; +} + static int setup_atmel_1664s_ts(enum i2c_adapter_type type) { const unsigned short addr_list[] = { ATMEL_TS_I2C_BL_ADDR, @@ -445,6 +461,8 @@ static struct chromeos_laptop dell_chromebook_11 = { .i2c_peripherals = { /* Touchpad. */ { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, + /* Elan Touchpad option. */ + { .add = setup_elantech_tp, I2C_ADAPTER_DESIGNWARE_0 }, }, }; @@ -475,6 +493,8 @@ static struct chromeos_laptop acer_c720 = { { .add = setup_atmel_1664s_ts, I2C_ADAPTER_DESIGNWARE_1 }, /* Touchpad. 
*/ { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, + /* Elan Touchpad option. */ + { .add = setup_elantech_tp, I2C_ADAPTER_DESIGNWARE_0 }, /* Light Sensor. */ { .add = setup_isl29018_als, I2C_ADAPTER_DESIGNWARE_1 }, }, diff --git a/drivers/platform/chrome/chromeos_pstore.c b/drivers/platform/chrome/chromeos_pstore.c index 34749200e4ab..308a853ac4f1 100644 --- a/drivers/platform/chrome/chromeos_pstore.c +++ b/drivers/platform/chrome/chromeos_pstore.c @@ -8,6 +8,7 @@ * the Free Software Foundation, version 2 of the License. */ +#include <linux/acpi.h> #include <linux/dmi.h> #include <linux/module.h> #include <linux/platform_device.h> @@ -58,7 +59,7 @@ MODULE_DEVICE_TABLE(dmi, chromeos_pstore_dmi_table); static struct ramoops_platform_data chromeos_ramoops_data = { .mem_size = 0x100000, .mem_address = 0xf00000, - .record_size = 0x20000, + .record_size = 0x40000, .console_size = 0x20000, .ftrace_size = 0x20000, .dump_oops = 1, @@ -71,9 +72,59 @@ static struct platform_device chromeos_ramoops = { }, }; +#ifdef CONFIG_ACPI +static const struct acpi_device_id cros_ramoops_acpi_match[] = { + { "GOOG9999", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, cros_ramoops_acpi_match); + +static struct platform_driver chromeos_ramoops_acpi = { + .driver = { + .name = "chromeos_pstore", + .acpi_match_table = ACPI_PTR(cros_ramoops_acpi_match), + }, +}; + +static int __init chromeos_probe_acpi(struct platform_device *pdev) +{ + struct resource *res; + resource_size_t len; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENOMEM; + + len = resource_size(res); + if (!res->start || !len) + return -ENOMEM; + + pr_info("chromeos ramoops using acpi device.\n"); + + chromeos_ramoops_data.mem_size = len; + chromeos_ramoops_data.mem_address = res->start; + + return 0; +} + +static bool __init chromeos_check_acpi(void) +{ + if (!platform_driver_probe(&chromeos_ramoops_acpi, chromeos_probe_acpi)) + return true; + return false; +} +#else +static inline bool chromeos_check_acpi(void) { return false; } +#endif + static int __init chromeos_pstore_init(void) { - if (dmi_check_system(chromeos_pstore_dmi_table)) + bool acpi_dev_found; + + /* First check ACPI for non-hardcoded values from firmware. 
*/ + acpi_dev_found = chromeos_check_acpi(); + + if (acpi_dev_found || dmi_check_system(chromeos_pstore_dmi_table)) return platform_device_register(&chromeos_ramoops); return -ENODEV; diff --git a/drivers/platform/chrome/cros_ec_dev.c b/drivers/platform/chrome/cros_ec_dev.c index d45cd254ed1c..6d8ee3b15872 100644 --- a/drivers/platform/chrome/cros_ec_dev.c +++ b/drivers/platform/chrome/cros_ec_dev.c @@ -137,6 +137,10 @@ static long ec_device_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg) if (copy_from_user(&u_cmd, arg, sizeof(u_cmd))) return -EFAULT; + if ((u_cmd.outsize > EC_MAX_MSG_BYTES) || + (u_cmd.insize > EC_MAX_MSG_BYTES)) + return -EINVAL; + s_cmd = kmalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize), GFP_KERNEL); if (!s_cmd) @@ -208,6 +212,9 @@ static const struct file_operations fops = { .release = ec_device_release, .read = ec_device_read, .unlocked_ioctl = ec_device_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = ec_device_ioctl, +#endif }; static void __remove(struct device *dev) diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c index ff7640575c75..8df3d447cacf 100644 --- a/drivers/platform/chrome/cros_ec_lightbar.c +++ b/drivers/platform/chrome/cros_ec_lightbar.c @@ -412,9 +412,13 @@ static umode_t cros_ec_lightbar_attrs_are_visible(struct kobject *kobj, struct device *dev = container_of(kobj, struct device, kobj); struct cros_ec_dev *ec = container_of(dev, struct cros_ec_dev, class_dev); - struct platform_device *pdev = container_of(ec->dev, - struct platform_device, dev); - if (pdev->id != 0) + struct platform_device *pdev = to_platform_device(ec->dev); + struct cros_ec_platform *pdata = pdev->dev.platform_data; + int is_cros_ec; + + is_cros_ec = strcmp(pdata->ec_name, CROS_EC_DEV_NAME); + + if (is_cros_ec != 0) return 0; /* Only instantiate this stuff if the EC has a lightbar */ diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c index 990308ca384f..b6e161f71b26 100644 --- a/drivers/platform/chrome/cros_ec_proto.c +++ b/drivers/platform/chrome/cros_ec_proto.c @@ -298,8 +298,8 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev) ec_dev->max_response = EC_PROTO2_MAX_PARAM_SIZE; ec_dev->max_passthru = 0; ec_dev->pkt_xfer = NULL; - ec_dev->din_size = EC_MSG_BYTES; - ec_dev->dout_size = EC_MSG_BYTES; + ec_dev->din_size = EC_PROTO2_MSG_BYTES; + ec_dev->dout_size = EC_PROTO2_MSG_BYTES; } else { /* * It's possible for a test to occur too early when diff --git a/drivers/platform/chrome/cros_kbd_led_backlight.c b/drivers/platform/chrome/cros_kbd_led_backlight.c new file mode 100644 index 000000000000..ca3e4da852b4 --- /dev/null +++ b/drivers/platform/chrome/cros_kbd_led_backlight.c @@ -0,0 +1,122 @@ +/* + * Keyboard backlight LED driver for Chrome OS. + * + * Copyright (C) 2012 Google, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/acpi.h> +#include <linux/leds.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +/* Keyboard LED ACPI Device must be defined in firmware */ +#define ACPI_KEYBOARD_BACKLIGHT_DEVICE "\\_SB.KBLT" +#define ACPI_KEYBOARD_BACKLIGHT_READ ACPI_KEYBOARD_BACKLIGHT_DEVICE ".KBQC" +#define ACPI_KEYBOARD_BACKLIGHT_WRITE ACPI_KEYBOARD_BACKLIGHT_DEVICE ".KBCM" + +#define ACPI_KEYBOARD_BACKLIGHT_MAX 100 + +static void keyboard_led_set_brightness(struct led_classdev *cdev, + enum led_brightness brightness) +{ + union acpi_object param; + struct acpi_object_list input; + acpi_status status; + + param.type = ACPI_TYPE_INTEGER; + param.integer.value = brightness; + input.count = 1; + input.pointer = &param; + + status = acpi_evaluate_object(NULL, ACPI_KEYBOARD_BACKLIGHT_WRITE, + &input, NULL); + if (ACPI_FAILURE(status)) + dev_err(cdev->dev, "Error setting keyboard LED value: %d\n", + status); +} + +static enum led_brightness +keyboard_led_get_brightness(struct led_classdev *cdev) +{ + unsigned long long brightness; + acpi_status status; + + status = acpi_evaluate_integer(NULL, ACPI_KEYBOARD_BACKLIGHT_READ, + NULL, &brightness); + if (ACPI_FAILURE(status)) { + dev_err(cdev->dev, "Error getting keyboard LED value: %d\n", + status); + return -EIO; + } + + return brightness; +} + +static int keyboard_led_probe(struct platform_device *pdev) +{ + struct led_classdev *cdev; + acpi_handle handle; + acpi_status status; + int error; + + /* Look for the keyboard LED ACPI Device */ + status = acpi_get_handle(ACPI_ROOT_OBJECT, + ACPI_KEYBOARD_BACKLIGHT_DEVICE, + &handle); + if (ACPI_FAILURE(status)) { + dev_err(&pdev->dev, "Unable to find ACPI device %s: %d\n", + ACPI_KEYBOARD_BACKLIGHT_DEVICE, status); + return -ENXIO; + } + + cdev = devm_kzalloc(&pdev->dev, sizeof(*cdev), GFP_KERNEL); + if (!cdev) + return -ENOMEM; + + cdev->name = "chromeos::kbd_backlight"; + cdev->max_brightness = ACPI_KEYBOARD_BACKLIGHT_MAX; + cdev->flags |= LED_CORE_SUSPENDRESUME; + cdev->brightness_set = keyboard_led_set_brightness; + cdev->brightness_get = keyboard_led_get_brightness; + + error = devm_led_classdev_register(&pdev->dev, cdev); + if (error) + return error; + + return 0; +} + +static const struct acpi_device_id keyboard_led_id[] = { + { "GOOG0002", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, keyboard_led_id); + +static struct platform_driver keyboard_led_driver = { + .driver = { + .name = "chromeos-keyboard-leds", + .acpi_match_table = ACPI_PTR(keyboard_led_id), + }, + .probe = keyboard_led_probe, +}; +module_platform_driver(keyboard_led_driver); + +MODULE_AUTHOR("Simon Que <sque@chromium.org>"); +MODULE_DESCRIPTION("ChromeOS Keyboard backlight LED Driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:chromeos-keyboard-leds"); diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index ed2004be13cf..c06bb85c2839 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -846,6 +846,18 @@ config INTEL_IMR If you are running on a Galileo/Quark say Y here. +config INTEL_PMC_CORE + bool "Intel PMC Core driver" + depends on X86 && PCI + ---help--- + The Intel Platform Controller Hub for Intel Core SoCs provides access + to Power Management Controller registers via a PCI interface. This + driver can utilize debugging capabilities and supported features as + exposed by the Power Management Controller.
+ + Supported features: + - SLP_S0_RESIDENCY counter. + config IBM_RTL tristate "Device driver to enable PRTL support" depends on X86 && PCI diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index 448443c3baba..9b11b4073e03 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -69,3 +69,4 @@ obj-$(CONFIG_INTEL_PUNIT_IPC) += intel_punit_ipc.o obj-$(CONFIG_INTEL_TELEMETRY) += intel_telemetry_core.o \ intel_telemetry_pltdrv.o \ intel_telemetry_debugfs.o +obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core.o diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c index f2b5d0a8adf0..15f131146501 100644 --- a/drivers/platform/x86/asus-laptop.c +++ b/drivers/platform/x86/asus-laptop.c @@ -771,12 +771,14 @@ static int asus_read_brightness(struct backlight_device *bd) { struct asus_laptop *asus = bl_get_data(bd); unsigned long long value; - acpi_status rv = AE_OK; + acpi_status rv; rv = acpi_evaluate_integer(asus->handle, METHOD_BRIGHTNESS_GET, NULL, &value); - if (ACPI_FAILURE(rv)) + if (ACPI_FAILURE(rv)) { pr_warn("Error reading brightness\n"); + return 0; + } return value; } @@ -865,7 +867,7 @@ static ssize_t infos_show(struct device *dev, struct device_attribute *attr, int len = 0; unsigned long long temp; char buf[16]; /* enough for all info */ - acpi_status rv = AE_OK; + acpi_status rv; /* * We use the easy way, we don't care of off and count, @@ -946,11 +948,10 @@ static ssize_t sysfs_acpi_set(struct asus_laptop *asus, const char *method) { int rv, value; - int out = 0; rv = parse_arg(buf, count, &value); - if (rv > 0) - out = value ? 1 : 0; + if (rv <= 0) + return rv; if (write_acpi_int(asus->handle, method, value)) return -ENODEV; @@ -1265,7 +1266,7 @@ static DEVICE_ATTR_RO(ls_value); static int asus_gps_status(struct asus_laptop *asus) { unsigned long long status; - acpi_status rv = AE_OK; + acpi_status rv; rv = acpi_evaluate_integer(asus->handle, METHOD_GPS_STATUS, NULL, &status); diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index a96630d52346..a26dca3640ea 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -114,6 +114,7 @@ MODULE_LICENSE("GPL"); #define ASUS_WMI_DEVID_LED6 0x00020016 /* Backlight and Brightness */ +#define ASUS_WMI_DEVID_ALS_ENABLE 0x00050001 /* Ambient Light Sensor */ #define ASUS_WMI_DEVID_BACKLIGHT 0x00050011 #define ASUS_WMI_DEVID_BRIGHTNESS 0x00050012 #define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021 @@ -1730,6 +1731,7 @@ ASUS_WMI_CREATE_DEVICE_ATTR(touchpad, 0644, ASUS_WMI_DEVID_TOUCHPAD); ASUS_WMI_CREATE_DEVICE_ATTR(camera, 0644, ASUS_WMI_DEVID_CAMERA); ASUS_WMI_CREATE_DEVICE_ATTR(cardr, 0644, ASUS_WMI_DEVID_CARDREADER); ASUS_WMI_CREATE_DEVICE_ATTR(lid_resume, 0644, ASUS_WMI_DEVID_LID_RESUME); +ASUS_WMI_CREATE_DEVICE_ATTR(als_enable, 0644, ASUS_WMI_DEVID_ALS_ENABLE); static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -1756,6 +1758,7 @@ static struct attribute *platform_attributes[] = { &dev_attr_cardr.attr, &dev_attr_touchpad.attr, &dev_attr_lid_resume.attr, + &dev_attr_als_enable.attr, NULL }; @@ -1776,6 +1779,8 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj, devid = ASUS_WMI_DEVID_TOUCHPAD; else if (attr == &dev_attr_lid_resume.attr) devid = ASUS_WMI_DEVID_LID_RESUME; + else if (attr == &dev_attr_als_enable.attr) + devid = ASUS_WMI_DEVID_ALS_ENABLE; if (devid != -1) ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0); diff --git 
a/drivers/platform/x86/dell-rbtn.c b/drivers/platform/x86/dell-rbtn.c index b51a2008d782..dcd9f40a4b18 100644 --- a/drivers/platform/x86/dell-rbtn.c +++ b/drivers/platform/x86/dell-rbtn.c @@ -28,6 +28,7 @@ struct rbtn_data { enum rbtn_type type; struct rfkill *rfkill; struct input_dev *input_dev; + bool suspended; }; @@ -235,9 +236,55 @@ static const struct acpi_device_id rbtn_ids[] = { { "", 0 }, }; +#ifdef CONFIG_PM_SLEEP +static void ACPI_SYSTEM_XFACE rbtn_clear_suspended_flag(void *context) +{ + struct rbtn_data *rbtn_data = context; + + rbtn_data->suspended = false; +} + +static int rbtn_suspend(struct device *dev) +{ + struct acpi_device *device = to_acpi_device(dev); + struct rbtn_data *rbtn_data = acpi_driver_data(device); + + rbtn_data->suspended = true; + + return 0; +} + +static int rbtn_resume(struct device *dev) +{ + struct acpi_device *device = to_acpi_device(dev); + struct rbtn_data *rbtn_data = acpi_driver_data(device); + acpi_status status; + + /* + * Upon resume, some BIOSes send an ACPI notification that triggers + * an unwanted input event. In order to ignore it, we use a flag + * that we set at suspend and clear once we have received the extra + * ACPI notification. Since ACPI notifications are delivered + * asynchronously to drivers, we clear the flag from the workqueue + * used to deliver the notifications. This should be enough + * to have the flag cleared only after we received the extra + * notification, if any. + */ + status = acpi_os_execute(OSL_NOTIFY_HANDLER, + rbtn_clear_suspended_flag, rbtn_data); + if (ACPI_FAILURE(status)) + rbtn_clear_suspended_flag(rbtn_data); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(rbtn_pm_ops, rbtn_suspend, rbtn_resume); + static struct acpi_driver rbtn_driver = { .name = "dell-rbtn", .ids = rbtn_ids, + .drv.pm = &rbtn_pm_ops, .ops = { .add = rbtn_add, .remove = rbtn_remove, @@ -399,6 +446,15 @@ static void rbtn_notify(struct acpi_device *device, u32 event) { struct rbtn_data *rbtn_data = device->driver_data; + /* + * Some BIOSes send a notification at resume. + * Ignore it to prevent unwanted input events.
+ */ + if (rbtn_data->suspended) { + dev_dbg(&device->dev, "ACPI notification ignored\n"); + return; + } + if (event != 0x80) { dev_info(&device->dev, "Received unknown event (0x%x)\n", event); diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c index ffc84cc7b1c7..ce41bc34288d 100644 --- a/drivers/platform/x86/fujitsu-laptop.c +++ b/drivers/platform/x86/fujitsu-laptop.c @@ -69,7 +69,7 @@ #include <linux/kfifo.h> #include <linux/platform_device.h> #include <linux/slab.h> -#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) +#if IS_ENABLED(CONFIG_LEDS_CLASS) #include <linux/leds.h> #endif #include <acpi/video.h> @@ -100,13 +100,14 @@ /* FUNC interface - responses */ #define UNSUPPORTED_CMD 0x80000000 -#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) +#if IS_ENABLED(CONFIG_LEDS_CLASS) /* FUNC interface - LED control */ #define FUNC_LED_OFF 0x1 #define FUNC_LED_ON 0x30001 #define KEYBOARD_LAMPS 0x100 #define LOGOLAMP_POWERON 0x2000 #define LOGOLAMP_ALWAYS 0x4000 +#define RADIO_LED_ON 0x20 #endif /* Hotkey details */ @@ -174,13 +175,14 @@ struct fujitsu_hotkey_t { int rfkill_state; int logolamp_registered; int kblamps_registered; + int radio_led_registered; }; static struct fujitsu_hotkey_t *fujitsu_hotkey; static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event); -#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) +#if IS_ENABLED(CONFIG_LEDS_CLASS) static enum led_brightness logolamp_get(struct led_classdev *cdev); static void logolamp_set(struct led_classdev *cdev, enum led_brightness brightness); @@ -200,6 +202,16 @@ static struct led_classdev kblamps_led = { .brightness_get = kblamps_get, .brightness_set = kblamps_set }; + +static enum led_brightness radio_led_get(struct led_classdev *cdev); +static void radio_led_set(struct led_classdev *cdev, + enum led_brightness brightness); + +static struct led_classdev radio_led = { + .name = "fujitsu::radio_led", + .brightness_get = radio_led_get, + .brightness_set = radio_led_set +}; #endif #ifdef CONFIG_FUJITSU_LAPTOP_DEBUG @@ -249,7 +261,7 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2) return value; } -#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) +#if IS_ENABLED(CONFIG_LEDS_CLASS) /* LED class callbacks */ static void logolamp_set(struct led_classdev *cdev, @@ -275,6 +287,15 @@ static void kblamps_set(struct led_classdev *cdev, call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_OFF); } +static void radio_led_set(struct led_classdev *cdev, + enum led_brightness brightness) +{ + if (brightness >= LED_FULL) + call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, RADIO_LED_ON); + else + call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, 0x0); +} + static enum led_brightness logolamp_get(struct led_classdev *cdev) { enum led_brightness brightness = LED_OFF; @@ -299,6 +320,16 @@ static enum led_brightness kblamps_get(struct led_classdev *cdev) return brightness; } + +static enum led_brightness radio_led_get(struct led_classdev *cdev) +{ + enum led_brightness brightness = LED_OFF; + + if (call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0) & RADIO_LED_ON) + brightness = LED_FULL; + + return brightness; +} #endif /* Hardware access for LCD brightness control */ @@ -872,7 +903,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device) /* Suspect this is a keymap of the application panel, print it */ pr_info("BTNI: [0x%x]\n", call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0)); -#if defined(CONFIG_LEDS_CLASS) || 
defined(CONFIG_LEDS_CLASS_MODULE) +#if IS_ENABLED(CONFIG_LEDS_CLASS) if (call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) { result = led_classdev_register(&fujitsu->pf_device->dev, &logolamp_led); @@ -895,6 +926,23 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device) result); } } + + /* + * BTNI bit 24 seems to indicate the presence of a radio toggle + * button in place of a slide switch, and all such machines appear + * to also have an RF LED. Therefore use bit 24 as an indicator + * that an RF LED is present. + */ + if (call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) & BIT(24)) { + result = led_classdev_register(&fujitsu->pf_device->dev, + &radio_led); + if (result == 0) { + fujitsu_hotkey->radio_led_registered = 1; + } else { + pr_err("Could not register LED handler for radio LED, error %i\n", + result); + } + } #endif return result; @@ -915,12 +963,15 @@ static int acpi_fujitsu_hotkey_remove(struct acpi_device *device) struct fujitsu_hotkey_t *fujitsu_hotkey = acpi_driver_data(device); struct input_dev *input = fujitsu_hotkey->input; -#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) +#if IS_ENABLED(CONFIG_LEDS_CLASS) if (fujitsu_hotkey->logolamp_registered) led_classdev_unregister(&logolamp_led); if (fujitsu_hotkey->kblamps_registered) led_classdev_unregister(&kblamps_led); + + if (fujitsu_hotkey->radio_led_registered) + led_classdev_unregister(&radio_led); #endif input_unregister_device(input); diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index be3bc2f4edd4..4a23fbc66b71 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -48,7 +48,10 @@ #define CFG_CAMERA_BIT (19) #if IS_ENABLED(CONFIG_ACPI_WMI) -static const char ideapad_wmi_fnesc_event[] = "26CAB2E5-5CF1-46AE-AAC3-4A12B6BA50E6"; +static const char *const ideapad_wmi_fnesc_events[] = { + "26CAB2E5-5CF1-46AE-AAC3-4A12B6BA50E6", /* Yoga 3 */ + "56322276-8493-4CE8-A783-98C991274F5E", /* Yoga 700 */ +}; #endif enum { @@ -93,6 +96,7 @@ struct ideapad_private { struct dentry *debug; unsigned long cfg; bool has_hw_rfkill_switch; + const char *fnesc_guid; }; static bool no_bt_rfkill; @@ -989,8 +993,16 @@ static int ideapad_acpi_add(struct platform_device *pdev) ACPI_DEVICE_NOTIFY, ideapad_acpi_notify, priv); if (ret) goto notification_failed; + #if IS_ENABLED(CONFIG_ACPI_WMI) - ret = wmi_install_notify_handler(ideapad_wmi_fnesc_event, ideapad_wmi_notify, priv); + for (i = 0; i < ARRAY_SIZE(ideapad_wmi_fnesc_events); i++) { + ret = wmi_install_notify_handler(ideapad_wmi_fnesc_events[i], + ideapad_wmi_notify, priv); + if (ret == AE_OK) { + priv->fnesc_guid = ideapad_wmi_fnesc_events[i]; + break; + } + } if (ret != AE_OK && ret != AE_NOT_EXIST) goto notification_failed_wmi; #endif @@ -1020,7 +1032,8 @@ static int ideapad_acpi_remove(struct platform_device *pdev) int i; #if IS_ENABLED(CONFIG_ACPI_WMI) - wmi_remove_notify_handler(ideapad_wmi_fnesc_event); + if (priv->fnesc_guid) + wmi_remove_notify_handler(priv->fnesc_guid); #endif acpi_remove_notify_handler(priv->adev->handle, ACPI_DEVICE_NOTIFY, ideapad_acpi_notify); diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c index 0a919d81662c..cbe01021c939 100644 --- a/drivers/platform/x86/intel_menlow.c +++ b/drivers/platform/x86/intel_menlow.c @@ -306,33 +306,32 @@ static int sensor_set_auxtrip(acpi_handle handle, int index, int value) #define to_intel_menlow_attr(_attr) \ container_of(_attr, struct intel_menlow_attribute, attr) 
-static ssize_t aux0_show(struct device *dev, - struct device_attribute *dev_attr, char *buf) +static ssize_t aux_show(struct device *dev, struct device_attribute *dev_attr, + char *buf, int idx) { struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr); unsigned long long value; int result; - result = sensor_get_auxtrip(attr->handle, 0, &value); + result = sensor_get_auxtrip(attr->handle, idx, &value); return result ? result : sprintf(buf, "%lu", DECI_KELVIN_TO_CELSIUS(value)); } -static ssize_t aux1_show(struct device *dev, +static ssize_t aux0_show(struct device *dev, struct device_attribute *dev_attr, char *buf) { - struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr); - unsigned long long value; - int result; - - result = sensor_get_auxtrip(attr->handle, 1, &value); + return aux_show(dev, dev_attr, buf, 0); +} - return result ? result : sprintf(buf, "%lu", DECI_KELVIN_TO_CELSIUS(value)); +static ssize_t aux1_show(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + return aux_show(dev, dev_attr, buf, 1); } -static ssize_t aux0_store(struct device *dev, - struct device_attribute *dev_attr, - const char *buf, size_t count) +static ssize_t aux_store(struct device *dev, struct device_attribute *dev_attr, + const char *buf, size_t count, int idx) { struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr); int value; @@ -345,27 +344,23 @@ static ssize_t aux0_store(struct device *dev, if (value < 0) return -EINVAL; - result = sensor_set_auxtrip(attr->handle, 0, CELSIUS_TO_DECI_KELVIN(value)); + result = sensor_set_auxtrip(attr->handle, idx, + CELSIUS_TO_DECI_KELVIN(value)); return result ? result : count; } -static ssize_t aux1_store(struct device *dev, +static ssize_t aux0_store(struct device *dev, struct device_attribute *dev_attr, const char *buf, size_t count) { - struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr); - int value; - int result; - - /*Sanity check; should be a positive integer */ - if (!sscanf(buf, "%d", &value)) - return -EINVAL; - - if (value < 0) - return -EINVAL; + return aux_store(dev, dev_attr, buf, count, 0); +} - result = sensor_set_auxtrip(attr->handle, 1, CELSIUS_TO_DECI_KELVIN(value)); - return result ? result : count; +static ssize_t aux1_store(struct device *dev, + struct device_attribute *dev_attr, + const char *buf, size_t count) +{ + return aux_store(dev, dev_attr, buf, count, 1); } /* BIOS can enable/disable the thermal user application in dabney platform */ diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c new file mode 100644 index 000000000000..2776bec89c88 --- /dev/null +++ b/drivers/platform/x86/intel_pmc_core.c @@ -0,0 +1,200 @@ +/* + * Intel Core SoC Power Management Controller Driver + * + * Copyright (c) 2016, Intel Corporation. + * All Rights Reserved. + * + * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com> + * Vishwanath Somayaji <vishwanath.somayaji@intel.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + */ + +#include <linux/debugfs.h> +#include <linux/device.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/pci.h> +#include <linux/seq_file.h> + +#include <asm/cpu_device_id.h> +#include <asm/pmc_core.h> + +#include "intel_pmc_core.h" + +static struct pmc_dev pmc; + +static const struct pci_device_id pmc_pci_ids[] = { + { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID), (kernel_ulong_t)NULL }, + { 0, }, +}; + +static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset) +{ + return readl(pmcdev->regbase + reg_offset); +} + +static inline u32 pmc_core_adjust_slp_s0_step(u32 value) +{ + return value * SPT_PMC_SLP_S0_RES_COUNTER_STEP; +} + +/** + * intel_pmc_slp_s0_counter_read() - Read SLP_S0 residency. + * @data: Out param that contains current SLP_S0 count. + * + * This API currently supports Intel Skylake SoC and Sunrise + * Point Platform Controller Hub. Future platform support + * should be added for platforms that support low power modes + * beyond Package C10 state. + * + * The SLP_S0_RESIDENCY counter counts in 100 us granularity per + * step; hence the function populates the multiplied value in the + * out parameter @data. + * + * Return: an error code or 0 on success. + */ +int intel_pmc_slp_s0_counter_read(u32 *data) +{ + struct pmc_dev *pmcdev = &pmc; + u32 value; + + if (!pmcdev->has_slp_s0_res) + return -EACCES; + + value = pmc_core_reg_read(pmcdev, SPT_PMC_SLP_S0_RES_COUNTER_OFFSET); + *data = pmc_core_adjust_slp_s0_step(value); + + return 0; +} +EXPORT_SYMBOL_GPL(intel_pmc_slp_s0_counter_read); + +#if IS_ENABLED(CONFIG_DEBUG_FS) +static int pmc_core_dev_state_show(struct seq_file *s, void *unused) +{ + struct pmc_dev *pmcdev = s->private; + u32 counter_val; + + counter_val = pmc_core_reg_read(pmcdev, + SPT_PMC_SLP_S0_RES_COUNTER_OFFSET); + seq_printf(s, "%u\n", pmc_core_adjust_slp_s0_step(counter_val)); + + return 0; +} + +static int pmc_core_dev_state_open(struct inode *inode, struct file *file) +{ + return single_open(file, pmc_core_dev_state_show, inode->i_private); +} + +static const struct file_operations pmc_core_dev_state_ops = { + .open = pmc_core_dev_state_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev) +{ + debugfs_remove_recursive(pmcdev->dbgfs_dir); +} + +static int pmc_core_dbgfs_register(struct pmc_dev *pmcdev) +{ + struct dentry *dir, *file; + + dir = debugfs_create_dir("pmc_core", NULL); + if (!dir) + return -ENOMEM; + + pmcdev->dbgfs_dir = dir; + file = debugfs_create_file("slp_s0_residency_usec", S_IFREG | S_IRUGO, + dir, pmcdev, &pmc_core_dev_state_ops); + + if (!file) { + pmc_core_dbgfs_unregister(pmcdev); + return -ENODEV; + } + + return 0; +} +#else +static inline int pmc_core_dbgfs_register(struct pmc_dev *pmcdev) +{ + return 0; +} + +static inline void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev) +{ +} +#endif /* CONFIG_DEBUG_FS */ + +static const struct x86_cpu_id intel_pmc_core_ids[] = { + { X86_VENDOR_INTEL, 6, 0x4e, X86_FEATURE_MWAIT, + (kernel_ulong_t)NULL}, /* Skylake CPUID Signature */ + { X86_VENDOR_INTEL, 6, 0x5e, X86_FEATURE_MWAIT, + (kernel_ulong_t)NULL}, /* Skylake CPUID Signature */ + {} +}; + +static int pmc_core_probe(struct pci_dev *dev, const struct pci_device_id *id) +{ + struct device *ptr_dev = &dev->dev; + struct pmc_dev *pmcdev = &pmc; + const struct x86_cpu_id *cpu_id; + int err; + + cpu_id = x86_match_cpu(intel_pmc_core_ids); + if (!cpu_id) { + dev_dbg(&dev->dev, "PMC Core: cpuid mismatch.\n"); + return 
-EINVAL; + } + + err = pcim_enable_device(dev); + if (err < 0) { + dev_dbg(&dev->dev, "PMC Core: failed to enable Power Management Controller.\n"); + return err; + } + + err = pci_read_config_dword(dev, + SPT_PMC_BASE_ADDR_OFFSET, + &pmcdev->base_addr); + if (err < 0) { + dev_dbg(&dev->dev, "PMC Core: failed to read PCI config space.\n"); + return err; + } + dev_dbg(&dev->dev, "PMC Core: PWRMBASE is %#x\n", pmcdev->base_addr); + + pmcdev->regbase = devm_ioremap_nocache(ptr_dev, + pmcdev->base_addr, + SPT_PMC_MMIO_REG_LEN); + if (!pmcdev->regbase) { + dev_dbg(&dev->dev, "PMC Core: ioremap failed.\n"); + return -ENOMEM; + } + + err = pmc_core_dbgfs_register(pmcdev); + if (err < 0) { + dev_err(&dev->dev, "PMC Core: debugfs register failed.\n"); + return err; + } + + pmc.has_slp_s0_res = true; + return 0; +} + +static struct pci_driver intel_pmc_core_driver = { + .name = "intel_pmc_core", + .id_table = pmc_pci_ids, + .probe = pmc_core_probe, +}; + +builtin_pci_driver(intel_pmc_core_driver); diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h new file mode 100644 index 000000000000..a9dadaf787c1 --- /dev/null +++ b/drivers/platform/x86/intel_pmc_core.h @@ -0,0 +1,51 @@ +/* + * Intel Core SoC Power Management Controller Header File + * + * Copyright (c) 2016, Intel Corporation. + * All Rights Reserved. + * + * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com> + * Vishwanath Somayaji <vishwanath.somayaji@intel.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef PMC_CORE_H +#define PMC_CORE_H + +/* Sunrise Point Power Management Controller PCI Device ID */ +#define SPT_PMC_PCI_DEVICE_ID 0x9d21 +#define SPT_PMC_BASE_ADDR_OFFSET 0x48 +#define SPT_PMC_SLP_S0_RES_COUNTER_OFFSET 0x13c +#define SPT_PMC_MMIO_REG_LEN 0x100 +#define SPT_PMC_SLP_S0_RES_COUNTER_STEP 0x64 + +/** + * struct pmc_dev - pmc device structure + * @base_addr: contains pmc base address + * @regbase: pointer to io-remapped memory location + * @dbgfs_dir: path to debugfs interface + * @has_slp_s0_res: flag to indicate whether + * the feature is available + * on a particular platform or not. + * + * pmc_dev contains info about power management controller device. 
+ */ +struct pmc_dev { + u32 base_addr; + void __iomem *regbase; +#if IS_ENABLED(CONFIG_DEBUG_FS) + struct dentry *dbgfs_dir; +#endif /* CONFIG_DEBUG_FS */ + bool has_slp_s0_res; +}; + +#endif /* PMC_CORE_H */ diff --git a/drivers/platform/x86/intel_telemetry_core.c b/drivers/platform/x86/intel_telemetry_core.c index a695a436a1c3..0d4c3808a6d8 100644 --- a/drivers/platform/x86/intel_telemetry_core.c +++ b/drivers/platform/x86/intel_telemetry_core.c @@ -25,7 +25,7 @@ struct telemetry_core_config { struct telemetry_plt_config *plt_config; - struct telemetry_core_ops *telem_ops; + const struct telemetry_core_ops *telem_ops; }; static struct telemetry_core_config telm_core_conf; @@ -95,7 +95,7 @@ static int telemetry_def_reset_events(void) return 0; } -static struct telemetry_core_ops telm_defpltops = { +static const struct telemetry_core_ops telm_defpltops = { .set_sampling_period = telemetry_def_set_sampling_period, .get_sampling_period = telemetry_def_get_sampling_period, .get_trace_verbosity = telemetry_def_get_trace_verbosity, @@ -332,7 +332,7 @@ EXPORT_SYMBOL_GPL(telemetry_set_trace_verbosity); * * Return: 0 success, < 0 for failure */ -int telemetry_set_pltdata(struct telemetry_core_ops *ops, +int telemetry_set_pltdata(const struct telemetry_core_ops *ops, struct telemetry_plt_config *pltconfig) { if (ops) diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c index 781bd10ca7ac..09c84a2b1c2c 100644 --- a/drivers/platform/x86/intel_telemetry_pltdrv.c +++ b/drivers/platform/x86/intel_telemetry_pltdrv.c @@ -1081,7 +1081,7 @@ out: return ret; } -static struct telemetry_core_ops telm_pltops = { +static const struct telemetry_core_ops telm_pltops = { .get_trace_verbosity = telemetry_plt_get_trace_verbosity, .set_trace_verbosity = telemetry_plt_set_trace_verbosity, .set_sampling_period = telemetry_plt_set_sampling_period, diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index e9caa347a9bf..1dba3598cfcb 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c @@ -1446,6 +1446,9 @@ static void sony_nc_function_cleanup(struct platform_device *pd) { unsigned int i, result, bitmask, handle; + if (!handles) + return; + /* get enabled events and disable them */ sony_nc_int_call(sony_nc_acpi_handle, "SN01", NULL, &bitmask); sony_nc_int_call(sony_nc_acpi_handle, "SN03", &bitmask, &result); diff --git a/drivers/platform/x86/surfacepro3_button.c b/drivers/platform/x86/surfacepro3_button.c index 700e0fa0eec2..6505c97705e1 100644 --- a/drivers/platform/x86/surfacepro3_button.c +++ b/drivers/platform/x86/surfacepro3_button.c @@ -24,6 +24,8 @@ #define SURFACE_BUTTON_OBJ_NAME "VGBI" #define SURFACE_BUTTON_DEVICE_NAME "Surface Pro 3/4 Buttons" +#define SURFACE_BUTTON_NOTIFY_TABLET_MODE 0xc8 + #define SURFACE_BUTTON_NOTIFY_PRESS_POWER 0xc6 #define SURFACE_BUTTON_NOTIFY_RELEASE_POWER 0xc7 @@ -33,7 +35,7 @@ #define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_UP 0xc0 #define SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_UP 0xc1 -#define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_DOWN 0xc2 +#define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_DOWN 0xc2 #define SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN 0xc3 ACPI_MODULE_NAME("surface pro 3 button"); @@ -105,9 +107,12 @@ static void surface_button_notify(struct acpi_device *device, u32 event) case SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN: key_code = KEY_VOLUMEDOWN; break; + case SURFACE_BUTTON_NOTIFY_TABLET_MODE: + dev_warn_once(&device->dev, "Tablet mode is not supported\n"); + 
break; default: dev_info_ratelimited(&device->dev, - "Unsupported event [0x%x]\n", event); + "Unsupported event [0x%x]\n", event); break; } input = button->input; diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 9255ff3ee81a..c3bfa1fe95bf 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -5001,6 +5001,8 @@ static int kbdlight_set_level(int level) return 0; } +static int kbdlight_set_level_and_update(int level); + static int kbdlight_get_level(void) { int status = 0; @@ -5068,7 +5070,7 @@ static void kbdlight_set_worker(struct work_struct *work) container_of(work, struct tpacpi_led_classdev, work); if (likely(tpacpi_lifecycle == TPACPI_LIFE_RUNNING)) - kbdlight_set_level(data->new_state); + kbdlight_set_level_and_update(data->new_state); } static void kbdlight_sysfs_set(struct led_classdev *led_cdev, @@ -5099,7 +5101,6 @@ static struct tpacpi_led_classdev tpacpi_led_kbdlight = { .max_brightness = 2, .brightness_set = &kbdlight_sysfs_set, .brightness_get = &kbdlight_sysfs_get, - .flags = LED_CORE_SUSPENDRESUME, } }; @@ -5137,6 +5138,20 @@ static void kbdlight_exit(void) flush_workqueue(tpacpi_wq); } +static int kbdlight_set_level_and_update(int level) +{ + int ret; + struct led_classdev *led_cdev; + + ret = kbdlight_set_level(level); + led_cdev = &tpacpi_led_kbdlight.led_classdev; + + if (ret == 0 && !(led_cdev->flags & LED_SUSPENDED)) + led_cdev->brightness = level; + + return ret; +} + static int kbdlight_read(struct seq_file *m) { int level; @@ -5177,13 +5192,35 @@ static int kbdlight_write(char *buf) if (level == -1) return -EINVAL; - return kbdlight_set_level(level); + return kbdlight_set_level_and_update(level); +} + +static void kbdlight_suspend(void) +{ + struct led_classdev *led_cdev; + + if (!tp_features.kbdlight) + return; + + led_cdev = &tpacpi_led_kbdlight.led_classdev; + led_update_brightness(led_cdev); + led_classdev_suspend(led_cdev); +} + +static void kbdlight_resume(void) +{ + if (!tp_features.kbdlight) + return; + + led_classdev_resume(&tpacpi_led_kbdlight.led_classdev); } static struct ibm_struct kbdlight_driver_data = { .name = "kbdlight", .read = kbdlight_read, .write = kbdlight_write, + .suspend = kbdlight_suspend, + .resume = kbdlight_resume, .exit = kbdlight_exit, }; diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c index 579fd65299a0..d637c933c8a9 100644 --- a/drivers/ptp/ptp_chardev.c +++ b/drivers/ptp/ptp_chardev.c @@ -208,14 +208,10 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) break; case PTP_SYS_OFFSET: - sysoff = kmalloc(sizeof(*sysoff), GFP_KERNEL); - if (!sysoff) { - err = -ENOMEM; - break; - } - if (copy_from_user(sysoff, (void __user *)arg, - sizeof(*sysoff))) { - err = -EFAULT; + sysoff = memdup_user((void __user *)arg, sizeof(*sysoff)); + if (IS_ERR(sysoff)) { + err = PTR_ERR(sysoff); + sysoff = NULL; break; } if (sysoff->n_samples > PTP_MAX_SAMPLES) { diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index 680fbc795a0a..dba3843c53b8 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -75,6 +75,7 @@ static void free_pwms(struct pwm_chip *chip) for (i = 0; i < chip->npwm; i++) { struct pwm_device *pwm = &chip->pwms[i]; + radix_tree_delete(&pwm_tree, pwm->pwm); } @@ -128,13 +129,6 @@ static int pwm_device_request(struct pwm_device *pwm, const char *label) set_bit(PWMF_REQUESTED, &pwm->flags); pwm->label = label; - /* - * FIXME: This should be removed once all PWM users properly make use - * of struct 
pwm_args to initialize the PWM device. As long as this is - * here, the PWM state and hardware state can get out of sync. - */ - pwm_apply_args(pwm); - return 0; } @@ -233,6 +227,19 @@ void *pwm_get_chip_data(struct pwm_device *pwm) } EXPORT_SYMBOL_GPL(pwm_get_chip_data); +static bool pwm_ops_check(const struct pwm_ops *ops) +{ + /* driver supports legacy, non-atomic operation */ + if (ops->config && ops->enable && ops->disable) + return true; + + /* driver supports atomic operation */ + if (ops->apply) + return true; + + return false; +} + /** * pwmchip_add_with_polarity() - register a new PWM chip * @chip: the PWM chip to add @@ -251,8 +258,10 @@ int pwmchip_add_with_polarity(struct pwm_chip *chip, unsigned int i; int ret; - if (!chip || !chip->dev || !chip->ops || !chip->ops->config || - !chip->ops->enable || !chip->ops->disable || !chip->npwm) + if (!chip || !chip->dev || !chip->ops || !chip->npwm) + return -EINVAL; + + if (!pwm_ops_check(chip->ops)) return -EINVAL; mutex_lock(&pwm_lock); @@ -261,7 +270,7 @@ int pwmchip_add_with_polarity(struct pwm_chip *chip, if (ret < 0) goto out; - chip->pwms = kzalloc(chip->npwm * sizeof(*pwm), GFP_KERNEL); + chip->pwms = kcalloc(chip->npwm, sizeof(*pwm), GFP_KERNEL); if (!chip->pwms) { ret = -ENOMEM; goto out; @@ -275,8 +284,10 @@ int pwmchip_add_with_polarity(struct pwm_chip *chip, pwm->chip = chip; pwm->pwm = chip->base + i; pwm->hwpwm = i; - pwm->polarity = polarity; - mutex_init(&pwm->lock); + pwm->state.polarity = polarity; + + if (chip->ops->get_state) + chip->ops->get_state(chip, pwm, &pwm->state); radix_tree_insert(&pwm_tree, pwm->pwm, pwm); } @@ -436,107 +447,138 @@ void pwm_free(struct pwm_device *pwm) EXPORT_SYMBOL_GPL(pwm_free); /** - * pwm_config() - change a PWM device configuration + * pwm_apply_state() - atomically apply a new state to a PWM device * @pwm: PWM device - * @duty_ns: "on" time (in nanoseconds) - * @period_ns: duration (in nanoseconds) of one cycle - * - * Returns: 0 on success or a negative error code on failure. + * @state: new state to apply. This can be adjusted by the PWM driver + * if the requested config is not achievable, for example, + * ->duty_cycle and ->period might be approximated. */ -int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns) +int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state) { int err; - if (!pwm || duty_ns < 0 || period_ns <= 0 || duty_ns > period_ns) + if (!pwm) return -EINVAL; - err = pwm->chip->ops->config(pwm->chip, pwm, duty_ns, period_ns); - if (err) - return err; - - pwm->duty_cycle = duty_ns; - pwm->period = period_ns; + if (!memcmp(state, &pwm->state, sizeof(*state))) + return 0; - return 0; -} -EXPORT_SYMBOL_GPL(pwm_config); + if (pwm->chip->ops->apply) { + err = pwm->chip->ops->apply(pwm->chip, pwm, state); + if (err) + return err; -/** - * pwm_set_polarity() - configure the polarity of a PWM signal - * @pwm: PWM device - * @polarity: new polarity of the PWM signal - * - * Note that the polarity cannot be configured while the PWM device is - * enabled. - * - * Returns: 0 on success or a negative error code on failure. - */ -int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity) -{ - int err; + pwm->state = *state; + } else { + /* + * FIXME: restore the initial state in case of error. + */ + if (state->polarity != pwm->state.polarity) { + if (!pwm->chip->ops->set_polarity) + return -ENOTSUPP; + + /* + * Changing the polarity of a running PWM is + * only allowed when the PWM driver implements + * ->apply(). 
+ */ + if (pwm->state.enabled) { + pwm->chip->ops->disable(pwm->chip, pwm); + pwm->state.enabled = false; + } + + err = pwm->chip->ops->set_polarity(pwm->chip, pwm, + state->polarity); + if (err) + return err; + + pwm->state.polarity = state->polarity; + } - if (!pwm || !pwm->chip->ops) - return -EINVAL; + if (state->period != pwm->state.period || + state->duty_cycle != pwm->state.duty_cycle) { + err = pwm->chip->ops->config(pwm->chip, pwm, + state->duty_cycle, + state->period); + if (err) + return err; - if (!pwm->chip->ops->set_polarity) - return -ENOSYS; + pwm->state.duty_cycle = state->duty_cycle; + pwm->state.period = state->period; + } - mutex_lock(&pwm->lock); + if (state->enabled != pwm->state.enabled) { + if (state->enabled) { + err = pwm->chip->ops->enable(pwm->chip, pwm); + if (err) + return err; + } else { + pwm->chip->ops->disable(pwm->chip, pwm); + } - if (pwm_is_enabled(pwm)) { - err = -EBUSY; - goto unlock; + pwm->state.enabled = state->enabled; + } } - err = pwm->chip->ops->set_polarity(pwm->chip, pwm, polarity); - if (err) - goto unlock; - - pwm->polarity = polarity; - -unlock: - mutex_unlock(&pwm->lock); - return err; + return 0; } -EXPORT_SYMBOL_GPL(pwm_set_polarity); +EXPORT_SYMBOL_GPL(pwm_apply_state); /** - * pwm_enable() - start a PWM output toggling + * pwm_adjust_config() - adjust the current PWM config to the PWM arguments * @pwm: PWM device * - * Returns: 0 on success or a negative error code on failure. + * This function will adjust the PWM config to the PWM arguments provided + * by the DT or PWM lookup table. This is particularly useful to adapt + * the bootloader config to the Linux one. */ -int pwm_enable(struct pwm_device *pwm) +int pwm_adjust_config(struct pwm_device *pwm) { - int err = 0; + struct pwm_state state; + struct pwm_args pargs; - if (!pwm) - return -EINVAL; + pwm_get_args(pwm, &pargs); + pwm_get_state(pwm, &state); - mutex_lock(&pwm->lock); + /* + * If the current period is zero, it means that either the PWM driver + * does not support initial state retrieval or the PWM has not yet + * been configured. + * + * In either case, we set up the new period and polarity, and assign a + * duty cycle of 0. + */ + if (!state.period) { + state.duty_cycle = 0; + state.period = pargs.period; + state.polarity = pargs.polarity; - if (!test_and_set_bit(PWMF_ENABLED, &pwm->flags)) { - err = pwm->chip->ops->enable(pwm->chip, pwm); - if (err) - clear_bit(PWMF_ENABLED, &pwm->flags); + return pwm_apply_state(pwm, &state); } - mutex_unlock(&pwm->lock); + /* + * Adjust the PWM duty cycle/period based on the period value provided + * in PWM args. + */ + if (pargs.period != state.period) { + u64 dutycycle = (u64)state.duty_cycle * pargs.period; - return err; -} -EXPORT_SYMBOL_GPL(pwm_enable); + do_div(dutycycle, state.period); + state.duty_cycle = dutycycle; + state.period = pargs.period; + } -/** - * pwm_disable() - stop a PWM output toggling - * @pwm: PWM device - */ -void pwm_disable(struct pwm_device *pwm) -{ - if (pwm && test_and_clear_bit(PWMF_ENABLED, &pwm->flags)) - pwm->chip->ops->disable(pwm->chip, pwm) + /* + * If the polarity changed, we should also change the duty cycle. 
+ */ + if (pargs.polarity != state.polarity) { + state.polarity = pargs.polarity; + state.duty_cycle = state.period - state.duty_cycle; + } + + return pwm_apply_state(pwm, &state); } -EXPORT_SYMBOL_GPL(pwm_disable); +EXPORT_SYMBOL_GPL(pwm_adjust_config); static struct pwm_chip *of_node_to_pwmchip(struct device_node *np) { @@ -754,13 +796,13 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id) if (!chip) goto out; - pwm->args.period = chosen->period; - pwm->args.polarity = chosen->polarity; - pwm = pwm_request_from_chip(chip, chosen->index, con_id ?: dev_id); if (IS_ERR(pwm)) goto out; + pwm->args.period = chosen->period; + pwm->args.polarity = chosen->polarity; + out: mutex_unlock(&pwm_lookup_lock); return pwm; @@ -907,15 +949,23 @@ static void pwm_dbg_show(struct pwm_chip *chip, struct seq_file *s) for (i = 0; i < chip->npwm; i++) { struct pwm_device *pwm = &chip->pwms[i]; + struct pwm_state state; + + pwm_get_state(pwm, &state); seq_printf(s, " pwm-%-3d (%-20.20s):", i, pwm->label); if (test_bit(PWMF_REQUESTED, &pwm->flags)) seq_puts(s, " requested"); - if (pwm_is_enabled(pwm)) + if (state.enabled) seq_puts(s, " enabled"); + seq_printf(s, " period: %u ns", state.period); + seq_printf(s, " duty: %u ns", state.duty_cycle); + seq_printf(s, " polarity: %s", + state.polarity ? "inverse" : "normal"); + seq_puts(s, "\n"); } } diff --git a/drivers/pwm/pwm-crc.c b/drivers/pwm/pwm-crc.c index 7101c7020bf4..bd0ebd04856a 100644 --- a/drivers/pwm/pwm-crc.c +++ b/drivers/pwm/pwm-crc.c @@ -75,7 +75,7 @@ static int crc_pwm_config(struct pwm_chip *c, struct pwm_device *pwm, return -EINVAL; } - if (pwm->period != period_ns) { + if (pwm_get_period(pwm) != period_ns) { int clk_div; /* changing the clk divisor, need to disable first */ diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c index 9861fed4e67d..19dc64cab2f0 100644 --- a/drivers/pwm/pwm-lpc18xx-sct.c +++ b/drivers/pwm/pwm-lpc18xx-sct.c @@ -249,7 +249,7 @@ static int lpc18xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) LPC18XX_PWM_EVSTATEMSK(lpc18xx_data->duty_event), LPC18XX_PWM_EVSTATEMSK_ALL); - if (pwm->polarity == PWM_POLARITY_NORMAL) { + if (pwm_get_polarity(pwm) == PWM_POLARITY_NORMAL) { set_event = lpc18xx_pwm->period_event; clear_event = lpc18xx_data->duty_event; res_action = LPC18XX_PWM_RES_SET; diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c index b7e6ecba7d5c..3e95090cd7cf 100644 --- a/drivers/pwm/pwm-omap-dmtimer.c +++ b/drivers/pwm/pwm-omap-dmtimer.c @@ -192,7 +192,7 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip, load_value, load_value, match_value, match_value); omap->pdata->set_pwm(omap->dm_timer, - pwm->polarity == PWM_POLARITY_INVERSED, + pwm_get_polarity(pwm) == PWM_POLARITY_INVERSED, true, PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW_AND_COMPARE); diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c index 7b8ac0678137..1c85ecc9e7ac 100644 --- a/drivers/pwm/pwm-rcar.c +++ b/drivers/pwm/pwm-rcar.c @@ -157,7 +157,7 @@ static int rcar_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, return div; /* Let the core driver set pwm->period if disabled and duty_ns == 0 */ - if (!test_bit(PWMF_ENABLED, &pwm->flags) && !duty_ns) + if (!pwm_is_enabled(pwm) && !duty_ns) return 0; rcar_pwm_update(rp, RCAR_PWMCR_SYNC, RCAR_PWMCR_SYNC, RCAR_PWMCR); diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c index 67af9f62361f..03a99a53c39e 100644 --- a/drivers/pwm/pwm-sun4i.c +++ b/drivers/pwm/pwm-sun4i.c @@ -354,7 +354,8 @@ static int 
sun4i_pwm_probe(struct platform_device *pdev) val = sun4i_pwm_readl(pwm, PWM_CTRL_REG); for (i = 0; i < pwm->chip.npwm; i++) if (!(val & BIT_CH(PWM_ACT_STATE, i))) - pwm->chip.pwms[i].polarity = PWM_POLARITY_INVERSED; + pwm_set_polarity(&pwm->chip.pwms[i], + PWM_POLARITY_INVERSED); clk_disable_unprepare(pwm->clk); return 0; diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c index 9c90886f4123..d98599249a05 100644 --- a/drivers/pwm/sysfs.c +++ b/drivers/pwm/sysfs.c @@ -26,6 +26,7 @@ struct pwm_export { struct device child; struct pwm_device *pwm; + struct mutex lock; }; static struct pwm_export *child_to_pwm_export(struct device *child) @@ -45,15 +46,20 @@ static ssize_t period_show(struct device *child, char *buf) { const struct pwm_device *pwm = child_to_pwm_device(child); + struct pwm_state state; - return sprintf(buf, "%u\n", pwm_get_period(pwm)); + pwm_get_state(pwm, &state); + + return sprintf(buf, "%u\n", state.period); } static ssize_t period_store(struct device *child, struct device_attribute *attr, const char *buf, size_t size) { - struct pwm_device *pwm = child_to_pwm_device(child); + struct pwm_export *export = child_to_pwm_export(child); + struct pwm_device *pwm = export->pwm; + struct pwm_state state; unsigned int val; int ret; @@ -61,7 +67,11 @@ static ssize_t period_store(struct device *child, if (ret) return ret; - ret = pwm_config(pwm, pwm_get_duty_cycle(pwm), val); + mutex_lock(&export->lock); + pwm_get_state(pwm, &state); + state.period = val; + ret = pwm_apply_state(pwm, &state); + mutex_unlock(&export->lock); return ret ? : size; } @@ -71,15 +81,20 @@ static ssize_t duty_cycle_show(struct device *child, char *buf) { const struct pwm_device *pwm = child_to_pwm_device(child); + struct pwm_state state; + + pwm_get_state(pwm, &state); - return sprintf(buf, "%u\n", pwm_get_duty_cycle(pwm)); + return sprintf(buf, "%u\n", state.duty_cycle); } static ssize_t duty_cycle_store(struct device *child, struct device_attribute *attr, const char *buf, size_t size) { - struct pwm_device *pwm = child_to_pwm_device(child); + struct pwm_export *export = child_to_pwm_export(child); + struct pwm_device *pwm = export->pwm; + struct pwm_state state; unsigned int val; int ret; @@ -87,7 +102,11 @@ static ssize_t duty_cycle_store(struct device *child, if (ret) return ret; - ret = pwm_config(pwm, val, pwm_get_period(pwm)); + mutex_lock(&export->lock); + pwm_get_state(pwm, &state); + state.duty_cycle = val; + ret = pwm_apply_state(pwm, &state); + mutex_unlock(&export->lock); return ret ? : size; } @@ -97,33 +116,46 @@ static ssize_t enable_show(struct device *child, char *buf) { const struct pwm_device *pwm = child_to_pwm_device(child); + struct pwm_state state; + + pwm_get_state(pwm, &state); - return sprintf(buf, "%d\n", pwm_is_enabled(pwm)); + return sprintf(buf, "%d\n", state.enabled); } static ssize_t enable_store(struct device *child, struct device_attribute *attr, const char *buf, size_t size) { - struct pwm_device *pwm = child_to_pwm_device(child); + struct pwm_export *export = child_to_pwm_export(child); + struct pwm_device *pwm = export->pwm; + struct pwm_state state; int val, ret; ret = kstrtoint(buf, 0, &val); if (ret) return ret; + mutex_lock(&export->lock); + + pwm_get_state(pwm, &state); + switch (val) { case 0: - pwm_disable(pwm); + state.enabled = false; break; case 1: - ret = pwm_enable(pwm); + state.enabled = true; break; default: ret = -EINVAL; - break; + goto unlock; } + pwm_apply_state(pwm, &state); + +unlock: + mutex_unlock(&export->lock); return ret ? 
: size; } @@ -133,8 +165,11 @@ static ssize_t polarity_show(struct device *child, { const struct pwm_device *pwm = child_to_pwm_device(child); const char *polarity = "unknown"; + struct pwm_state state; + + pwm_get_state(pwm, &state); - switch (pwm_get_polarity(pwm)) { + switch (state.polarity) { case PWM_POLARITY_NORMAL: polarity = "normal"; break; @@ -151,8 +186,10 @@ static ssize_t polarity_store(struct device *child, struct device_attribute *attr, const char *buf, size_t size) { - struct pwm_device *pwm = child_to_pwm_device(child); + struct pwm_export *export = child_to_pwm_export(child); + struct pwm_device *pwm = export->pwm; enum pwm_polarity polarity; + struct pwm_state state; int ret; if (sysfs_streq(buf, "normal")) @@ -162,7 +199,11 @@ static ssize_t polarity_store(struct device *child, else return -EINVAL; - ret = pwm_set_polarity(pwm, polarity); + mutex_lock(&export->lock); + pwm_get_state(pwm, &state); + state.polarity = polarity; + ret = pwm_apply_state(pwm, &state); + mutex_unlock(&export->lock); return ret ? : size; } @@ -203,6 +244,7 @@ static int pwm_export_child(struct device *parent, struct pwm_device *pwm) } export->pwm = pwm; + mutex_init(&export->lock); export->child.release = pwm_export_release; export->child.parent = parent; diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index b83908670a9a..bed53c46dd90 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -31,7 +31,7 @@ static void dcssblk_release(struct gendisk *disk, fmode_t mode); static blk_qc_t dcssblk_make_request(struct request_queue *q, struct bio *bio); static long dcssblk_direct_access(struct block_device *bdev, sector_t secnum, - void __pmem **kaddr, pfn_t *pfn); + void __pmem **kaddr, pfn_t *pfn, long size); static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0"; @@ -884,7 +884,7 @@ fail: static long dcssblk_direct_access (struct block_device *bdev, sector_t secnum, - void __pmem **kaddr, pfn_t *pfn) + void __pmem **kaddr, pfn_t *pfn, long size) { struct dcssblk_dev_info *dev_info; unsigned long offset, dev_sz; diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 8f90d9e77104..969c312de1be 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -621,6 +621,11 @@ struct aac_driver_ident #define AAC_QUIRK_SCSI_32 0x0020 /* + * SRC based adapters support the AifReqEvent functions + */ +#define AAC_QUIRK_SRC 0x0040 + +/* * The adapter interface specs all queues to be located in the same * physically contiguous block. 
The host structure that defines the communication queues will assume they are each a separate physically diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index a943bd230bc2..79871f3519ff 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -236,10 +236,10 @@ static struct aac_driver_ident aac_drivers[] = { { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */ { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */ { aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */ - { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 6 (Tupelo) */ - { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 7 (Denali) */ - { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 8 */ - { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec PMC Series 9 */ + { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 6 (Tupelo) */ + { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 7 (Denali) */ + { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 8 */ + { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC } /* Adaptec PMC Series 9 */ }; /** @@ -1299,7 +1299,8 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) else shost->this_id = shost->max_id; - aac_intr_normal(aac, 0, 2, 0, NULL); + if (aac_drivers[index].quirks & AAC_QUIRK_SRC) + aac_intr_normal(aac, 0, 2, 0, NULL); /* * dmb - we may need to move the setting of these parms somewhere else once diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 6a4df5a315e9..6bff13e7afc7 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -7975,13 +7975,14 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, ActiveCableEventData = (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData; if (ActiveCableEventData->ReasonCode == - MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER) + MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER) { pr_info(MPT3SAS_FMT "Currently an active cable with ReceptacleID %d", ioc->name, ActiveCableEventData->ReceptacleID); pr_info("cannot be powered and devices connected to this active cable"); pr_info("will not be seen. 
This active cable"); pr_info("requires %d mW of power", ActiveCableEventData->ActiveCablePowerRequirement); + } break; default: /* ignore the rest */ diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig index 10aa18ba05fd..67c0d5aa3212 100644 --- a/drivers/scsi/qla2xxx/Kconfig +++ b/drivers/scsi/qla2xxx/Kconfig @@ -36,3 +36,12 @@ config TCM_QLA2XXX default n ---help--- Say Y here to enable the TCM_QLA2XXX fabric module for QLogic 24xx+ series target mode HBAs + +if TCM_QLA2XXX +config TCM_QLA2XXX_DEBUG + bool "TCM_QLA2XXX fabric module DEBUG mode for QLogic 24xx+ series target mode HBAs" + default n + ---help--- + Say Y here to enable the TCM_QLA2XXX fabric module DEBUG for QLogic 24xx+ series target mode HBAs. + This will include code to enable the SCSI command jammer. +endif diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 8a44d1541eb4..ca39deb4ff5b 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -637,8 +637,10 @@ static void qlt_free_session_done(struct work_struct *work) } /* ha->tgt.sess_lock supposed to be held on entry */ -void qlt_unreg_sess(struct qla_tgt_sess *sess) +static void qlt_release_session(struct kref *kref) { + struct qla_tgt_sess *sess = + container_of(kref, struct qla_tgt_sess, sess_kref); struct scsi_qla_host *vha = sess->vha; if (sess->se_sess) @@ -651,8 +653,16 @@ void qlt_unreg_sess(struct qla_tgt_sess *sess) INIT_WORK(&sess->free_work, qlt_free_session_done); schedule_work(&sess->free_work); } -EXPORT_SYMBOL(qlt_unreg_sess); +void qlt_put_sess(struct qla_tgt_sess *sess) +{ + if (!sess) + return; + + assert_spin_locked(&sess->vha->hw->tgt.sess_lock); + kref_put(&sess->sess_kref, qlt_release_session); +} +EXPORT_SYMBOL(qlt_put_sess); static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) { @@ -857,12 +867,9 @@ static void qlt_del_sess_work_fn(struct delayed_work *work) ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, "Timeout: sess %p about to be deleted\n", sess); - if (sess->se_sess) { + if (sess->se_sess) ha->tgt.tgt_ops->shutdown_sess(sess); - ha->tgt.tgt_ops->put_sess(sess); - } else { - qlt_unreg_sess(sess); - } + qlt_put_sess(sess); } else { schedule_delayed_work(&tgt->sess_del_work, sess->expires - elapsed); @@ -917,7 +924,7 @@ static struct qla_tgt_sess *qlt_create_sess( } } - kref_get(&sess->se_sess->sess_kref); + kref_get(&sess->sess_kref); ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id, (fcport->flags & FCF_CONF_COMP_SUPPORTED)); @@ -947,6 +954,7 @@ static struct qla_tgt_sess *qlt_create_sess( sess->s_id = fcport->d_id; sess->loop_id = fcport->loop_id; sess->local = local; + kref_init(&sess->sess_kref); INIT_LIST_HEAD(&sess->del_list_entry); /* Under normal circumstances we want to log out from firmware when @@ -991,7 +999,7 @@ static struct qla_tgt_sess *qlt_create_sess( * Take an extra reference to ->sess_kref here to handle qla_tgt_sess * access across ->tgt.sess_lock reacquire. 
*/ - kref_get(&sess->se_sess->sess_kref); + kref_get(&sess->sess_kref); } return sess; @@ -1035,7 +1043,7 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return; } else { - kref_get(&sess->se_sess->sess_kref); + kref_get(&sess->sess_kref); if (sess->deleted) { qlt_undelete_sess(sess); @@ -1060,7 +1068,7 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) fcport->port_name, sess->loop_id); sess->local = 0; } - ha->tgt.tgt_ops->put_sess(sess); + qlt_put_sess(sess); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); } @@ -3817,7 +3825,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) * Drop extra session reference from qla_tgt_handle_cmd_for_atio*( */ spin_lock_irqsave(&ha->tgt.sess_lock, flags); - ha->tgt.tgt_ops->put_sess(sess); + qlt_put_sess(sess); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return; @@ -3836,7 +3844,7 @@ out_term: spin_unlock_irqrestore(&ha->hardware_lock, flags); spin_lock_irqsave(&ha->tgt.sess_lock, flags); - ha->tgt.tgt_ops->put_sess(sess); + qlt_put_sess(sess); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); } @@ -3936,13 +3944,13 @@ static void qlt_create_sess_from_atio(struct work_struct *work) if (!cmd) { spin_lock_irqsave(&ha->hardware_lock, flags); qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY); - ha->tgt.tgt_ops->put_sess(sess); + qlt_put_sess(sess); spin_unlock_irqrestore(&ha->hardware_lock, flags); kfree(op); return; } /* - * __qlt_do_work() will call ha->tgt.tgt_ops->put_sess() to release + * __qlt_do_work() will call qlt_put_sess() to release * the extra reference taken above by qlt_make_local_sess() */ __qlt_do_work(cmd); @@ -4003,13 +4011,13 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, /* * Do kref_get() before returning + dropping qla_hw_data->hardware_lock. 
*/ - kref_get(&sess->se_sess->sess_kref); + kref_get(&sess->sess_kref); cmd = qlt_get_tag(vha, sess, atio); if (!cmd) { ql_dbg(ql_dbg_io, vha, 0x3062, "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx); - ha->tgt.tgt_ops->put_sess(sess); + qlt_put_sess(sess); return -ENOMEM; } @@ -5911,7 +5919,7 @@ static void qlt_abort_work(struct qla_tgt *tgt, goto out_term2; } - kref_get(&sess->se_sess->sess_kref); + kref_get(&sess->sess_kref); } spin_lock_irqsave(&ha->hardware_lock, flags); @@ -5924,7 +5932,7 @@ static void qlt_abort_work(struct qla_tgt *tgt, goto out_term; spin_unlock_irqrestore(&ha->hardware_lock, flags); - ha->tgt.tgt_ops->put_sess(sess); + qlt_put_sess(sess); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); return; @@ -5935,8 +5943,7 @@ out_term: qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false); spin_unlock_irqrestore(&ha->hardware_lock, flags); - if (sess) - ha->tgt.tgt_ops->put_sess(sess); + qlt_put_sess(sess); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); } @@ -5976,7 +5983,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt, goto out_term; } - kref_get(&sess->se_sess->sess_kref); + kref_get(&sess->sess_kref); } iocb = a; @@ -5988,14 +5995,13 @@ static void qlt_tmr_work(struct qla_tgt *tgt, if (rc != 0) goto out_term; - ha->tgt.tgt_ops->put_sess(sess); + qlt_put_sess(sess); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return; out_term: qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0); - if (sess) - ha->tgt.tgt_ops->put_sess(sess); + qlt_put_sess(sess); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); } diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index d857feeb6514..f26c5f60eedd 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -738,7 +738,6 @@ struct qla_tgt_func_tmpl { struct qla_tgt_sess *(*find_sess_by_s_id)(struct scsi_qla_host *, const uint8_t *); void (*clear_nacl_from_fcport_map)(struct qla_tgt_sess *); - void (*put_sess)(struct qla_tgt_sess *); void (*shutdown_sess)(struct qla_tgt_sess *); }; @@ -930,6 +929,7 @@ struct qla_tgt_sess { int generation; struct se_session *se_sess; + struct kref sess_kref; struct scsi_qla_host *vha; struct qla_tgt *tgt; @@ -1101,7 +1101,7 @@ extern int qlt_remove_target(struct qla_hw_data *, struct scsi_qla_host *); extern int qlt_lport_register(void *, u64, u64, u64, int (*callback)(struct scsi_qla_host *, void *, u64, u64)); extern void qlt_lport_deregister(struct scsi_qla_host *); -extern void qlt_unreg_sess(struct qla_tgt_sess *); +void qlt_put_sess(struct qla_tgt_sess *sess); extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *); extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int); extern int __init qlt_init(void); diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index c1461d225f08..6643f6fc7795 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -339,22 +339,6 @@ static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd) qlt_free_cmd(cmd); } -static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess) -{ - struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr; - struct scsi_qla_host *vha; - unsigned long flags; - - BUG_ON(!sess); - vha = sess->vha; - - spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); - target_sess_cmd_list_set_waiting(se_sess); - spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); - - return 1; -} - static void tcm_qla2xxx_close_session(struct se_session *se_sess) { struct 
qla_tgt_sess *sess = se_sess->fabric_sess_ptr; @@ -365,7 +349,8 @@ static void tcm_qla2xxx_close_session(struct se_session *se_sess) vha = sess->vha; spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); - qlt_unreg_sess(sess); + target_sess_cmd_list_set_waiting(se_sess); + qlt_put_sess(sess); spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); } @@ -457,6 +442,10 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, struct se_cmd *se_cmd = &cmd->se_cmd; struct se_session *se_sess; struct qla_tgt_sess *sess; +#ifdef CONFIG_TCM_QLA2XXX_DEBUG + struct se_portal_group *se_tpg; + struct tcm_qla2xxx_tpg *tpg; +#endif int flags = TARGET_SCF_ACK_KREF; if (bidi) @@ -477,6 +466,15 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, return -EINVAL; } +#ifdef CONFIG_TCM_QLA2XXX_DEBUG + se_tpg = se_sess->se_tpg; + tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); + if (unlikely(tpg->tpg_attrib.jam_host)) { + /* return, and don't run target_submit_cmd, discarding command */ + return 0; + } +#endif + cmd->vha->tgt_counters.qla_core_sbt_cmd++; return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0], cmd->unpacked_lun, data_length, fcp_task_attr, @@ -758,23 +756,6 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess) tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess); } -static void tcm_qla2xxx_release_session(struct kref *kref) -{ - struct se_session *se_sess = container_of(kref, - struct se_session, sess_kref); - - qlt_unreg_sess(se_sess->fabric_sess_ptr); -} - -static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess) -{ - if (!sess) - return; - - assert_spin_locked(&sess->vha->hw->tgt.sess_lock); - kref_put(&sess->se_sess->sess_kref, tcm_qla2xxx_release_session); -} - static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess) { assert_spin_locked(&sess->vha->hw->tgt.sess_lock); @@ -844,6 +825,9 @@ DEF_QLA_TPG_ATTRIB(cache_dynamic_acls); DEF_QLA_TPG_ATTRIB(demo_mode_write_protect); DEF_QLA_TPG_ATTRIB(prod_mode_write_protect); DEF_QLA_TPG_ATTRIB(demo_mode_login_only); +#ifdef CONFIG_TCM_QLA2XXX_DEBUG +DEF_QLA_TPG_ATTRIB(jam_host); +#endif static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = { &tcm_qla2xxx_tpg_attrib_attr_generate_node_acls, @@ -851,6 +835,9 @@ static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = { &tcm_qla2xxx_tpg_attrib_attr_demo_mode_write_protect, &tcm_qla2xxx_tpg_attrib_attr_prod_mode_write_protect, &tcm_qla2xxx_tpg_attrib_attr_demo_mode_login_only, +#ifdef CONFIG_TCM_QLA2XXX_DEBUG + &tcm_qla2xxx_tpg_attrib_attr_jam_host, +#endif NULL, }; @@ -1023,6 +1010,7 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg( tpg->tpg_attrib.demo_mode_write_protect = 1; tpg->tpg_attrib.cache_dynamic_acls = 1; tpg->tpg_attrib.demo_mode_login_only = 1; + tpg->tpg_attrib.jam_host = 0; ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP); if (ret < 0) { @@ -1579,7 +1567,6 @@ static struct qla_tgt_func_tmpl tcm_qla2xxx_template = { .find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id, .find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id, .clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map, - .put_sess = tcm_qla2xxx_put_sess, .shutdown_sess = tcm_qla2xxx_shutdown_sess, }; @@ -1847,7 +1834,6 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = { .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, .check_stop_free = tcm_qla2xxx_check_stop_free, .release_cmd = tcm_qla2xxx_release_cmd, - .shutdown_session = 
tcm_qla2xxx_shutdown_session, .close_session = tcm_qla2xxx_close_session, .sess_get_index = tcm_qla2xxx_sess_get_index, .sess_get_initiator_sid = NULL, @@ -1890,7 +1876,6 @@ static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, .check_stop_free = tcm_qla2xxx_check_stop_free, .release_cmd = tcm_qla2xxx_release_cmd, - .shutdown_session = tcm_qla2xxx_shutdown_session, .close_session = tcm_qla2xxx_close_session, .sess_get_index = tcm_qla2xxx_sess_get_index, .sess_get_initiator_sid = NULL, diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h index 3bbf4cb6fd97..37e026a4823d 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h @@ -34,6 +34,7 @@ struct tcm_qla2xxx_tpg_attrib { int prod_mode_write_protect; int demo_mode_login_only; int fabric_prot_type; + int jam_host; }; struct tcm_qla2xxx_tpg { diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index b2e332af0f51..c71344aebdbb 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -821,9 +821,12 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) } /* - * If we finished all bytes in the request we are done now. + * special case: failed zero length commands always need to + * drop down into the retry code. Otherwise, if we finished + * all bytes in the request we are done now. */ - if (!scsi_end_request(req, error, good_bytes, 0)) + if (!(blk_rq_bytes(req) == 0 && error) && + !scsi_end_request(req, error, good_bytes, 0)) return; /* diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 428c03ef02b2..f459dff30512 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1398,11 +1398,15 @@ static int media_not_present(struct scsi_disk *sdkp, **/ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing) { - struct scsi_disk *sdkp = scsi_disk(disk); - struct scsi_device *sdp = sdkp->device; + struct scsi_disk *sdkp = scsi_disk_get(disk); + struct scsi_device *sdp; struct scsi_sense_hdr *sshdr = NULL; int retval; + if (!sdkp) + return 0; + + sdp = sdkp->device; SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n")); /* @@ -1459,6 +1463,7 @@ out: kfree(sshdr); retval = sdp->changed ? 
DISK_EVENT_MEDIA_CHANGE : 0; sdp->changed = 0; + scsi_disk_put(sdkp); return retval; } diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c index 3c3e56df526e..a003ba26ca6e 100644 --- a/drivers/soc/mediatek/mtk-pmic-wrap.c +++ b/drivers/soc/mediatek/mtk-pmic-wrap.c @@ -1059,7 +1059,7 @@ static const struct pmic_wrapper_type pwrap_mt2701 = { .regs = mt2701_regs, .type = PWRAP_MT2701, .arb_en_all = 0x3f, - .int_en_all = ~(BIT(31) | BIT(2)), + .int_en_all = ~(u32)(BIT(31) | BIT(2)), .spi_w = PWRAP_MAN_CMD_SPI_WRITE_NEW, .wdt_src = PWRAP_WDT_SRC_MASK_ALL, .has_bridge = 0, @@ -1071,7 +1071,7 @@ static struct pmic_wrapper_type pwrap_mt8135 = { .regs = mt8135_regs, .type = PWRAP_MT8135, .arb_en_all = 0x1ff, - .int_en_all = ~(BIT(31) | BIT(1)), + .int_en_all = ~(u32)(BIT(31) | BIT(1)), .spi_w = PWRAP_MAN_CMD_SPI_WRITE, .wdt_src = PWRAP_WDT_SRC_MASK_ALL, .has_bridge = 1, @@ -1083,7 +1083,7 @@ static struct pmic_wrapper_type pwrap_mt8173 = { .regs = mt8173_regs, .type = PWRAP_MT8173, .arb_en_all = 0x3f, - .int_en_all = ~(BIT(31) | BIT(1)), + .int_en_all = ~(u32)(BIT(31) | BIT(1)), .spi_w = PWRAP_MAN_CMD_SPI_WRITE, .wdt_src = PWRAP_WDT_SRC_MASK_NO_STAUPD, .has_bridge = 0, diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 9d8c84bb1544..4b931ec8d90b 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -410,7 +410,6 @@ config SPI_OMAP_UWIRE config SPI_OMAP24XX tristate "McSPI driver for OMAP" depends on HAS_DMA - depends on ARM || ARM64 || AVR32 || HEXAGON || MIPS || SUPERH depends on ARCH_OMAP2PLUS || COMPILE_TEST help SPI master controller for OMAP24XX and later Multichannel SPI @@ -432,10 +431,23 @@ config SPI_OMAP_100K config SPI_ORION tristate "Orion SPI master" - depends on PLAT_ORION || COMPILE_TEST + depends on PLAT_ORION || ARCH_MVEBU || COMPILE_TEST help This enables using the SPI master controller on the Orion chips. +config SPI_PIC32 + tristate "Microchip PIC32 series SPI" + depends on MACH_PIC32 || COMPILE_TEST + help + SPI driver for Microchip PIC32 SPI master controller. + +config SPI_PIC32_SQI + tristate "Microchip PIC32 Quad SPI driver" + depends on MACH_PIC32 || COMPILE_TEST + depends on HAS_DMA + help + SPI driver for PIC32 Quad SPI controller. + config SPI_PL022 tristate "ARM AMBA PL022 SSP controller" depends on ARM_AMBA @@ -469,7 +481,6 @@ config SPI_PXA2XX_PCI config SPI_ROCKCHIP tristate "Rockchip SPI controller driver" - depends on ARM || ARM64 || AVR32 || HEXAGON || MIPS || SUPERH help This selects a driver for Rockchip SPI controller. @@ -569,7 +580,7 @@ config SPI_SIRF config SPI_ST_SSC4 tristate "STMicroelectronics SPI SSC-based driver" - depends on ARCH_STI + depends on ARCH_STI || COMPILE_TEST help STMicroelectronics SoCs support for SPI. If you say yes to this option, support will be included for the SSC driven SPI. @@ -656,7 +667,7 @@ config SPI_XILINX config SPI_XLP tristate "Netlogic XLP SPI controller driver" - depends on CPU_XLP || COMPILE_TEST + depends on CPU_XLP || ARCH_VULCAN || COMPILE_TEST help Enable support for the SPI controller on the Netlogic XLP SoCs. 
Currently supported XLP variants are XLP8XX, XLP3XX, XLP2XX, XLP9XX diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index fbb255c5a608..3c74d003535b 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -62,6 +62,8 @@ obj-$(CONFIG_SPI_OMAP_100K) += spi-omap-100k.o obj-$(CONFIG_SPI_OMAP24XX) += spi-omap2-mcspi.o obj-$(CONFIG_SPI_TI_QSPI) += spi-ti-qspi.o obj-$(CONFIG_SPI_ORION) += spi-orion.o +obj-$(CONFIG_SPI_PIC32) += spi-pic32.o +obj-$(CONFIG_SPI_PIC32_SQI) += spi-pic32-sqi.o obj-$(CONFIG_SPI_PL022) += spi-pl022.o obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o spi-pxa2xx-platform-objs := spi-pxa2xx.o spi-pxa2xx-dma.o diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c index c968ab210a51..2b1456e5e221 100644 --- a/drivers/spi/spi-axi-spi-engine.c +++ b/drivers/spi/spi-axi-spi-engine.c @@ -525,7 +525,6 @@ static int spi_engine_probe(struct platform_device *pdev) if (ret) goto err_ref_clk_disable; - master->dev.parent = &pdev->dev; master->dev.of_node = pdev->dev.of_node; master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE; master->bits_per_word_mask = SPI_BPW_MASK(8); diff --git a/drivers/spi/spi-bcm53xx.c b/drivers/spi/spi-bcm53xx.c index cc3f938f0a6b..afb51699dbb5 100644 --- a/drivers/spi/spi-bcm53xx.c +++ b/drivers/spi/spi-bcm53xx.c @@ -10,6 +10,7 @@ #include "spi-bcm53xx.h" #define BCM53XXSPI_MAX_SPI_BAUD 13500000 /* 216 MHz? */ +#define BCM53XXSPI_FLASH_WINDOW SZ_32M /* The longest observed required wait was 19 ms */ #define BCM53XXSPI_SPE_TIMEOUT_MS 80 @@ -17,8 +18,10 @@ struct bcm53xxspi { struct bcma_device *core; struct spi_master *master; + void __iomem *mmio_base; size_t read_offset; + bool bspi; /* Boot SPI mode with memory mapping */ }; static inline u32 bcm53xxspi_read(struct bcm53xxspi *b53spi, u16 offset) @@ -32,6 +35,50 @@ static inline void bcm53xxspi_write(struct bcm53xxspi *b53spi, u16 offset, bcma_write32(b53spi->core, offset, value); } +static void bcm53xxspi_disable_bspi(struct bcm53xxspi *b53spi) +{ + struct device *dev = &b53spi->core->dev; + unsigned long deadline; + u32 tmp; + + if (!b53spi->bspi) + return; + + tmp = bcm53xxspi_read(b53spi, B53SPI_BSPI_MAST_N_BOOT_CTRL); + if (tmp & 0x1) + return; + + deadline = jiffies + usecs_to_jiffies(200); + do { + tmp = bcm53xxspi_read(b53spi, B53SPI_BSPI_BUSY_STATUS); + if (!(tmp & 0x1)) { + bcm53xxspi_write(b53spi, B53SPI_BSPI_MAST_N_BOOT_CTRL, + 0x1); + ndelay(200); + b53spi->bspi = false; + return; + } + udelay(1); + } while (!time_after_eq(jiffies, deadline)); + + dev_warn(dev, "Timeout disabling BSPI\n"); +} + +static void bcm53xxspi_enable_bspi(struct bcm53xxspi *b53spi) +{ + u32 tmp; + + if (b53spi->bspi) + return; + + tmp = bcm53xxspi_read(b53spi, B53SPI_BSPI_MAST_N_BOOT_CTRL); + if (!(tmp & 0x1)) + return; + + bcm53xxspi_write(b53spi, B53SPI_BSPI_MAST_N_BOOT_CTRL, 0x0); + b53spi->bspi = true; +} + static inline unsigned int bcm53xxspi_calc_timeout(size_t len) { /* Do some magic calculation based on length and baud. Add 10% and 1. 
*/ @@ -176,6 +223,8 @@ static int bcm53xxspi_transfer_one(struct spi_master *master, u8 *buf; size_t left; + bcm53xxspi_disable_bspi(b53spi); + if (t->tx_buf) { buf = (u8 *)t->tx_buf; left = t->len; @@ -206,6 +255,22 @@ static int bcm53xxspi_transfer_one(struct spi_master *master, return 0; } +static int bcm53xxspi_flash_read(struct spi_device *spi, + struct spi_flash_read_message *msg) +{ + struct bcm53xxspi *b53spi = spi_master_get_devdata(spi->master); + int ret = 0; + + if (msg->from + msg->len > BCM53XXSPI_FLASH_WINDOW) + return -EINVAL; + + bcm53xxspi_enable_bspi(b53spi); + memcpy_fromio(msg->buf, b53spi->mmio_base + msg->from, msg->len); + msg->retlen = msg->len; + + return ret; +} + /************************************************** * BCMA **************************************************/ @@ -222,6 +287,7 @@ MODULE_DEVICE_TABLE(bcma, bcm53xxspi_bcma_tbl); static int bcm53xxspi_bcma_probe(struct bcma_device *core) { + struct device *dev = &core->dev; struct bcm53xxspi *b53spi; struct spi_master *master; int err; @@ -231,7 +297,7 @@ static int bcm53xxspi_bcma_probe(struct bcma_device *core) return -ENOTSUPP; } - master = spi_alloc_master(&core->dev, sizeof(*b53spi)); + master = spi_alloc_master(dev, sizeof(*b53spi)); if (!master) return -ENOMEM; @@ -239,11 +305,19 @@ static int bcm53xxspi_bcma_probe(struct bcma_device *core) b53spi->master = master; b53spi->core = core; + if (core->addr_s[0]) + b53spi->mmio_base = devm_ioremap(dev, core->addr_s[0], + BCM53XXSPI_FLASH_WINDOW); + b53spi->bspi = true; + bcm53xxspi_disable_bspi(b53spi); + master->transfer_one = bcm53xxspi_transfer_one; + if (b53spi->mmio_base) + master->spi_flash_read = bcm53xxspi_flash_read; bcma_set_drvdata(core, b53spi); - err = devm_spi_register_master(&core->dev, master); + err = devm_spi_register_master(dev, master); if (err) { spi_master_put(master); bcma_set_drvdata(core, NULL); diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c index 121a4135b540..1c57ce64abba 100644 --- a/drivers/spi/spi-cadence.c +++ b/drivers/spi/spi-cadence.c @@ -19,44 +19,46 @@ #include <linux/of_irq.h> #include <linux/of_address.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/spi/spi.h> /* Name of this driver */ #define CDNS_SPI_NAME "cdns-spi" /* Register offset definitions */ -#define CDNS_SPI_CR_OFFSET 0x00 /* Configuration Register, RW */ -#define CDNS_SPI_ISR_OFFSET 0x04 /* Interrupt Status Register, RO */ -#define CDNS_SPI_IER_OFFSET 0x08 /* Interrupt Enable Register, WO */ -#define CDNS_SPI_IDR_OFFSET 0x0c /* Interrupt Disable Register, WO */ -#define CDNS_SPI_IMR_OFFSET 0x10 /* Interrupt Enabled Mask Register, RO */ -#define CDNS_SPI_ER_OFFSET 0x14 /* Enable/Disable Register, RW */ -#define CDNS_SPI_DR_OFFSET 0x18 /* Delay Register, RW */ -#define CDNS_SPI_TXD_OFFSET 0x1C /* Data Transmit Register, WO */ -#define CDNS_SPI_RXD_OFFSET 0x20 /* Data Receive Register, RO */ -#define CDNS_SPI_SICR_OFFSET 0x24 /* Slave Idle Count Register, RW */ -#define CDNS_SPI_THLD_OFFSET 0x28 /* Transmit FIFO Watermark Register,RW */ - +#define CDNS_SPI_CR 0x00 /* Configuration Register, RW */ +#define CDNS_SPI_ISR 0x04 /* Interrupt Status Register, RO */ +#define CDNS_SPI_IER 0x08 /* Interrupt Enable Register, WO */ +#define CDNS_SPI_IDR 0x0c /* Interrupt Disable Register, WO */ +#define CDNS_SPI_IMR 0x10 /* Interrupt Enabled Mask Register, RO */ +#define CDNS_SPI_ER 0x14 /* Enable/Disable Register, RW */ +#define CDNS_SPI_DR 0x18 /* Delay Register, RW */ +#define CDNS_SPI_TXD 0x1C /* Data 
Transmit Register, WO */ +#define CDNS_SPI_RXD 0x20 /* Data Receive Register, RO */ +#define CDNS_SPI_SICR 0x24 /* Slave Idle Count Register, RW */ +#define CDNS_SPI_THLD 0x28 /* Transmit FIFO Watermark Register,RW */ + +#define SPI_AUTOSUSPEND_TIMEOUT 3000 /* * SPI Configuration Register bit Masks * * This register contains various control bits that affect the operation * of the SPI controller */ -#define CDNS_SPI_CR_MANSTRT_MASK 0x00010000 /* Manual TX Start */ -#define CDNS_SPI_CR_CPHA_MASK 0x00000004 /* Clock Phase Control */ -#define CDNS_SPI_CR_CPOL_MASK 0x00000002 /* Clock Polarity Control */ -#define CDNS_SPI_CR_SSCTRL_MASK 0x00003C00 /* Slave Select Mask */ -#define CDNS_SPI_CR_PERI_SEL_MASK 0x00000200 /* Peripheral Select Decode */ -#define CDNS_SPI_CR_BAUD_DIV_MASK 0x00000038 /* Baud Rate Divisor Mask */ -#define CDNS_SPI_CR_MSTREN_MASK 0x00000001 /* Master Enable Mask */ -#define CDNS_SPI_CR_MANSTRTEN_MASK 0x00008000 /* Manual TX Enable Mask */ -#define CDNS_SPI_CR_SSFORCE_MASK 0x00004000 /* Manual SS Enable Mask */ -#define CDNS_SPI_CR_BAUD_DIV_4_MASK 0x00000008 /* Default Baud Div Mask */ -#define CDNS_SPI_CR_DEFAULT_MASK (CDNS_SPI_CR_MSTREN_MASK | \ - CDNS_SPI_CR_SSCTRL_MASK | \ - CDNS_SPI_CR_SSFORCE_MASK | \ - CDNS_SPI_CR_BAUD_DIV_4_MASK) +#define CDNS_SPI_CR_MANSTRT 0x00010000 /* Manual TX Start */ +#define CDNS_SPI_CR_CPHA 0x00000004 /* Clock Phase Control */ +#define CDNS_SPI_CR_CPOL 0x00000002 /* Clock Polarity Control */ +#define CDNS_SPI_CR_SSCTRL 0x00003C00 /* Slave Select Mask */ +#define CDNS_SPI_CR_PERI_SEL 0x00000200 /* Peripheral Select Decode */ +#define CDNS_SPI_CR_BAUD_DIV 0x00000038 /* Baud Rate Divisor Mask */ +#define CDNS_SPI_CR_MSTREN 0x00000001 /* Master Enable Mask */ +#define CDNS_SPI_CR_MANSTRTEN 0x00008000 /* Manual TX Enable Mask */ +#define CDNS_SPI_CR_SSFORCE 0x00004000 /* Manual SS Enable Mask */ +#define CDNS_SPI_CR_BAUD_DIV_4 0x00000008 /* Default Baud Div Mask */ +#define CDNS_SPI_CR_DEFAULT (CDNS_SPI_CR_MSTREN | \ + CDNS_SPI_CR_SSCTRL | \ + CDNS_SPI_CR_SSFORCE | \ + CDNS_SPI_CR_BAUD_DIV_4) /* * SPI Configuration Register - Baud rate and slave select @@ -77,21 +79,21 @@ * All the four interrupt registers (Status/Mask/Enable/Disable) have the same * bit definitions. 
*/ -#define CDNS_SPI_IXR_TXOW_MASK 0x00000004 /* SPI TX FIFO Overwater */ -#define CDNS_SPI_IXR_MODF_MASK 0x00000002 /* SPI Mode Fault */ -#define CDNS_SPI_IXR_RXNEMTY_MASK 0x00000010 /* SPI RX FIFO Not Empty */ -#define CDNS_SPI_IXR_DEFAULT_MASK (CDNS_SPI_IXR_TXOW_MASK | \ - CDNS_SPI_IXR_MODF_MASK) -#define CDNS_SPI_IXR_TXFULL_MASK 0x00000008 /* SPI TX Full */ -#define CDNS_SPI_IXR_ALL_MASK 0x0000007F /* SPI all interrupts */ +#define CDNS_SPI_IXR_TXOW 0x00000004 /* SPI TX FIFO Overwater */ +#define CDNS_SPI_IXR_MODF 0x00000002 /* SPI Mode Fault */ +#define CDNS_SPI_IXR_RXNEMTY 0x00000010 /* SPI RX FIFO Not Empty */ +#define CDNS_SPI_IXR_DEFAULT (CDNS_SPI_IXR_TXOW | \ + CDNS_SPI_IXR_MODF) +#define CDNS_SPI_IXR_TXFULL 0x00000008 /* SPI TX Full */ +#define CDNS_SPI_IXR_ALL 0x0000007F /* SPI all interrupts */ /* * SPI Enable Register bit Masks * * This register is used to enable or disable the SPI controller */ -#define CDNS_SPI_ER_ENABLE_MASK 0x00000001 /* SPI Enable Bit Mask */ -#define CDNS_SPI_ER_DISABLE_MASK 0x0 /* SPI Disable Bit Mask */ +#define CDNS_SPI_ER_ENABLE 0x00000001 /* SPI Enable Bit Mask */ +#define CDNS_SPI_ER_DISABLE 0x0 /* SPI Disable Bit Mask */ /* SPI FIFO depth in bytes */ #define CDNS_SPI_FIFO_DEPTH 128 @@ -149,56 +151,51 @@ static inline void cdns_spi_write(struct cdns_spi *xspi, u32 offset, u32 val) */ static void cdns_spi_init_hw(struct cdns_spi *xspi) { - u32 ctrl_reg = CDNS_SPI_CR_DEFAULT_MASK; + u32 ctrl_reg = CDNS_SPI_CR_DEFAULT; if (xspi->is_decoded_cs) - ctrl_reg |= CDNS_SPI_CR_PERI_SEL_MASK; + ctrl_reg |= CDNS_SPI_CR_PERI_SEL; - cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET, - CDNS_SPI_ER_DISABLE_MASK); - cdns_spi_write(xspi, CDNS_SPI_IDR_OFFSET, - CDNS_SPI_IXR_ALL_MASK); + cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE); + cdns_spi_write(xspi, CDNS_SPI_IDR, CDNS_SPI_IXR_ALL); /* Clear the RX FIFO */ - while (cdns_spi_read(xspi, CDNS_SPI_ISR_OFFSET) & - CDNS_SPI_IXR_RXNEMTY_MASK) - cdns_spi_read(xspi, CDNS_SPI_RXD_OFFSET); - - cdns_spi_write(xspi, CDNS_SPI_ISR_OFFSET, - CDNS_SPI_IXR_ALL_MASK); - cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET, ctrl_reg); - cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET, - CDNS_SPI_ER_ENABLE_MASK); + while (cdns_spi_read(xspi, CDNS_SPI_ISR) & CDNS_SPI_IXR_RXNEMTY) + cdns_spi_read(xspi, CDNS_SPI_RXD); + + cdns_spi_write(xspi, CDNS_SPI_ISR, CDNS_SPI_IXR_ALL); + cdns_spi_write(xspi, CDNS_SPI_CR, ctrl_reg); + cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_ENABLE); } /** * cdns_spi_chipselect - Select or deselect the chip select line * @spi: Pointer to the spi_device structure - * @is_on: Select(0) or deselect (1) the chip select line + * @is_high: Select(0) or deselect (1) the chip select line */ static void cdns_spi_chipselect(struct spi_device *spi, bool is_high) { struct cdns_spi *xspi = spi_master_get_devdata(spi->master); u32 ctrl_reg; - ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR_OFFSET); + ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR); if (is_high) { /* Deselect the slave */ - ctrl_reg |= CDNS_SPI_CR_SSCTRL_MASK; + ctrl_reg |= CDNS_SPI_CR_SSCTRL; } else { /* Select the slave */ - ctrl_reg &= ~CDNS_SPI_CR_SSCTRL_MASK; + ctrl_reg &= ~CDNS_SPI_CR_SSCTRL; if (!(xspi->is_decoded_cs)) ctrl_reg |= ((~(CDNS_SPI_SS0 << spi->chip_select)) << CDNS_SPI_SS_SHIFT) & - CDNS_SPI_CR_SSCTRL_MASK; + CDNS_SPI_CR_SSCTRL; else ctrl_reg |= (spi->chip_select << CDNS_SPI_SS_SHIFT) & - CDNS_SPI_CR_SSCTRL_MASK; + CDNS_SPI_CR_SSCTRL; } - cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET, ctrl_reg); + cdns_spi_write(xspi, CDNS_SPI_CR, ctrl_reg); } /** @@ -212,14 +209,15 @@ 
static void cdns_spi_config_clock_mode(struct spi_device *spi) struct cdns_spi *xspi = spi_master_get_devdata(spi->master); u32 ctrl_reg, new_ctrl_reg; - new_ctrl_reg = ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR_OFFSET); + new_ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR); + ctrl_reg = new_ctrl_reg; /* Set the SPI clock phase and clock polarity */ - new_ctrl_reg &= ~(CDNS_SPI_CR_CPHA_MASK | CDNS_SPI_CR_CPOL_MASK); + new_ctrl_reg &= ~(CDNS_SPI_CR_CPHA | CDNS_SPI_CR_CPOL); if (spi->mode & SPI_CPHA) - new_ctrl_reg |= CDNS_SPI_CR_CPHA_MASK; + new_ctrl_reg |= CDNS_SPI_CR_CPHA; if (spi->mode & SPI_CPOL) - new_ctrl_reg |= CDNS_SPI_CR_CPOL_MASK; + new_ctrl_reg |= CDNS_SPI_CR_CPOL; if (new_ctrl_reg != ctrl_reg) { /* @@ -228,11 +226,9 @@ static void cdns_spi_config_clock_mode(struct spi_device *spi) * polarity as it will cause the SPI slave to see spurious clock * transitions. To workaround the issue toggle the ER register. */ - cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET, - CDNS_SPI_ER_DISABLE_MASK); - cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET, new_ctrl_reg); - cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET, - CDNS_SPI_ER_ENABLE_MASK); + cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE); + cdns_spi_write(xspi, CDNS_SPI_CR, new_ctrl_reg); + cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_ENABLE); } } @@ -251,7 +247,7 @@ static void cdns_spi_config_clock_mode(struct spi_device *spi) * controller. */ static void cdns_spi_config_clock_freq(struct spi_device *spi, - struct spi_transfer *transfer) + struct spi_transfer *transfer) { struct cdns_spi *xspi = spi_master_get_devdata(spi->master); u32 ctrl_reg, baud_rate_val; @@ -259,7 +255,7 @@ static void cdns_spi_config_clock_freq(struct spi_device *spi, frequency = clk_get_rate(xspi->ref_clk); - ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR_OFFSET); + ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR); /* Set the clock frequency */ if (xspi->speed_hz != transfer->speed_hz) { @@ -269,12 +265,12 @@ static void cdns_spi_config_clock_freq(struct spi_device *spi, (frequency / (2 << baud_rate_val)) > transfer->speed_hz) baud_rate_val++; - ctrl_reg &= ~CDNS_SPI_CR_BAUD_DIV_MASK; + ctrl_reg &= ~CDNS_SPI_CR_BAUD_DIV; ctrl_reg |= baud_rate_val << CDNS_SPI_BAUD_DIV_SHIFT; xspi->speed_hz = frequency / (2 << baud_rate_val); } - cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET, ctrl_reg); + cdns_spi_write(xspi, CDNS_SPI_CR, ctrl_reg); } /** @@ -313,10 +309,9 @@ static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi) while ((trans_cnt < CDNS_SPI_FIFO_DEPTH) && (xspi->tx_bytes > 0)) { if (xspi->txbuf) - cdns_spi_write(xspi, CDNS_SPI_TXD_OFFSET, - *xspi->txbuf++); + cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++); else - cdns_spi_write(xspi, CDNS_SPI_TXD_OFFSET, 0); + cdns_spi_write(xspi, CDNS_SPI_TXD, 0); xspi->tx_bytes--; trans_cnt++; @@ -344,19 +339,18 @@ static irqreturn_t cdns_spi_irq(int irq, void *dev_id) u32 intr_status, status; status = IRQ_NONE; - intr_status = cdns_spi_read(xspi, CDNS_SPI_ISR_OFFSET); - cdns_spi_write(xspi, CDNS_SPI_ISR_OFFSET, intr_status); + intr_status = cdns_spi_read(xspi, CDNS_SPI_ISR); + cdns_spi_write(xspi, CDNS_SPI_ISR, intr_status); - if (intr_status & CDNS_SPI_IXR_MODF_MASK) { + if (intr_status & CDNS_SPI_IXR_MODF) { /* Indicate that transfer is completed, the SPI subsystem will * identify the error as the remaining bytes to be * transferred is non-zero */ - cdns_spi_write(xspi, CDNS_SPI_IDR_OFFSET, - CDNS_SPI_IXR_DEFAULT_MASK); + cdns_spi_write(xspi, CDNS_SPI_IDR, CDNS_SPI_IXR_DEFAULT); spi_finalize_current_transfer(master); status = IRQ_HANDLED; - } else if 
(intr_status & CDNS_SPI_IXR_TXOW_MASK) { + } else if (intr_status & CDNS_SPI_IXR_TXOW) { unsigned long trans_cnt; trans_cnt = xspi->rx_bytes - xspi->tx_bytes; @@ -365,7 +359,7 @@ static irqreturn_t cdns_spi_irq(int irq, void *dev_id) while (trans_cnt) { u8 data; - data = cdns_spi_read(xspi, CDNS_SPI_RXD_OFFSET); + data = cdns_spi_read(xspi, CDNS_SPI_RXD); if (xspi->rxbuf) *xspi->rxbuf++ = data; @@ -378,8 +372,8 @@ static irqreturn_t cdns_spi_irq(int irq, void *dev_id) cdns_spi_fill_tx_fifo(xspi); } else { /* Transfer is completed */ - cdns_spi_write(xspi, CDNS_SPI_IDR_OFFSET, - CDNS_SPI_IXR_DEFAULT_MASK); + cdns_spi_write(xspi, CDNS_SPI_IDR, + CDNS_SPI_IXR_DEFAULT); spi_finalize_current_transfer(master); } status = IRQ_HANDLED; @@ -387,6 +381,7 @@ static irqreturn_t cdns_spi_irq(int irq, void *dev_id) return status; } + static int cdns_prepare_message(struct spi_master *master, struct spi_message *msg) { @@ -421,8 +416,7 @@ static int cdns_transfer_one(struct spi_master *master, cdns_spi_fill_tx_fifo(xspi); - cdns_spi_write(xspi, CDNS_SPI_IER_OFFSET, - CDNS_SPI_IXR_DEFAULT_MASK); + cdns_spi_write(xspi, CDNS_SPI_IER, CDNS_SPI_IXR_DEFAULT); return transfer->len; } @@ -439,8 +433,7 @@ static int cdns_prepare_transfer_hardware(struct spi_master *master) { struct cdns_spi *xspi = spi_master_get_devdata(master); - cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET, - CDNS_SPI_ER_ENABLE_MASK); + cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_ENABLE); return 0; } @@ -458,8 +451,7 @@ static int cdns_unprepare_transfer_hardware(struct spi_master *master) { struct cdns_spi *xspi = spi_master_get_devdata(master); - cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET, - CDNS_SPI_ER_DISABLE_MASK); + cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE); return 0; } @@ -481,7 +473,7 @@ static int cdns_spi_probe(struct platform_device *pdev) u32 num_cs; master = spi_alloc_master(&pdev->dev, sizeof(*xspi)); - if (master == NULL) + if (!master) return -ENOMEM; xspi = spi_master_get_devdata(master); @@ -521,6 +513,11 @@ static int cdns_spi_probe(struct platform_device *pdev) goto clk_dis_apb; } + pm_runtime_enable(&pdev->dev); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT); + pm_runtime_set_active(&pdev->dev); + ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs); if (ret < 0) master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS; @@ -535,11 +532,14 @@ static int cdns_spi_probe(struct platform_device *pdev) /* SPI controller initializations */ cdns_spi_init_hw(xspi); + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); + irq = platform_get_irq(pdev, 0); if (irq <= 0) { ret = -ENXIO; dev_err(&pdev->dev, "irq number is invalid\n"); - goto remove_master; + goto clk_dis_all; } ret = devm_request_irq(&pdev->dev, irq, cdns_spi_irq, @@ -547,7 +547,7 @@ static int cdns_spi_probe(struct platform_device *pdev) if (ret != 0) { ret = -ENXIO; dev_err(&pdev->dev, "request_irq failed\n"); - goto remove_master; + goto clk_dis_all; } master->prepare_transfer_hardware = cdns_prepare_transfer_hardware; @@ -555,6 +555,7 @@ static int cdns_spi_probe(struct platform_device *pdev) master->transfer_one = cdns_transfer_one; master->unprepare_transfer_hardware = cdns_unprepare_transfer_hardware; master->set_cs = cdns_spi_chipselect; + master->auto_runtime_pm = true; master->mode_bits = SPI_CPOL | SPI_CPHA; /* Set to default valid value */ @@ -572,6 +573,8 @@ static int cdns_spi_probe(struct platform_device *pdev) return ret; clk_dis_all: + 
pm_runtime_set_suspended(&pdev->dev); + pm_runtime_disable(&pdev->dev); clk_disable_unprepare(xspi->ref_clk); clk_dis_apb: clk_disable_unprepare(xspi->pclk); @@ -595,11 +598,12 @@ static int cdns_spi_remove(struct platform_device *pdev) struct spi_master *master = platform_get_drvdata(pdev); struct cdns_spi *xspi = spi_master_get_devdata(master); - cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET, - CDNS_SPI_ER_DISABLE_MASK); + cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE); clk_disable_unprepare(xspi->ref_clk); clk_disable_unprepare(xspi->pclk); + pm_runtime_set_suspended(&pdev->dev); + pm_runtime_disable(&pdev->dev); spi_unregister_master(master); @@ -613,21 +617,14 @@ static int cdns_spi_remove(struct platform_device *pdev) * This function disables the SPI controller and * changes the driver state to "suspend" * - * Return: Always 0 + * Return: 0 on success and error value on error */ static int __maybe_unused cdns_spi_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct spi_master *master = platform_get_drvdata(pdev); - struct cdns_spi *xspi = spi_master_get_devdata(master); - - spi_master_suspend(master); - - clk_disable_unprepare(xspi->ref_clk); - - clk_disable_unprepare(xspi->pclk); - return 0; + return spi_master_suspend(master); } /** @@ -642,8 +639,23 @@ static int __maybe_unused cdns_spi_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct spi_master *master = platform_get_drvdata(pdev); + + return spi_master_resume(master); +} + +/** + * cdns_spi_runtime_resume - Runtime resume method for the SPI driver + * @dev: Address of the platform_device structure + * + * This function enables the clocks + * + * Return: 0 on success and error value on error + */ +static int __maybe_unused cnds_runtime_resume(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); struct cdns_spi *xspi = spi_master_get_devdata(master); - int ret = 0; + int ret; ret = clk_prepare_enable(xspi->pclk); if (ret) { @@ -657,13 +669,33 @@ static int __maybe_unused cdns_spi_resume(struct device *dev) clk_disable(xspi->pclk); return ret; } - spi_master_resume(master); + return 0; +} + +/** + * cdns_spi_runtime_suspend - Runtime suspend method for the SPI driver + * @dev: Address of the platform_device structure + * + * This function disables the clocks + * + * Return: Always 0 + */ +static int __maybe_unused cnds_runtime_suspend(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); + struct cdns_spi *xspi = spi_master_get_devdata(master); + + clk_disable_unprepare(xspi->ref_clk); + clk_disable_unprepare(xspi->pclk); return 0; } -static SIMPLE_DEV_PM_OPS(cdns_spi_dev_pm_ops, cdns_spi_suspend, - cdns_spi_resume); +static const struct dev_pm_ops cdns_spi_dev_pm_ops = { + SET_RUNTIME_PM_OPS(cnds_runtime_suspend, + cnds_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(cdns_spi_suspend, cdns_spi_resume) +}; static const struct of_device_id cdns_spi_of_match[] = { { .compatible = "xlnx,zynq-spi-r1p6" }, diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index fddb7a3be322..d36c11b73a35 100644 --- a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c @@ -23,7 +23,6 @@ #include <linux/clk.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> -#include <linux/edma.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_gpio.h> @@ -33,8 +32,6 @@ #include <linux/platform_data/spi-davinci.h> -#define SPI_NO_RESOURCE ((resource_size_t)-1) - #define CS_DEFAULT 0xFF 
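The Cadence hunks above follow the standard runtime-PM autosuspend recipe: enable and configure autosuspend in probe, drop the initial reference with mark_last_busy/put_autosuspend once the hardware is initialized, and let the SPI core take references around transfers via auto_runtime_pm. A condensed sketch of that pattern, mirroring the calls the diff adds (the my_probe/my_remove names are hypothetical, not the Cadence code itself):

```c
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#define MY_AUTOSUSPEND_MS	3000	/* same delay the patch picks */

static int my_probe(struct platform_device *pdev)
{
	pm_runtime_enable(&pdev->dev);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, MY_AUTOSUSPEND_MS);
	pm_runtime_set_active(&pdev->dev);

	/* ... hardware init happens while the device counts as active ... */

	/* drop the initial reference; the device idles after the delay */
	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);
	return 0;
}

static int my_remove(struct platform_device *pdev)
{
	/* mirror probe so no runtime callbacks fire after teardown */
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}
```

With master->auto_runtime_pm set, the SPI core brackets each message with runtime-PM get/put calls, so the runtime callbacks (clock gating, in the Cadence case) only run when the controller is genuinely idle, while system sleep defers to spi_master_suspend()/spi_master_resume().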
#define SPIFMT_PHASE_MASK BIT(16) @@ -130,8 +127,6 @@ struct davinci_spi { struct dma_chan *dma_rx; struct dma_chan *dma_tx; - int dma_rx_chnum; - int dma_tx_chnum; struct davinci_spi_platform_data pdata; @@ -797,35 +792,19 @@ static irqreturn_t davinci_spi_irq(s32 irq, void *data) static int davinci_spi_request_dma(struct davinci_spi *dspi) { - dma_cap_mask_t mask; struct device *sdev = dspi->bitbang.master->dev.parent; - int r; - - dma_cap_zero(mask); - dma_cap_set(DMA_SLAVE, mask); - dspi->dma_rx = dma_request_channel(mask, edma_filter_fn, - &dspi->dma_rx_chnum); - if (!dspi->dma_rx) { - dev_err(sdev, "request RX DMA channel failed\n"); - r = -ENODEV; - goto rx_dma_failed; - } + dspi->dma_rx = dma_request_chan(sdev, "rx"); + if (IS_ERR(dspi->dma_rx)) + return PTR_ERR(dspi->dma_rx); - dspi->dma_tx = dma_request_channel(mask, edma_filter_fn, - &dspi->dma_tx_chnum); - if (!dspi->dma_tx) { - dev_err(sdev, "request TX DMA channel failed\n"); - r = -ENODEV; - goto tx_dma_failed; + dspi->dma_tx = dma_request_chan(sdev, "tx"); + if (IS_ERR(dspi->dma_tx)) { + dma_release_channel(dspi->dma_rx); + return PTR_ERR(dspi->dma_tx); } return 0; - -tx_dma_failed: - dma_release_channel(dspi->dma_rx); -rx_dma_failed: - return r; } #if defined(CONFIG_OF) @@ -936,8 +915,6 @@ static int davinci_spi_probe(struct platform_device *pdev) struct davinci_spi *dspi; struct davinci_spi_platform_data *pdata; struct resource *r; - resource_size_t dma_rx_chan = SPI_NO_RESOURCE; - resource_size_t dma_tx_chan = SPI_NO_RESOURCE; int ret = 0; u32 spipc0; @@ -1044,27 +1021,15 @@ static int davinci_spi_probe(struct platform_device *pdev) } } - r = platform_get_resource(pdev, IORESOURCE_DMA, 0); - if (r) - dma_rx_chan = r->start; - r = platform_get_resource(pdev, IORESOURCE_DMA, 1); - if (r) - dma_tx_chan = r->start; - dspi->bitbang.txrx_bufs = davinci_spi_bufs; - if (dma_rx_chan != SPI_NO_RESOURCE && - dma_tx_chan != SPI_NO_RESOURCE) { - dspi->dma_rx_chnum = dma_rx_chan; - dspi->dma_tx_chnum = dma_tx_chan; - - ret = davinci_spi_request_dma(dspi); - if (ret) - goto free_clk; - - dev_info(&pdev->dev, "DMA: supported\n"); - dev_info(&pdev->dev, "DMA: RX channel: %pa, TX channel: %pa, event queue: %d\n", - &dma_rx_chan, &dma_tx_chan, - pdata->dma_event_q); + + ret = davinci_spi_request_dma(dspi); + if (ret == -EPROBE_DEFER) { + goto free_clk; + } else if (ret) { + dev_info(&pdev->dev, "DMA is not supported (%d)\n", ret); + dspi->dma_rx = NULL; + dspi->dma_tx = NULL; } dspi->get_rx = davinci_spi_rx_buf_u8; @@ -1102,8 +1067,10 @@ static int davinci_spi_probe(struct platform_device *pdev) return ret; free_dma: - dma_release_channel(dspi->dma_rx); - dma_release_channel(dspi->dma_tx); + if (dspi->dma_rx) { + dma_release_channel(dspi->dma_rx); + dma_release_channel(dspi->dma_tx); + } free_clk: clk_disable_unprepare(dspi->clk); free_master: @@ -1134,6 +1101,11 @@ static int davinci_spi_remove(struct platform_device *pdev) clk_disable_unprepare(dspi->clk); spi_master_put(master); + if (dspi->dma_rx) { + dma_release_channel(dspi->dma_rx); + dma_release_channel(dspi->dma_tx); + } + return 0; } diff --git a/drivers/spi/spi-dln2.c b/drivers/spi/spi-dln2.c index 3b7d91d94fea..b62a99caacc0 100644 --- a/drivers/spi/spi-dln2.c +++ b/drivers/spi/spi-dln2.c @@ -683,6 +683,7 @@ static int dln2_spi_probe(struct platform_device *pdev) struct spi_master *master; struct dln2_spi *dln2; struct dln2_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct device *dev = &pdev->dev; int ret; master = spi_alloc_master(&pdev->dev, sizeof(*dln2)); @@ 
-700,6 +701,7 @@ static int dln2_spi_probe(struct platform_device *pdev) } dln2->master = master; + dln2->master->dev.of_node = dev->of_node; dln2->pdev = pdev; dln2->port = pdata->port; /* cs/mode can never be 0xff, so the first transfer will set them */ diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c index 332ccb0539a7..ef7db75c92c1 100644 --- a/drivers/spi/spi-dw-pci.c +++ b/drivers/spi/spi-dw-pci.c @@ -67,7 +67,7 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dws->irq = pdev->irq; /* - * Specific handling for paltforms, like dma setup, + * Specific handling for platforms, like dma setup, * clock rate, FIFO depth. */ if (desc) { diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c index bb00be8d1851..17a6387e20b5 100644 --- a/drivers/spi/spi-ep93xx.c +++ b/drivers/spi/spi-ep93xx.c @@ -567,7 +567,7 @@ static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi) txd = ep93xx_spi_dma_prepare(espi, DMA_MEM_TO_DEV); if (IS_ERR(txd)) { ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM); - dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(rxd)); + dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(txd)); msg->status = PTR_ERR(txd); return; } diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index c1a2d747b246..9e9dadb52b3d 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -121,18 +121,22 @@ enum dspi_trans_mode { struct fsl_dspi_devtype_data { enum dspi_trans_mode trans_mode; + u8 max_clock_factor; }; static const struct fsl_dspi_devtype_data vf610_data = { .trans_mode = DSPI_EOQ_MODE, + .max_clock_factor = 2, }; static const struct fsl_dspi_devtype_data ls1021a_v1_data = { .trans_mode = DSPI_TCFQ_MODE, + .max_clock_factor = 8, }; static const struct fsl_dspi_devtype_data ls2085a_data = { .trans_mode = DSPI_TCFQ_MODE, + .max_clock_factor = 8, }; struct fsl_dspi { @@ -726,6 +730,9 @@ static int dspi_probe(struct platform_device *pdev) } clk_prepare_enable(dspi->clk); + master->max_speed_hz = + clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor; + init_waitqueue_head(&dspi->waitq); platform_set_drvdata(pdev, master); diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c index 7cb0c1921495..8d85a3c343da 100644 --- a/drivers/spi/spi-fsl-espi.c +++ b/drivers/spi/spi-fsl-espi.c @@ -245,7 +245,12 @@ static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t) if (ret) return ret; - wait_for_completion(&mpc8xxx_spi->done); + /* Won't hang forever; the SPI bus sometimes loses interrupts...
*/ + ret = wait_for_completion_timeout(&mpc8xxx_spi->done, 2 * HZ); + if (ret == 0) + dev_err(mpc8xxx_spi->dev, + "Transaction hanging up (left %d bytes)\n", + mpc8xxx_spi->count); /* disable rx ints */ mpc8xxx_spi_write_reg(®_base->mask, 0); @@ -539,16 +544,31 @@ void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) if (events & SPIE_NE) { u32 rx_data, tmp; u8 rx_data_8; + int rx_nr_bytes = 4; + int ret; /* Spin until RX is done */ - while (SPIE_RXCNT(events) < min(4, mspi->len)) { - cpu_relax(); - events = mpc8xxx_spi_read_reg(®_base->event); + if (SPIE_RXCNT(events) < min(4, mspi->len)) { + ret = spin_event_timeout( + !(SPIE_RXCNT(events = + mpc8xxx_spi_read_reg(®_base->event)) < + min(4, mspi->len)), + 10000, 0); /* 10 msec */ + if (!ret) + dev_err(mspi->dev, + "tired waiting for SPIE_RXCNT\n"); } if (mspi->len >= 4) { rx_data = mpc8xxx_spi_read_reg(®_base->receive); + } else if (mspi->len <= 0) { + dev_err(mspi->dev, + "unexpected RX(SPIE_NE) interrupt occurred,\n" + "(local rxlen %d bytes, reg rxlen %d bytes)\n", + min(4, mspi->len), SPIE_RXCNT(events)); + rx_nr_bytes = 0; } else { + rx_nr_bytes = mspi->len; tmp = mspi->len; rx_data = 0; while (tmp--) { @@ -559,7 +579,7 @@ void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) rx_data <<= (4 - mspi->len) * 8; } - mspi->len -= 4; + mspi->len -= rx_nr_bytes; if (mspi->rx) mspi->get_rx(rx_data, mspi); diff --git a/drivers/spi/spi-octeon.c b/drivers/spi/spi-octeon.c index 07e4ce8273df..3b170093989f 100644 --- a/drivers/spi/spi-octeon.c +++ b/drivers/spi/spi-octeon.c @@ -175,6 +175,7 @@ err: static int octeon_spi_probe(struct platform_device *pdev) { struct resource *res_mem; + void __iomem *reg_base; struct spi_master *master; struct octeon_spi *p; int err = -ENOENT; @@ -186,19 +187,13 @@ static int octeon_spi_probe(struct platform_device *pdev) platform_set_drvdata(pdev, master); res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - - if (res_mem == NULL) { - dev_err(&pdev->dev, "found no memory resource\n"); - err = -ENXIO; - goto fail; - } - if (!devm_request_mem_region(&pdev->dev, res_mem->start, - resource_size(res_mem), res_mem->name)) { - dev_err(&pdev->dev, "request_mem_region failed\n"); + reg_base = devm_ioremap_resource(&pdev->dev, res_mem); + if (IS_ERR(reg_base)) { + err = PTR_ERR(reg_base); goto fail; } - p->register_base = (u64)devm_ioremap(&pdev->dev, res_mem->start, - resource_size(res_mem)); + + p->register_base = (u64)reg_base; master->num_chipselect = 4; master->mode_bits = SPI_CPHA | diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index 0caa3c8bef46..1d237e93a289 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -23,7 +23,6 @@ #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> -#include <linux/omap-dma.h> #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <linux/err.h> @@ -103,9 +102,6 @@ struct omap2_mcspi_dma { struct dma_chan *dma_tx; struct dma_chan *dma_rx; - int dma_tx_sync_dev; - int dma_rx_sync_dev; - struct completion dma_tx_completion; struct completion dma_rx_completion; @@ -964,8 +960,7 @@ static int omap2_mcspi_request_dma(struct spi_device *spi) struct spi_master *master = spi->master; struct omap2_mcspi *mcspi; struct omap2_mcspi_dma *mcspi_dma; - dma_cap_mask_t mask; - unsigned sig; + int ret = 0; mcspi = spi_master_get_devdata(master); mcspi_dma = mcspi->dma_channels + spi->chip_select; @@ -973,34 +968,25 @@ static int omap2_mcspi_request_dma(struct spi_device 
*spi) init_completion(&mcspi_dma->dma_rx_completion); init_completion(&mcspi_dma->dma_tx_completion); - dma_cap_zero(mask); - dma_cap_set(DMA_SLAVE, mask); - sig = mcspi_dma->dma_rx_sync_dev; - - mcspi_dma->dma_rx = - dma_request_slave_channel_compat(mask, omap_dma_filter_fn, - &sig, &master->dev, - mcspi_dma->dma_rx_ch_name); - if (!mcspi_dma->dma_rx) + mcspi_dma->dma_rx = dma_request_chan(&master->dev, + mcspi_dma->dma_rx_ch_name); + if (IS_ERR(mcspi_dma->dma_rx)) { + ret = PTR_ERR(mcspi_dma->dma_rx); + mcspi_dma->dma_rx = NULL; goto no_dma; + } - sig = mcspi_dma->dma_tx_sync_dev; - mcspi_dma->dma_tx = - dma_request_slave_channel_compat(mask, omap_dma_filter_fn, - &sig, &master->dev, - mcspi_dma->dma_tx_ch_name); - - if (!mcspi_dma->dma_tx) { + mcspi_dma->dma_tx = dma_request_chan(&master->dev, + mcspi_dma->dma_tx_ch_name); + if (IS_ERR(mcspi_dma->dma_tx)) { + ret = PTR_ERR(mcspi_dma->dma_tx); + mcspi_dma->dma_tx = NULL; dma_release_channel(mcspi_dma->dma_rx); mcspi_dma->dma_rx = NULL; - goto no_dma; } - return 0; - no_dma: - dev_warn(&spi->dev, "not using DMA for McSPI\n"); - return -EAGAIN; + return ret; } static int omap2_mcspi_setup(struct spi_device *spi) @@ -1039,8 +1025,9 @@ static int omap2_mcspi_setup(struct spi_device *spi) if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) { ret = omap2_mcspi_request_dma(spi); - if (ret < 0 && ret != -EAGAIN) - return ret; + if (ret) + dev_warn(&spi->dev, "not using DMA for McSPI (%d)\n", + ret); } ret = pm_runtime_get_sync(mcspi->dev); @@ -1434,42 +1421,8 @@ static int omap2_mcspi_probe(struct platform_device *pdev) } for (i = 0; i < master->num_chipselect; i++) { - char *dma_rx_ch_name = mcspi->dma_channels[i].dma_rx_ch_name; - char *dma_tx_ch_name = mcspi->dma_channels[i].dma_tx_ch_name; - struct resource *dma_res; - - sprintf(dma_rx_ch_name, "rx%d", i); - if (!pdev->dev.of_node) { - dma_res = - platform_get_resource_byname(pdev, - IORESOURCE_DMA, - dma_rx_ch_name); - if (!dma_res) { - dev_dbg(&pdev->dev, - "cannot get DMA RX channel\n"); - status = -ENODEV; - break; - } - - mcspi->dma_channels[i].dma_rx_sync_dev = - dma_res->start; - } - sprintf(dma_tx_ch_name, "tx%d", i); - if (!pdev->dev.of_node) { - dma_res = - platform_get_resource_byname(pdev, - IORESOURCE_DMA, - dma_tx_ch_name); - if (!dma_res) { - dev_dbg(&pdev->dev, - "cannot get DMA TX channel\n"); - status = -ENODEV; - break; - } - - mcspi->dma_channels[i].dma_tx_sync_dev = - dma_res->start; - } + sprintf(mcspi->dma_channels[i].dma_rx_ch_name, "rx%d", i); + sprintf(mcspi->dma_channels[i].dma_tx_ch_name, "tx%d", i); } if (status < 0) diff --git a/drivers/spi/spi-pic32-sqi.c b/drivers/spi/spi-pic32-sqi.c new file mode 100644 index 000000000000..ca3c8d94b290 --- /dev/null +++ b/drivers/spi/spi-pic32-sqi.c @@ -0,0 +1,727 @@ +/* + * PIC32 Quad SPI controller driver. + * + * Purna Chandra Mandal <purna.mandal@microchip.com> + * Copyright (c) 2016, Microchip Technology Inc. + * + * This program is free software; you can distribute it and/or modify it + * under the terms of the GNU General Public License (Version 2) as + * published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
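Before the new PIC32 drivers begin, it is worth spelling out the error-handling idiom that both the DaVinci and OMAP2 McSPI conversions above adopt: dma_request_chan() returns an ERR_PTR() rather than NULL, so -EPROBE_DEFER can be propagated (the DMA provider may simply not be bound yet) while any other failure downgrades the driver to PIO. A hedged sketch of that split, with a hypothetical helper name:

```c
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>

/*
 * Request an optional DMA channel (hypothetical helper).  Returns
 * -EPROBE_DEFER to retry probing later, otherwise 0 with *chan set
 * to a channel or to NULL when the driver should fall back to PIO.
 */
static int my_request_dma(struct device *dev, struct dma_chan **chan)
{
	struct dma_chan *c = dma_request_chan(dev, "rx");

	if (IS_ERR(c)) {
		if (PTR_ERR(c) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_info(dev, "DMA not available (%ld), using PIO\n",
			 PTR_ERR(c));
		*chan = NULL;
		return 0;
	}

	*chan = c;
	return 0;
}
```

This is also why the DaVinci probe above now NULLs dma_rx/dma_tx on non-deferral errors and guards the release paths on those pointers.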
+ */ + +#include <linux/clk.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/spi/spi.h> + +/* SQI registers */ +#define PESQI_XIP_CONF1_REG 0x00 +#define PESQI_XIP_CONF2_REG 0x04 +#define PESQI_CONF_REG 0x08 +#define PESQI_CTRL_REG 0x0C +#define PESQI_CLK_CTRL_REG 0x10 +#define PESQI_CMD_THRES_REG 0x14 +#define PESQI_INT_THRES_REG 0x18 +#define PESQI_INT_ENABLE_REG 0x1C +#define PESQI_INT_STAT_REG 0x20 +#define PESQI_TX_DATA_REG 0x24 +#define PESQI_RX_DATA_REG 0x28 +#define PESQI_STAT1_REG 0x2C +#define PESQI_STAT2_REG 0x30 +#define PESQI_BD_CTRL_REG 0x34 +#define PESQI_BD_CUR_ADDR_REG 0x38 +#define PESQI_BD_BASE_ADDR_REG 0x40 +#define PESQI_BD_STAT_REG 0x44 +#define PESQI_BD_POLL_CTRL_REG 0x48 +#define PESQI_BD_TX_DMA_STAT_REG 0x4C +#define PESQI_BD_RX_DMA_STAT_REG 0x50 +#define PESQI_THRES_REG 0x54 +#define PESQI_INT_SIGEN_REG 0x58 + +/* PESQI_CONF_REG fields */ +#define PESQI_MODE 0x7 +#define PESQI_MODE_BOOT 0 +#define PESQI_MODE_PIO 1 +#define PESQI_MODE_DMA 2 +#define PESQI_MODE_XIP 3 +#define PESQI_MODE_SHIFT 0 +#define PESQI_CPHA BIT(3) +#define PESQI_CPOL BIT(4) +#define PESQI_LSBF BIT(5) +#define PESQI_RXLATCH BIT(7) +#define PESQI_SERMODE BIT(8) +#define PESQI_WP_EN BIT(9) +#define PESQI_HOLD_EN BIT(10) +#define PESQI_BURST_EN BIT(12) +#define PESQI_CS_CTRL_HW BIT(15) +#define PESQI_SOFT_RESET BIT(16) +#define PESQI_LANES_SHIFT 20 +#define PESQI_SINGLE_LANE 0 +#define PESQI_DUAL_LANE 1 +#define PESQI_QUAD_LANE 2 +#define PESQI_CSEN_SHIFT 24 +#define PESQI_EN BIT(23) + +/* PESQI_CLK_CTRL_REG fields */ +#define PESQI_CLK_EN BIT(0) +#define PESQI_CLK_STABLE BIT(1) +#define PESQI_CLKDIV_SHIFT 8 +#define PESQI_CLKDIV 0xff + +/* PESQI_INT_THR/CMD_THR_REG */ +#define PESQI_TXTHR_MASK 0x1f +#define PESQI_TXTHR_SHIFT 8 +#define PESQI_RXTHR_MASK 0x1f +#define PESQI_RXTHR_SHIFT 0 + +/* PESQI_INT_EN/INT_STAT/INT_SIG_EN_REG */ +#define PESQI_TXEMPTY BIT(0) +#define PESQI_TXFULL BIT(1) +#define PESQI_TXTHR BIT(2) +#define PESQI_RXEMPTY BIT(3) +#define PESQI_RXFULL BIT(4) +#define PESQI_RXTHR BIT(5) +#define PESQI_BDDONE BIT(9) /* BD processing complete */ +#define PESQI_PKTCOMP BIT(10) /* packet processing complete */ +#define PESQI_DMAERR BIT(11) /* error */ + +/* PESQI_BD_CTRL_REG */ +#define PESQI_DMA_EN BIT(0) /* enable DMA engine */ +#define PESQI_POLL_EN BIT(1) /* enable polling */ +#define PESQI_BDP_START BIT(2) /* start BD processor */ + +/* PESQI controller buffer descriptor */ +struct buf_desc { + u32 bd_ctrl; /* control */ + u32 bd_status; /* reserved */ + u32 bd_addr; /* DMA buffer addr */ + u32 bd_nextp; /* next item in chain */ +}; + +/* bd_ctrl */ +#define BD_BUFLEN 0x1ff +#define BD_CBD_INT_EN BIT(16) /* Current BD is processed */ +#define BD_PKT_INT_EN BIT(17) /* All BDs of PKT processed */ +#define BD_LIFM BIT(18) /* last data of pkt */ +#define BD_LAST BIT(19) /* end of list */ +#define BD_DATA_RECV BIT(20) /* receive data */ +#define BD_DDR BIT(21) /* DDR mode */ +#define BD_DUAL BIT(22) /* Dual SPI */ +#define BD_QUAD BIT(23) /* Quad SPI */ +#define BD_LSBF BIT(25) /* LSB First */ +#define BD_STAT_CHECK BIT(27) /* Status poll */ +#define BD_DEVSEL_SHIFT 28 /* CS */ +#define BD_CS_DEASSERT BIT(30) /* de-assert CS after current BD */ +#define BD_EN BIT(31) /* BD owned by H/W */ + +/** + * struct ring_desc - Representation of SQI ring descriptor + * @list: list element to add to free 
or used list. + * @bd: PESQI controller buffer descriptor + * @bd_dma: DMA address of PESQI controller buffer descriptor + * @xfer_len: transfer length + */ +struct ring_desc { + struct list_head list; + struct buf_desc *bd; + dma_addr_t bd_dma; + u32 xfer_len; +}; + +/* Global constants */ +#define PESQI_BD_BUF_LEN_MAX 256 +#define PESQI_BD_COUNT 256 /* max 64KB data per spi message */ + +struct pic32_sqi { + void __iomem *regs; + struct clk *sys_clk; + struct clk *base_clk; /* drives spi clock */ + struct spi_master *master; + int irq; + struct completion xfer_done; + struct ring_desc *ring; + void *bd; + dma_addr_t bd_dma; + struct list_head bd_list_free; /* free */ + struct list_head bd_list_used; /* allocated */ + struct spi_device *cur_spi; + u32 cur_speed; + u8 cur_mode; +}; + +static inline void pic32_setbits(void __iomem *reg, u32 set) +{ + writel(readl(reg) | set, reg); +} + +static inline void pic32_clrbits(void __iomem *reg, u32 clr) +{ + writel(readl(reg) & ~clr, reg); +} + +static int pic32_sqi_set_clk_rate(struct pic32_sqi *sqi, u32 sck) +{ + u32 val, div; + + /* div = base_clk / (2 * spi_clk) */ + div = clk_get_rate(sqi->base_clk) / (2 * sck); + div &= PESQI_CLKDIV; + + val = readl(sqi->regs + PESQI_CLK_CTRL_REG); + /* apply new divider */ + val &= ~(PESQI_CLK_STABLE | (PESQI_CLKDIV << PESQI_CLKDIV_SHIFT)); + val |= div << PESQI_CLKDIV_SHIFT; + writel(val, sqi->regs + PESQI_CLK_CTRL_REG); + + /* wait for stability */ + return readl_poll_timeout(sqi->regs + PESQI_CLK_CTRL_REG, val, + val & PESQI_CLK_STABLE, 1, 5000); +} + +static inline void pic32_sqi_enable_int(struct pic32_sqi *sqi) +{ + u32 mask = PESQI_DMAERR | PESQI_BDDONE | PESQI_PKTCOMP; + + writel(mask, sqi->regs + PESQI_INT_ENABLE_REG); + /* INT_SIGEN works as interrupt-gate to INTR line */ + writel(mask, sqi->regs + PESQI_INT_SIGEN_REG); +} + +static inline void pic32_sqi_disable_int(struct pic32_sqi *sqi) +{ + writel(0, sqi->regs + PESQI_INT_ENABLE_REG); + writel(0, sqi->regs + PESQI_INT_SIGEN_REG); +} + +static irqreturn_t pic32_sqi_isr(int irq, void *dev_id) +{ + struct pic32_sqi *sqi = dev_id; + u32 enable, status; + + enable = readl(sqi->regs + PESQI_INT_ENABLE_REG); + status = readl(sqi->regs + PESQI_INT_STAT_REG); + + /* check for spurious interrupt */ + if (!status) + return IRQ_NONE; + + if (status & PESQI_DMAERR) { + enable = 0; + goto irq_done; + } + + if (status & PESQI_TXTHR) + enable &= ~(PESQI_TXTHR | PESQI_TXFULL | PESQI_TXEMPTY); + + if (status & PESQI_RXTHR) + enable &= ~(PESQI_RXTHR | PESQI_RXFULL | PESQI_RXEMPTY); + + if (status & PESQI_BDDONE) + enable &= ~PESQI_BDDONE; + + /* packet processing completed */ + if (status & PESQI_PKTCOMP) { + /* mask all interrupts */ + enable = 0; + /* complete transaction */ + complete(&sqi->xfer_done); + } + +irq_done: + /* interrupts are sticky, so mask when handled */ + writel(enable, sqi->regs + PESQI_INT_ENABLE_REG); + + return IRQ_HANDLED; +} + +static struct ring_desc *ring_desc_get(struct pic32_sqi *sqi) +{ + struct ring_desc *rdesc; + + if (list_empty(&sqi->bd_list_free)) + return NULL; + + rdesc = list_first_entry(&sqi->bd_list_free, struct ring_desc, list); + list_del(&rdesc->list); + list_add_tail(&rdesc->list, &sqi->bd_list_used); + return rdesc; +} + +static void ring_desc_put(struct pic32_sqi *sqi, struct ring_desc *rdesc) +{ + list_del(&rdesc->list); + list_add(&rdesc->list, &sqi->bd_list_free); +} + +static int pic32_sqi_one_transfer(struct pic32_sqi *sqi, + struct spi_message *mesg, + struct spi_transfer *xfer) +{ + struct spi_device *spi =
mesg->spi; + struct scatterlist *sg, *sgl; + struct ring_desc *rdesc; + struct buf_desc *bd; + int nents, i; + u32 bd_ctrl; + u32 nbits; + + /* Device selection */ + bd_ctrl = spi->chip_select << BD_DEVSEL_SHIFT; + + /* half-duplex: select transfer buffer, direction and lane */ + if (xfer->rx_buf) { + bd_ctrl |= BD_DATA_RECV; + nbits = xfer->rx_nbits; + sgl = xfer->rx_sg.sgl; + nents = xfer->rx_sg.nents; + } else { + nbits = xfer->tx_nbits; + sgl = xfer->tx_sg.sgl; + nents = xfer->tx_sg.nents; + } + + if (nbits & SPI_NBITS_QUAD) + bd_ctrl |= BD_QUAD; + else if (nbits & SPI_NBITS_DUAL) + bd_ctrl |= BD_DUAL; + + /* LSB first */ + if (spi->mode & SPI_LSB_FIRST) + bd_ctrl |= BD_LSBF; + + /* ownership to hardware */ + bd_ctrl |= BD_EN; + + for_each_sg(sgl, sg, nents, i) { + /* get ring descriptor */ + rdesc = ring_desc_get(sqi); + if (!rdesc) + break; + + bd = rdesc->bd; + + /* BD CTRL: length */ + rdesc->xfer_len = sg_dma_len(sg); + bd->bd_ctrl = bd_ctrl; + bd->bd_ctrl |= rdesc->xfer_len; + + /* BD STAT */ + bd->bd_status = 0; + + /* BD BUFFER ADDRESS */ + bd->bd_addr = sg->dma_address; + } + + return 0; +} + +static int pic32_sqi_prepare_hardware(struct spi_master *master) +{ + struct pic32_sqi *sqi = spi_master_get_devdata(master); + + /* enable spi interface */ + pic32_setbits(sqi->regs + PESQI_CONF_REG, PESQI_EN); + /* enable spi clk */ + pic32_setbits(sqi->regs + PESQI_CLK_CTRL_REG, PESQI_CLK_EN); + + return 0; +} + +static bool pic32_sqi_can_dma(struct spi_master *master, + struct spi_device *spi, + struct spi_transfer *x) +{ + /* Do DMA irrespective of transfer size */ + return true; +} + +static int pic32_sqi_one_message(struct spi_master *master, + struct spi_message *msg) +{ + struct spi_device *spi = msg->spi; + struct ring_desc *rdesc, *next; + struct spi_transfer *xfer; + struct pic32_sqi *sqi; + int ret = 0, mode; + u32 val; + + sqi = spi_master_get_devdata(master); + + reinit_completion(&sqi->xfer_done); + msg->actual_length = 0; + + /* We can't handle spi_transfer specific "speed_hz", "bits_per_word" + * and "delay_usecs". But spi_device specific speed and mode change + * can be handled at best during spi chip-select switch. + */ + if (sqi->cur_spi != spi) { + /* set spi speed */ + if (sqi->cur_speed != spi->max_speed_hz) { + sqi->cur_speed = spi->max_speed_hz; + ret = pic32_sqi_set_clk_rate(sqi, spi->max_speed_hz); + if (ret) + dev_warn(&spi->dev, "set_clk, %d\n", ret); + } + + /* set spi mode */ + mode = spi->mode & (SPI_MODE_3 | SPI_LSB_FIRST); + if (sqi->cur_mode != mode) { + val = readl(sqi->regs + PESQI_CONF_REG); + val &= ~(PESQI_CPOL | PESQI_CPHA | PESQI_LSBF); + if (mode & SPI_CPOL) + val |= PESQI_CPOL; + if (mode & SPI_LSB_FIRST) + val |= PESQI_LSBF; + val |= PESQI_CPHA; + writel(val, sqi->regs + PESQI_CONF_REG); + + sqi->cur_mode = mode; + } + sqi->cur_spi = spi; + } + + /* prepare hardware desc-list(BD) for transfer(s) */ + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + ret = pic32_sqi_one_transfer(sqi, msg, xfer); + if (ret) { + dev_err(&spi->dev, "xfer %p err\n", xfer); + goto xfer_out; + } + } + + /* BDs are prepared and chained. Now mark LAST_BD, CS_DEASSERT at last + * element of the list. 
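pic32_sqi_one_transfer() above builds one hardware buffer descriptor per scatterlist segment: the per-transfer control bits (direction, lane count, LSB-first, chip select) are folded into a bd_ctrl template once, then each segment contributes its DMA address and length. Reduced to the core loop (a simplification: the driver pulls descriptors from its free list rather than indexing an array):

```c
#include <linux/scatterlist.h>
#include <linux/types.h>

/* hardware descriptor layout as defined earlier in this patch */
struct buf_desc {
	u32 bd_ctrl;
	u32 bd_status;
	u32 bd_addr;
	u32 bd_nextp;
};

static void fill_descs(struct buf_desc *bd, struct scatterlist *sgl,
		       int nents, u32 bd_ctrl_template)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		/* segment length lives in the low bits of bd_ctrl (BD_BUFLEN) */
		bd[i].bd_ctrl = bd_ctrl_template | sg_dma_len(sg);
		bd[i].bd_status = 0;
		bd[i].bd_addr = sg_dma_address(sg);
	}
}
```

The scatterlist is already DMA-mapped by the SPI core at this point, because can_dma() returns true unconditionally for this controller.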
+ */ + rdesc = list_last_entry(&sqi->bd_list_used, struct ring_desc, list); + rdesc->bd->bd_ctrl |= BD_LAST | BD_CS_DEASSERT | + BD_LIFM | BD_PKT_INT_EN; + + /* set base address BD list for DMA engine */ + rdesc = list_first_entry(&sqi->bd_list_used, struct ring_desc, list); + writel(rdesc->bd_dma, sqi->regs + PESQI_BD_BASE_ADDR_REG); + + /* enable interrupt */ + pic32_sqi_enable_int(sqi); + + /* enable DMA engine */ + val = PESQI_DMA_EN | PESQI_POLL_EN | PESQI_BDP_START; + writel(val, sqi->regs + PESQI_BD_CTRL_REG); + + /* wait for xfer completion */ + ret = wait_for_completion_timeout(&sqi->xfer_done, 5 * HZ); + if (ret <= 0) { + dev_err(&sqi->master->dev, "wait timedout/interrupted\n"); + ret = -EIO; + msg->status = ret; + } else { + /* success */ + msg->status = 0; + ret = 0; + } + + /* disable DMA */ + writel(0, sqi->regs + PESQI_BD_CTRL_REG); + + pic32_sqi_disable_int(sqi); + +xfer_out: + list_for_each_entry_safe_reverse(rdesc, next, + &sqi->bd_list_used, list) { + /* Update total byte transferred */ + msg->actual_length += rdesc->xfer_len; + /* release ring descr */ + ring_desc_put(sqi, rdesc); + } + spi_finalize_current_message(spi->master); + + return ret; +} + +static int pic32_sqi_unprepare_hardware(struct spi_master *master) +{ + struct pic32_sqi *sqi = spi_master_get_devdata(master); + + /* disable clk */ + pic32_clrbits(sqi->regs + PESQI_CLK_CTRL_REG, PESQI_CLK_EN); + /* disable spi */ + pic32_clrbits(sqi->regs + PESQI_CONF_REG, PESQI_EN); + + return 0; +} + +static int ring_desc_ring_alloc(struct pic32_sqi *sqi) +{ + struct ring_desc *rdesc; + struct buf_desc *bd; + int i; + + /* allocate coherent DMAable memory for hardware buffer descriptors. */ + sqi->bd = dma_zalloc_coherent(&sqi->master->dev, + sizeof(*bd) * PESQI_BD_COUNT, + &sqi->bd_dma, GFP_DMA32); + if (!sqi->bd) { + dev_err(&sqi->master->dev, "failed allocating dma buffer\n"); + return -ENOMEM; + } + + /* allocate software ring descriptors */ + sqi->ring = kcalloc(PESQI_BD_COUNT, sizeof(*rdesc), GFP_KERNEL); + if (!sqi->ring) { + dma_free_coherent(&sqi->master->dev, + sizeof(*bd) * PESQI_BD_COUNT, + sqi->bd, sqi->bd_dma); + return -ENOMEM; + } + + bd = (struct buf_desc *)sqi->bd; + + INIT_LIST_HEAD(&sqi->bd_list_free); + INIT_LIST_HEAD(&sqi->bd_list_used); + + /* initialize ring-desc */ + for (i = 0, rdesc = sqi->ring; i < PESQI_BD_COUNT; i++, rdesc++) { + INIT_LIST_HEAD(&rdesc->list); + rdesc->bd = &bd[i]; + rdesc->bd_dma = sqi->bd_dma + (void *)&bd[i] - (void *)bd; + list_add_tail(&rdesc->list, &sqi->bd_list_free); + } + + /* Prepare BD: chain to next BD(s) */ + for (i = 0, rdesc = sqi->ring; i < PESQI_BD_COUNT - 1; i++) + bd[i].bd_nextp = rdesc[i + 1].bd_dma; + bd[PESQI_BD_COUNT - 1].bd_nextp = 0; + + return 0; +} + +static void ring_desc_ring_free(struct pic32_sqi *sqi) +{ + dma_free_coherent(&sqi->master->dev, + sizeof(struct buf_desc) * PESQI_BD_COUNT, + sqi->bd, sqi->bd_dma); + kfree(sqi->ring); +} + +static void pic32_sqi_hw_init(struct pic32_sqi *sqi) +{ + unsigned long flags; + u32 val; + + /* Soft-reset of PESQI controller triggers interrupt. + * We are not yet ready to handle them so disable CPU + * interrupt for the time being. 
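A detail worth pausing on in ring_desc_ring_alloc() above: all PESQI_BD_COUNT descriptors live in a single coherent allocation, and bd_nextp links them by bus address so the DMA engine can walk the chain unaided. Since bd_nextp is only 32 bits wide while dma_addr_t may be 64-bit, the allocation asks for GFP_DMA32 to keep the ring below 4 GiB. The chaining arithmetic in isolation (assuming the same buf_desc layout as the patch):

```c
#include <linux/types.h>

struct buf_desc {		/* as in the patch */
	u32 bd_ctrl;
	u32 bd_status;
	u32 bd_addr;
	u32 bd_nextp;		/* bus address of the next descriptor */
};

/* bd lives in one coherent buffer whose bus address is bd_dma */
static void chain_descs(struct buf_desc *bd, dma_addr_t bd_dma, int count)
{
	int i;

	for (i = 0; i < count - 1; i++)
		bd[i].bd_nextp = bd_dma + (i + 1) * sizeof(*bd);
	bd[count - 1].bd_nextp = 0;	/* terminate the chain */
}
```

This is the same offset calculation the driver performs per element with pointer arithmetic (bd_dma plus the byte offset of &bd[i] from the buffer base).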
+ */ + local_irq_save(flags); + + /* assert soft-reset */ + writel(PESQI_SOFT_RESET, sqi->regs + PESQI_CONF_REG); + + /* wait until clear */ + readl_poll_timeout_atomic(sqi->regs + PESQI_CONF_REG, val, + !(val & PESQI_SOFT_RESET), 1, 5000); + + /* disable all interrupts */ + pic32_sqi_disable_int(sqi); + + /* Now it is safe to enable back CPU interrupt */ + local_irq_restore(flags); + + /* tx and rx fifo interrupt threshold */ + val = readl(sqi->regs + PESQI_CMD_THRES_REG); + val &= ~(PESQI_TXTHR_MASK << PESQI_TXTHR_SHIFT); + val &= ~(PESQI_RXTHR_MASK << PESQI_RXTHR_SHIFT); + val |= (1U << PESQI_TXTHR_SHIFT) | (1U << PESQI_RXTHR_SHIFT); + writel(val, sqi->regs + PESQI_CMD_THRES_REG); + + val = readl(sqi->regs + PESQI_INT_THRES_REG); + val &= ~(PESQI_TXTHR_MASK << PESQI_TXTHR_SHIFT); + val &= ~(PESQI_RXTHR_MASK << PESQI_RXTHR_SHIFT); + val |= (1U << PESQI_TXTHR_SHIFT) | (1U << PESQI_RXTHR_SHIFT); + writel(val, sqi->regs + PESQI_INT_THRES_REG); + + /* default configuration */ + val = readl(sqi->regs + PESQI_CONF_REG); + + /* set mode: DMA */ + val &= ~PESQI_MODE; + val |= PESQI_MODE_DMA << PESQI_MODE_SHIFT; + writel(val, sqi->regs + PESQI_CONF_REG); + + /* DATAEN - SQIID0-ID3 */ + val |= PESQI_QUAD_LANE << PESQI_LANES_SHIFT; + + /* burst/INCR4 enable */ + val |= PESQI_BURST_EN; + + /* CSEN - all CS */ + val |= 3U << PESQI_CSEN_SHIFT; + writel(val, sqi->regs + PESQI_CONF_REG); + + /* write poll count */ + writel(0, sqi->regs + PESQI_BD_POLL_CTRL_REG); + + sqi->cur_speed = 0; + sqi->cur_mode = -1; +} + +static int pic32_sqi_probe(struct platform_device *pdev) +{ + struct spi_master *master; + struct pic32_sqi *sqi; + struct resource *reg; + int ret; + + master = spi_alloc_master(&pdev->dev, sizeof(*sqi)); + if (!master) + return -ENOMEM; + + sqi = spi_master_get_devdata(master); + sqi->master = master; + + reg = platform_get_resource(pdev, IORESOURCE_MEM, 0); + sqi->regs = devm_ioremap_resource(&pdev->dev, reg); + if (IS_ERR(sqi->regs)) { + ret = PTR_ERR(sqi->regs); + goto err_free_master; + } + + /* irq */ + sqi->irq = platform_get_irq(pdev, 0); + if (sqi->irq < 0) { + dev_err(&pdev->dev, "no irq found\n"); + ret = sqi->irq; + goto err_free_master; + } + + /* clocks */ + sqi->sys_clk = devm_clk_get(&pdev->dev, "reg_ck"); + if (IS_ERR(sqi->sys_clk)) { + ret = PTR_ERR(sqi->sys_clk); + dev_err(&pdev->dev, "no sys_clk ?\n"); + goto err_free_master; + } + + sqi->base_clk = devm_clk_get(&pdev->dev, "spi_ck"); + if (IS_ERR(sqi->base_clk)) { + ret = PTR_ERR(sqi->base_clk); + dev_err(&pdev->dev, "no base clk ?\n"); + goto err_free_master; + } + + ret = clk_prepare_enable(sqi->sys_clk); + if (ret) { + dev_err(&pdev->dev, "sys clk enable failed\n"); + goto err_free_master; + } + + ret = clk_prepare_enable(sqi->base_clk); + if (ret) { + dev_err(&pdev->dev, "base clk enable failed\n"); + clk_disable_unprepare(sqi->sys_clk); + goto err_free_master; + } + + init_completion(&sqi->xfer_done); + + /* initialize hardware */ + pic32_sqi_hw_init(sqi); + + /* allocate buffers & descriptors */ + ret = ring_desc_ring_alloc(sqi); + if (ret) { + dev_err(&pdev->dev, "ring alloc failed\n"); + goto err_disable_clk; + } + + /* install irq handlers */ + ret = request_irq(sqi->irq, pic32_sqi_isr, 0, + dev_name(&pdev->dev), sqi); + if (ret < 0) { + dev_err(&pdev->dev, "request_irq(%d), failed\n", sqi->irq); + goto err_free_ring; + } + + /* register master */ + master->num_chipselect = 2; + master->max_speed_hz = clk_get_rate(sqi->base_clk); + master->dma_alignment = 32; + master->max_dma_len = PESQI_BD_BUF_LEN_MAX; + 
master->dev.of_node = of_node_get(pdev->dev.of_node); + master->mode_bits = SPI_MODE_3 | SPI_MODE_0 | SPI_TX_DUAL | + SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD; + master->flags = SPI_MASTER_HALF_DUPLEX; + master->can_dma = pic32_sqi_can_dma; + master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32); + master->transfer_one_message = pic32_sqi_one_message; + master->prepare_transfer_hardware = pic32_sqi_prepare_hardware; + master->unprepare_transfer_hardware = pic32_sqi_unprepare_hardware; + + ret = devm_spi_register_master(&pdev->dev, master); + if (ret) { + dev_err(&master->dev, "failed registering spi master\n"); + free_irq(sqi->irq, sqi); + goto err_free_ring; + } + + platform_set_drvdata(pdev, sqi); + + return 0; + +err_free_ring: + ring_desc_ring_free(sqi); + +err_disable_clk: + clk_disable_unprepare(sqi->base_clk); + clk_disable_unprepare(sqi->sys_clk); + +err_free_master: + spi_master_put(master); + return ret; +} + +static int pic32_sqi_remove(struct platform_device *pdev) +{ + struct pic32_sqi *sqi = platform_get_drvdata(pdev); + + /* release resources */ + free_irq(sqi->irq, sqi); + ring_desc_ring_free(sqi); + + /* disable clk */ + clk_disable_unprepare(sqi->base_clk); + clk_disable_unprepare(sqi->sys_clk); + + return 0; +} + +static const struct of_device_id pic32_sqi_of_ids[] = { + {.compatible = "microchip,pic32mzda-sqi",}, + {}, +}; +MODULE_DEVICE_TABLE(of, pic32_sqi_of_ids); + +static struct platform_driver pic32_sqi_driver = { + .driver = { + .name = "sqi-pic32", + .of_match_table = of_match_ptr(pic32_sqi_of_ids), + }, + .probe = pic32_sqi_probe, + .remove = pic32_sqi_remove, +}; + +module_platform_driver(pic32_sqi_driver); + +MODULE_AUTHOR("Purna Chandra Mandal <purna.mandal@microchip.com>"); +MODULE_DESCRIPTION("Microchip SPI driver for PIC32 SQI controller."); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/spi-pic32.c b/drivers/spi/spi-pic32.c new file mode 100644 index 000000000000..73db87f805a1 --- /dev/null +++ b/drivers/spi/spi-pic32.c @@ -0,0 +1,878 @@ +/* + * Microchip PIC32 SPI controller driver. + * + * Purna Chandra Mandal <purna.mandal@microchip.com> + * Copyright (c) 2016, Microchip Technology Inc. + * + * This program is free software; you can distribute it and/or modify it + * under the terms of the GNU General Public License (Version 2) as + * published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
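pic32_sqi_probe() above is a textbook goto-based unwind ladder: each failure jumps to the deepest label that undoes everything acquired so far, so teardown happens in exact reverse order of acquisition and nothing is released twice. The skeleton of the pattern (acquire_*/release_* are hypothetical stand-ins for the ioremap, clock, ring and IRQ steps):

```c
#include <linux/platform_device.h>

/* hypothetical stand-ins for ioremap/clock/ring/IRQ acquisition */
int acquire_a(struct platform_device *pdev);
int acquire_b(struct platform_device *pdev);
int acquire_c(struct platform_device *pdev);
void release_a(struct platform_device *pdev);
void release_b(struct platform_device *pdev);

static int my_probe(struct platform_device *pdev)
{
	int ret;

	ret = acquire_a(pdev);
	if (ret)
		return ret;		/* nothing to unwind yet */

	ret = acquire_b(pdev);
	if (ret)
		goto err_release_a;

	ret = acquire_c(pdev);
	if (ret)
		goto err_release_b;

	return 0;

err_release_b:
	release_b(pdev);		/* undo in reverse order */
err_release_a:
	release_a(pdev);
	return ret;
}
```

Devres-managed resources (devm_ioremap_resource, devm_clk_get above) drop out of the ladder entirely, which is why only the clock enables, the ring and the IRQ need explicit labels here.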
+ */ + +#include <linux/clk.h> +#include <linux/clkdev.h> +#include <linux/delay.h> +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/highmem.h> +#include <linux/module.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/of_gpio.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/spi/spi.h> + +/* SPI controller registers */ +struct pic32_spi_regs { + u32 ctrl; + u32 ctrl_clr; + u32 ctrl_set; + u32 ctrl_inv; + u32 status; + u32 status_clr; + u32 status_set; + u32 status_inv; + u32 buf; + u32 dontuse[3]; + u32 baud; + u32 dontuse2[3]; + u32 ctrl2; + u32 ctrl2_clr; + u32 ctrl2_set; + u32 ctrl2_inv; +}; + +/* Bit fields of SPI Control Register */ +#define CTRL_RX_INT_SHIFT 0 /* Rx interrupt generation */ +#define RX_FIFO_EMTPY 0 +#define RX_FIFO_NOT_EMPTY 1 /* not empty */ +#define RX_FIFO_HALF_FULL 2 /* full by half or more */ +#define RX_FIFO_FULL 3 /* completely full */ + +#define CTRL_TX_INT_SHIFT 2 /* TX interrupt generation */ +#define TX_FIFO_ALL_EMPTY 0 /* completely empty */ +#define TX_FIFO_EMTPY 1 /* empty */ +#define TX_FIFO_HALF_EMPTY 2 /* empty by half or more */ +#define TX_FIFO_NOT_FULL 3 /* at least one empty */ + +#define CTRL_MSTEN BIT(5) /* enable master mode */ +#define CTRL_CKP BIT(6) /* active low */ +#define CTRL_CKE BIT(8) /* Tx on falling edge */ +#define CTRL_SMP BIT(9) /* Rx at middle or end of tx */ +#define CTRL_BPW_MASK 0x03 /* bits per word/sample */ +#define CTRL_BPW_SHIFT 10 +#define PIC32_BPW_8 0 +#define PIC32_BPW_16 1 +#define PIC32_BPW_32 2 +#define CTRL_SIDL BIT(13) /* sleep when idle */ +#define CTRL_ON BIT(15) /* enable macro */ +#define CTRL_ENHBUF BIT(16) /* enable enhanced buffering */ +#define CTRL_MCLKSEL BIT(23) /* select clock source */ +#define CTRL_MSSEN BIT(28) /* macro driven /SS */ +#define CTRL_FRMEN BIT(31) /* enable framing mode */ + +/* Bit fields of SPI Status Register */ +#define STAT_RF_EMPTY BIT(5) /* RX Fifo empty */ +#define STAT_RX_OV BIT(6) /* err, s/w needs to clear */ +#define STAT_TX_UR BIT(8) /* UR in Framed SPI modes */ +#define STAT_FRM_ERR BIT(12) /* Multiple Frame Sync pulse */ +#define STAT_TF_LVL_MASK 0x1F +#define STAT_TF_LVL_SHIFT 16 +#define STAT_RF_LVL_MASK 0x1F +#define STAT_RF_LVL_SHIFT 24 + +/* Bit fields of SPI Baud Register */ +#define BAUD_MASK 0x1ff + +/* Bit fields of SPI Control2 Register */ +#define CTRL2_TX_UR_EN BIT(10) /* Enable int on Tx under-run */ +#define CTRL2_RX_OV_EN BIT(11) /* Enable int on Rx over-run */ +#define CTRL2_FRM_ERR_EN BIT(12) /* Enable frame err int */ + +/* Minimum DMA transfer size */ +#define PIC32_DMA_LEN_MIN 64 + +struct pic32_spi { + dma_addr_t dma_base; + struct pic32_spi_regs __iomem *regs; + int fault_irq; + int rx_irq; + int tx_irq; + u32 fifo_n_byte; /* FIFO depth in bytes */ + struct clk *clk; + struct spi_master *master; + /* Current controller setting */ + u32 speed_hz; /* spi-clk rate */ + u32 mode; + u32 bits_per_word; + u32 fifo_n_elm; /* FIFO depth in words */ +#define PIC32F_DMA_PREP 0 /* DMA chnls configured */ + unsigned long flags; + /* Current transfer state */ + struct completion xfer_done; + /* PIO transfer specific */ + const void *tx; + const void *tx_end; + const void *rx; + const void *rx_end; + int len; + void (*rx_fifo)(struct pic32_spi *); + void (*tx_fifo)(struct pic32_spi *); +}; + +static inline void pic32_spi_enable(struct pic32_spi *pic32s) +{ + writel(CTRL_ON | CTRL_SIDL, &pic32s->regs->ctrl_set); +} + +static
inline void pic32_spi_disable(struct pic32_spi *pic32s) +{ + writel(CTRL_ON | CTRL_SIDL, &pic32s->regs->ctrl_clr); + + /* avoid SPI registers read/write at immediate next CPU clock */ + ndelay(20); +} + +static void pic32_spi_set_clk_rate(struct pic32_spi *pic32s, u32 spi_ck) +{ + u32 div; + + /* div = clk_in / (2 * spi_ck) - 1 */ + div = DIV_ROUND_CLOSEST(clk_get_rate(pic32s->clk), 2 * spi_ck) - 1; + + writel(div & BAUD_MASK, &pic32s->regs->baud); +} + +static inline u32 pic32_rx_fifo_level(struct pic32_spi *pic32s) +{ + u32 sr = readl(&pic32s->regs->status); + + return (sr >> STAT_RF_LVL_SHIFT) & STAT_RF_LVL_MASK; +} + +static inline u32 pic32_tx_fifo_level(struct pic32_spi *pic32s) +{ + u32 sr = readl(&pic32s->regs->status); + + return (sr >> STAT_TF_LVL_SHIFT) & STAT_TF_LVL_MASK; +} + +/* Return the max entries we can fill into tx fifo */ +static u32 pic32_tx_max(struct pic32_spi *pic32s, int n_bytes) +{ + u32 tx_left, tx_room, rxtx_gap; + + tx_left = (pic32s->tx_end - pic32s->tx) / n_bytes; + tx_room = pic32s->fifo_n_elm - pic32_tx_fifo_level(pic32s); + + /* + * Another concern is about the tx/rx mismatch, we + * thought to use (pic32s->fifo_n_byte - rxfl - txfl) as + * one maximum value for tx, but it doesn't cover the + * data which is out of tx/rx fifo and inside the + * shift registers. So a ctrl from sw point of + * view is taken. + */ + rxtx_gap = ((pic32s->rx_end - pic32s->rx) - + (pic32s->tx_end - pic32s->tx)) / n_bytes; + return min3(tx_left, tx_room, (u32)(pic32s->fifo_n_elm - rxtx_gap)); +} + +/* Return the max entries we should read out of rx fifo */ +static u32 pic32_rx_max(struct pic32_spi *pic32s, int n_bytes) +{ + u32 rx_left = (pic32s->rx_end - pic32s->rx) / n_bytes; + + return min_t(u32, rx_left, pic32_rx_fifo_level(pic32s)); +} + +#define BUILD_SPI_FIFO_RW(__name, __type, __bwl) \ +static void pic32_spi_rx_##__name(struct pic32_spi *pic32s) \ +{ \ + __type v; \ + u32 mx = pic32_rx_max(pic32s, sizeof(__type)); \ + for (; mx; mx--) { \ + v = read##__bwl(&pic32s->regs->buf); \ + if (pic32s->rx_end - pic32s->len) \ + *(__type *)(pic32s->rx) = v; \ + pic32s->rx += sizeof(__type); \ + } \ +} \ + \ +static void pic32_spi_tx_##__name(struct pic32_spi *pic32s) \ +{ \ + __type v; \ + u32 mx = pic32_tx_max(pic32s, sizeof(__type)); \ + for (; mx ; mx--) { \ + v = (__type)~0U; \ + if (pic32s->tx_end - pic32s->len) \ + v = *(__type *)(pic32s->tx); \ + write##__bwl(v, &pic32s->regs->buf); \ + pic32s->tx += sizeof(__type); \ + } \ +} + +BUILD_SPI_FIFO_RW(byte, u8, b); +BUILD_SPI_FIFO_RW(word, u16, w); +BUILD_SPI_FIFO_RW(dword, u32, l); + +static void pic32_err_stop(struct pic32_spi *pic32s, const char *msg) +{ + /* disable all interrupts */ + disable_irq_nosync(pic32s->fault_irq); + disable_irq_nosync(pic32s->rx_irq); + disable_irq_nosync(pic32s->tx_irq); + + /* Show err message and abort xfer with err */ + dev_err(&pic32s->master->dev, "%s\n", msg); + if (pic32s->master->cur_msg) + pic32s->master->cur_msg->status = -EIO; + complete(&pic32s->xfer_done); +} + +static irqreturn_t pic32_spi_fault_irq(int irq, void *dev_id) +{ + struct pic32_spi *pic32s = dev_id; + u32 status; + + status = readl(&pic32s->regs->status); + + /* Error handling */ + if (status & (STAT_RX_OV | STAT_TX_UR)) { + writel(STAT_RX_OV, &pic32s->regs->status_clr); + writel(STAT_TX_UR, &pic32s->regs->status_clr); + pic32_err_stop(pic32s, "err_irq: fifo ov/ur-run\n"); + return IRQ_HANDLED; + } + + if (status & STAT_FRM_ERR) { + pic32_err_stop(pic32s, "err_irq: frame error"); + return IRQ_HANDLED; + } + + if
(!pic32s->master->cur_msg) { + pic32_err_stop(pic32s, "err_irq: no message"); + return IRQ_NONE; + } + + return IRQ_NONE; +} + +static irqreturn_t pic32_spi_rx_irq(int irq, void *dev_id) +{ + struct pic32_spi *pic32s = dev_id; + + pic32s->rx_fifo(pic32s); + + /* rx complete? */ + if (pic32s->rx_end == pic32s->rx) { + /* disable all interrupts */ + disable_irq_nosync(pic32s->fault_irq); + disable_irq_nosync(pic32s->rx_irq); + + /* complete current xfer */ + complete(&pic32s->xfer_done); + } + + return IRQ_HANDLED; +} + +static irqreturn_t pic32_spi_tx_irq(int irq, void *dev_id) +{ + struct pic32_spi *pic32s = dev_id; + + pic32s->tx_fifo(pic32s); + + /* tx complete? disable tx interrupt */ + if (pic32s->tx_end == pic32s->tx) + disable_irq_nosync(pic32s->tx_irq); + + return IRQ_HANDLED; +} + +static void pic32_spi_dma_rx_notify(void *data) +{ + struct pic32_spi *pic32s = data; + + complete(&pic32s->xfer_done); +} + +static int pic32_spi_dma_transfer(struct pic32_spi *pic32s, + struct spi_transfer *xfer) +{ + struct spi_master *master = pic32s->master; + struct dma_async_tx_descriptor *desc_rx; + struct dma_async_tx_descriptor *desc_tx; + dma_cookie_t cookie; + int ret; + + if (!master->dma_rx || !master->dma_tx) + return -ENODEV; + + desc_rx = dmaengine_prep_slave_sg(master->dma_rx, + xfer->rx_sg.sgl, + xfer->rx_sg.nents, + DMA_FROM_DEVICE, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc_rx) { + ret = -EINVAL; + goto err_dma; + } + + desc_tx = dmaengine_prep_slave_sg(master->dma_tx, + xfer->tx_sg.sgl, + xfer->tx_sg.nents, + DMA_TO_DEVICE, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc_tx) { + ret = -EINVAL; + goto err_dma; + } + + /* Put the callback on the RX transfer, which should finish last */ + desc_rx->callback = pic32_spi_dma_rx_notify; + desc_rx->callback_param = pic32s; + + cookie = dmaengine_submit(desc_rx); + ret = dma_submit_error(cookie); + if (ret) + goto err_dma; + + cookie = dmaengine_submit(desc_tx); + ret = dma_submit_error(cookie); + if (ret) + goto err_dma_tx; + + dma_async_issue_pending(master->dma_rx); + dma_async_issue_pending(master->dma_tx); + + return 0; + +err_dma_tx: + dmaengine_terminate_all(master->dma_rx); +err_dma: + return ret; +} + +static int pic32_spi_dma_config(struct pic32_spi *pic32s, u32 dma_width) +{ + int buf_offset = offsetof(struct pic32_spi_regs, buf); + struct spi_master *master = pic32s->master; + struct dma_slave_config cfg; + int ret; + + cfg.device_fc = true; + cfg.src_addr = pic32s->dma_base + buf_offset; + cfg.dst_addr = pic32s->dma_base + buf_offset; + cfg.src_maxburst = pic32s->fifo_n_elm / 2; /* fill one-half */ + cfg.dst_maxburst = pic32s->fifo_n_elm / 2; /* drain one-half */ + cfg.src_addr_width = dma_width; + cfg.dst_addr_width = dma_width; + /* tx channel */ + cfg.slave_id = pic32s->tx_irq; + cfg.direction = DMA_MEM_TO_DEV; + ret = dmaengine_slave_config(master->dma_tx, &cfg); + if (ret) { + dev_err(&master->dev, "tx channel setup failed\n"); + return ret; + } + /* rx channel */ + cfg.slave_id = pic32s->rx_irq; + cfg.direction = DMA_DEV_TO_MEM; + ret = dmaengine_slave_config(master->dma_rx, &cfg); + if (ret) + dev_err(&master->dev, "rx channel setup failed\n"); + + return ret; +} + +static int pic32_spi_set_word_size(struct pic32_spi *pic32s, u8 bits_per_word) +{ + enum dma_slave_buswidth dmawidth; + u32 buswidth, v; + + switch (bits_per_word) { + case 8: + pic32s->rx_fifo = pic32_spi_rx_byte; + pic32s->tx_fifo = pic32_spi_tx_byte; + buswidth = PIC32_BPW_8; + dmawidth = DMA_SLAVE_BUSWIDTH_1_BYTE; + break; + case 16: + pic32s->rx_fifo = 
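/* PIO helpers for this word width, generated by BUILD_SPI_FIFO_RW() above */ 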
pic32_spi_rx_word; + pic32s->tx_fifo = pic32_spi_tx_word; + buswidth = PIC32_BPW_16; + dmawidth = DMA_SLAVE_BUSWIDTH_2_BYTES; + break; + case 32: + pic32s->rx_fifo = pic32_spi_rx_dword; + pic32s->tx_fifo = pic32_spi_tx_dword; + buswidth = PIC32_BPW_32; + dmawidth = DMA_SLAVE_BUSWIDTH_4_BYTES; + break; + default: + /* not supported */ + return -EINVAL; + } + + /* calculate maximum number of words fifos can hold */ + pic32s->fifo_n_elm = DIV_ROUND_UP(pic32s->fifo_n_byte, + bits_per_word / 8); + /* set word size */ + v = readl(&pic32s->regs->ctrl); + v &= ~(CTRL_BPW_MASK << CTRL_BPW_SHIFT); + v |= buswidth << CTRL_BPW_SHIFT; + writel(v, &pic32s->regs->ctrl); + + /* re-configure dma width, if required */ + if (test_bit(PIC32F_DMA_PREP, &pic32s->flags)) + pic32_spi_dma_config(pic32s, dmawidth); + + return 0; +} + +static int pic32_spi_prepare_hardware(struct spi_master *master) +{ + struct pic32_spi *pic32s = spi_master_get_devdata(master); + + pic32_spi_enable(pic32s); + + return 0; +} + +static int pic32_spi_prepare_message(struct spi_master *master, + struct spi_message *msg) +{ + struct pic32_spi *pic32s = spi_master_get_devdata(master); + struct spi_device *spi = msg->spi; + u32 val; + + /* set device specific bits_per_word */ + if (pic32s->bits_per_word != spi->bits_per_word) { + pic32_spi_set_word_size(pic32s, spi->bits_per_word); + pic32s->bits_per_word = spi->bits_per_word; + } + + /* device specific speed change */ + if (pic32s->speed_hz != spi->max_speed_hz) { + pic32_spi_set_clk_rate(pic32s, spi->max_speed_hz); + pic32s->speed_hz = spi->max_speed_hz; + } + + /* device specific mode change */ + if (pic32s->mode != spi->mode) { + val = readl(&pic32s->regs->ctrl); + /* active low */ + if (spi->mode & SPI_CPOL) + val |= CTRL_CKP; + else + val &= ~CTRL_CKP; + /* tx on rising edge */ + if (spi->mode & SPI_CPHA) + val &= ~CTRL_CKE; + else + val |= CTRL_CKE; + + /* rx at end of tx */ + val |= CTRL_SMP; + writel(val, &pic32s->regs->ctrl); + pic32s->mode = spi->mode; + } + + return 0; +} + +static bool pic32_spi_can_dma(struct spi_master *master, + struct spi_device *spi, + struct spi_transfer *xfer) +{ + struct pic32_spi *pic32s = spi_master_get_devdata(master); + + /* skip DMA on small transfers to avoid the setup overhead. */ + return (xfer->len >= PIC32_DMA_LEN_MIN) && + test_bit(PIC32F_DMA_PREP, &pic32s->flags); +} + +static int pic32_spi_one_transfer(struct spi_master *master, + struct spi_device *spi, + struct spi_transfer *transfer) +{ + struct pic32_spi *pic32s; + bool dma_issued = false; + int ret; + + pic32s = spi_master_get_devdata(master); + + /* handle transfer specific word size change */ + if (transfer->bits_per_word && + (transfer->bits_per_word != pic32s->bits_per_word)) { + ret = pic32_spi_set_word_size(pic32s, transfer->bits_per_word); + if (ret) + return ret; + pic32s->bits_per_word = transfer->bits_per_word; + } + + /* handle transfer specific speed change */ + if (transfer->speed_hz && (transfer->speed_hz != pic32s->speed_hz)) { + pic32_spi_set_clk_rate(pic32s, transfer->speed_hz); + pic32s->speed_hz = transfer->speed_hz; + } + + reinit_completion(&pic32s->xfer_done); + + /* transact in DMA mode */ + if (transfer->rx_sg.nents && transfer->tx_sg.nents) { + ret = pic32_spi_dma_transfer(pic32s, transfer); + if (ret) { + dev_err(&spi->dev, "dma submit error\n"); + return ret; + } + + /* DMA issued */ + dma_issued = true; + } else { + /* set current transfer information */ + pic32s->tx = (const void *)transfer->tx_buf; + pic32s->rx = (const void *)transfer->rx_buf; + 
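/* tx_end/rx_end mark the buffer limits; the FIFO fill/drain helpers and the IRQ handlers compare against them to detect completion */ 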
pic32s->tx_end = pic32s->tx + transfer->len; + pic32s->rx_end = pic32s->rx + transfer->len; + pic32s->len = transfer->len; + + /* transact via interrupt-driven PIO */ + enable_irq(pic32s->fault_irq); + enable_irq(pic32s->rx_irq); + enable_irq(pic32s->tx_irq); + } + + /* wait for completion */ + ret = wait_for_completion_timeout(&pic32s->xfer_done, 2 * HZ); + if (ret <= 0) { + dev_err(&spi->dev, "wait error/timeout\n"); + if (dma_issued) { + dmaengine_terminate_all(master->dma_rx); + dmaengine_terminate_all(master->dma_tx); + } + ret = -ETIMEDOUT; + } else { + ret = 0; + } + + return ret; +} + +static int pic32_spi_unprepare_message(struct spi_master *master, + struct spi_message *msg) +{ + /* nothing to do */ + return 0; +} + +static int pic32_spi_unprepare_hardware(struct spi_master *master) +{ + struct pic32_spi *pic32s = spi_master_get_devdata(master); + + pic32_spi_disable(pic32s); + + return 0; +} + +/* This may be called multiple times by the same spi dev */ +static int pic32_spi_setup(struct spi_device *spi) +{ + if (!spi->max_speed_hz) { + dev_err(&spi->dev, "No max speed HZ parameter\n"); + return -EINVAL; + } + + /* PIC32 spi controller can drive /CS during transfer depending + * on tx fifo fill-level. /CS will stay asserted as long as the TX + * fifo is non-empty and will be deasserted once it drains, which + * the device may take as completion of the ongoing transfer. This + * may result in unreliable/erroneous SPI transactions. + * To avoid that we always handle /CS by toggling a GPIO. + */ + if (!gpio_is_valid(spi->cs_gpio)) + return -EINVAL; + + gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH)); + + return 0; +} + +static void pic32_spi_cleanup(struct spi_device *spi) +{ + /* de-activate cs-gpio */ + gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH)); +} + +static void pic32_spi_dma_prep(struct pic32_spi *pic32s, struct device *dev) +{ + struct spi_master *master = pic32s->master; + dma_cap_mask_t mask; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + master->dma_rx = dma_request_slave_channel_compat(mask, NULL, NULL, + dev, "spi-rx"); + if (!master->dma_rx) { + dev_warn(dev, "RX channel not found.\n"); + goto out_err; + } + + master->dma_tx = dma_request_slave_channel_compat(mask, NULL, NULL, + dev, "spi-tx"); + if (!master->dma_tx) { + dev_warn(dev, "TX channel not found.\n"); + goto out_err; + } + + if (pic32_spi_dma_config(pic32s, DMA_SLAVE_BUSWIDTH_1_BYTE)) + goto out_err; + + /* DMA chnls allocated and prepared */ + set_bit(PIC32F_DMA_PREP, &pic32s->flags); + + return; + +out_err: + if (master->dma_rx) + dma_release_channel(master->dma_rx); + + if (master->dma_tx) + dma_release_channel(master->dma_tx); +} + +static void pic32_spi_dma_unprep(struct pic32_spi *pic32s) +{ + if (!test_bit(PIC32F_DMA_PREP, &pic32s->flags)) + return; + + clear_bit(PIC32F_DMA_PREP, &pic32s->flags); + if (pic32s->master->dma_rx) + dma_release_channel(pic32s->master->dma_rx); + + if (pic32s->master->dma_tx) + dma_release_channel(pic32s->master->dma_tx); +} + +static void pic32_spi_hw_init(struct pic32_spi *pic32s) +{ + u32 ctrl; + + /* disable hardware */ + pic32_spi_disable(pic32s); + + ctrl = readl(&pic32s->regs->ctrl); + /* enable enhanced buffering; fifo becomes 128 bits (16 bytes) deep */ + ctrl |= CTRL_ENHBUF; + pic32s->fifo_n_byte = 16; + + /* disable framing mode */ + ctrl &= ~CTRL_FRMEN; + + /* enable master mode while disabled */ + ctrl |= CTRL_MSTEN; + + /* set tx fifo threshold interrupt */ + ctrl &= ~(0x3 << CTRL_TX_INT_SHIFT); + ctrl |= (TX_FIFO_HALF_EMPTY << CTRL_TX_INT_SHIFT); + + /* set rx fifo 
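(RX_FIFO_NOT_EMPTY) 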
threshold interrupt */ + ctrl &= ~(0x3 << CTRL_RX_INT_SHIFT); + ctrl |= (RX_FIFO_NOT_EMPTY << CTRL_RX_INT_SHIFT); + + /* select clk source */ + ctrl &= ~CTRL_MCLKSEL; + + /* set manual /CS mode */ + ctrl &= ~CTRL_MSSEN; + + writel(ctrl, &pic32s->regs->ctrl); + + /* enable error reporting */ + ctrl = CTRL2_TX_UR_EN | CTRL2_RX_OV_EN | CTRL2_FRM_ERR_EN; + writel(ctrl, &pic32s->regs->ctrl2_set); +} + +static int pic32_spi_hw_probe(struct platform_device *pdev, + struct pic32_spi *pic32s) +{ + struct resource *mem; + int ret; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pic32s->regs = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(pic32s->regs)) + return PTR_ERR(pic32s->regs); + + pic32s->dma_base = mem->start; + + /* get irq resources: err-irq, rx-irq, tx-irq */ + pic32s->fault_irq = platform_get_irq_byname(pdev, "fault"); + if (pic32s->fault_irq < 0) { + dev_err(&pdev->dev, "fault-irq not found\n"); + return pic32s->fault_irq; + } + + pic32s->rx_irq = platform_get_irq_byname(pdev, "rx"); + if (pic32s->rx_irq < 0) { + dev_err(&pdev->dev, "rx-irq not found\n"); + return pic32s->rx_irq; + } + + pic32s->tx_irq = platform_get_irq_byname(pdev, "tx"); + if (pic32s->tx_irq < 0) { + dev_err(&pdev->dev, "tx-irq not found\n"); + return pic32s->tx_irq; + } + + /* get clock */ + pic32s->clk = devm_clk_get(&pdev->dev, "mck0"); + if (IS_ERR(pic32s->clk)) { + dev_err(&pdev->dev, "clk not found\n"); + ret = PTR_ERR(pic32s->clk); + goto err_unmap_mem; + } + + ret = clk_prepare_enable(pic32s->clk); + if (ret) + goto err_unmap_mem; + + pic32_spi_hw_init(pic32s); + + return 0; + +err_unmap_mem: + dev_err(&pdev->dev, "%s failed, err %d\n", __func__, ret); + return ret; +} + +static int pic32_spi_probe(struct platform_device *pdev) +{ + struct spi_master *master; + struct pic32_spi *pic32s; + int ret; + + master = spi_alloc_master(&pdev->dev, sizeof(*pic32s)); + if (!master) + return -ENOMEM; + + pic32s = spi_master_get_devdata(master); + pic32s->master = master; + + ret = pic32_spi_hw_probe(pdev, pic32s); + if (ret) + goto err_master; + + master->dev.of_node = of_node_get(pdev->dev.of_node); + master->mode_bits = SPI_MODE_3 | SPI_MODE_0 | SPI_CS_HIGH; + master->num_chipselect = 1; /* single chip-select */ + master->max_speed_hz = clk_get_rate(pic32s->clk); + master->setup = pic32_spi_setup; + master->cleanup = pic32_spi_cleanup; + master->flags = SPI_MASTER_MUST_TX | SPI_MASTER_MUST_RX; + master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) | + SPI_BPW_MASK(32); + master->transfer_one = pic32_spi_one_transfer; + master->prepare_message = pic32_spi_prepare_message; + master->unprepare_message = pic32_spi_unprepare_message; + master->prepare_transfer_hardware = pic32_spi_prepare_hardware; + master->unprepare_transfer_hardware = pic32_spi_unprepare_hardware; + + /* optional DMA support */ + pic32_spi_dma_prep(pic32s, &pdev->dev); + if (test_bit(PIC32F_DMA_PREP, &pic32s->flags)) + master->can_dma = pic32_spi_can_dma; + + init_completion(&pic32s->xfer_done); + pic32s->mode = -1; + + /* install irq handlers (with irqs kept disabled) */ + irq_set_status_flags(pic32s->fault_irq, IRQ_NOAUTOEN); + ret = devm_request_irq(&pdev->dev, pic32s->fault_irq, + pic32_spi_fault_irq, IRQF_NO_THREAD, + dev_name(&pdev->dev), pic32s); + if (ret < 0) { + dev_err(&pdev->dev, "request fault-irq %d\n", pic32s->fault_irq); + goto err_bailout; + } + + /* receive interrupt handler */ + irq_set_status_flags(pic32s->rx_irq, IRQ_NOAUTOEN); + ret = devm_request_irq(&pdev->dev, pic32s->rx_irq, + pic32_spi_rx_irq, 
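/* IRQF_NO_THREAD: keep the handler in hard-irq context; it only moves data to/from the FIFO */ 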
IRQF_NO_THREAD, + dev_name(&pdev->dev), pic32s); + if (ret < 0) { + dev_err(&pdev->dev, "request rx-irq %d\n", pic32s->rx_irq); + goto err_bailout; + } + + /* transmit interrupt handler */ + irq_set_status_flags(pic32s->tx_irq, IRQ_NOAUTOEN); + ret = devm_request_irq(&pdev->dev, pic32s->tx_irq, + pic32_spi_tx_irq, IRQF_NO_THREAD, + dev_name(&pdev->dev), pic32s); + if (ret < 0) { + dev_err(&pdev->dev, "request tx-irq %d\n", pic32s->tx_irq); + goto err_bailout; + } + + /* register master */ + ret = devm_spi_register_master(&pdev->dev, master); + if (ret) { + dev_err(&master->dev, "failed registering spi master\n"); + goto err_bailout; + } + + platform_set_drvdata(pdev, pic32s); + + return 0; + +err_bailout: + clk_disable_unprepare(pic32s->clk); +err_master: + spi_master_put(master); + return ret; +} + +static int pic32_spi_remove(struct platform_device *pdev) +{ + struct pic32_spi *pic32s; + + pic32s = platform_get_drvdata(pdev); + pic32_spi_disable(pic32s); + clk_disable_unprepare(pic32s->clk); + pic32_spi_dma_unprep(pic32s); + + return 0; +} + +static const struct of_device_id pic32_spi_of_match[] = { + {.compatible = "microchip,pic32mzda-spi",}, + {}, +}; +MODULE_DEVICE_TABLE(of, pic32_spi_of_match); + +static struct platform_driver pic32_spi_driver = { + .driver = { + .name = "spi-pic32", + .of_match_table = of_match_ptr(pic32_spi_of_match), + }, + .probe = pic32_spi_probe, + .remove = pic32_spi_remove, +}; + +module_platform_driver(pic32_spi_driver); + +MODULE_AUTHOR("Purna Chandra Mandal <purna.mandal@microchip.com>"); +MODULE_DESCRIPTION("Microchip SPI driver for PIC32 SPI controller."); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c index 365fc22c3572..a18a03d0afb7 100644 --- a/drivers/spi/spi-pxa2xx-dma.c +++ b/drivers/spi/spi-pxa2xx-dma.c @@ -33,12 +33,10 @@ static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data, dmadev = drv_data->tx_chan->device->dev; sgt = &drv_data->tx_sgt; buf = drv_data->tx; - drv_data->tx_map_len = len; } else { dmadev = drv_data->rx_chan->device->dev; sgt = &drv_data->rx_sgt; buf = drv_data->rx; - drv_data->rx_map_len = len; } nents = DIV_ROUND_UP(len, SZ_2K); @@ -55,11 +53,7 @@ static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data, for_each_sg(sgt->sgl, sg, sgt->nents, i) { size_t bytes = min_t(size_t, len, SZ_2K); - if (buf) - sg_set_buf(sg, pbuf, bytes); - else - sg_set_buf(sg, drv_data->dummy, bytes); - + sg_set_buf(sg, pbuf, bytes); pbuf += bytes; len -= bytes; } @@ -133,9 +127,6 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data, if (!error) { pxa2xx_spi_unmap_dma_buffers(drv_data); - drv_data->tx += drv_data->tx_map_len; - drv_data->rx += drv_data->rx_map_len; - msg->actual_length += drv_data->len; msg->state = pxa2xx_spi_next_transfer(drv_data); } else { @@ -267,19 +258,22 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data) int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst) { struct dma_async_tx_descriptor *tx_desc, *rx_desc; + int err = 0; tx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_MEM_TO_DEV); if (!tx_desc) { dev_err(&drv_data->pdev->dev, "failed to get DMA TX descriptor\n"); - return -EBUSY; + err = -EBUSY; + goto err_tx; } rx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_DEV_TO_MEM); if (!rx_desc) { dev_err(&drv_data->pdev->dev, "failed to get DMA RX descriptor\n"); - return -EBUSY; + err = -EBUSY; + goto err_rx; } /* We are ready when RX completes */ @@ -289,6 +283,12 @@ int 
pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst) dmaengine_submit(rx_desc); dmaengine_submit(tx_desc); return 0; + +err_rx: + dmaengine_terminate_async(drv_data->tx_chan); +err_tx: + pxa2xx_spi_unmap_dma_buffers(drv_data); + return err; } void pxa2xx_spi_dma_start(struct driver_data *drv_data) @@ -308,10 +308,6 @@ int pxa2xx_spi_dma_setup(struct driver_data *drv_data) dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); - drv_data->dummy = devm_kzalloc(dev, SZ_2K, GFP_KERNEL); - if (!drv_data->dummy) - return -ENOMEM; - drv_data->tx_chan = dma_request_slave_channel_compat(mask, pdata->dma_filter, pdata->tx_param, dev, "tx"); if (!drv_data->tx_chan) diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c index 4fd7f9802f1b..5202de94f792 100644 --- a/drivers/spi/spi-pxa2xx-pci.c +++ b/drivers/spi/spi-pxa2xx-pci.c @@ -173,8 +173,8 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev, ssp->type = c->type; snprintf(buf, sizeof(buf), "pxa2xx-spi.%d", ssp->port_id); - ssp->clk = clk_register_fixed_rate(&dev->dev, buf , NULL, - CLK_IS_ROOT, c->max_clk_rate); + ssp->clk = clk_register_fixed_rate(&dev->dev, buf , NULL, 0, + c->max_clk_rate); if (IS_ERR(ssp->clk)) return PTR_ERR(ssp->clk); diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 86138e4101b0..fe07c0592b44 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -570,9 +570,8 @@ static void giveback(struct driver_data *drv_data) /* see if the next and current messages point * to the same chip */ - if (next_msg && next_msg->spi != msg->spi) - next_msg = NULL; - if (!next_msg || msg->state == ERROR_STATE) + if ((next_msg && next_msg->spi != msg->spi) || + msg->state == ERROR_STATE) cs_deassert(drv_data); } @@ -928,6 +927,7 @@ static void pump_transfers(unsigned long data) u32 dma_thresh = drv_data->cur_chip->dma_threshold; u32 dma_burst = drv_data->cur_chip->dma_burst_size; u32 change_mask = pxa2xx_spi_get_ssrc1_change_mask(drv_data); + int err; /* Get current state information */ message = drv_data->cur_msg; @@ -1047,7 +1047,12 @@ static void pump_transfers(unsigned long data) /* Ensure we have the correct interrupt handler */ drv_data->transfer_handler = pxa2xx_spi_dma_transfer; - pxa2xx_spi_dma_prepare(drv_data, dma_burst); + err = pxa2xx_spi_dma_prepare(drv_data, dma_burst); + if (err) { + message->status = err; + giveback(drv_data); + return; + } /* Clear status and start DMA engine */ cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1; @@ -1543,7 +1548,6 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) drv_data->pdev = pdev; drv_data->ssp = ssp; - master->dev.parent = &pdev->dev; master->dev.of_node = pdev->dev.of_node; /* the spi->mode bits understood by this driver: */ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; @@ -1556,6 +1560,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer; master->fw_translate_cs = pxa2xx_spi_fw_translate_cs; master->auto_runtime_pm = true; + master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX; drv_data->ssp_type = ssp->type; diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h index a1ef88948144..e6b09000ff14 100644 --- a/drivers/spi/spi-pxa2xx.h +++ b/drivers/spi/spi-pxa2xx.h @@ -56,7 +56,6 @@ struct driver_data { struct sg_table tx_sgt; int rx_nents; int tx_nents; - void *dummy; atomic_t dma_running; /* Current message transfer state info */ @@ -69,8 +68,6 @@ struct driver_data { void *rx; void *rx_end; int 
dma_mapped; - size_t rx_map_len; - size_t tx_map_len; u8 n_bytes; int (*write)(struct driver_data *drv_data); int (*read)(struct driver_data *drv_data); diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c index 810a7fae3479..c338ef1136f6 100644 --- a/drivers/spi/spi-qup.c +++ b/drivers/spi/spi-qup.c @@ -937,6 +937,10 @@ static int spi_qup_pm_suspend_runtime(struct device *device) config = readl(controller->base + QUP_CONFIG); config |= QUP_CONFIG_CLOCK_AUTO_GATE; writel_relaxed(config, controller->base + QUP_CONFIG); + + clk_disable_unprepare(controller->cclk); + clk_disable_unprepare(controller->iclk); + return 0; } @@ -945,6 +949,15 @@ static int spi_qup_pm_resume_runtime(struct device *device) struct spi_master *master = dev_get_drvdata(device); struct spi_qup *controller = spi_master_get_devdata(master); u32 config; + int ret; + + ret = clk_prepare_enable(controller->iclk); + if (ret) + return ret; + + ret = clk_prepare_enable(controller->cclk); + if (ret) + return ret; /* Disable clocks auto gaiting */ config = readl_relaxed(controller->base + QUP_CONFIG); @@ -1017,6 +1030,8 @@ static int spi_qup_remove(struct platform_device *pdev) pm_runtime_put_noidle(&pdev->dev); pm_runtime_disable(&pdev->dev); + spi_master_put(master); + return 0; } diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c index 6c6c0013ec7a..cd89682065b9 100644 --- a/drivers/spi/spi-rockchip.c +++ b/drivers/spi/spi-rockchip.c @@ -744,10 +744,8 @@ static int rockchip_spi_probe(struct platform_device *pdev) rs->dma_rx.ch = dma_request_chan(rs->dev, "rx"); if (IS_ERR(rs->dma_rx.ch)) { if (PTR_ERR(rs->dma_rx.ch) == -EPROBE_DEFER) { - dma_release_channel(rs->dma_tx.ch); - rs->dma_tx.ch = NULL; ret = -EPROBE_DEFER; - goto err_get_fifo_len; + goto err_free_dma_tx; } dev_warn(rs->dev, "Failed to request RX DMA channel\n"); rs->dma_rx.ch = NULL; @@ -775,10 +773,11 @@ static int rockchip_spi_probe(struct platform_device *pdev) err_register_master: pm_runtime_disable(&pdev->dev); - if (rs->dma_tx.ch) - dma_release_channel(rs->dma_tx.ch); if (rs->dma_rx.ch) dma_release_channel(rs->dma_rx.ch); +err_free_dma_tx: + if (rs->dma_tx.ch) + dma_release_channel(rs->dma_tx.ch); err_get_fifo_len: clk_disable_unprepare(rs->spiclk); err_spiclk_enable: diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c index f17c0abe299f..d5adf9f31602 100644 --- a/drivers/spi/spi-st-ssc4.c +++ b/drivers/spi/spi-st-ssc4.c @@ -345,12 +345,13 @@ static int spi_st_probe(struct platform_device *pdev) spi_st->clk = devm_clk_get(&pdev->dev, "ssc"); if (IS_ERR(spi_st->clk)) { dev_err(&pdev->dev, "Unable to request clock\n"); - return PTR_ERR(spi_st->clk); + ret = PTR_ERR(spi_st->clk); + goto put_master; } ret = spi_st_clk_enable(spi_st); if (ret) - return ret; + goto put_master; init_completion(&spi_st->done); @@ -408,7 +409,8 @@ static int spi_st_probe(struct platform_device *pdev) clk_disable: spi_st_clk_disable(spi_st); - +put_master: + spi_master_put(master); return ret; } diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c index aab9b492c627..18aeaceee286 100644 --- a/drivers/spi/spi-zynqmp-gqspi.c +++ b/drivers/spi/spi-zynqmp-gqspi.c @@ -360,7 +360,7 @@ static int zynqmp_prepare_transfer_hardware(struct spi_master *master) ret = clk_enable(xqspi->refclk); if (ret) - goto clk_err; + return ret; ret = clk_enable(xqspi->pclk); if (ret) @@ -369,6 +369,7 @@ static int zynqmp_prepare_transfer_hardware(struct spi_master *master) zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK); return 0; clk_err: + 
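/* pclk enable failed; undo the earlier refclk enable before returning */ 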
clk_disable(xqspi->refclk); return ret; } diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 0239b45eed92..77e6e45951f4 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -717,9 +717,11 @@ static int spi_map_buf(struct spi_master *master, struct device *dev, if (vmalloced_buf) { desc_len = min_t(int, max_seg_size, PAGE_SIZE); sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len); - } else { + } else if (virt_addr_valid(buf)) { desc_len = min_t(int, max_seg_size, master->max_dma_len); sgs = DIV_ROUND_UP(len, desc_len); + } else { + return -EINVAL; } ret = sg_alloc_table(sgt, sgs, GFP_KERNEL); @@ -933,7 +935,7 @@ static int spi_map_msg(struct spi_master *master, struct spi_message *msg) * spi_transfer_one_message - Default implementation of transfer_one_message() * * This is a standard implementation of transfer_one_message() for - * drivers which impelment a transfer_one() operation. It provides + * drivers which implement a transfer_one() operation. It provides * standard handling of delays and chip select management. */ static int spi_transfer_one_message(struct spi_master *master, @@ -1764,6 +1766,7 @@ struct spi_master *spi_alloc_master(struct device *dev, unsigned size) master->num_chipselect = 1; master->dev.class = &spi_master_class; master->dev.parent = dev; + pm_suspend_ignore_children(&master->dev, true); spi_master_set_devdata(master, &master[1]); return master; diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 5bac28a3944e..7c197d1a1231 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -66,8 +66,6 @@ source "drivers/staging/nvec/Kconfig" source "drivers/staging/media/Kconfig" -source "drivers/staging/rdma/Kconfig" - source "drivers/staging/android/Kconfig" source "drivers/staging/board/Kconfig" diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index a954242b0f2c..a470c7276142 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -23,7 +23,6 @@ obj-$(CONFIG_FB_XGI) += xgifb/ obj-$(CONFIG_USB_EMXX) += emxx_udc/ obj-$(CONFIG_SPEAKUP) += speakup/ obj-$(CONFIG_MFD_NVEC) += nvec/ -obj-$(CONFIG_STAGING_RDMA) += rdma/ obj-$(CONFIG_ANDROID) += android/ obj-$(CONFIG_STAGING_BOARD) += board/ obj-$(CONFIG_LTE_GDM724X) += gdm724x/ diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h index ce1f949430f1..3f2f30b6542c 100644 --- a/drivers/staging/lustre/lustre/llite/llite_internal.h +++ b/drivers/staging/lustre/lustre/llite/llite_internal.h @@ -976,8 +976,8 @@ static inline __u64 ll_file_maxbytes(struct inode *inode) } /* llite/xattr.c */ -int ll_setxattr(struct dentry *dentry, const char *name, - const void *value, size_t size, int flags); +int ll_setxattr(struct dentry *dentry, struct inode *inode, + const char *name, const void *value, size_t size, int flags); ssize_t ll_getxattr(struct dentry *dentry, struct inode *inode, const char *name, void *buffer, size_t size); ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size); diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c index ed4de04381c3..608014b0dbcd 100644 --- a/drivers/staging/lustre/lustre/llite/xattr.c +++ b/drivers/staging/lustre/lustre/llite/xattr.c @@ -211,11 +211,9 @@ int ll_setxattr_common(struct inode *inode, const char *name, return 0; } -int ll_setxattr(struct dentry *dentry, const char *name, - const void *value, size_t size, int flags) +int ll_setxattr(struct dentry *dentry, struct inode *inode, + const 
char *name, const void *value, size_t size, int flags) { - struct inode *inode = d_inode(dentry); - LASSERT(inode); LASSERT(name); diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c index 163f21a1298d..e389009fca42 100644 --- a/drivers/staging/mt29f_spinand/mt29f_spinand.c +++ b/drivers/staging/mt29f_spinand/mt29f_spinand.c @@ -42,23 +42,33 @@ static inline struct spinand_state *mtd_to_state(struct mtd_info *mtd) static int enable_hw_ecc; static int enable_read_hw_ecc; -static struct nand_ecclayout spinand_oob_64 = { - .eccbytes = 24, - .eccpos = { - 1, 2, 3, 4, 5, 6, - 17, 18, 19, 20, 21, 22, - 33, 34, 35, 36, 37, 38, - 49, 50, 51, 52, 53, 54, }, - .oobfree = { - {.offset = 8, - .length = 8}, - {.offset = 24, - .length = 8}, - {.offset = 40, - .length = 8}, - {.offset = 56, - .length = 8}, - } +static int spinand_ooblayout_64_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + if (section > 3) + return -ERANGE; + + oobregion->offset = (section * 16) + 1; + oobregion->length = 6; + + return 0; +} + +static int spinand_ooblayout_64_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + if (section > 3) + return -ERANGE; + + oobregion->offset = (section * 16) + 8; + oobregion->length = 8; + + return 0; +} + +static const struct mtd_ooblayout_ops spinand_oob_64_ops = { + .ecc = spinand_ooblayout_64_ecc, + .free = spinand_ooblayout_64_free, }; #endif @@ -886,11 +896,11 @@ static int spinand_probe(struct spi_device *spi_nand) chip->ecc.strength = 1; chip->ecc.total = chip->ecc.steps * chip->ecc.bytes; - chip->ecc.layout = &spinand_oob_64; chip->ecc.read_page = spinand_read_page_hwecc; chip->ecc.write_page = spinand_write_page_hwecc; #else chip->ecc.mode = NAND_ECC_SOFT; + chip->ecc.algo = NAND_ECC_HAMMING; if (spinand_disable_ecc(spi_nand) < 0) dev_info(&spi_nand->dev, "%s: disable ecc failed!\n", __func__); @@ -912,6 +922,9 @@ static int spinand_probe(struct spi_device *spi_nand) mtd->dev.parent = &spi_nand->dev; mtd->oobsize = 64; +#ifdef CONFIG_MTD_SPINAND_ONDIEECC + mtd_set_ooblayout(mtd, &spinand_oob_64_ops); +#endif if (nand_scan(mtd, 1)) return -ENXIO; diff --git a/drivers/staging/rdma/Kconfig b/drivers/staging/rdma/Kconfig deleted file mode 100644 index f1f3ecadf0fb..000000000000 --- a/drivers/staging/rdma/Kconfig +++ /dev/null @@ -1,27 +0,0 @@ -menuconfig STAGING_RDMA - tristate "RDMA staging drivers" - depends on INFINIBAND - depends on PCI || BROKEN - depends on HAS_IOMEM - depends on NET - depends on INET - default n - ---help--- - This option allows you to select a number of RDMA drivers that - fall into one of two categories: deprecated drivers being held - here before finally being removed or new drivers that still need - some work before being moved to the normal RDMA driver area. - - If you wish to work on these drivers, to help improve them, or - to report problems you have with them, please use the - linux-rdma@vger.kernel.org mailing list. - - If in doubt, say N here. 
- - -# Please keep entries in alphabetic order -if STAGING_RDMA - -source "drivers/staging/rdma/hfi1/Kconfig" - -endif diff --git a/drivers/staging/rdma/Makefile b/drivers/staging/rdma/Makefile deleted file mode 100644 index 8c7fc1de48a7..000000000000 --- a/drivers/staging/rdma/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# Entries for RDMA_STAGING tree -obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/ diff --git a/drivers/staging/rdma/hfi1/TODO b/drivers/staging/rdma/hfi1/TODO deleted file mode 100644 index 4c6f1d7d2eaf..000000000000 --- a/drivers/staging/rdma/hfi1/TODO +++ /dev/null @@ -1,6 +0,0 @@ -July, 2015 - -- Remove unneeded file entries in sysfs -- Remove software processing of IB protocol and place in library for use - by qib, ipath (if still present), hfi1, and eventually soft-roce -- Replace incorrect uAPI diff --git a/drivers/staging/rdma/hfi1/diag.c b/drivers/staging/rdma/hfi1/diag.c deleted file mode 100644 index bb2409ad891a..000000000000 --- a/drivers/staging/rdma/hfi1/diag.c +++ /dev/null @@ -1,1925 +0,0 @@ -/* - * Copyright(c) 2015, 2016 Intel Corporation. - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * BSD LICENSE - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -/* - * This file contains support for diagnostic functions. It is accessed by - * opening the hfi1_diag device, normally minor number 129. Diagnostic use - * of the chip may render the chip or board unusable until the driver - * is unloaded, or in some cases, until the system is rebooted. 
- * - * Accesses to the chip through this interface are not similar to going - * through the /sys/bus/pci resource mmap interface. - */ - -#include <linux/io.h> -#include <linux/pci.h> -#include <linux/poll.h> -#include <linux/vmalloc.h> -#include <linux/export.h> -#include <linux/fs.h> -#include <linux/uaccess.h> -#include <linux/module.h> -#include <rdma/ib_smi.h> -#include "hfi.h" -#include "device.h" -#include "common.h" -#include "verbs_txreq.h" -#include "trace.h" - -#undef pr_fmt -#define pr_fmt(fmt) DRIVER_NAME ": " fmt -#define snoop_dbg(fmt, ...) \ - hfi1_cdbg(SNOOP, fmt, ##__VA_ARGS__) - -/* Snoop option mask */ -#define SNOOP_DROP_SEND BIT(0) -#define SNOOP_USE_METADATA BIT(1) -#define SNOOP_SET_VL0TOVL15 BIT(2) - -static u8 snoop_flags; - -/* - * Extract packet length from LRH header. - * This is in Dwords so multiply by 4 to get size in bytes - */ -#define HFI1_GET_PKT_LEN(x) (((be16_to_cpu((x)->lrh[2]) & 0xFFF)) << 2) - -enum hfi1_filter_status { - HFI1_FILTER_HIT, - HFI1_FILTER_ERR, - HFI1_FILTER_MISS -}; - -/* snoop processing functions */ -rhf_rcv_function_ptr snoop_rhf_rcv_functions[8] = { - [RHF_RCV_TYPE_EXPECTED] = snoop_recv_handler, - [RHF_RCV_TYPE_EAGER] = snoop_recv_handler, - [RHF_RCV_TYPE_IB] = snoop_recv_handler, - [RHF_RCV_TYPE_ERROR] = snoop_recv_handler, - [RHF_RCV_TYPE_BYPASS] = snoop_recv_handler, - [RHF_RCV_TYPE_INVALID5] = process_receive_invalid, - [RHF_RCV_TYPE_INVALID6] = process_receive_invalid, - [RHF_RCV_TYPE_INVALID7] = process_receive_invalid -}; - -/* Snoop packet structure */ -struct snoop_packet { - struct list_head list; - u32 total_len; - u8 data[]; -}; - -/* Do not make these an enum or it will blow up the capture_md */ -#define PKT_DIR_EGRESS 0x0 -#define PKT_DIR_INGRESS 0x1 - -/* Packet capture metadata returned to the user with the packet. */ -struct capture_md { - u8 port; - u8 dir; - u8 reserved[6]; - union { - u64 pbc; - u64 rhf; - } u; -}; - -static atomic_t diagpkt_count = ATOMIC_INIT(0); -static struct cdev diagpkt_cdev; -static struct device *diagpkt_device; - -static ssize_t diagpkt_write(struct file *fp, const char __user *data, - size_t count, loff_t *off); - -static const struct file_operations diagpkt_file_ops = { - .owner = THIS_MODULE, - .write = diagpkt_write, - .llseek = noop_llseek, -}; - -/* - * This is used for communication with user space for snoop extended IOCTLs - */ -struct hfi1_link_info { - __be64 node_guid; - u8 port_mode; - u8 port_state; - u16 link_speed_active; - u16 link_width_active; - u16 vl15_init; - u8 port_number; - /* - * Add padding to make this a full IB SMP payload. Note: changing the - * size of this structure will make the IOCTLs created with _IOWR - * change. - * Be sure to run tests on all IOCTLs when making changes to this - * structure. - */ - u8 res[47]; -}; - -/* - * This starts our ioctl sequence numbers *way* off from the ones - * defined in ib_core. 
- */ -#define SNOOP_CAPTURE_VERSION 0x1 - -#define IB_IOCTL_MAGIC 0x1b /* See Documentation/ioctl-number.txt */ -#define HFI1_SNOOP_IOC_MAGIC IB_IOCTL_MAGIC -#define HFI1_SNOOP_IOC_BASE_SEQ 0x80 - -#define HFI1_SNOOP_IOCGETLINKSTATE \ - _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ) -#define HFI1_SNOOP_IOCSETLINKSTATE \ - _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 1) -#define HFI1_SNOOP_IOCCLEARQUEUE \ - _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 2) -#define HFI1_SNOOP_IOCCLEARFILTER \ - _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 3) -#define HFI1_SNOOP_IOCSETFILTER \ - _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 4) -#define HFI1_SNOOP_IOCGETVERSION \ - _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 5) -#define HFI1_SNOOP_IOCSET_OPTS \ - _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 6) - -/* - * These offsets +6/+7 could change, but these are already known and used - * IOCTL numbers so don't change them without a good reason. - */ -#define HFI1_SNOOP_IOCGETLINKSTATE_EXTRA \ - _IOWR(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 6, \ - struct hfi1_link_info) -#define HFI1_SNOOP_IOCSETLINKSTATE_EXTRA \ - _IOWR(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 7, \ - struct hfi1_link_info) - -static int hfi1_snoop_open(struct inode *in, struct file *fp); -static ssize_t hfi1_snoop_read(struct file *fp, char __user *data, - size_t pkt_len, loff_t *off); -static ssize_t hfi1_snoop_write(struct file *fp, const char __user *data, - size_t count, loff_t *off); -static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg); -static unsigned int hfi1_snoop_poll(struct file *fp, - struct poll_table_struct *wait); -static int hfi1_snoop_release(struct inode *in, struct file *fp); - -struct hfi1_packet_filter_command { - int opcode; - int length; - void *value_ptr; -}; - -/* Can't re-use PKT_DIR_*GRESS here because 0 means no packets for this */ -#define HFI1_SNOOP_INGRESS 0x1 -#define HFI1_SNOOP_EGRESS 0x2 - -enum hfi1_packet_filter_opcodes { - FILTER_BY_LID, - FILTER_BY_DLID, - FILTER_BY_MAD_MGMT_CLASS, - FILTER_BY_QP_NUMBER, - FILTER_BY_PKT_TYPE, - FILTER_BY_SERVICE_LEVEL, - FILTER_BY_PKEY, - FILTER_BY_DIRECTION, -}; - -static const struct file_operations snoop_file_ops = { - .owner = THIS_MODULE, - .open = hfi1_snoop_open, - .read = hfi1_snoop_read, - .unlocked_ioctl = hfi1_ioctl, - .poll = hfi1_snoop_poll, - .write = hfi1_snoop_write, - .release = hfi1_snoop_release -}; - -struct hfi1_filter_array { - int (*filter)(void *, void *, void *); -}; - -static int hfi1_filter_lid(void *ibhdr, void *packet_data, void *value); -static int hfi1_filter_dlid(void *ibhdr, void *packet_data, void *value); -static int hfi1_filter_mad_mgmt_class(void *ibhdr, void *packet_data, - void *value); -static int hfi1_filter_qp_number(void *ibhdr, void *packet_data, void *value); -static int hfi1_filter_ibpacket_type(void *ibhdr, void *packet_data, - void *value); -static int hfi1_filter_ib_service_level(void *ibhdr, void *packet_data, - void *value); -static int hfi1_filter_ib_pkey(void *ibhdr, void *packet_data, void *value); -static int hfi1_filter_direction(void *ibhdr, void *packet_data, void *value); - -static const struct hfi1_filter_array hfi1_filters[] = { - { hfi1_filter_lid }, - { hfi1_filter_dlid }, - { hfi1_filter_mad_mgmt_class }, - { hfi1_filter_qp_number }, - { hfi1_filter_ibpacket_type }, - { hfi1_filter_ib_service_level }, - { hfi1_filter_ib_pkey }, - { hfi1_filter_direction }, -}; - -#define HFI1_MAX_FILTERS ARRAY_SIZE(hfi1_filters) -#define 
HFI1_DIAG_MINOR_BASE 129 - -static int hfi1_snoop_add(struct hfi1_devdata *dd, const char *name); - -int hfi1_diag_add(struct hfi1_devdata *dd) -{ - char name[16]; - int ret = 0; - - snprintf(name, sizeof(name), "%s_diagpkt%d", class_name(), - dd->unit); - /* - * Do this for each device as opposed to the normal diagpkt - * interface which is one per host - */ - ret = hfi1_snoop_add(dd, name); - if (ret) - dd_dev_err(dd, "Unable to init snoop/capture device"); - - snprintf(name, sizeof(name), "%s_diagpkt", class_name()); - if (atomic_inc_return(&diagpkt_count) == 1) { - ret = hfi1_cdev_init(HFI1_DIAGPKT_MINOR, name, - &diagpkt_file_ops, &diagpkt_cdev, - &diagpkt_device, false); - } - - return ret; -} - -/* this must be called w/ dd->snoop_in_lock held */ -static void drain_snoop_list(struct list_head *queue) -{ - struct list_head *pos, *q; - struct snoop_packet *packet; - - list_for_each_safe(pos, q, queue) { - packet = list_entry(pos, struct snoop_packet, list); - list_del(pos); - kfree(packet); - } -} - -static void hfi1_snoop_remove(struct hfi1_devdata *dd) -{ - unsigned long flags = 0; - - spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags); - drain_snoop_list(&dd->hfi1_snoop.queue); - hfi1_cdev_cleanup(&dd->hfi1_snoop.cdev, &dd->hfi1_snoop.class_dev); - spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags); -} - -void hfi1_diag_remove(struct hfi1_devdata *dd) -{ - hfi1_snoop_remove(dd); - if (atomic_dec_and_test(&diagpkt_count)) - hfi1_cdev_cleanup(&diagpkt_cdev, &diagpkt_device); - hfi1_cdev_cleanup(&dd->diag_cdev, &dd->diag_device); -} - -/* - * Allocated structure shared between the credit return mechanism and - * diagpkt_send(). - */ -struct diagpkt_wait { - struct completion credits_returned; - int code; - atomic_t count; -}; - -/* - * When each side is finished with the structure, they call this. - * The last user frees the structure. - */ -static void put_diagpkt_wait(struct diagpkt_wait *wait) -{ - if (atomic_dec_and_test(&wait->count)) - kfree(wait); -} - -/* - * Callback from the credit return code. Set the complete, which - * will let diapkt_send() continue. - */ -static void diagpkt_complete(void *arg, int code) -{ - struct diagpkt_wait *wait = (struct diagpkt_wait *)arg; - - wait->code = code; - complete(&wait->credits_returned); - put_diagpkt_wait(wait); /* finished with the structure */ -} - -/** - * diagpkt_send - send a packet - * @dp: diag packet descriptor - */ -static ssize_t diagpkt_send(struct diag_pkt *dp) -{ - struct hfi1_devdata *dd; - struct send_context *sc; - struct pio_buf *pbuf; - u32 *tmpbuf = NULL; - ssize_t ret = 0; - u32 pkt_len, total_len; - pio_release_cb credit_cb = NULL; - void *credit_arg = NULL; - struct diagpkt_wait *wait = NULL; - int trycount = 0; - - dd = hfi1_lookup(dp->unit); - if (!dd || !(dd->flags & HFI1_PRESENT) || !dd->kregbase) { - ret = -ENODEV; - goto bail; - } - if (!(dd->flags & HFI1_INITTED)) { - /* no hardware, freeze, etc. 
*/ - ret = -ENODEV; - goto bail; - } - - if (dp->version != _DIAG_PKT_VERS) { - dd_dev_err(dd, "Invalid version %u for diagpkt_write\n", - dp->version); - ret = -EINVAL; - goto bail; - } - - /* send count must be an exact number of dwords */ - if (dp->len & 3) { - ret = -EINVAL; - goto bail; - } - - /* there is only port 1 */ - if (dp->port != 1) { - ret = -EINVAL; - goto bail; - } - - /* need a valid context */ - if (dp->sw_index >= dd->num_send_contexts) { - ret = -EINVAL; - goto bail; - } - /* can only use kernel contexts */ - if (dd->send_contexts[dp->sw_index].type != SC_KERNEL && - dd->send_contexts[dp->sw_index].type != SC_VL15) { - ret = -EINVAL; - goto bail; - } - /* must be allocated */ - sc = dd->send_contexts[dp->sw_index].sc; - if (!sc) { - ret = -EINVAL; - goto bail; - } - /* must be enabled */ - if (!(sc->flags & SCF_ENABLED)) { - ret = -EINVAL; - goto bail; - } - - /* allocate a buffer and copy the data in */ - tmpbuf = vmalloc(dp->len); - if (!tmpbuf) { - ret = -ENOMEM; - goto bail; - } - - if (copy_from_user(tmpbuf, - (const void __user *)(unsigned long)dp->data, - dp->len)) { - ret = -EFAULT; - goto bail; - } - - /* - * pkt_len is how much data we have to write, includes header and data. - * total_len is length of the packet in Dwords plus the PBC should not - * include the CRC. - */ - pkt_len = dp->len >> 2; - total_len = pkt_len + 2; /* PBC + packet */ - - /* if 0, fill in a default */ - if (dp->pbc == 0) { - struct hfi1_pportdata *ppd = dd->pport; - - hfi1_cdbg(PKT, "Generating PBC"); - dp->pbc = create_pbc(ppd, 0, 0, 0, total_len); - } else { - hfi1_cdbg(PKT, "Using passed in PBC"); - } - - hfi1_cdbg(PKT, "Egress PBC content is 0x%llx", dp->pbc); - - /* - * The caller wants to wait until the packet is sent and to - * check for errors. The best we can do is wait until - * the buffer credits are returned and check if any packet - * error has occurred. If there are any late errors, this - * could miss it. If there are other senders who generate - * an error, this may find it. However, in general, it - * should catch most. - */ - if (dp->flags & F_DIAGPKT_WAIT) { - /* always force a credit return */ - dp->pbc |= PBC_CREDIT_RETURN; - /* turn on credit return interrupts */ - sc_add_credit_return_intr(sc); - wait = kmalloc(sizeof(*wait), GFP_KERNEL); - if (!wait) { - ret = -ENOMEM; - goto bail; - } - init_completion(&wait->credits_returned); - atomic_set(&wait->count, 2); - wait->code = PRC_OK; - - credit_cb = diagpkt_complete; - credit_arg = wait; - } - -retry: - pbuf = sc_buffer_alloc(sc, total_len, credit_cb, credit_arg); - if (!pbuf) { - if (trycount == 0) { - /* force a credit return and try again */ - sc_return_credits(sc); - trycount = 1; - goto retry; - } - /* - * No send buffer means no credit callback. Undo - * the wait set-up that was done above. We free wait - * because the callback will never be called. - */ - if (dp->flags & F_DIAGPKT_WAIT) { - sc_del_credit_return_intr(sc); - kfree(wait); - wait = NULL; - } - ret = -ENOSPC; - goto bail; - } - - pio_copy(dd, pbuf, dp->pbc, tmpbuf, pkt_len); - /* no flush needed as the HW knows the packet size */ - - ret = sizeof(*dp); - - if (dp->flags & F_DIAGPKT_WAIT) { - /* wait for credit return */ - ret = wait_for_completion_interruptible( - &wait->credits_returned); - /* - * If the wait returns an error, the wait was interrupted, - * e.g. with a ^C in the user program. The callback is - * still pending. 
This is OK as the wait structure is - * kmalloc'ed and the structure will free itself when - * all users are done with it. - * - * A context disable occurs on a send context restart, so - * include that in the list of errors below to check for. - * NOTE: PRC_FILL_ERR is at best informational and cannot - * be depended on. - */ - if (!ret && (((wait->code & PRC_STATUS_ERR) || - (wait->code & PRC_FILL_ERR) || - (wait->code & PRC_SC_DISABLE)))) - ret = -EIO; - - put_diagpkt_wait(wait); /* finished with the structure */ - sc_del_credit_return_intr(sc); - } - -bail: - vfree(tmpbuf); - return ret; -} - -static ssize_t diagpkt_write(struct file *fp, const char __user *data, - size_t count, loff_t *off) -{ - struct hfi1_devdata *dd; - struct send_context *sc; - u8 vl; - - struct diag_pkt dp; - - if (count != sizeof(dp)) - return -EINVAL; - - if (copy_from_user(&dp, data, sizeof(dp))) - return -EFAULT; - - /* - * The Send Context is derived from the PbcVL value - * if PBC is populated - */ - if (dp.pbc) { - dd = hfi1_lookup(dp.unit); - if (!dd) - return -ENODEV; - vl = (dp.pbc >> PBC_VL_SHIFT) & PBC_VL_MASK; - sc = dd->vld[vl].sc; - if (sc) { - dp.sw_index = sc->sw_index; - hfi1_cdbg( - PKT, - "Packet sent over VL %d via Send Context %u(%u)", - vl, sc->sw_index, sc->hw_context); - } - } - - return diagpkt_send(&dp); -} - -static int hfi1_snoop_add(struct hfi1_devdata *dd, const char *name) -{ - int ret = 0; - - dd->hfi1_snoop.mode_flag = 0; - spin_lock_init(&dd->hfi1_snoop.snoop_lock); - INIT_LIST_HEAD(&dd->hfi1_snoop.queue); - init_waitqueue_head(&dd->hfi1_snoop.waitq); - - ret = hfi1_cdev_init(HFI1_SNOOP_CAPTURE_BASE + dd->unit, name, - &snoop_file_ops, - &dd->hfi1_snoop.cdev, &dd->hfi1_snoop.class_dev, - false); - - if (ret) { - dd_dev_err(dd, "Couldn't create %s device: %d", name, ret); - hfi1_cdev_cleanup(&dd->hfi1_snoop.cdev, - &dd->hfi1_snoop.class_dev); - } - - return ret; -} - -static struct hfi1_devdata *hfi1_dd_from_sc_inode(struct inode *in) -{ - int unit = iminor(in) - HFI1_SNOOP_CAPTURE_BASE; - struct hfi1_devdata *dd; - - dd = hfi1_lookup(unit); - return dd; -} - -/* clear or restore send context integrity checks */ -static void adjust_integrity_checks(struct hfi1_devdata *dd) -{ - struct send_context *sc; - unsigned long sc_flags; - int i; - - spin_lock_irqsave(&dd->sc_lock, sc_flags); - for (i = 0; i < dd->num_send_contexts; i++) { - int enable; - - sc = dd->send_contexts[i].sc; - - if (!sc) - continue; /* not allocated */ - - enable = likely(!HFI1_CAP_IS_KSET(NO_INTEGRITY)) && - dd->hfi1_snoop.mode_flag != HFI1_PORT_SNOOP_MODE; - - set_pio_integrity(sc); - - if (enable) /* take HFI_CAP_* flags into account */ - hfi1_init_ctxt(sc); - } - spin_unlock_irqrestore(&dd->sc_lock, sc_flags); -} - -static int hfi1_snoop_open(struct inode *in, struct file *fp) -{ - int ret; - int mode_flag = 0; - unsigned long flags = 0; - struct hfi1_devdata *dd; - struct list_head *queue; - - mutex_lock(&hfi1_mutex); - - dd = hfi1_dd_from_sc_inode(in); - if (!dd) { - ret = -ENODEV; - goto bail; - } - - /* - * File mode determines snoop or capture. Some existing user - * applications expect the capture device to be able to be opened RDWR - * because they expect a dedicated capture device. For this reason we - * support a module param to force capture mode even if the file open - * mode matches snoop. 
- */ - if ((fp->f_flags & O_ACCMODE) == O_RDONLY) { - snoop_dbg("Capture Enabled"); - mode_flag = HFI1_PORT_CAPTURE_MODE; - } else if ((fp->f_flags & O_ACCMODE) == O_RDWR) { - snoop_dbg("Snoop Enabled"); - mode_flag = HFI1_PORT_SNOOP_MODE; - } else { - snoop_dbg("Invalid"); - ret = -EINVAL; - goto bail; - } - queue = &dd->hfi1_snoop.queue; - - /* - * We are not supporting snoop and capture at the same time. - */ - spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags); - if (dd->hfi1_snoop.mode_flag) { - ret = -EBUSY; - spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags); - goto bail; - } - - dd->hfi1_snoop.mode_flag = mode_flag; - drain_snoop_list(queue); - - dd->hfi1_snoop.filter_callback = NULL; - dd->hfi1_snoop.filter_value = NULL; - - /* - * Send side packet integrity checks are not helpful when snooping so - * disable and re-enable when we stop snooping. - */ - if (mode_flag == HFI1_PORT_SNOOP_MODE) { - /* clear after snoop mode is on */ - adjust_integrity_checks(dd); /* clear */ - - /* - * We also do not want to be doing the DLID LMC check for - * ingressed packets. - */ - dd->hfi1_snoop.dcc_cfg = read_csr(dd, DCC_CFG_PORT_CONFIG1); - write_csr(dd, DCC_CFG_PORT_CONFIG1, - (dd->hfi1_snoop.dcc_cfg >> 32) << 32); - } - - /* - * As soon as we set these function pointers the recv and send handlers - * are active. This is a race condition so we must make sure to drain - * the queue and init filter values above. Technically we should add - * locking here but all that will happen is on recv a packet will get - * allocated and get stuck on the snoop_lock before getting added to the - * queue. Same goes for send. - */ - dd->rhf_rcv_function_map = snoop_rhf_rcv_functions; - dd->process_pio_send = snoop_send_pio_handler; - dd->process_dma_send = snoop_send_pio_handler; - dd->pio_inline_send = snoop_inline_pio_send; - - spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags); - ret = 0; - -bail: - mutex_unlock(&hfi1_mutex); - - return ret; -} - -static int hfi1_snoop_release(struct inode *in, struct file *fp) -{ - unsigned long flags = 0; - struct hfi1_devdata *dd; - int mode_flag; - - dd = hfi1_dd_from_sc_inode(in); - if (!dd) - return -ENODEV; - - spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags); - - /* clear the snoop mode before re-adjusting send context CSRs */ - mode_flag = dd->hfi1_snoop.mode_flag; - dd->hfi1_snoop.mode_flag = 0; - - /* - * Drain the queue and clear the filters we are done with it. Don't - * forget to restore the packet integrity checks - */ - drain_snoop_list(&dd->hfi1_snoop.queue); - if (mode_flag == HFI1_PORT_SNOOP_MODE) { - /* restore after snoop mode is clear */ - adjust_integrity_checks(dd); /* restore */ - - /* - * Also should probably reset the DCC_CONFIG1 register for DLID - * checking on incoming packets again. Use the value saved when - * opening the snoop device. - */ - write_csr(dd, DCC_CFG_PORT_CONFIG1, dd->hfi1_snoop.dcc_cfg); - } - - dd->hfi1_snoop.filter_callback = NULL; - kfree(dd->hfi1_snoop.filter_value); - dd->hfi1_snoop.filter_value = NULL; - - /* - * User is done snooping and capturing, return control to the normal - * handler. Re-enable SDMA handling. 
- */ - dd->rhf_rcv_function_map = dd->normal_rhf_rcv_functions; - dd->process_pio_send = hfi1_verbs_send_pio; - dd->process_dma_send = hfi1_verbs_send_dma; - dd->pio_inline_send = pio_copy; - - spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags); - - snoop_dbg("snoop/capture device released"); - - return 0; -} - -static unsigned int hfi1_snoop_poll(struct file *fp, - struct poll_table_struct *wait) -{ - int ret = 0; - unsigned long flags = 0; - - struct hfi1_devdata *dd; - - dd = hfi1_dd_from_sc_inode(fp->f_inode); - if (!dd) - return -ENODEV; - - spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags); - - poll_wait(fp, &dd->hfi1_snoop.waitq, wait); - if (!list_empty(&dd->hfi1_snoop.queue)) - ret |= POLLIN | POLLRDNORM; - - spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags); - return ret; -} - -static ssize_t hfi1_snoop_write(struct file *fp, const char __user *data, - size_t count, loff_t *off) -{ - struct diag_pkt dpkt; - struct hfi1_devdata *dd; - size_t ret; - u8 byte_two, sl, sc5, sc4, vl, byte_one; - struct send_context *sc; - u32 len; - u64 pbc; - struct hfi1_ibport *ibp; - struct hfi1_pportdata *ppd; - - dd = hfi1_dd_from_sc_inode(fp->f_inode); - if (!dd) - return -ENODEV; - - ppd = dd->pport; - snoop_dbg("received %lu bytes from user", count); - - memset(&dpkt, 0, sizeof(struct diag_pkt)); - dpkt.version = _DIAG_PKT_VERS; - dpkt.unit = dd->unit; - dpkt.port = 1; - - if (likely(!(snoop_flags & SNOOP_USE_METADATA))) { - /* - * We need to generate the PBC and not let diagpkt_send do it, - * to do this we need the VL and the length in dwords. - * The VL can be determined by using the SL and looking up the - * SC. Then the SC can be converted into VL. The exception to - * this is those packets which are from an SMI queue pair. - * Since we can't detect anything about the QP here we have to - * rely on the SC. If its 0xF then we assume its SMI and - * do not look at the SL. - */ - if (copy_from_user(&byte_one, data, 1)) - return -EINVAL; - - if (copy_from_user(&byte_two, data + 1, 1)) - return -EINVAL; - - sc4 = (byte_one >> 4) & 0xf; - if (sc4 == 0xF) { - snoop_dbg("Detected VL15 packet ignoring SL in packet"); - vl = sc4; - } else { - sl = (byte_two >> 4) & 0xf; - ibp = to_iport(&dd->verbs_dev.rdi.ibdev, 1); - sc5 = ibp->sl_to_sc[sl]; - vl = sc_to_vlt(dd, sc5); - if (vl != sc4) { - snoop_dbg("VL %d does not match SC %d of packet", - vl, sc4); - return -EINVAL; - } - } - - sc = dd->vld[vl].sc; /* Look up the context based on VL */ - if (sc) { - dpkt.sw_index = sc->sw_index; - snoop_dbg("Sending on context %u(%u)", sc->sw_index, - sc->hw_context); - } else { - snoop_dbg("Could not find context for vl %d", vl); - return -EINVAL; - } - - len = (count >> 2) + 2; /* Add in PBC */ - pbc = create_pbc(ppd, 0, 0, vl, len); - } else { - if (copy_from_user(&pbc, data, sizeof(pbc))) - return -EINVAL; - vl = (pbc >> PBC_VL_SHIFT) & PBC_VL_MASK; - sc = dd->vld[vl].sc; /* Look up the context based on VL */ - if (sc) { - dpkt.sw_index = sc->sw_index; - } else { - snoop_dbg("Could not find context for vl %d", vl); - return -EINVAL; - } - data += sizeof(pbc); - count -= sizeof(pbc); - } - dpkt.len = count; - dpkt.data = (unsigned long)data; - - snoop_dbg("PBC: vl=0x%llx Length=0x%llx", - (pbc >> 12) & 0xf, - (pbc & 0xfff)); - - dpkt.pbc = pbc; - ret = diagpkt_send(&dpkt); - /* - * diagpkt_send only returns number of bytes in the diagpkt so patch - * that up here before returning. 
- */ - if (ret == sizeof(dpkt)) - return count; - - return ret; -} - -static ssize_t hfi1_snoop_read(struct file *fp, char __user *data, - size_t pkt_len, loff_t *off) -{ - ssize_t ret = 0; - unsigned long flags = 0; - struct snoop_packet *packet = NULL; - struct hfi1_devdata *dd; - - dd = hfi1_dd_from_sc_inode(fp->f_inode); - if (!dd) - return -ENODEV; - - spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags); - - while (list_empty(&dd->hfi1_snoop.queue)) { - spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags); - - if (fp->f_flags & O_NONBLOCK) - return -EAGAIN; - - if (wait_event_interruptible( - dd->hfi1_snoop.waitq, - !list_empty(&dd->hfi1_snoop.queue))) - return -EINTR; - - spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags); - } - - if (!list_empty(&dd->hfi1_snoop.queue)) { - packet = list_entry(dd->hfi1_snoop.queue.next, - struct snoop_packet, list); - list_del(&packet->list); - spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags); - if (pkt_len >= packet->total_len) { - if (copy_to_user(data, packet->data, - packet->total_len)) - ret = -EFAULT; - else - ret = packet->total_len; - } else { - ret = -EINVAL; - } - - kfree(packet); - } else { - spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags); - } - - return ret; -} - -/** - * hfi1_assign_snoop_link_credits -- Set up credits for VL15 and others - * @ppd : ptr to hfi1 port data - * @value : options from user space - * - * Assumes the rest of the CM credit registers are zero from a - * previous global or credit reset. - * Leave shared count at zero for both global and all vls. - * In snoop mode ideally we don't use shared credits - * Reserve 8.5k for VL15 - * If total credits less than 8.5kbytes return error. - * Divide the rest of the credits across VL0 to VL7 and if - * each of these levels has less than 34 credits (at least 2048 + 128 bytes) - * return with an error. - * The credit registers will be reset to zero on link negotiation or link up - * so this function should be activated from user space only if the port has - * gone past link negotiation and link up. 
- * - * Return -- 0 if successful else error condition - * - */ -static long hfi1_assign_snoop_link_credits(struct hfi1_pportdata *ppd, - int value) -{ -#define OPA_MIN_PER_VL_CREDITS 34 /* 2048 + 128 bytes */ - struct buffer_control t; - int i; - struct hfi1_devdata *dd = ppd->dd; - u16 total_credits = (value >> 16) & 0xffff; - u16 vl15_credits = dd->vl15_init / 2; - u16 per_vl_credits; - __be16 be_per_vl_credits; - - if (!(ppd->host_link_state & HLS_UP)) - goto err_exit; - if (total_credits < vl15_credits) - goto err_exit; - - per_vl_credits = (total_credits - vl15_credits) / TXE_NUM_DATA_VL; - - if (per_vl_credits < OPA_MIN_PER_VL_CREDITS) - goto err_exit; - - memset(&t, 0, sizeof(t)); - be_per_vl_credits = cpu_to_be16(per_vl_credits); - - for (i = 0; i < TXE_NUM_DATA_VL; i++) - t.vl[i].dedicated = be_per_vl_credits; - - t.vl[15].dedicated = cpu_to_be16(vl15_credits); - return set_buffer_control(ppd, &t); - -err_exit: - snoop_dbg("port_state = 0x%x, total_credits = %d, vl15_credits = %d", - ppd->host_link_state, total_credits, vl15_credits); - - return -EINVAL; -} - -static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) -{ - struct hfi1_devdata *dd; - void *filter_value = NULL; - long ret = 0; - int value = 0; - u8 phys_state = 0; - u8 link_state = 0; - u16 dev_state = 0; - unsigned long flags = 0; - unsigned long *argp = NULL; - struct hfi1_packet_filter_command filter_cmd = {0}; - int mode_flag = 0; - struct hfi1_pportdata *ppd = NULL; - unsigned int index; - struct hfi1_link_info link_info; - int read_cmd, write_cmd, read_ok, write_ok; - - dd = hfi1_dd_from_sc_inode(fp->f_inode); - if (!dd) - return -ENODEV; - - mode_flag = dd->hfi1_snoop.mode_flag; - read_cmd = _IOC_DIR(cmd) & _IOC_READ; - write_cmd = _IOC_DIR(cmd) & _IOC_WRITE; - write_ok = access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd)); - read_ok = access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd)); - - if ((read_cmd && !write_ok) || (write_cmd && !read_ok)) - return -EFAULT; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - if ((mode_flag & HFI1_PORT_CAPTURE_MODE) && - (cmd != HFI1_SNOOP_IOCCLEARQUEUE) && - (cmd != HFI1_SNOOP_IOCCLEARFILTER) && - (cmd != HFI1_SNOOP_IOCSETFILTER)) - /* Capture devices are allowed only 3 operations - * 1.Clear capture queue - * 2.Clear capture filter - * 3.Set capture filter - * Other are invalid. 
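The credit split in hfi1_assign_snoop_link_credits() is easier to see with concrete numbers. A minimal standalone sketch follows; the input value and vl15_init are hypothetical, and TXE_NUM_DATA_VL is assumed to be 8 as on this hardware.

#include <stdio.h>

#define TXE_NUM_DATA_VL		8	/* data VLs on this chip */
#define OPA_MIN_PER_VL_CREDITS	34	/* 2048 + 128 bytes */

int main(void)
{
	unsigned int value = 768 << 16;		/* hypothetical user argument */
	unsigned short total_credits = (value >> 16) & 0xffff;
	unsigned short vl15_credits = 128 / 2;	/* assume vl15_init == 128 */
	unsigned short per_vl;

	per_vl = (total_credits - vl15_credits) / TXE_NUM_DATA_VL;
	/* 768 - 64 = 704 credits left; 704 / 8 = 88 per data VL, >= 34: OK */
	printf("vl15=%u per_vl=%u valid=%d\n", vl15_credits, per_vl,
	       per_vl >= OPA_MIN_PER_VL_CREDITS);
	return 0;
}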
- */ - return -EINVAL; - - switch (cmd) { - case HFI1_SNOOP_IOCSETLINKSTATE_EXTRA: - memset(&link_info, 0, sizeof(link_info)); - - if (copy_from_user(&link_info, - (struct hfi1_link_info __user *)arg, - sizeof(link_info))) - return -EFAULT; - - value = link_info.port_state; - index = link_info.port_number; - if (index > dd->num_pports - 1) - return -EINVAL; - - ppd = &dd->pport[index]; - if (!ppd) - return -EINVAL; - - /* What we want to transition to */ - phys_state = (value >> 4) & 0xF; - link_state = value & 0xF; - snoop_dbg("Setting link state 0x%x", value); - - switch (link_state) { - case IB_PORT_NOP: - if (phys_state == 0) - break; - /* fall through */ - case IB_PORT_DOWN: - switch (phys_state) { - case 0: - dev_state = HLS_DN_DOWNDEF; - break; - case 2: - dev_state = HLS_DN_POLL; - break; - case 3: - dev_state = HLS_DN_DISABLE; - break; - default: - return -EINVAL; - } - ret = set_link_state(ppd, dev_state); - break; - case IB_PORT_ARMED: - ret = set_link_state(ppd, HLS_UP_ARMED); - if (!ret) - send_idle_sma(dd, SMA_IDLE_ARM); - break; - case IB_PORT_ACTIVE: - ret = set_link_state(ppd, HLS_UP_ACTIVE); - if (!ret) - send_idle_sma(dd, SMA_IDLE_ACTIVE); - break; - default: - return -EINVAL; - } - - if (ret) - break; - /* fall through */ - case HFI1_SNOOP_IOCGETLINKSTATE: - case HFI1_SNOOP_IOCGETLINKSTATE_EXTRA: - if (cmd == HFI1_SNOOP_IOCGETLINKSTATE_EXTRA) { - memset(&link_info, 0, sizeof(link_info)); - if (copy_from_user(&link_info, - (struct hfi1_link_info __user *)arg, - sizeof(link_info))) - return -EFAULT; - index = link_info.port_number; - } else { - ret = __get_user(index, (int __user *)arg); - if (ret != 0) - break; - } - - if (index > dd->num_pports - 1) - return -EINVAL; - - ppd = &dd->pport[index]; - if (!ppd) - return -EINVAL; - - value = hfi1_ibphys_portstate(ppd); - value <<= 4; - value |= driver_lstate(ppd); - - snoop_dbg("Link port | Link State: %d", value); - - if ((cmd == HFI1_SNOOP_IOCGETLINKSTATE_EXTRA) || - (cmd == HFI1_SNOOP_IOCSETLINKSTATE_EXTRA)) { - link_info.port_state = value; - link_info.node_guid = cpu_to_be64(ppd->guid); - link_info.link_speed_active = - ppd->link_speed_active; - link_info.link_width_active = - ppd->link_width_active; - if (copy_to_user((struct hfi1_link_info __user *)arg, - &link_info, sizeof(link_info))) - return -EFAULT; - } else { - ret = __put_user(value, (int __user *)arg); - } - break; - - case HFI1_SNOOP_IOCCLEARQUEUE: - snoop_dbg("Clearing snoop queue"); - spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags); - drain_snoop_list(&dd->hfi1_snoop.queue); - spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags); - break; - - case HFI1_SNOOP_IOCCLEARFILTER: - snoop_dbg("Clearing filter"); - spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags); - if (dd->hfi1_snoop.filter_callback) { - /* Drain packets first */ - drain_snoop_list(&dd->hfi1_snoop.queue); - dd->hfi1_snoop.filter_callback = NULL; - } - kfree(dd->hfi1_snoop.filter_value); - dd->hfi1_snoop.filter_value = NULL; - spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags); - break; - - case HFI1_SNOOP_IOCSETFILTER: - snoop_dbg("Setting filter"); - /* just copy command structure */ - argp = (unsigned long *)arg; - if (copy_from_user(&filter_cmd, (void __user *)argp, - sizeof(filter_cmd))) - return -EFAULT; - - if (filter_cmd.opcode >= HFI1_MAX_FILTERS) { - pr_alert("Invalid opcode in request\n"); - return -EINVAL; - } - - snoop_dbg("Opcode %d Len %d Ptr %p", - filter_cmd.opcode, filter_cmd.length, - filter_cmd.value_ptr); - - filter_value = kcalloc(filter_cmd.length, 
sizeof(u8),
- GFP_KERNEL);
- if (!filter_value)
- return -ENOMEM;
-
- /* copy remaining data from userspace */
- if (copy_from_user((u8 *)filter_value,
- (void __user *)filter_cmd.value_ptr,
- filter_cmd.length)) {
- kfree(filter_value);
- return -EFAULT;
- }
- /* Drain packets first */
- spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
- drain_snoop_list(&dd->hfi1_snoop.queue);
- dd->hfi1_snoop.filter_callback =
- hfi1_filters[filter_cmd.opcode].filter;
- /* just in case we see back to back sets */
- kfree(dd->hfi1_snoop.filter_value);
- dd->hfi1_snoop.filter_value = filter_value;
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
- break;
- case HFI1_SNOOP_IOCGETVERSION:
- value = SNOOP_CAPTURE_VERSION;
- snoop_dbg("Getting version: %d", value);
- ret = __put_user(value, (int __user *)arg);
- break;
- case HFI1_SNOOP_IOCSET_OPTS:
- snoop_flags = 0;
- ret = __get_user(value, (int __user *)arg);
- if (ret != 0)
- break;
-
- snoop_dbg("Setting snoop option %d", value);
- if (value & SNOOP_DROP_SEND)
- snoop_flags |= SNOOP_DROP_SEND;
- if (value & SNOOP_USE_METADATA)
- snoop_flags |= SNOOP_USE_METADATA;
- if (value & (SNOOP_SET_VL0TOVL15)) {
- ppd = &dd->pport[0]; /* first port will do */
- ret = hfi1_assign_snoop_link_credits(ppd, value);
- }
- break;
- default:
- return -ENOTTY;
- }
-
- return ret;
-}
-
-static void snoop_list_add_tail(struct snoop_packet *packet,
- struct hfi1_devdata *dd)
-{
- unsigned long flags = 0;
-
- spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
- if (likely((dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE) ||
- (dd->hfi1_snoop.mode_flag & HFI1_PORT_CAPTURE_MODE))) {
- list_add_tail(&packet->list, &dd->hfi1_snoop.queue);
- snoop_dbg("Added packet to list");
- }
-
- /*
- * Technically we could have closed the snoop device while waiting
- * on the above lock and it is gone now. The snoop mode_flag will
- * prevent us from adding the packet to the queue though.
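From user space, installing one of these filters is a small marshalling exercise against the HFI1_SNOOP_IOCSETFILTER handler above. A sketch follows; the struct field names mirror the driver's usage of struct hfi1_packet_filter_command, but the exact types, the ioctl number, and the filter opcode all come from the hfi1 uapi header, so they are passed in here rather than invented.

#include <stdint.h>
#include <sys/ioctl.h>

/* field names follow the driver's use of struct hfi1_packet_filter_command */
struct filter_cmd_view {
	int opcode;		/* index into hfi1_filters[] */
	int length;		/* bytes to copy from value_ptr */
	void *value_ptr;	/* the comparison value, e.g. a 16-bit LID */
};

/* ioc_setfilter is HFI1_SNOOP_IOCSETFILTER and dlid_opcode the DLID
 * filter's index, both taken from the uapi header by the caller. */
int snoop_set_dlid_filter(int fd, unsigned long ioc_setfilter,
			  int dlid_opcode, uint16_t dlid)
{
	struct filter_cmd_view cmd = {
		.opcode = dlid_opcode,
		.length = sizeof(dlid),
		.value_ptr = &dlid,
	};

	/* the driver kcallocs cmd.length bytes and copies from value_ptr */
	return ioctl(fd, ioc_setfilter, &cmd);
}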
- */ - - spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags); - wake_up_interruptible(&dd->hfi1_snoop.waitq); -} - -static inline int hfi1_filter_check(void *val, const char *msg) -{ - if (!val) { - snoop_dbg("Error invalid %s value for filter", msg); - return HFI1_FILTER_ERR; - } - return 0; -} - -static int hfi1_filter_lid(void *ibhdr, void *packet_data, void *value) -{ - struct hfi1_ib_header *hdr; - int ret; - - ret = hfi1_filter_check(ibhdr, "header"); - if (ret) - return ret; - ret = hfi1_filter_check(value, "user"); - if (ret) - return ret; - hdr = (struct hfi1_ib_header *)ibhdr; - - if (*((u16 *)value) == be16_to_cpu(hdr->lrh[3])) /* matches slid */ - return HFI1_FILTER_HIT; /* matched */ - - return HFI1_FILTER_MISS; /* Not matched */ -} - -static int hfi1_filter_dlid(void *ibhdr, void *packet_data, void *value) -{ - struct hfi1_ib_header *hdr; - int ret; - - ret = hfi1_filter_check(ibhdr, "header"); - if (ret) - return ret; - ret = hfi1_filter_check(value, "user"); - if (ret) - return ret; - - hdr = (struct hfi1_ib_header *)ibhdr; - - if (*((u16 *)value) == be16_to_cpu(hdr->lrh[1])) - return HFI1_FILTER_HIT; - - return HFI1_FILTER_MISS; -} - -/* Not valid for outgoing packets, send handler passes null for data*/ -static int hfi1_filter_mad_mgmt_class(void *ibhdr, void *packet_data, - void *value) -{ - struct hfi1_ib_header *hdr; - struct hfi1_other_headers *ohdr = NULL; - struct ib_smp *smp = NULL; - u32 qpn = 0; - int ret; - - ret = hfi1_filter_check(ibhdr, "header"); - if (ret) - return ret; - ret = hfi1_filter_check(packet_data, "packet_data"); - if (ret) - return ret; - ret = hfi1_filter_check(value, "user"); - if (ret) - return ret; - - hdr = (struct hfi1_ib_header *)ibhdr; - - /* Check for GRH */ - if ((be16_to_cpu(hdr->lrh[0]) & 3) == HFI1_LRH_BTH) - ohdr = &hdr->u.oth; /* LRH + BTH + DETH */ - else - ohdr = &hdr->u.l.oth; /* LRH + GRH + BTH + DETH */ - - qpn = be32_to_cpu(ohdr->bth[1]) & 0x00FFFFFF; - if (qpn <= 1) { - smp = (struct ib_smp *)packet_data; - if (*((u8 *)value) == smp->mgmt_class) - return HFI1_FILTER_HIT; - else - return HFI1_FILTER_MISS; - } - return HFI1_FILTER_ERR; -} - -static int hfi1_filter_qp_number(void *ibhdr, void *packet_data, void *value) -{ - struct hfi1_ib_header *hdr; - struct hfi1_other_headers *ohdr = NULL; - int ret; - - ret = hfi1_filter_check(ibhdr, "header"); - if (ret) - return ret; - ret = hfi1_filter_check(value, "user"); - if (ret) - return ret; - - hdr = (struct hfi1_ib_header *)ibhdr; - - /* Check for GRH */ - if ((be16_to_cpu(hdr->lrh[0]) & 3) == HFI1_LRH_BTH) - ohdr = &hdr->u.oth; /* LRH + BTH + DETH */ - else - ohdr = &hdr->u.l.oth; /* LRH + GRH + BTH + DETH */ - if (*((u32 *)value) == (be32_to_cpu(ohdr->bth[1]) & 0x00FFFFFF)) - return HFI1_FILTER_HIT; - - return HFI1_FILTER_MISS; -} - -static int hfi1_filter_ibpacket_type(void *ibhdr, void *packet_data, - void *value) -{ - u32 lnh = 0; - u8 opcode = 0; - struct hfi1_ib_header *hdr; - struct hfi1_other_headers *ohdr = NULL; - int ret; - - ret = hfi1_filter_check(ibhdr, "header"); - if (ret) - return ret; - ret = hfi1_filter_check(value, "user"); - if (ret) - return ret; - - hdr = (struct hfi1_ib_header *)ibhdr; - - lnh = (be16_to_cpu(hdr->lrh[0]) & 3); - - if (lnh == HFI1_LRH_BTH) - ohdr = &hdr->u.oth; - else if (lnh == HFI1_LRH_GRH) - ohdr = &hdr->u.l.oth; - else - return HFI1_FILTER_ERR; - - opcode = be32_to_cpu(ohdr->bth[0]) >> 24; - - if (*((u8 *)value) == ((opcode >> 5) & 0x7)) - return HFI1_FILTER_HIT; - - return HFI1_FILTER_MISS; -} - -static int 
hfi1_filter_ib_service_level(void *ibhdr, void *packet_data,
- void *value)
-{
- struct hfi1_ib_header *hdr;
- int ret;
-
- ret = hfi1_filter_check(ibhdr, "header");
- if (ret)
- return ret;
- ret = hfi1_filter_check(value, "user");
- if (ret)
- return ret;
-
- hdr = (struct hfi1_ib_header *)ibhdr;
-
- if ((*((u8 *)value)) == ((be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF))
- return HFI1_FILTER_HIT;
-
- return HFI1_FILTER_MISS;
-}
-
-static int hfi1_filter_ib_pkey(void *ibhdr, void *packet_data, void *value)
-{
- u32 lnh = 0;
- struct hfi1_ib_header *hdr;
- struct hfi1_other_headers *ohdr = NULL;
- int ret;
-
- ret = hfi1_filter_check(ibhdr, "header");
- if (ret)
- return ret;
- ret = hfi1_filter_check(value, "user");
- if (ret)
- return ret;
-
- hdr = (struct hfi1_ib_header *)ibhdr;
-
- lnh = (be16_to_cpu(hdr->lrh[0]) & 3);
- if (lnh == HFI1_LRH_BTH)
- ohdr = &hdr->u.oth;
- else if (lnh == HFI1_LRH_GRH)
- ohdr = &hdr->u.l.oth;
- else
- return HFI1_FILTER_ERR;
-
- /* P_key is a 16-bit entity, however the top-most bit indicates
- * the type of membership: 0 for limited and 1 for full.
- * Limited members cannot accept information from other
- * limited members, but communication is allowed between
- * every other combination of membership.
- * Hence we omit the top-most bit while comparing.
- */
-
- if ((*(u16 *)value & 0x7FFF) ==
- ((be32_to_cpu(ohdr->bth[0])) & 0x7FFF))
- return HFI1_FILTER_HIT;
-
- return HFI1_FILTER_MISS;
-}
-
-/*
- * If packet_data is NULL then this is coming from one of the send functions.
- * Thus we know whether it is an ingressed or egressed packet.
- */
-static int hfi1_filter_direction(void *ibhdr, void *packet_data, void *value)
-{
- u8 user_dir = *(u8 *)value;
- int ret;
-
- ret = hfi1_filter_check(value, "user");
- if (ret)
- return ret;
-
- if (packet_data) {
- /* Incoming packet */
- if (user_dir & HFI1_SNOOP_INGRESS)
- return HFI1_FILTER_HIT;
- } else {
- /* Outgoing packet */
- if (user_dir & HFI1_SNOOP_EGRESS)
- return HFI1_FILTER_HIT;
- }
-
- return HFI1_FILTER_MISS;
-}
-
-/*
- * Allocate a snoop packet. This is the structure stored in the ring buffer,
- * not to be confused with an hfi packet type.
- */
-static struct snoop_packet *allocate_snoop_packet(u32 hdr_len,
- u32 data_len,
- u32 md_len)
-{
- struct snoop_packet *packet;
-
- packet = kzalloc(sizeof(*packet) + hdr_len + data_len
- + md_len,
- GFP_ATOMIC | __GFP_NOWARN);
- if (likely(packet))
- INIT_LIST_HEAD(&packet->list);
-
- return packet;
-}
-
-/*
- * Instead of having snoop and capture code intermixed with the recv functions
- * (both the interrupt handler and hfi1_ib_rcv()), we hijack the call and land
- * in here for snoop/capture, but if not enabled the call will go through as
- * before. This gives us a single point to constrain all of the snoop recv
- * logic. There is nothing special that needs to happen for bypass packets.
- * This routine should not try to look into the packet. It just copies it.
- * There is no guarantee for filters when it comes to bypass packets as there
- * is no specific support. Bottom line is this routine does not even know what
- * a bypass packet is.
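All of the filters above share one header walk: LRH bits [1:0] (the LNH field) say whether a 40-byte GRH sits between the LRH and the BTH, and the low 24 bits of bth[1] carry the destination QPN. A host-side sketch of that walk on a wire-format buffer, assuming the standard LNH encodings (2 and 3) that HFI1_LRH_BTH/HFI1_LRH_GRH correspond to:

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define LNH_BTH 2	/* LRH is followed directly by the BTH */
#define LNH_GRH 3	/* a 40-byte GRH sits between LRH and BTH */

/* Return the 24-bit destination QPN from a wire-format IB header, or -1
 * on an unexpected LNH, mirroring hfi1_filter_qp_number() above. */
static int32_t qpn_from_wire(const uint8_t *pkt)
{
	uint16_t lrh0;
	uint32_t bth1;
	const uint8_t *bth;

	memcpy(&lrh0, pkt, sizeof(lrh0));
	switch (ntohs(lrh0) & 3) {
	case LNH_BTH:
		bth = pkt + 8;		/* 8-byte LRH */
		break;
	case LNH_GRH:
		bth = pkt + 8 + 40;	/* skip the GRH as well */
		break;
	default:
		return -1;		/* the driver returns HFI1_FILTER_ERR */
	}

	memcpy(&bth1, bth + 4, sizeof(bth1));	/* bth[1] */
	return ntohl(bth1) & 0x00FFFFFF;
}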
- */ -int snoop_recv_handler(struct hfi1_packet *packet) -{ - struct hfi1_pportdata *ppd = packet->rcd->ppd; - struct hfi1_ib_header *hdr = packet->hdr; - int header_size = packet->hlen; - void *data = packet->ebuf; - u32 tlen = packet->tlen; - struct snoop_packet *s_packet = NULL; - int ret; - int snoop_mode = 0; - u32 md_len = 0; - struct capture_md md; - - snoop_dbg("PACKET IN: hdr size %d tlen %d data %p", header_size, tlen, - data); - - trace_snoop_capture(ppd->dd, header_size, hdr, tlen - header_size, - data); - - if (!ppd->dd->hfi1_snoop.filter_callback) { - snoop_dbg("filter not set"); - ret = HFI1_FILTER_HIT; - } else { - ret = ppd->dd->hfi1_snoop.filter_callback(hdr, data, - ppd->dd->hfi1_snoop.filter_value); - } - - switch (ret) { - case HFI1_FILTER_ERR: - snoop_dbg("Error in filter call"); - break; - case HFI1_FILTER_MISS: - snoop_dbg("Filter Miss"); - break; - case HFI1_FILTER_HIT: - - if (ppd->dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE) - snoop_mode = 1; - if ((snoop_mode == 0) || - unlikely(snoop_flags & SNOOP_USE_METADATA)) - md_len = sizeof(struct capture_md); - - s_packet = allocate_snoop_packet(header_size, - tlen - header_size, - md_len); - - if (unlikely(!s_packet)) { - dd_dev_warn_ratelimited(ppd->dd, "Unable to allocate snoop/capture packet\n"); - break; - } - - if (md_len > 0) { - memset(&md, 0, sizeof(struct capture_md)); - md.port = 1; - md.dir = PKT_DIR_INGRESS; - md.u.rhf = packet->rhf; - memcpy(s_packet->data, &md, md_len); - } - - /* We should always have a header */ - if (hdr) { - memcpy(s_packet->data + md_len, hdr, header_size); - } else { - dd_dev_err(ppd->dd, "Unable to copy header to snoop/capture packet\n"); - kfree(s_packet); - break; - } - - /* - * Packets with no data are possible. If there is no data needed - * to take care of the last 4 bytes which are normally included - * with data buffers and are included in tlen. Since we kzalloc - * the buffer we do not need to set any values but if we decide - * not to use kzalloc we should zero them. - */ - if (data) - memcpy(s_packet->data + header_size + md_len, data, - tlen - header_size); - - s_packet->total_len = tlen + md_len; - snoop_list_add_tail(s_packet, ppd->dd); - - /* - * If we are snooping the packet not capturing then throw away - * after adding to the list. - */ - snoop_dbg("Capturing packet"); - if (ppd->dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE) { - snoop_dbg("Throwing packet away"); - /* - * If we are dropping the packet we still may need to - * handle the case where error flags are set, this is - * normally done by the type specific handler but that - * won't be called in this case. - */ - if (unlikely(rhf_err_flags(packet->rhf))) - handle_eflags(packet); - - /* throw the packet on the floor */ - return RHF_RCV_CONTINUE; - } - break; - default: - break; - } - - /* - * We do not care what type of packet came in here - just pass it off - * to the normal handler. - */ - return ppd->dd->normal_rhf_rcv_functions[rhf_rcv_type(packet->rhf)] - (packet); -} - -/* - * Handle snooping and capturing packets when sdma is being used. - */ -int snoop_send_dma_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps, - u64 pbc) -{ - pr_alert("Snooping/Capture of Send DMA Packets Is Not Supported!\n"); - snoop_dbg("Unsupported Operation"); - return hfi1_verbs_send_dma(qp, ps, 0); -} - -/* - * Handle snooping and capturing packets when pio is being used. Does not handle - * bypass packets. The only way to send a bypass packet currently is to use the - * diagpkt interface. 
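What snoop_recv_handler() queues is a flat buffer: optional capture_md, then the received header, then the payload, with total_len covering all three. A reader-side sketch of splitting one record returned by read() on the snoop device; md_len is whatever the reader negotiated via SNOOP_USE_METADATA and hdr_len comes from parsing the header itself.

#include <stddef.h>
#include <stdint.h>

/* One record is laid out as [capture_md?][header][payload]. */
static void split_record(const uint8_t *buf, size_t total_len,
			 size_t md_len, size_t hdr_len,
			 const uint8_t **hdr, const uint8_t **payload,
			 size_t *payload_len)
{
	*hdr = buf + md_len;		/* IB header follows the metadata */
	*payload = *hdr + hdr_len;	/* may be empty for header-only pkts */
	*payload_len = total_len - md_len - hdr_len;
}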
When that interface is enabled, snoop/capture is not.
- */
-int snoop_send_pio_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
- u64 pbc)
-{
- u32 hdrwords = qp->s_hdrwords;
- struct rvt_sge_state *ss = qp->s_cur_sge;
- u32 len = qp->s_cur_size;
- u32 dwords = (len + 3) >> 2;
- u32 plen = hdrwords + dwords + 2; /* includes pbc */
- struct hfi1_pportdata *ppd = ps->ppd;
- struct snoop_packet *s_packet = NULL;
- u32 *hdr = (u32 *)&ps->s_txreq->phdr.hdr;
- u32 length = 0;
- struct rvt_sge_state temp_ss;
- void *data = NULL;
- void *data_start = NULL;
- int ret;
- int snoop_mode = 0;
- int md_len = 0;
- struct capture_md md;
- u32 vl;
- u32 hdr_len = hdrwords << 2;
- u32 tlen = HFI1_GET_PKT_LEN(&ps->s_txreq->phdr.hdr);
-
- md.u.pbc = 0;
-
- snoop_dbg("PACKET OUT: hdrword %u len %u plen %u dwords %u tlen %u",
- hdrwords, len, plen, dwords, tlen);
- if (ppd->dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE)
- snoop_mode = 1;
- if ((snoop_mode == 0) ||
- unlikely(snoop_flags & SNOOP_USE_METADATA))
- md_len = sizeof(struct capture_md);
-
- /* not using ss->total_len as arg 2 b/c that does not count CRC */
- s_packet = allocate_snoop_packet(hdr_len, tlen - hdr_len, md_len);
-
- if (unlikely(!s_packet)) {
- dd_dev_warn_ratelimited(ppd->dd, "Unable to allocate snoop/capture packet\n");
- goto out;
- }
-
- s_packet->total_len = tlen + md_len;
-
- if (md_len > 0) {
- memset(&md, 0, sizeof(struct capture_md));
- md.port = 1;
- md.dir = PKT_DIR_EGRESS;
- if (likely(pbc == 0)) {
- vl = be16_to_cpu(ps->s_txreq->phdr.hdr.lrh[0]) >> 12;
- md.u.pbc = create_pbc(ppd, 0, qp->s_srate, vl, plen);
- } else {
- md.u.pbc = 0;
- }
- memcpy(s_packet->data, &md, md_len);
- } else {
- md.u.pbc = pbc;
- }
-
- /* Copy header */
- if (likely(hdr)) {
- memcpy(s_packet->data + md_len, hdr, hdr_len);
- } else {
- dd_dev_err(ppd->dd,
- "Unable to copy header to snoop/capture packet\n");
- kfree(s_packet);
- goto out;
- }
-
- if (ss) {
- data = s_packet->data + hdr_len + md_len;
- data_start = data;
-
- /*
- * Copy SGE State
- * The update_sge() function below will not modify the
- * individual SGEs in the array. It will make a copy each time
- * and operate on that. So we only need to copy this instance
- * and it won't impact PIO.
- */
- temp_ss = *ss;
- length = len;
-
- snoop_dbg("Need to copy %d bytes", length);
- while (length) {
- void *addr = temp_ss.sge.vaddr;
- u32 slen = temp_ss.sge.length;
-
- if (slen > length) {
- slen = length;
- snoop_dbg("slen %d > len %d", slen, length);
- }
- snoop_dbg("copy %d to %p", slen, addr);
- memcpy(data, addr, slen);
- update_sge(&temp_ss, slen);
- length -= slen;
- data += slen;
- snoop_dbg("data is now %p bytes left %d", data, length);
- }
- snoop_dbg("Completed SGE copy");
- }
-
- /*
- * Why do the filter check down here? Because the event tracing has its
- * own filtering and we need to have walked the SGE list.
- */ - if (!ppd->dd->hfi1_snoop.filter_callback) { - snoop_dbg("filter not set\n"); - ret = HFI1_FILTER_HIT; - } else { - ret = ppd->dd->hfi1_snoop.filter_callback( - &ps->s_txreq->phdr.hdr, - NULL, - ppd->dd->hfi1_snoop.filter_value); - } - - switch (ret) { - case HFI1_FILTER_ERR: - snoop_dbg("Error in filter call"); - /* fall through */ - case HFI1_FILTER_MISS: - snoop_dbg("Filter Miss"); - kfree(s_packet); - break; - case HFI1_FILTER_HIT: - snoop_dbg("Capturing packet"); - snoop_list_add_tail(s_packet, ppd->dd); - - if (unlikely((snoop_flags & SNOOP_DROP_SEND) && - (ppd->dd->hfi1_snoop.mode_flag & - HFI1_PORT_SNOOP_MODE))) { - unsigned long flags; - - snoop_dbg("Dropping packet"); - if (qp->s_wqe) { - spin_lock_irqsave(&qp->s_lock, flags); - hfi1_send_complete( - qp, - qp->s_wqe, - IB_WC_SUCCESS); - spin_unlock_irqrestore(&qp->s_lock, flags); - } else if (qp->ibqp.qp_type == IB_QPT_RC) { - spin_lock_irqsave(&qp->s_lock, flags); - hfi1_rc_send_complete(qp, - &ps->s_txreq->phdr.hdr); - spin_unlock_irqrestore(&qp->s_lock, flags); - } - - /* - * If snoop is dropping the packet we need to put the - * txreq back because no one else will. - */ - hfi1_put_txreq(ps->s_txreq); - return 0; - } - break; - default: - kfree(s_packet); - break; - } -out: - return hfi1_verbs_send_pio(qp, ps, md.u.pbc); -} - -/* - * Callers of this must pass a hfi1_ib_header type for the from ptr. Currently - * this can be used anywhere, but the intention is for inline ACKs for RC and - * CCA packets. We don't restrict this usage though. - */ -void snoop_inline_pio_send(struct hfi1_devdata *dd, struct pio_buf *pbuf, - u64 pbc, const void *from, size_t count) -{ - int snoop_mode = 0; - int md_len = 0; - struct capture_md md; - struct snoop_packet *s_packet = NULL; - - /* - * count is in dwords so we need to convert to bytes. - * We also need to account for CRC which would be tacked on by hardware. 
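The conversion described above is just dwords-to-bytes plus the 4-byte ICRC the hardware appends on the wire; for example, a 14-dword inline ACK becomes 14 * 4 + 4 = 60 bytes. As a one-liner:

/* count is in dwords; the hardware tacks a 4-byte CRC onto the wire */
static inline int wire_bytes_from_dwords(int count)
{
	return (count << 2) + 4;	/* e.g. 14 dwords -> 60 bytes */
}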
- */ - int packet_len = (count << 2) + 4; - int ret; - - snoop_dbg("ACK OUT: len %d", packet_len); - - if (!dd->hfi1_snoop.filter_callback) { - snoop_dbg("filter not set"); - ret = HFI1_FILTER_HIT; - } else { - ret = dd->hfi1_snoop.filter_callback( - (struct hfi1_ib_header *)from, - NULL, - dd->hfi1_snoop.filter_value); - } - - switch (ret) { - case HFI1_FILTER_ERR: - snoop_dbg("Error in filter call"); - /* fall through */ - case HFI1_FILTER_MISS: - snoop_dbg("Filter Miss"); - break; - case HFI1_FILTER_HIT: - snoop_dbg("Capturing packet"); - if (dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE) - snoop_mode = 1; - if ((snoop_mode == 0) || - unlikely(snoop_flags & SNOOP_USE_METADATA)) - md_len = sizeof(struct capture_md); - - s_packet = allocate_snoop_packet(packet_len, 0, md_len); - - if (unlikely(!s_packet)) { - dd_dev_warn_ratelimited(dd, "Unable to allocate snoop/capture packet\n"); - goto inline_pio_out; - } - - s_packet->total_len = packet_len + md_len; - - /* Fill in the metadata for the packet */ - if (md_len > 0) { - memset(&md, 0, sizeof(struct capture_md)); - md.port = 1; - md.dir = PKT_DIR_EGRESS; - md.u.pbc = pbc; - memcpy(s_packet->data, &md, md_len); - } - - /* Add the packet data which is a single buffer */ - memcpy(s_packet->data + md_len, from, packet_len); - - snoop_list_add_tail(s_packet, dd); - - if (unlikely((snoop_flags & SNOOP_DROP_SEND) && snoop_mode)) { - snoop_dbg("Dropping packet"); - return; - } - break; - default: - break; - } - -inline_pio_out: - pio_copy(dd, pbuf, pbc, from, count); -} diff --git a/drivers/staging/rdma/hfi1/eprom.c b/drivers/staging/rdma/hfi1/eprom.c deleted file mode 100644 index bd8771570f81..000000000000 --- a/drivers/staging/rdma/hfi1/eprom.c +++ /dev/null @@ -1,471 +0,0 @@ -/* - * Copyright(c) 2015, 2016 Intel Corporation. - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * BSD LICENSE - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ -#include <linux/delay.h> -#include "hfi.h" -#include "common.h" -#include "eprom.h" - -/* - * The EPROM is logically divided into three partitions: - * partition 0: the first 128K, visible from PCI ROM BAR - * partition 1: 4K config file (sector size) - * partition 2: the rest - */ -#define P0_SIZE (128 * 1024) -#define P1_SIZE (4 * 1024) -#define P1_START P0_SIZE -#define P2_START (P0_SIZE + P1_SIZE) - -/* erase sizes supported by the controller */ -#define SIZE_4KB (4 * 1024) -#define MASK_4KB (SIZE_4KB - 1) - -#define SIZE_32KB (32 * 1024) -#define MASK_32KB (SIZE_32KB - 1) - -#define SIZE_64KB (64 * 1024) -#define MASK_64KB (SIZE_64KB - 1) - -/* controller page size, in bytes */ -#define EP_PAGE_SIZE 256 -#define EEP_PAGE_MASK (EP_PAGE_SIZE - 1) - -/* controller commands */ -#define CMD_SHIFT 24 -#define CMD_NOP (0) -#define CMD_PAGE_PROGRAM(addr) ((0x02 << CMD_SHIFT) | addr) -#define CMD_READ_DATA(addr) ((0x03 << CMD_SHIFT) | addr) -#define CMD_READ_SR1 ((0x05 << CMD_SHIFT)) -#define CMD_WRITE_ENABLE ((0x06 << CMD_SHIFT)) -#define CMD_SECTOR_ERASE_4KB(addr) ((0x20 << CMD_SHIFT) | addr) -#define CMD_SECTOR_ERASE_32KB(addr) ((0x52 << CMD_SHIFT) | addr) -#define CMD_CHIP_ERASE ((0x60 << CMD_SHIFT)) -#define CMD_READ_MANUF_DEV_ID ((0x90 << CMD_SHIFT)) -#define CMD_RELEASE_POWERDOWN_NOID ((0xab << CMD_SHIFT)) -#define CMD_SECTOR_ERASE_64KB(addr) ((0xd8 << CMD_SHIFT) | addr) - -/* controller interface speeds */ -#define EP_SPEED_FULL 0x2 /* full speed */ - -/* controller status register 1 bits */ -#define SR1_BUSY 0x1ull /* the BUSY bit in SR1 */ - -/* sleep length while waiting for controller */ -#define WAIT_SLEEP_US 100 /* must be larger than 5 (see usage) */ -#define COUNT_DELAY_SEC(n) ((n) * (1000000 / WAIT_SLEEP_US)) - -/* GPIO pins */ -#define EPROM_WP_N BIT_ULL(14) /* EPROM write line */ - -/* - * How long to wait for the EPROM to become available, in ms. - * The spec 32 Mb EPROM takes around 40s to erase then write. - * Double it for safety. - */ -#define EPROM_TIMEOUT 80000 /* ms */ - -/* - * Turn on external enable line that allows writing on the flash. - */ -static void write_enable(struct hfi1_devdata *dd) -{ - /* raise signal */ - write_csr(dd, ASIC_GPIO_OUT, read_csr(dd, ASIC_GPIO_OUT) | EPROM_WP_N); - /* raise enable */ - write_csr(dd, ASIC_GPIO_OE, read_csr(dd, ASIC_GPIO_OE) | EPROM_WP_N); -} - -/* - * Turn off external enable line that allows writing on the flash. - */ -static void write_disable(struct hfi1_devdata *dd) -{ - /* lower signal */ - write_csr(dd, ASIC_GPIO_OUT, read_csr(dd, ASIC_GPIO_OUT) & ~EPROM_WP_N); - /* lower enable */ - write_csr(dd, ASIC_GPIO_OE, read_csr(dd, ASIC_GPIO_OE) & ~EPROM_WP_N); -} - -/* - * Wait for the device to become not busy. Must be called after all - * write or erase operations. 
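Each write to ASIC_EEP_ADDR_CMD packs an 8-bit SPI flash opcode into bits 31:24 with a 24-bit flash address below it, which is all the CMD_* macros above do. For instance, programming the page at 0x20000 issues 0x02 << 24 | 0x20000 = 0x02020000. A sketch of the encoding:

#include <stdint.h>

#define CMD_SHIFT 24

/* pack an SPI flash opcode and 24-bit address, as the CMD_* macros do */
static inline uint32_t ep_cmd(uint8_t opcode, uint32_t addr)
{
	return ((uint32_t)opcode << CMD_SHIFT) | (addr & 0x00ffffff);
}

/* ep_cmd(0x02, 0x20000) == 0x02020000, i.e. CMD_PAGE_PROGRAM(0x20000) */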
- */
-static int wait_for_not_busy(struct hfi1_devdata *dd)
-{
- unsigned long count = 0;
- u64 reg;
- int ret = 0;
-
- /* starts page mode */
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_READ_SR1);
- while (1) {
- usleep_range(WAIT_SLEEP_US - 5, WAIT_SLEEP_US + 5);
- count++;
- reg = read_csr(dd, ASIC_EEP_DATA);
- if ((reg & SR1_BUSY) == 0)
- break;
- /* 200s is the largest time for a 128Mb device */
- if (count > COUNT_DELAY_SEC(200)) {
- dd_dev_err(dd, "waited too long for SPI FLASH busy to clear - failing\n");
- ret = -ETIMEDOUT;
- break; /* break, not goto - must stop page mode */
- }
- }
-
- /* stop page mode with a NOP */
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_NOP);
-
- return ret;
-}
-
-/*
- * Read the device ID from the SPI controller.
- */
-static u32 read_device_id(struct hfi1_devdata *dd)
-{
- /* read the Manufacture Device ID */
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_READ_MANUF_DEV_ID);
- return (u32)read_csr(dd, ASIC_EEP_DATA);
-}
-
-/*
- * Erase the whole flash.
- */
-static int erase_chip(struct hfi1_devdata *dd)
-{
- int ret;
-
- write_enable(dd);
-
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_WRITE_ENABLE);
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_CHIP_ERASE);
- ret = wait_for_not_busy(dd);
-
- write_disable(dd);
-
- return ret;
-}
-
-/*
- * Erase a range.
- */
-static int erase_range(struct hfi1_devdata *dd, u32 start, u32 len)
-{
- u32 end = start + len;
- int ret = 0;
-
- if (end < start)
- return -EINVAL;
-
- /* check the end points for the minimum erase */
- if ((start & MASK_4KB) || (end & MASK_4KB)) {
- dd_dev_err(dd,
- "%s: non-aligned range (0x%x,0x%x) for a 4KB erase\n",
- __func__, start, end);
- return -EINVAL;
- }
-
- write_enable(dd);
-
- while (start < end) {
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_WRITE_ENABLE);
- /* check in order of largest to smallest */
- if (((start & MASK_64KB) == 0) && (start + SIZE_64KB <= end)) {
- write_csr(dd, ASIC_EEP_ADDR_CMD,
- CMD_SECTOR_ERASE_64KB(start));
- start += SIZE_64KB;
- } else if (((start & MASK_32KB) == 0) &&
- (start + SIZE_32KB <= end)) {
- write_csr(dd, ASIC_EEP_ADDR_CMD,
- CMD_SECTOR_ERASE_32KB(start));
- start += SIZE_32KB;
- } else { /* 4KB will work */
- write_csr(dd, ASIC_EEP_ADDR_CMD,
- CMD_SECTOR_ERASE_4KB(start));
- start += SIZE_4KB;
- }
- ret = wait_for_not_busy(dd);
- if (ret)
- goto done;
- }
-
-done:
- write_disable(dd);
-
- return ret;
-}
-
-/*
- * Read a 256 byte (64 dword) EPROM page.
- * All callers have verified the offset is at a page boundary.
- */
-static void read_page(struct hfi1_devdata *dd, u32 offset, u32 *result)
-{
- int i;
-
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_READ_DATA(offset));
- for (i = 0; i < EP_PAGE_SIZE / sizeof(u32); i++)
- result[i] = (u32)read_csr(dd, ASIC_EEP_DATA);
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_NOP); /* close open page */
-}
-
-/*
- * Read length bytes starting at offset. Copy to user address addr.
- */
-static int read_length(struct hfi1_devdata *dd, u32 start, u32 len, u64 addr)
-{
- u32 offset;
- u32 buffer[EP_PAGE_SIZE / sizeof(u32)];
- int ret = 0;
-
- /* reject anything not on an EPROM page boundary */
- if ((start & EEP_PAGE_MASK) || (len & EEP_PAGE_MASK))
- return -EINVAL;
-
- for (offset = 0; offset < len; offset += EP_PAGE_SIZE) {
- read_page(dd, start + offset, buffer);
- if (copy_to_user((void __user *)(addr + offset),
- buffer, EP_PAGE_SIZE)) {
- ret = -EFAULT;
- goto done;
- }
- }
-
-done:
- return ret;
-}
-
-/*
- * Write a 256 byte (64 dword) EPROM page.
- * All callers have verified the offset is at a page boundary.
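To see erase_range()'s greedy sector selection in action, consider erasing 0x1F000 through 0x30000: one 4KB erase brings start to the 64KB boundary at 0x20000, then a single 64KB erase finishes the range. A standalone trace using the same masks and sizes as the driver's defines:

#include <stdio.h>
#include <stdint.h>

#define SIZE_4KB  (4 * 1024)
#define MASK_4KB  (SIZE_4KB - 1)
#define SIZE_32KB (32 * 1024)
#define MASK_32KB (SIZE_32KB - 1)
#define SIZE_64KB (64 * 1024)
#define MASK_64KB (SIZE_64KB - 1)

int main(void)
{
	uint32_t start = 0x1F000, end = 0x30000;

	while (start < end) {
		if (!(start & MASK_64KB) && start + SIZE_64KB <= end) {
			printf("64KB erase at 0x%x\n", start);
			start += SIZE_64KB;
		} else if (!(start & MASK_32KB) && start + SIZE_32KB <= end) {
			printf("32KB erase at 0x%x\n", start);
			start += SIZE_32KB;
		} else {
			printf("4KB erase at 0x%x\n", start);
			start += SIZE_4KB;
		}
	}
	return 0;	/* prints: 4KB at 0x1f000, then 64KB at 0x20000 */
}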
- */ -static int write_page(struct hfi1_devdata *dd, u32 offset, u32 *data) -{ - int i; - - write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_WRITE_ENABLE); - write_csr(dd, ASIC_EEP_DATA, data[0]); - write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_PAGE_PROGRAM(offset)); - for (i = 1; i < EP_PAGE_SIZE / sizeof(u32); i++) - write_csr(dd, ASIC_EEP_DATA, data[i]); - /* will close the open page */ - return wait_for_not_busy(dd); -} - -/* - * Write length bytes starting at offset. Read from user address addr. - */ -static int write_length(struct hfi1_devdata *dd, u32 start, u32 len, u64 addr) -{ - u32 offset; - u32 buffer[EP_PAGE_SIZE / sizeof(u32)]; - int ret = 0; - - /* reject anything not on an EPROM page boundary */ - if ((start & EEP_PAGE_MASK) || (len & EEP_PAGE_MASK)) - return -EINVAL; - - write_enable(dd); - - for (offset = 0; offset < len; offset += EP_PAGE_SIZE) { - if (copy_from_user(buffer, (void __user *)(addr + offset), - EP_PAGE_SIZE)) { - ret = -EFAULT; - goto done; - } - ret = write_page(dd, start + offset, buffer); - if (ret) - goto done; - } - -done: - write_disable(dd); - return ret; -} - -/* convert an range composite to a length, in bytes */ -static inline u32 extract_rlen(u32 composite) -{ - return (composite & 0xffff) * EP_PAGE_SIZE; -} - -/* convert an range composite to a start, in bytes */ -static inline u32 extract_rstart(u32 composite) -{ - return (composite >> 16) * EP_PAGE_SIZE; -} - -/* - * Perform the given operation on the EPROM. Called from user space. The - * user credentials have already been checked. - * - * Return 0 on success, -ERRNO on error - */ -int handle_eprom_command(struct file *fp, const struct hfi1_cmd *cmd) -{ - struct hfi1_devdata *dd; - u32 dev_id; - u32 rlen; /* range length */ - u32 rstart; /* range start */ - int i_minor; - int ret = 0; - - /* - * Map the device file to device data using the relative minor. - * The device file minor number is the unit number + 1. 0 is - * the generic device file - reject it. 
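The composite passed in cmd->len packs two page counts into one u32: pages-start in the top half, pages-length in the bottom, both scaled by the 256-byte controller page. For example 0x00400010 decodes to a start of 0x40 * 256 = 0x4000 bytes and a length of 0x10 * 256 = 0x1000 bytes. In short:

#include <stdint.h>

#define EP_PAGE_SIZE 256

static inline uint32_t rstart_bytes(uint32_t composite)
{
	return (composite >> 16) * EP_PAGE_SIZE;	/* 0x00400010 -> 0x4000 */
}

static inline uint32_t rlen_bytes(uint32_t composite)
{
	return (composite & 0xffff) * EP_PAGE_SIZE;	/* 0x00400010 -> 0x1000 */
}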
- */ - i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE; - if (i_minor <= 0) - return -EINVAL; - dd = hfi1_lookup(i_minor - 1); - if (!dd) { - pr_err("%s: cannot find unit %d!\n", __func__, i_minor); - return -EINVAL; - } - - /* some devices do not have an EPROM */ - if (!dd->eprom_available) - return -EOPNOTSUPP; - - ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT); - if (ret) { - dd_dev_err(dd, "%s: unable to acquire EPROM resource\n", - __func__); - goto done_asic; - } - - dd_dev_info(dd, "%s: cmd: type %d, len 0x%x, addr 0x%016llx\n", - __func__, cmd->type, cmd->len, cmd->addr); - - switch (cmd->type) { - case HFI1_CMD_EP_INFO: - if (cmd->len != sizeof(u32)) { - ret = -ERANGE; - break; - } - dev_id = read_device_id(dd); - /* addr points to a u32 user buffer */ - if (copy_to_user((void __user *)cmd->addr, &dev_id, - sizeof(u32))) - ret = -EFAULT; - break; - - case HFI1_CMD_EP_ERASE_CHIP: - ret = erase_chip(dd); - break; - - case HFI1_CMD_EP_ERASE_RANGE: - rlen = extract_rlen(cmd->len); - rstart = extract_rstart(cmd->len); - ret = erase_range(dd, rstart, rlen); - break; - - case HFI1_CMD_EP_READ_RANGE: - rlen = extract_rlen(cmd->len); - rstart = extract_rstart(cmd->len); - ret = read_length(dd, rstart, rlen, cmd->addr); - break; - - case HFI1_CMD_EP_WRITE_RANGE: - rlen = extract_rlen(cmd->len); - rstart = extract_rstart(cmd->len); - ret = write_length(dd, rstart, rlen, cmd->addr); - break; - - default: - dd_dev_err(dd, "%s: unexpected command %d\n", - __func__, cmd->type); - ret = -EINVAL; - break; - } - - release_chip_resource(dd, CR_EPROM); -done_asic: - return ret; -} - -/* - * Initialize the EPROM handler. - */ -int eprom_init(struct hfi1_devdata *dd) -{ - int ret = 0; - - /* only the discrete chip has an EPROM */ - if (dd->pcidev->device != PCI_DEVICE_ID_INTEL0) - return 0; - - /* - * It is OK if both HFIs reset the EPROM as long as they don't - * do it at the same time. - */ - ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT); - if (ret) { - dd_dev_err(dd, - "%s: unable to acquire EPROM resource, no EPROM support\n", - __func__); - goto done_asic; - } - - /* reset EPROM to be sure it is in a good state */ - - /* set reset */ - write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_EP_RESET_SMASK); - /* clear reset, set speed */ - write_csr(dd, ASIC_EEP_CTL_STAT, - EP_SPEED_FULL << ASIC_EEP_CTL_STAT_RATE_SPI_SHIFT); - - /* wake the device with command "release powerdown NoID" */ - write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_RELEASE_POWERDOWN_NOID); - - dd->eprom_available = true; - release_chip_resource(dd, CR_EPROM); -done_asic: - return ret; -} diff --git a/drivers/target/iscsi/Kconfig b/drivers/target/iscsi/Kconfig index 8345fb457a40..bbdbf9c4e93a 100644 --- a/drivers/target/iscsi/Kconfig +++ b/drivers/target/iscsi/Kconfig @@ -7,3 +7,5 @@ config ISCSI_TARGET help Say M here to enable the ConfigFS enabled Linux-iSCSI.org iSCSI Target Mode Stack. 
+ +source "drivers/target/iscsi/cxgbit/Kconfig" diff --git a/drivers/target/iscsi/Makefile b/drivers/target/iscsi/Makefile index 0f43be9c3453..0f18295e05bc 100644 --- a/drivers/target/iscsi/Makefile +++ b/drivers/target/iscsi/Makefile @@ -18,3 +18,4 @@ iscsi_target_mod-y += iscsi_target_parameters.o \ iscsi_target_transport.o obj-$(CONFIG_ISCSI_TARGET) += iscsi_target_mod.o +obj-$(CONFIG_ISCSI_TARGET_CXGB4) += cxgbit/ diff --git a/drivers/target/iscsi/cxgbit/Kconfig b/drivers/target/iscsi/cxgbit/Kconfig new file mode 100644 index 000000000000..c9b6a3c758b1 --- /dev/null +++ b/drivers/target/iscsi/cxgbit/Kconfig @@ -0,0 +1,7 @@ +config ISCSI_TARGET_CXGB4 + tristate "Chelsio iSCSI target offload driver" + depends on ISCSI_TARGET && CHELSIO_T4 && INET + select CHELSIO_T4_UWIRE + ---help--- + To compile this driver as module, choose M here: the module + will be called cxgbit. diff --git a/drivers/target/iscsi/cxgbit/Makefile b/drivers/target/iscsi/cxgbit/Makefile new file mode 100644 index 000000000000..bd56c073dff6 --- /dev/null +++ b/drivers/target/iscsi/cxgbit/Makefile @@ -0,0 +1,6 @@ +ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4 +ccflags-y += -Idrivers/target/iscsi + +obj-$(CONFIG_ISCSI_TARGET_CXGB4) += cxgbit.o + +cxgbit-y := cxgbit_main.o cxgbit_cm.o cxgbit_target.o cxgbit_ddp.o diff --git a/drivers/target/iscsi/cxgbit/cxgbit.h b/drivers/target/iscsi/cxgbit/cxgbit.h new file mode 100644 index 000000000000..625c7f6de6b2 --- /dev/null +++ b/drivers/target/iscsi/cxgbit/cxgbit.h @@ -0,0 +1,353 @@ +/* + * Copyright (c) 2016 Chelsio Communications, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __CXGBIT_H__ +#define __CXGBIT_H__ + +#include <linux/mutex.h> +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/idr.h> +#include <linux/completion.h> +#include <linux/netdevice.h> +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/inet.h> +#include <linux/wait.h> +#include <linux/kref.h> +#include <linux/timer.h> +#include <linux/io.h> + +#include <asm/byteorder.h> + +#include <net/net_namespace.h> + +#include <target/iscsi/iscsi_transport.h> +#include <iscsi_target_parameters.h> +#include <iscsi_target_login.h> + +#include "t4_regs.h" +#include "t4_msg.h" +#include "cxgb4.h" +#include "cxgb4_uld.h" +#include "l2t.h" +#include "cxgb4_ppm.h" +#include "cxgbit_lro.h" + +extern struct mutex cdev_list_lock; +extern struct list_head cdev_list_head; +struct cxgbit_np; + +struct cxgbit_sock; + +struct cxgbit_cmd { + struct scatterlist sg; + struct cxgbi_task_tag_info ttinfo; + bool setup_ddp; + bool release; +}; + +#define CXGBIT_MAX_ISO_PAYLOAD \ + min_t(u32, MAX_SKB_FRAGS * PAGE_SIZE, 65535) + +struct cxgbit_iso_info { + u8 flags; + u32 mpdu; + u32 len; + u32 burst_len; +}; + +enum cxgbit_skcb_flags { + SKCBF_TX_NEED_HDR = (1 << 0), /* packet needs a header */ + SKCBF_TX_FLAG_COMPL = (1 << 1), /* wr completion flag */ + SKCBF_TX_ISO = (1 << 2), /* iso cpl in tx skb */ + SKCBF_RX_LRO = (1 << 3), /* lro skb */ +}; + +struct cxgbit_skb_rx_cb { + u8 opcode; + void *pdu_cb; + void (*backlog_fn)(struct cxgbit_sock *, struct sk_buff *); +}; + +struct cxgbit_skb_tx_cb { + u8 submode; + u32 extra_len; +}; + +union cxgbit_skb_cb { + struct { + u8 flags; + union { + struct cxgbit_skb_tx_cb tx; + struct cxgbit_skb_rx_cb rx; + }; + }; + + struct { + /* This member must be first. 
*/ + struct l2t_skb_cb l2t; + struct sk_buff *wr_next; + }; +}; + +#define CXGBIT_SKB_CB(skb) ((union cxgbit_skb_cb *)&((skb)->cb[0])) +#define cxgbit_skcb_flags(skb) (CXGBIT_SKB_CB(skb)->flags) +#define cxgbit_skcb_submode(skb) (CXGBIT_SKB_CB(skb)->tx.submode) +#define cxgbit_skcb_tx_wr_next(skb) (CXGBIT_SKB_CB(skb)->wr_next) +#define cxgbit_skcb_tx_extralen(skb) (CXGBIT_SKB_CB(skb)->tx.extra_len) +#define cxgbit_skcb_rx_opcode(skb) (CXGBIT_SKB_CB(skb)->rx.opcode) +#define cxgbit_skcb_rx_backlog_fn(skb) (CXGBIT_SKB_CB(skb)->rx.backlog_fn) +#define cxgbit_rx_pdu_cb(skb) (CXGBIT_SKB_CB(skb)->rx.pdu_cb) + +static inline void *cplhdr(struct sk_buff *skb) +{ + return skb->data; +} + +enum cxgbit_cdev_flags { + CDEV_STATE_UP = 0, + CDEV_ISO_ENABLE, + CDEV_DDP_ENABLE, +}; + +#define NP_INFO_HASH_SIZE 32 + +struct np_info { + struct np_info *next; + struct cxgbit_np *cnp; + unsigned int stid; +}; + +struct cxgbit_list_head { + struct list_head list; + /* device lock */ + spinlock_t lock; +}; + +struct cxgbit_device { + struct list_head list; + struct cxgb4_lld_info lldi; + struct np_info *np_hash_tab[NP_INFO_HASH_SIZE]; + /* np lock */ + spinlock_t np_lock; + u8 selectq[MAX_NPORTS][2]; + struct cxgbit_list_head cskq; + u32 mdsl; + struct kref kref; + unsigned long flags; +}; + +struct cxgbit_wr_wait { + struct completion completion; + int ret; +}; + +enum cxgbit_csk_state { + CSK_STATE_IDLE = 0, + CSK_STATE_LISTEN, + CSK_STATE_CONNECTING, + CSK_STATE_ESTABLISHED, + CSK_STATE_ABORTING, + CSK_STATE_CLOSING, + CSK_STATE_MORIBUND, + CSK_STATE_DEAD, +}; + +enum cxgbit_csk_flags { + CSK_TX_DATA_SENT = 0, + CSK_LOGIN_PDU_DONE, + CSK_LOGIN_DONE, + CSK_DDP_ENABLE, +}; + +struct cxgbit_sock_common { + struct cxgbit_device *cdev; + struct sockaddr_storage local_addr; + struct sockaddr_storage remote_addr; + struct cxgbit_wr_wait wr_wait; + enum cxgbit_csk_state state; + unsigned long flags; +}; + +struct cxgbit_np { + struct cxgbit_sock_common com; + wait_queue_head_t accept_wait; + struct iscsi_np *np; + struct completion accept_comp; + struct list_head np_accept_list; + /* np accept lock */ + spinlock_t np_accept_lock; + struct kref kref; + unsigned int stid; +}; + +struct cxgbit_sock { + struct cxgbit_sock_common com; + struct cxgbit_np *cnp; + struct iscsi_conn *conn; + struct l2t_entry *l2t; + struct dst_entry *dst; + struct list_head list; + struct sk_buff_head rxq; + struct sk_buff_head txq; + struct sk_buff_head ppodq; + struct sk_buff_head backlogq; + struct sk_buff_head skbq; + struct sk_buff *wr_pending_head; + struct sk_buff *wr_pending_tail; + struct sk_buff *skb; + struct sk_buff *lro_skb; + struct sk_buff *lro_hskb; + struct list_head accept_node; + /* socket lock */ + spinlock_t lock; + wait_queue_head_t waitq; + wait_queue_head_t ack_waitq; + bool lock_owner; + struct kref kref; + u32 max_iso_npdu; + u32 wr_cred; + u32 wr_una_cred; + u32 wr_max_cred; + u32 snd_una; + u32 tid; + u32 snd_nxt; + u32 rcv_nxt; + u32 smac_idx; + u32 tx_chan; + u32 mtu; + u32 write_seq; + u32 rx_credits; + u32 snd_win; + u32 rcv_win; + u16 mss; + u16 emss; + u16 plen; + u16 rss_qid; + u16 txq_idx; + u16 ctrlq_idx; + u8 tos; + u8 port_id; +#define CXGBIT_SUBMODE_HCRC 0x1 +#define CXGBIT_SUBMODE_DCRC 0x2 + u8 submode; +#ifdef CONFIG_CHELSIO_T4_DCB + u8 dcb_priority; +#endif + u8 snd_wscale; +}; + +void _cxgbit_free_cdev(struct kref *kref); +void _cxgbit_free_csk(struct kref *kref); +void _cxgbit_free_cnp(struct kref *kref); + +static inline void cxgbit_get_cdev(struct cxgbit_device *cdev) +{ + kref_get(&cdev->kref); 
+} + +static inline void cxgbit_put_cdev(struct cxgbit_device *cdev) +{ + kref_put(&cdev->kref, _cxgbit_free_cdev); +} + +static inline void cxgbit_get_csk(struct cxgbit_sock *csk) +{ + kref_get(&csk->kref); +} + +static inline void cxgbit_put_csk(struct cxgbit_sock *csk) +{ + kref_put(&csk->kref, _cxgbit_free_csk); +} + +static inline void cxgbit_get_cnp(struct cxgbit_np *cnp) +{ + kref_get(&cnp->kref); +} + +static inline void cxgbit_put_cnp(struct cxgbit_np *cnp) +{ + kref_put(&cnp->kref, _cxgbit_free_cnp); +} + +static inline void cxgbit_sock_reset_wr_list(struct cxgbit_sock *csk) +{ + csk->wr_pending_tail = NULL; + csk->wr_pending_head = NULL; +} + +static inline struct sk_buff *cxgbit_sock_peek_wr(const struct cxgbit_sock *csk) +{ + return csk->wr_pending_head; +} + +static inline void +cxgbit_sock_enqueue_wr(struct cxgbit_sock *csk, struct sk_buff *skb) +{ + cxgbit_skcb_tx_wr_next(skb) = NULL; + + skb_get(skb); + + if (!csk->wr_pending_head) + csk->wr_pending_head = skb; + else + cxgbit_skcb_tx_wr_next(csk->wr_pending_tail) = skb; + csk->wr_pending_tail = skb; +} + +static inline struct sk_buff *cxgbit_sock_dequeue_wr(struct cxgbit_sock *csk) +{ + struct sk_buff *skb = csk->wr_pending_head; + + if (likely(skb)) { + csk->wr_pending_head = cxgbit_skcb_tx_wr_next(skb); + cxgbit_skcb_tx_wr_next(skb) = NULL; + } + return skb; +} + +typedef void (*cxgbit_cplhandler_func)(struct cxgbit_device *, + struct sk_buff *); + +int cxgbit_setup_np(struct iscsi_np *, struct sockaddr_storage *); +int cxgbit_setup_conn_digest(struct cxgbit_sock *); +int cxgbit_accept_np(struct iscsi_np *, struct iscsi_conn *); +void cxgbit_free_np(struct iscsi_np *); +void cxgbit_free_conn(struct iscsi_conn *); +extern cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS]; +int cxgbit_get_login_rx(struct iscsi_conn *, struct iscsi_login *); +int cxgbit_rx_data_ack(struct cxgbit_sock *); +int cxgbit_l2t_send(struct cxgbit_device *, struct sk_buff *, + struct l2t_entry *); +void cxgbit_push_tx_frames(struct cxgbit_sock *); +int cxgbit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32); +int cxgbit_xmit_pdu(struct iscsi_conn *, struct iscsi_cmd *, + struct iscsi_datain_req *, const void *, u32); +void cxgbit_get_r2t_ttt(struct iscsi_conn *, struct iscsi_cmd *, + struct iscsi_r2t *); +u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *); +int cxgbit_ofld_send(struct cxgbit_device *, struct sk_buff *); +void cxgbit_get_rx_pdu(struct iscsi_conn *); +int cxgbit_validate_params(struct iscsi_conn *); +struct cxgbit_device *cxgbit_find_device(struct net_device *, u8 *); + +/* DDP */ +int cxgbit_ddp_init(struct cxgbit_device *); +int cxgbit_setup_conn_pgidx(struct cxgbit_sock *, u32); +int cxgbit_reserve_ttt(struct cxgbit_sock *, struct iscsi_cmd *); +void cxgbit_release_cmd(struct iscsi_conn *, struct iscsi_cmd *); + +static inline +struct cxgbi_ppm *cdev2ppm(struct cxgbit_device *cdev) +{ + return (struct cxgbi_ppm *)(*cdev->lldi.iscsi_ppm); +} +#endif /* __CXGBIT_H__ */ diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c new file mode 100644 index 000000000000..0ae0b131abfc --- /dev/null +++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c @@ -0,0 +1,2086 @@ +/* + * Copyright (c) 2016 Chelsio Communications, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/module.h> +#include <linux/list.h> +#include <linux/workqueue.h> +#include <linux/skbuff.h> +#include <linux/timer.h> +#include <linux/notifier.h> +#include <linux/inetdevice.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/if_vlan.h> + +#include <net/neighbour.h> +#include <net/netevent.h> +#include <net/route.h> +#include <net/tcp.h> +#include <net/ip6_route.h> +#include <net/addrconf.h> + +#include "cxgbit.h" +#include "clip_tbl.h" + +static void cxgbit_init_wr_wait(struct cxgbit_wr_wait *wr_waitp) +{ + wr_waitp->ret = 0; + reinit_completion(&wr_waitp->completion); +} + +static void +cxgbit_wake_up(struct cxgbit_wr_wait *wr_waitp, const char *func, u8 ret) +{ + if (ret == CPL_ERR_NONE) + wr_waitp->ret = 0; + else + wr_waitp->ret = -EIO; + + if (wr_waitp->ret) + pr_err("%s: err:%u", func, ret); + + complete(&wr_waitp->completion); +} + +static int +cxgbit_wait_for_reply(struct cxgbit_device *cdev, + struct cxgbit_wr_wait *wr_waitp, u32 tid, u32 timeout, + const char *func) +{ + int ret; + + if (!test_bit(CDEV_STATE_UP, &cdev->flags)) { + wr_waitp->ret = -EIO; + goto out; + } + + ret = wait_for_completion_timeout(&wr_waitp->completion, timeout * HZ); + if (!ret) { + pr_info("%s - Device %s not responding tid %u\n", + func, pci_name(cdev->lldi.pdev), tid); + wr_waitp->ret = -ETIMEDOUT; + } +out: + if (wr_waitp->ret) + pr_info("%s: FW reply %d tid %u\n", + pci_name(cdev->lldi.pdev), wr_waitp->ret, tid); + return wr_waitp->ret; +} + +/* Returns whether a CPL status conveys negative advice. + */ +static int cxgbit_is_neg_adv(unsigned int status) +{ + return status == CPL_ERR_RTX_NEG_ADVICE || + status == CPL_ERR_PERSIST_NEG_ADVICE || + status == CPL_ERR_KEEPALV_NEG_ADVICE; +} + +static int cxgbit_np_hashfn(const struct cxgbit_np *cnp) +{ + return ((unsigned long)cnp >> 10) & (NP_INFO_HASH_SIZE - 1); +} + +static struct np_info * +cxgbit_np_hash_add(struct cxgbit_device *cdev, struct cxgbit_np *cnp, + unsigned int stid) +{ + struct np_info *p = kzalloc(sizeof(*p), GFP_KERNEL); + + if (p) { + int bucket = cxgbit_np_hashfn(cnp); + + p->cnp = cnp; + p->stid = stid; + spin_lock(&cdev->np_lock); + p->next = cdev->np_hash_tab[bucket]; + cdev->np_hash_tab[bucket] = p; + spin_unlock(&cdev->np_lock); + } + + return p; +} + +static int +cxgbit_np_hash_find(struct cxgbit_device *cdev, struct cxgbit_np *cnp) +{ + int stid = -1, bucket = cxgbit_np_hashfn(cnp); + struct np_info *p; + + spin_lock(&cdev->np_lock); + for (p = cdev->np_hash_tab[bucket]; p; p = p->next) { + if (p->cnp == cnp) { + stid = p->stid; + break; + } + } + spin_unlock(&cdev->np_lock); + + return stid; +} + +static int cxgbit_np_hash_del(struct cxgbit_device *cdev, struct cxgbit_np *cnp) +{ + int stid = -1, bucket = cxgbit_np_hashfn(cnp); + struct np_info *p, **prev = &cdev->np_hash_tab[bucket]; + + spin_lock(&cdev->np_lock); + for (p = *prev; p; prev = &p->next, p = p->next) { + if (p->cnp == cnp) { + stid = p->stid; + *prev = p->next; + kfree(p); + break; + } + } + spin_unlock(&cdev->np_lock); + + return stid; +} + +void _cxgbit_free_cnp(struct kref *kref) +{ + struct cxgbit_np *cnp; + + cnp = container_of(kref, struct cxgbit_np, kref); + kfree(cnp); +} + +static int +cxgbit_create_server6(struct cxgbit_device *cdev, unsigned int stid, + struct cxgbit_np *cnp) +{ + struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) + &cnp->com.local_addr; + int addr_type; + int ret; + + pr_debug("%s: dev = %s; stid = %u; sin6_port = %u\n", + __func__, cdev->lldi.ports[0]->name, stid, sin6->sin6_port); + + 
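The listening-endpoint hash above keys on the cnp pointer itself, shifting out the low 10 bits of allocation alignment noise and masking down to the 32 buckets. A quick standalone check of the bucket computation (the pointer values are hypothetical):

#include <stdio.h>

#define NP_INFO_HASH_SIZE 32

/* same bucket computation as cxgbit_np_hashfn() */
static int np_bucket(const void *cnp)
{
	return ((unsigned long)cnp >> 10) & (NP_INFO_HASH_SIZE - 1);
}

int main(void)
{
	/* two hypothetical slab addresses, one kzalloc apart */
	printf("%d %d\n", np_bucket((void *)0x4a1c000UL),
	       np_bucket((void *)0x4a1c400UL));
	return 0;
}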
addr_type = ipv6_addr_type((const struct in6_addr *) + &sin6->sin6_addr); + if (addr_type != IPV6_ADDR_ANY) { + ret = cxgb4_clip_get(cdev->lldi.ports[0], + (const u32 *)&sin6->sin6_addr.s6_addr, 1); + if (ret) { + pr_err("Unable to find clip table entry. laddr %pI6. Error:%d.\n", + sin6->sin6_addr.s6_addr, ret); + return -ENOMEM; + } + } + + cxgbit_get_cnp(cnp); + cxgbit_init_wr_wait(&cnp->com.wr_wait); + + ret = cxgb4_create_server6(cdev->lldi.ports[0], + stid, &sin6->sin6_addr, + sin6->sin6_port, + cdev->lldi.rxq_ids[0]); + if (!ret) + ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait, + 0, 10, __func__); + else if (ret > 0) + ret = net_xmit_errno(ret); + else + cxgbit_put_cnp(cnp); + + if (ret) { + if (ret != -ETIMEDOUT) + cxgb4_clip_release(cdev->lldi.ports[0], + (const u32 *)&sin6->sin6_addr.s6_addr, 1); + + pr_err("create server6 err %d stid %d laddr %pI6 lport %d\n", + ret, stid, sin6->sin6_addr.s6_addr, + ntohs(sin6->sin6_port)); + } + + return ret; +} + +static int +cxgbit_create_server4(struct cxgbit_device *cdev, unsigned int stid, + struct cxgbit_np *cnp) +{ + struct sockaddr_in *sin = (struct sockaddr_in *) + &cnp->com.local_addr; + int ret; + + pr_debug("%s: dev = %s; stid = %u; sin_port = %u\n", + __func__, cdev->lldi.ports[0]->name, stid, sin->sin_port); + + cxgbit_get_cnp(cnp); + cxgbit_init_wr_wait(&cnp->com.wr_wait); + + ret = cxgb4_create_server(cdev->lldi.ports[0], + stid, sin->sin_addr.s_addr, + sin->sin_port, 0, + cdev->lldi.rxq_ids[0]); + if (!ret) + ret = cxgbit_wait_for_reply(cdev, + &cnp->com.wr_wait, + 0, 10, __func__); + else if (ret > 0) + ret = net_xmit_errno(ret); + else + cxgbit_put_cnp(cnp); + + if (ret) + pr_err("create server failed err %d stid %d laddr %pI4 lport %d\n", + ret, stid, &sin->sin_addr, ntohs(sin->sin_port)); + return ret; +} + +struct cxgbit_device *cxgbit_find_device(struct net_device *ndev, u8 *port_id) +{ + struct cxgbit_device *cdev; + u8 i; + + list_for_each_entry(cdev, &cdev_list_head, list) { + struct cxgb4_lld_info *lldi = &cdev->lldi; + + for (i = 0; i < lldi->nports; i++) { + if (lldi->ports[i] == ndev) { + if (port_id) + *port_id = i; + return cdev; + } + } + } + + return NULL; +} + +static struct net_device *cxgbit_get_real_dev(struct net_device *ndev) +{ + if (ndev->priv_flags & IFF_BONDING) { + pr_err("Bond devices are not supported. 
Interface:%s\n", + ndev->name); + return NULL; + } + + if (is_vlan_dev(ndev)) + return vlan_dev_real_dev(ndev); + + return ndev; +} + +static struct net_device *cxgbit_ipv4_netdev(__be32 saddr) +{ + struct net_device *ndev; + + ndev = __ip_dev_find(&init_net, saddr, false); + if (!ndev) + return NULL; + + return cxgbit_get_real_dev(ndev); +} + +static struct net_device *cxgbit_ipv6_netdev(struct in6_addr *addr6) +{ + struct net_device *ndev = NULL; + bool found = false; + + if (IS_ENABLED(CONFIG_IPV6)) { + for_each_netdev_rcu(&init_net, ndev) + if (ipv6_chk_addr(&init_net, addr6, ndev, 1)) { + found = true; + break; + } + } + if (!found) + return NULL; + return cxgbit_get_real_dev(ndev); +} + +static struct cxgbit_device *cxgbit_find_np_cdev(struct cxgbit_np *cnp) +{ + struct sockaddr_storage *sockaddr = &cnp->com.local_addr; + int ss_family = sockaddr->ss_family; + struct net_device *ndev = NULL; + struct cxgbit_device *cdev = NULL; + + rcu_read_lock(); + if (ss_family == AF_INET) { + struct sockaddr_in *sin; + + sin = (struct sockaddr_in *)sockaddr; + ndev = cxgbit_ipv4_netdev(sin->sin_addr.s_addr); + } else if (ss_family == AF_INET6) { + struct sockaddr_in6 *sin6; + + sin6 = (struct sockaddr_in6 *)sockaddr; + ndev = cxgbit_ipv6_netdev(&sin6->sin6_addr); + } + if (!ndev) + goto out; + + cdev = cxgbit_find_device(ndev, NULL); +out: + rcu_read_unlock(); + return cdev; +} + +static bool cxgbit_inaddr_any(struct cxgbit_np *cnp) +{ + struct sockaddr_storage *sockaddr = &cnp->com.local_addr; + int ss_family = sockaddr->ss_family; + int addr_type; + + if (ss_family == AF_INET) { + struct sockaddr_in *sin; + + sin = (struct sockaddr_in *)sockaddr; + if (sin->sin_addr.s_addr == htonl(INADDR_ANY)) + return true; + } else if (ss_family == AF_INET6) { + struct sockaddr_in6 *sin6; + + sin6 = (struct sockaddr_in6 *)sockaddr; + addr_type = ipv6_addr_type((const struct in6_addr *) + &sin6->sin6_addr); + if (addr_type == IPV6_ADDR_ANY) + return true; + } + return false; +} + +static int +__cxgbit_setup_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp) +{ + int stid, ret; + int ss_family = cnp->com.local_addr.ss_family; + + if (!test_bit(CDEV_STATE_UP, &cdev->flags)) + return -EINVAL; + + stid = cxgb4_alloc_stid(cdev->lldi.tids, ss_family, cnp); + if (stid < 0) + return -EINVAL; + + if (!cxgbit_np_hash_add(cdev, cnp, stid)) { + cxgb4_free_stid(cdev->lldi.tids, stid, ss_family); + return -EINVAL; + } + + if (ss_family == AF_INET) + ret = cxgbit_create_server4(cdev, stid, cnp); + else + ret = cxgbit_create_server6(cdev, stid, cnp); + + if (ret) { + if (ret != -ETIMEDOUT) + cxgb4_free_stid(cdev->lldi.tids, stid, + ss_family); + cxgbit_np_hash_del(cdev, cnp); + return ret; + } + return ret; +} + +static int cxgbit_setup_cdev_np(struct cxgbit_np *cnp) +{ + struct cxgbit_device *cdev; + int ret = -1; + + mutex_lock(&cdev_list_lock); + cdev = cxgbit_find_np_cdev(cnp); + if (!cdev) + goto out; + + if (cxgbit_np_hash_find(cdev, cnp) >= 0) + goto out; + + if (__cxgbit_setup_cdev_np(cdev, cnp)) + goto out; + + cnp->com.cdev = cdev; + ret = 0; +out: + mutex_unlock(&cdev_list_lock); + return ret; +} + +static int cxgbit_setup_all_np(struct cxgbit_np *cnp) +{ + struct cxgbit_device *cdev; + int ret; + u32 count = 0; + + mutex_lock(&cdev_list_lock); + list_for_each_entry(cdev, &cdev_list_head, list) { + if (cxgbit_np_hash_find(cdev, cnp) >= 0) { + mutex_unlock(&cdev_list_lock); + return -1; + } + } + + list_for_each_entry(cdev, &cdev_list_head, list) { + ret = __cxgbit_setup_cdev_np(cdev, cnp); + if (ret == 
-ETIMEDOUT) + break; + if (ret != 0) + continue; + count++; + } + mutex_unlock(&cdev_list_lock); + + return count ? 0 : -1; +} + +int cxgbit_setup_np(struct iscsi_np *np, struct sockaddr_storage *ksockaddr) +{ + struct cxgbit_np *cnp; + int ret; + + if ((ksockaddr->ss_family != AF_INET) && + (ksockaddr->ss_family != AF_INET6)) + return -EINVAL; + + cnp = kzalloc(sizeof(*cnp), GFP_KERNEL); + if (!cnp) + return -ENOMEM; + + init_waitqueue_head(&cnp->accept_wait); + init_completion(&cnp->com.wr_wait.completion); + init_completion(&cnp->accept_comp); + INIT_LIST_HEAD(&cnp->np_accept_list); + spin_lock_init(&cnp->np_accept_lock); + kref_init(&cnp->kref); + memcpy(&np->np_sockaddr, ksockaddr, + sizeof(struct sockaddr_storage)); + memcpy(&cnp->com.local_addr, &np->np_sockaddr, + sizeof(cnp->com.local_addr)); + + cnp->np = np; + cnp->com.cdev = NULL; + + if (cxgbit_inaddr_any(cnp)) + ret = cxgbit_setup_all_np(cnp); + else + ret = cxgbit_setup_cdev_np(cnp); + + if (ret) { + cxgbit_put_cnp(cnp); + return -EINVAL; + } + + np->np_context = cnp; + cnp->com.state = CSK_STATE_LISTEN; + return 0; +} + +static void +cxgbit_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn, + struct cxgbit_sock *csk) +{ + conn->login_family = np->np_sockaddr.ss_family; + conn->login_sockaddr = csk->com.remote_addr; + conn->local_sockaddr = csk->com.local_addr; +} + +int cxgbit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn) +{ + struct cxgbit_np *cnp = np->np_context; + struct cxgbit_sock *csk; + int ret = 0; + +accept_wait: + ret = wait_for_completion_interruptible(&cnp->accept_comp); + if (ret) + return -ENODEV; + + spin_lock_bh(&np->np_thread_lock); + if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) { + spin_unlock_bh(&np->np_thread_lock); + /** + * No point in stalling here when np_thread + * is in state RESET/SHUTDOWN/EXIT - bail + **/ + return -ENODEV; + } + spin_unlock_bh(&np->np_thread_lock); + + spin_lock_bh(&cnp->np_accept_lock); + if (list_empty(&cnp->np_accept_list)) { + spin_unlock_bh(&cnp->np_accept_lock); + goto accept_wait; + } + + csk = list_first_entry(&cnp->np_accept_list, + struct cxgbit_sock, + accept_node); + + list_del_init(&csk->accept_node); + spin_unlock_bh(&cnp->np_accept_lock); + conn->context = csk; + csk->conn = conn; + + cxgbit_set_conn_info(np, conn, csk); + return 0; +} + +static int +__cxgbit_free_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp) +{ + int stid, ret; + bool ipv6 = false; + + stid = cxgbit_np_hash_del(cdev, cnp); + if (stid < 0) + return -EINVAL; + if (!test_bit(CDEV_STATE_UP, &cdev->flags)) + return -EINVAL; + + if (cnp->np->np_sockaddr.ss_family == AF_INET6) + ipv6 = true; + + cxgbit_get_cnp(cnp); + cxgbit_init_wr_wait(&cnp->com.wr_wait); + ret = cxgb4_remove_server(cdev->lldi.ports[0], stid, + cdev->lldi.rxq_ids[0], ipv6); + + if (ret > 0) + ret = net_xmit_errno(ret); + + if (ret) { + cxgbit_put_cnp(cnp); + return ret; + } + + ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait, + 0, 10, __func__); + if (ret == -ETIMEDOUT) + return ret; + + if (ipv6 && cnp->com.cdev) { + struct sockaddr_in6 *sin6; + + sin6 = (struct sockaddr_in6 *)&cnp->com.local_addr; + cxgb4_clip_release(cdev->lldi.ports[0], + (const u32 *)&sin6->sin6_addr.s6_addr, + 1); + } + + cxgb4_free_stid(cdev->lldi.tids, stid, + cnp->com.local_addr.ss_family); + return 0; +} + +static void cxgbit_free_all_np(struct cxgbit_np *cnp) +{ + struct cxgbit_device *cdev; + int ret; + + mutex_lock(&cdev_list_lock); + list_for_each_entry(cdev, &cdev_list_head, list) { + ret = 
__cxgbit_free_cdev_np(cdev, cnp); + if (ret == -ETIMEDOUT) + break; + } + mutex_unlock(&cdev_list_lock); +} + +static void cxgbit_free_cdev_np(struct cxgbit_np *cnp) +{ + struct cxgbit_device *cdev; + bool found = false; + + mutex_lock(&cdev_list_lock); + list_for_each_entry(cdev, &cdev_list_head, list) { + if (cdev == cnp->com.cdev) { + found = true; + break; + } + } + if (!found) + goto out; + + __cxgbit_free_cdev_np(cdev, cnp); +out: + mutex_unlock(&cdev_list_lock); +} + +void cxgbit_free_np(struct iscsi_np *np) +{ + struct cxgbit_np *cnp = np->np_context; + + cnp->com.state = CSK_STATE_DEAD; + if (cnp->com.cdev) + cxgbit_free_cdev_np(cnp); + else + cxgbit_free_all_np(cnp); + + np->np_context = NULL; + cxgbit_put_cnp(cnp); +} + +static void cxgbit_send_halfclose(struct cxgbit_sock *csk) +{ + struct sk_buff *skb; + struct cpl_close_con_req *req; + unsigned int len = roundup(sizeof(struct cpl_close_con_req), 16); + + skb = alloc_skb(len, GFP_ATOMIC); + if (!skb) + return; + + req = (struct cpl_close_con_req *)__skb_put(skb, len); + memset(req, 0, len); + + set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx); + INIT_TP_WR(req, csk->tid); + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, + csk->tid)); + req->rsvd = 0; + + cxgbit_skcb_flags(skb) |= SKCBF_TX_FLAG_COMPL; + __skb_queue_tail(&csk->txq, skb); + cxgbit_push_tx_frames(csk); +} + +static void cxgbit_arp_failure_discard(void *handle, struct sk_buff *skb) +{ + pr_debug("%s cxgbit_device %p\n", __func__, handle); + kfree_skb(skb); +} + +static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb) +{ + struct cxgbit_device *cdev = handle; + struct cpl_abort_req *req = cplhdr(skb); + + pr_debug("%s cdev %p\n", __func__, cdev); + req->cmd = CPL_ABORT_NO_RST; + cxgbit_ofld_send(cdev, skb); +} + +static int cxgbit_send_abort_req(struct cxgbit_sock *csk) +{ + struct cpl_abort_req *req; + unsigned int len = roundup(sizeof(*req), 16); + struct sk_buff *skb; + + pr_debug("%s: csk %p tid %u; state %d\n", + __func__, csk, csk->tid, csk->com.state); + + __skb_queue_purge(&csk->txq); + + if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags)) + cxgbit_send_tx_flowc_wr(csk); + + skb = __skb_dequeue(&csk->skbq); + req = (struct cpl_abort_req *)__skb_put(skb, len); + memset(req, 0, len); + + set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx); + t4_set_arp_err_handler(skb, csk->com.cdev, cxgbit_abort_arp_failure); + INIT_TP_WR(req, csk->tid); + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, + csk->tid)); + req->cmd = CPL_ABORT_SEND_RST; + return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t); +} + +void cxgbit_free_conn(struct iscsi_conn *conn) +{ + struct cxgbit_sock *csk = conn->context; + bool release = false; + + pr_debug("%s: state %d\n", + __func__, csk->com.state); + + spin_lock_bh(&csk->lock); + switch (csk->com.state) { + case CSK_STATE_ESTABLISHED: + if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) { + csk->com.state = CSK_STATE_CLOSING; + cxgbit_send_halfclose(csk); + } else { + csk->com.state = CSK_STATE_ABORTING; + cxgbit_send_abort_req(csk); + } + break; + case CSK_STATE_CLOSING: + csk->com.state = CSK_STATE_MORIBUND; + cxgbit_send_halfclose(csk); + break; + case CSK_STATE_DEAD: + release = true; + break; + default: + pr_err("%s: csk %p; state %d\n", + __func__, csk, csk->com.state); + } + spin_unlock_bh(&csk->lock); + + if (release) + cxgbit_put_csk(csk); +} + +static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt) +{ + csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] - + 
((csk->com.remote_addr.ss_family == AF_INET) ? + sizeof(struct iphdr) : sizeof(struct ipv6hdr)) - + sizeof(struct tcphdr); + csk->mss = csk->emss; + if (TCPOPT_TSTAMP_G(opt)) + csk->emss -= round_up(TCPOLEN_TIMESTAMP, 4); + if (csk->emss < 128) + csk->emss = 128; + if (csk->emss & 7) + pr_info("Warning: misaligned mtu idx %u mss %u emss=%u\n", + TCPOPT_MSS_G(opt), csk->mss, csk->emss); + pr_debug("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt), + csk->mss, csk->emss); +} + +static void cxgbit_free_skb(struct cxgbit_sock *csk) +{ + struct sk_buff *skb; + + __skb_queue_purge(&csk->txq); + __skb_queue_purge(&csk->rxq); + __skb_queue_purge(&csk->backlogq); + __skb_queue_purge(&csk->ppodq); + __skb_queue_purge(&csk->skbq); + + while ((skb = cxgbit_sock_dequeue_wr(csk))) + kfree_skb(skb); + + __kfree_skb(csk->lro_hskb); +} + +void _cxgbit_free_csk(struct kref *kref) +{ + struct cxgbit_sock *csk; + struct cxgbit_device *cdev; + + csk = container_of(kref, struct cxgbit_sock, kref); + + pr_debug("%s csk %p state %d\n", __func__, csk, csk->com.state); + + if (csk->com.local_addr.ss_family == AF_INET6) { + struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) + &csk->com.local_addr; + cxgb4_clip_release(csk->com.cdev->lldi.ports[0], + (const u32 *) + &sin6->sin6_addr.s6_addr, 1); + } + + cxgb4_remove_tid(csk->com.cdev->lldi.tids, 0, csk->tid); + dst_release(csk->dst); + cxgb4_l2t_release(csk->l2t); + + cdev = csk->com.cdev; + spin_lock_bh(&cdev->cskq.lock); + list_del(&csk->list); + spin_unlock_bh(&cdev->cskq.lock); + + cxgbit_free_skb(csk); + cxgbit_put_cdev(cdev); + + kfree(csk); +} + +static void +cxgbit_get_tuple_info(struct cpl_pass_accept_req *req, int *iptype, + __u8 *local_ip, __u8 *peer_ip, __be16 *local_port, + __be16 *peer_port) +{ + u32 eth_len = ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len)); + u32 ip_len = IP_HDR_LEN_G(be32_to_cpu(req->hdr_len)); + struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len); + struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len); + struct tcphdr *tcp = (struct tcphdr *) + ((u8 *)(req + 1) + eth_len + ip_len); + + if (ip->version == 4) { + pr_debug("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", + __func__, + ntohl(ip->saddr), ntohl(ip->daddr), + ntohs(tcp->source), + ntohs(tcp->dest)); + *iptype = 4; + memcpy(peer_ip, &ip->saddr, 4); + memcpy(local_ip, &ip->daddr, 4); + } else { + pr_debug("%s saddr %pI6 daddr %pI6 sport %u dport %u\n", + __func__, + ip6->saddr.s6_addr, ip6->daddr.s6_addr, + ntohs(tcp->source), + ntohs(tcp->dest)); + *iptype = 6; + memcpy(peer_ip, ip6->saddr.s6_addr, 16); + memcpy(local_ip, ip6->daddr.s6_addr, 16); + } + + *peer_port = tcp->source; + *local_port = tcp->dest; +} + +static int +cxgbit_our_interface(struct cxgbit_device *cdev, struct net_device *egress_dev) +{ + u8 i; + + egress_dev = cxgbit_get_real_dev(egress_dev); + for (i = 0; i < cdev->lldi.nports; i++) + if (cdev->lldi.ports[i] == egress_dev) + return 1; + return 0; +} + +static struct dst_entry * +cxgbit_find_route6(struct cxgbit_device *cdev, __u8 *local_ip, __u8 *peer_ip, + __be16 local_port, __be16 peer_port, u8 tos, + __u32 sin6_scope_id) +{ + struct dst_entry *dst = NULL; + + if (IS_ENABLED(CONFIG_IPV6)) { + struct flowi6 fl6; + + memset(&fl6, 0, sizeof(fl6)); + memcpy(&fl6.daddr, peer_ip, 16); + memcpy(&fl6.saddr, local_ip, 16); + if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL) + fl6.flowi6_oif = sin6_scope_id; + dst = ip6_route_output(&init_net, NULL, &fl6); + if (!dst) + goto out; + if (!cxgbit_our_interface(cdev, 
ip6_dst_idev(dst)->dev) && + !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) { + dst_release(dst); + dst = NULL; + } + } +out: + return dst; +} + +static struct dst_entry * +cxgbit_find_route(struct cxgbit_device *cdev, __be32 local_ip, __be32 peer_ip, + __be16 local_port, __be16 peer_port, u8 tos) +{ + struct rtable *rt; + struct flowi4 fl4; + struct neighbour *n; + + rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, + local_ip, + peer_port, local_port, IPPROTO_TCP, + tos, 0); + if (IS_ERR(rt)) + return NULL; + n = dst_neigh_lookup(&rt->dst, &peer_ip); + if (!n) + return NULL; + if (!cxgbit_our_interface(cdev, n->dev) && + !(n->dev->flags & IFF_LOOPBACK)) { + neigh_release(n); + dst_release(&rt->dst); + return NULL; + } + neigh_release(n); + return &rt->dst; +} + +static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi) +{ + unsigned int linkspeed; + u8 scale; + + linkspeed = pi->link_cfg.speed; + scale = linkspeed / SPEED_10000; + +#define CXGBIT_10G_RCV_WIN (256 * 1024) + csk->rcv_win = CXGBIT_10G_RCV_WIN; + if (scale) + csk->rcv_win *= scale; + +#define CXGBIT_10G_SND_WIN (256 * 1024) + csk->snd_win = CXGBIT_10G_SND_WIN; + if (scale) + csk->snd_win *= scale; + + pr_debug("%s snd_win %d rcv_win %d\n", + __func__, csk->snd_win, csk->rcv_win); +} + +#ifdef CONFIG_CHELSIO_T4_DCB +static u8 cxgbit_get_iscsi_dcb_state(struct net_device *ndev) +{ + return ndev->dcbnl_ops->getstate(ndev); +} + +static int cxgbit_select_priority(int pri_mask) +{ + if (!pri_mask) + return 0; + + return (ffs(pri_mask) - 1); +} + +static u8 cxgbit_get_iscsi_dcb_priority(struct net_device *ndev, u16 local_port) +{ + int ret; + u8 caps; + + struct dcb_app iscsi_dcb_app = { + .protocol = local_port + }; + + ret = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps); + + if (ret) + return 0; + + if (caps & DCB_CAP_DCBX_VER_IEEE) { + iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY; + + ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app); + + } else if (caps & DCB_CAP_DCBX_VER_CEE) { + iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM; + + ret = dcb_getapp(ndev, &iscsi_dcb_app); + } + + pr_info("iSCSI priority is set to %u\n", cxgbit_select_priority(ret)); + + return cxgbit_select_priority(ret); +} +#endif + +static int +cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip, + u16 local_port, struct dst_entry *dst, + struct cxgbit_device *cdev) +{ + struct neighbour *n; + int ret, step; + struct net_device *ndev; + u16 rxq_idx, port_id; +#ifdef CONFIG_CHELSIO_T4_DCB + u8 priority = 0; +#endif + + n = dst_neigh_lookup(dst, peer_ip); + if (!n) + return -ENODEV; + + rcu_read_lock(); + ret = -ENOMEM; + if (n->dev->flags & IFF_LOOPBACK) { + if (iptype == 4) + ndev = cxgbit_ipv4_netdev(*(__be32 *)peer_ip); + else if (IS_ENABLED(CONFIG_IPV6)) + ndev = cxgbit_ipv6_netdev((struct in6_addr *)peer_ip); + else + ndev = NULL; + + if (!ndev) { + ret = -ENODEV; + goto out; + } + + csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, + n, ndev, 0); + if (!csk->l2t) + goto out; + csk->mtu = ndev->mtu; + csk->tx_chan = cxgb4_port_chan(ndev); + csk->smac_idx = (cxgb4_port_viid(ndev) & 0x7F) << 1; + step = cdev->lldi.ntxq / + cdev->lldi.nchan; + csk->txq_idx = cxgb4_port_idx(ndev) * step; + step = cdev->lldi.nrxq / + cdev->lldi.nchan; + csk->ctrlq_idx = cxgb4_port_idx(ndev); + csk->rss_qid = cdev->lldi.rxq_ids[ + cxgb4_port_idx(ndev) * step]; + csk->port_id = cxgb4_port_idx(ndev); + cxgbit_set_tcp_window(csk, + (struct port_info *)netdev_priv(ndev)); + } else { + ndev = 
cxgbit_get_real_dev(n->dev); + if (!ndev) { + ret = -ENODEV; + goto out; + } + +#ifdef CONFIG_CHELSIO_T4_DCB + if (cxgbit_get_iscsi_dcb_state(ndev)) + priority = cxgbit_get_iscsi_dcb_priority(ndev, + local_port); + + csk->dcb_priority = priority; + + csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, priority); +#else + csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, 0); +#endif + if (!csk->l2t) + goto out; + port_id = cxgb4_port_idx(ndev); + csk->mtu = dst_mtu(dst); + csk->tx_chan = cxgb4_port_chan(ndev); + csk->smac_idx = (cxgb4_port_viid(ndev) & 0x7F) << 1; + step = cdev->lldi.ntxq / + cdev->lldi.nports; + csk->txq_idx = (port_id * step) + + (cdev->selectq[port_id][0]++ % step); + csk->ctrlq_idx = cxgb4_port_idx(ndev); + step = cdev->lldi.nrxq / + cdev->lldi.nports; + rxq_idx = (port_id * step) + + (cdev->selectq[port_id][1]++ % step); + csk->rss_qid = cdev->lldi.rxq_ids[rxq_idx]; + csk->port_id = port_id; + cxgbit_set_tcp_window(csk, + (struct port_info *)netdev_priv(ndev)); + } + ret = 0; +out: + rcu_read_unlock(); + neigh_release(n); + return ret; +} + +int cxgbit_ofld_send(struct cxgbit_device *cdev, struct sk_buff *skb) +{ + int ret = 0; + + if (!test_bit(CDEV_STATE_UP, &cdev->flags)) { + kfree_skb(skb); + pr_err("%s - device not up - dropping\n", __func__); + return -EIO; + } + + ret = cxgb4_ofld_send(cdev->lldi.ports[0], skb); + if (ret < 0) + kfree_skb(skb); + return ret < 0 ? ret : 0; +} + +static void cxgbit_release_tid(struct cxgbit_device *cdev, u32 tid) +{ + struct cpl_tid_release *req; + unsigned int len = roundup(sizeof(*req), 16); + struct sk_buff *skb; + + skb = alloc_skb(len, GFP_ATOMIC); + if (!skb) + return; + + req = (struct cpl_tid_release *)__skb_put(skb, len); + memset(req, 0, len); + + INIT_TP_WR(req, tid); + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID( + CPL_TID_RELEASE, tid)); + set_wr_txq(skb, CPL_PRIORITY_SETUP, 0); + cxgbit_ofld_send(cdev, skb); +} + +int +cxgbit_l2t_send(struct cxgbit_device *cdev, struct sk_buff *skb, + struct l2t_entry *l2e) +{ + int ret = 0; + + if (!test_bit(CDEV_STATE_UP, &cdev->flags)) { + kfree_skb(skb); + pr_err("%s - device not up - dropping\n", __func__); + return -EIO; + } + + ret = cxgb4_l2t_send(cdev->lldi.ports[0], skb, l2e); + if (ret < 0) + kfree_skb(skb); + return ret < 0 ? ret : 0; +} + +static void +cxgbit_best_mtu(const unsigned short *mtus, unsigned short mtu, + unsigned int *idx, int use_ts, int ipv6) +{ + unsigned short hdr_size = (ipv6 ? sizeof(struct ipv6hdr) : + sizeof(struct iphdr)) + + sizeof(struct tcphdr) + + (use_ts ? round_up(TCPOLEN_TIMESTAMP, + 4) : 0); + unsigned short data_size = mtu - hdr_size; + + cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx); +} + +static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb) +{ + if (csk->com.state != CSK_STATE_ESTABLISHED) { + __kfree_skb(skb); + return; + } + + cxgbit_ofld_send(csk->com.cdev, skb); +} + +/* + * CPL connection rx data ack: host -> + * Send RX credits through an RX_DATA_ACK CPL message. + * Returns the number of credits sent. 
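+ * Note: as the code below stands, the return value is 0 on success and + * -1 on allocation failure; the credit count itself is taken from + * csk->rx_credits, which is zeroed once the RX_DATA_ACK is queued.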
+ */ +int cxgbit_rx_data_ack(struct cxgbit_sock *csk) +{ + struct sk_buff *skb; + struct cpl_rx_data_ack *req; + unsigned int len = roundup(sizeof(*req), 16); + + skb = alloc_skb(len, GFP_KERNEL); + if (!skb) + return -1; + + req = (struct cpl_rx_data_ack *)__skb_put(skb, len); + memset(req, 0, len); + + set_wr_txq(skb, CPL_PRIORITY_ACK, csk->ctrlq_idx); + INIT_TP_WR(req, csk->tid); + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, + csk->tid)); + req->credit_dack = cpu_to_be32(RX_DACK_CHANGE_F | RX_DACK_MODE_V(1) | + RX_CREDITS_V(csk->rx_credits)); + + csk->rx_credits = 0; + + spin_lock_bh(&csk->lock); + if (csk->lock_owner) { + cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_send_rx_credits; + __skb_queue_tail(&csk->backlogq, skb); + spin_unlock_bh(&csk->lock); + return 0; + } + + cxgbit_send_rx_credits(csk, skb); + spin_unlock_bh(&csk->lock); + + return 0; +} + +#define FLOWC_WR_NPARAMS_MIN 9 +#define FLOWC_WR_NPARAMS_MAX 11 +static int cxgbit_alloc_csk_skb(struct cxgbit_sock *csk) +{ + struct sk_buff *skb; + u32 len, flowclen; + u8 i; + + flowclen = offsetof(struct fw_flowc_wr, + mnemval[FLOWC_WR_NPARAMS_MAX]); + + len = max_t(u32, sizeof(struct cpl_abort_req), + sizeof(struct cpl_abort_rpl)); + + len = max(len, flowclen); + len = roundup(len, 16); + + for (i = 0; i < 3; i++) { + skb = alloc_skb(len, GFP_ATOMIC); + if (!skb) + goto out; + __skb_queue_tail(&csk->skbq, skb); + } + + skb = alloc_skb(LRO_SKB_MIN_HEADROOM, GFP_ATOMIC); + if (!skb) + goto out; + + memset(skb->data, 0, LRO_SKB_MIN_HEADROOM); + csk->lro_hskb = skb; + + return 0; +out: + __skb_queue_purge(&csk->skbq); + return -ENOMEM; +} + +static u32 cxgbit_compute_wscale(u32 win) +{ + u32 wscale = 0; + + while (wscale < 14 && (65535 << wscale) < win) + wscale++; + return wscale; +} + +static void +cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req) +{ + struct sk_buff *skb; + const struct tcphdr *tcph; + struct cpl_t5_pass_accept_rpl *rpl5; + unsigned int len = roundup(sizeof(*rpl5), 16); + unsigned int mtu_idx; + u64 opt0; + u32 opt2, hlen; + u32 wscale; + u32 win; + + pr_debug("%s csk %p tid %u\n", __func__, csk, csk->tid); + + skb = alloc_skb(len, GFP_ATOMIC); + if (!skb) { + cxgbit_put_csk(csk); + return; + } + + rpl5 = (struct cpl_t5_pass_accept_rpl *)__skb_put(skb, len); + memset(rpl5, 0, len); + + INIT_TP_WR(rpl5, csk->tid); + OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, + csk->tid)); + cxgbit_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx, + req->tcpopt.tstamp, + (csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1); + wscale = cxgbit_compute_wscale(csk->rcv_win); + /* + * Specify the largest window that will fit in opt0. The + * remainder will be specified in the rx_data_ack. 
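+ * The RCV_BUFSIZ field is only RCV_BUFSIZ_M wide and counts 1KB units, + * hence the shift by 10 and the clamp that follow; pass_establish later + * hands back any leftover (rcv_win - (RCV_BUFSIZ_M << 10)) as rx_credits.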
+ */ + win = csk->rcv_win >> 10; + if (win > RCV_BUFSIZ_M) + win = RCV_BUFSIZ_M; + opt0 = TCAM_BYPASS_F | + WND_SCALE_V(wscale) | + MSS_IDX_V(mtu_idx) | + L2T_IDX_V(csk->l2t->idx) | + TX_CHAN_V(csk->tx_chan) | + SMAC_SEL_V(csk->smac_idx) | + DSCP_V(csk->tos >> 2) | + ULP_MODE_V(ULP_MODE_ISCSI) | + RCV_BUFSIZ_V(win); + + opt2 = RX_CHANNEL_V(0) | + RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid); + + if (req->tcpopt.tstamp) + opt2 |= TSTAMPS_EN_F; + if (req->tcpopt.sack) + opt2 |= SACK_EN_F; + if (wscale) + opt2 |= WND_SCALE_EN_F; + + hlen = ntohl(req->hdr_len); + tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) + + IP_HDR_LEN_G(hlen); + + if (tcph->ece && tcph->cwr) + opt2 |= CCTRL_ECN_V(1); + + opt2 |= RX_COALESCE_V(3); + opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO); + + opt2 |= T5_ISS_F; + rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1); + + opt2 |= T5_OPT_2_VALID_F; + + rpl5->opt0 = cpu_to_be64(opt0); + rpl5->opt2 = cpu_to_be32(opt2); + set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx); + t4_set_arp_err_handler(skb, NULL, cxgbit_arp_failure_discard); + cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t); +} + +static void +cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb) +{ + struct cxgbit_sock *csk = NULL; + struct cxgbit_np *cnp; + struct cpl_pass_accept_req *req = cplhdr(skb); + unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid)); + struct tid_info *t = cdev->lldi.tids; + unsigned int tid = GET_TID(req); + u16 peer_mss = ntohs(req->tcpopt.mss); + unsigned short hdrs; + + struct dst_entry *dst; + __u8 local_ip[16], peer_ip[16]; + __be16 local_port, peer_port; + int ret; + int iptype; + + pr_debug("%s: cdev = %p; stid = %u; tid = %u\n", + __func__, cdev, stid, tid); + + cnp = lookup_stid(t, stid); + if (!cnp) { + pr_err("%s connect request on invalid stid %d\n", + __func__, stid); + goto rel_skb; + } + + if (cnp->com.state != CSK_STATE_LISTEN) { + pr_err("%s - listening parent not in CSK_STATE_LISTEN\n", + __func__); + goto reject; + } + + csk = lookup_tid(t, tid); + if (csk) { + pr_err("%s csk not null tid %u\n", + __func__, tid); + goto rel_skb; + } + + cxgbit_get_tuple_info(req, &iptype, local_ip, peer_ip, + &local_port, &peer_port); + + /* Find output route */ + if (iptype == 4) { + pr_debug("%s parent sock %p tid %u laddr %pI4 raddr %pI4 " + "lport %d rport %d peer_mss %d\n" + , __func__, cnp, tid, + local_ip, peer_ip, ntohs(local_port), + ntohs(peer_port), peer_mss); + dst = cxgbit_find_route(cdev, *(__be32 *)local_ip, + *(__be32 *)peer_ip, + local_port, peer_port, + PASS_OPEN_TOS_G(ntohl(req->tos_stid))); + } else { + pr_debug("%s parent sock %p tid %u laddr %pI6 raddr %pI6 " + "lport %d rport %d peer_mss %d\n" + , __func__, cnp, tid, + local_ip, peer_ip, ntohs(local_port), + ntohs(peer_port), peer_mss); + dst = cxgbit_find_route6(cdev, local_ip, peer_ip, + local_port, peer_port, + PASS_OPEN_TOS_G(ntohl(req->tos_stid)), + ((struct sockaddr_in6 *) + &cnp->com.local_addr)->sin6_scope_id); + } + if (!dst) { + pr_err("%s - failed to find dst entry!\n", + __func__); + goto reject; + } + + csk = kzalloc(sizeof(*csk), GFP_ATOMIC); + if (!csk) { + dst_release(dst); + goto rel_skb; + } + + ret = cxgbit_offload_init(csk, iptype, peer_ip, ntohs(local_port), + dst, cdev); + if (ret) { + pr_err("%s - failed to allocate l2t entry!\n", + __func__); + dst_release(dst); + kfree(csk); + goto reject; + } + + kref_init(&csk->kref); + init_completion(&csk->com.wr_wait.completion); + + INIT_LIST_HEAD(&csk->accept_node); + + hdrs = (iptype == 4 ? 
sizeof(struct iphdr) : sizeof(struct ipv6hdr)) + + sizeof(struct tcphdr) + (req->tcpopt.tstamp ? 12 : 0); + if (peer_mss && csk->mtu > (peer_mss + hdrs)) + csk->mtu = peer_mss + hdrs; + + csk->com.state = CSK_STATE_CONNECTING; + csk->com.cdev = cdev; + csk->cnp = cnp; + csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid)); + csk->dst = dst; + csk->tid = tid; + csk->wr_cred = cdev->lldi.wr_cred - + DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16); + csk->wr_max_cred = csk->wr_cred; + csk->wr_una_cred = 0; + + if (iptype == 4) { + struct sockaddr_in *sin = (struct sockaddr_in *) + &csk->com.local_addr; + sin->sin_family = AF_INET; + sin->sin_port = local_port; + sin->sin_addr.s_addr = *(__be32 *)local_ip; + + sin = (struct sockaddr_in *)&csk->com.remote_addr; + sin->sin_family = AF_INET; + sin->sin_port = peer_port; + sin->sin_addr.s_addr = *(__be32 *)peer_ip; + } else { + struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) + &csk->com.local_addr; + + sin6->sin6_family = PF_INET6; + sin6->sin6_port = local_port; + memcpy(sin6->sin6_addr.s6_addr, local_ip, 16); + cxgb4_clip_get(cdev->lldi.ports[0], + (const u32 *)&sin6->sin6_addr.s6_addr, + 1); + + sin6 = (struct sockaddr_in6 *)&csk->com.remote_addr; + sin6->sin6_family = PF_INET6; + sin6->sin6_port = peer_port; + memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16); + } + + skb_queue_head_init(&csk->rxq); + skb_queue_head_init(&csk->txq); + skb_queue_head_init(&csk->ppodq); + skb_queue_head_init(&csk->backlogq); + skb_queue_head_init(&csk->skbq); + cxgbit_sock_reset_wr_list(csk); + spin_lock_init(&csk->lock); + init_waitqueue_head(&csk->waitq); + init_waitqueue_head(&csk->ack_waitq); + csk->lock_owner = false; + + if (cxgbit_alloc_csk_skb(csk)) { + dst_release(dst); + kfree(csk); + goto rel_skb; + } + + cxgbit_get_cdev(cdev); + + spin_lock(&cdev->cskq.lock); + list_add_tail(&csk->list, &cdev->cskq.list); + spin_unlock(&cdev->cskq.lock); + + cxgb4_insert_tid(t, csk, tid); + cxgbit_pass_accept_rpl(csk, req); + goto rel_skb; + +reject: + cxgbit_release_tid(cdev, tid); +rel_skb: + __kfree_skb(skb); +} + +static u32 +cxgbit_tx_flowc_wr_credits(struct cxgbit_sock *csk, u32 *nparamsp, + u32 *flowclenp) +{ + u32 nparams, flowclen16, flowclen; + + nparams = FLOWC_WR_NPARAMS_MIN; + + if (csk->snd_wscale) + nparams++; + +#ifdef CONFIG_CHELSIO_T4_DCB + nparams++; +#endif + flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]); + flowclen16 = DIV_ROUND_UP(flowclen, 16); + flowclen = flowclen16 * 16; + /* + * Return the number of 16-byte credits used by the flowc request. + * Pass back the nparams and actual flowc length if requested. 
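+ * flowclen was already rounded up to a whole number of 16-byte units, + * so flowclen16 is exactly the size of the work request in credits.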
+ */ + if (nparamsp) + *nparamsp = nparams; + if (flowclenp) + *flowclenp = flowclen; + return flowclen16; +} + +u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *csk) +{ + struct cxgbit_device *cdev = csk->com.cdev; + struct fw_flowc_wr *flowc; + u32 nparams, flowclen16, flowclen; + struct sk_buff *skb; + u8 index; + +#ifdef CONFIG_CHELSIO_T4_DCB + u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan; +#endif + + flowclen16 = cxgbit_tx_flowc_wr_credits(csk, &nparams, &flowclen); + + skb = __skb_dequeue(&csk->skbq); + flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen); + memset(flowc, 0, flowclen); + + flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) | + FW_FLOWC_WR_NPARAMS_V(nparams)); + flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) | + FW_WR_FLOWID_V(csk->tid)); + flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; + flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V + (csk->com.cdev->lldi.pf)); + flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; + flowc->mnemval[1].val = cpu_to_be32(csk->tx_chan); + flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; + flowc->mnemval[2].val = cpu_to_be32(csk->tx_chan); + flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; + flowc->mnemval[3].val = cpu_to_be32(csk->rss_qid); + flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT; + flowc->mnemval[4].val = cpu_to_be32(csk->snd_nxt); + flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT; + flowc->mnemval[5].val = cpu_to_be32(csk->rcv_nxt); + flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF; + flowc->mnemval[6].val = cpu_to_be32(csk->snd_win); + flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; + flowc->mnemval[7].val = cpu_to_be32(csk->emss); + + flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX; + if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) + flowc->mnemval[8].val = cpu_to_be32(CXGBIT_MAX_ISO_PAYLOAD); + else + flowc->mnemval[8].val = cpu_to_be32(16384); + + index = 9; + + if (csk->snd_wscale) { + flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_RCV_SCALE; + flowc->mnemval[index].val = cpu_to_be32(csk->snd_wscale); + index++; + } + +#ifdef CONFIG_CHELSIO_T4_DCB + flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_DCBPRIO; + if (vlan == VLAN_NONE) { + pr_warn("csk %u without VLAN Tag on DCB Link\n", csk->tid); + flowc->mnemval[index].val = cpu_to_be32(0); + } else + flowc->mnemval[index].val = cpu_to_be32( + (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT); +#endif + + pr_debug("%s: csk %p; tx_chan = %u; rss_qid = %u; snd_seq = %u;" + " rcv_seq = %u; snd_win = %u; emss = %u\n", + __func__, csk, csk->tx_chan, csk->rss_qid, csk->snd_nxt, + csk->rcv_nxt, csk->snd_win, csk->emss); + set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx); + cxgbit_ofld_send(csk->com.cdev, skb); + return flowclen16; +} + +int cxgbit_setup_conn_digest(struct cxgbit_sock *csk) +{ + struct sk_buff *skb; + struct cpl_set_tcb_field *req; + u8 hcrc = csk->submode & CXGBIT_SUBMODE_HCRC; + u8 dcrc = csk->submode & CXGBIT_SUBMODE_DCRC; + unsigned int len = roundup(sizeof(*req), 16); + int ret; + + skb = alloc_skb(len, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + /* set up ulp submode */ + req = (struct cpl_set_tcb_field *)__skb_put(skb, len); + memset(req, 0, len); + + INIT_TP_WR(req, csk->tid); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); + req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid)); + req->word_cookie = htons(0); + req->mask = cpu_to_be64(0x3 << 4); + req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) | + (dcrc ? 
ULP_CRC_DATA : 0)) << 4); + set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx); + + cxgbit_get_csk(csk); + cxgbit_init_wr_wait(&csk->com.wr_wait); + + cxgbit_ofld_send(csk->com.cdev, skb); + + ret = cxgbit_wait_for_reply(csk->com.cdev, + &csk->com.wr_wait, + csk->tid, 5, __func__); + if (ret) + return -1; + + return 0; +} + +int cxgbit_setup_conn_pgidx(struct cxgbit_sock *csk, u32 pg_idx) +{ + struct sk_buff *skb; + struct cpl_set_tcb_field *req; + unsigned int len = roundup(sizeof(*req), 16); + int ret; + + skb = alloc_skb(len, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + req = (struct cpl_set_tcb_field *)__skb_put(skb, len); + memset(req, 0, len); + + INIT_TP_WR(req, csk->tid); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); + req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid)); + req->word_cookie = htons(0); + req->mask = cpu_to_be64(0x3 << 8); + req->val = cpu_to_be64(pg_idx << 8); + set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx); + + cxgbit_get_csk(csk); + cxgbit_init_wr_wait(&csk->com.wr_wait); + + cxgbit_ofld_send(csk->com.cdev, skb); + + ret = cxgbit_wait_for_reply(csk->com.cdev, + &csk->com.wr_wait, + csk->tid, 5, __func__); + if (ret) + return -1; + + return 0; +} + +static void +cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb) +{ + struct cpl_pass_open_rpl *rpl = cplhdr(skb); + struct tid_info *t = cdev->lldi.tids; + unsigned int stid = GET_TID(rpl); + struct cxgbit_np *cnp = lookup_stid(t, stid); + + pr_debug("%s: cnp = %p; stid = %u; status = %d\n", + __func__, cnp, stid, rpl->status); + + if (!cnp) { + pr_info("%s stid %d lookup failure\n", __func__, stid); + return; + } + + cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status); + cxgbit_put_cnp(cnp); +} + +static void +cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb) +{ + struct cpl_close_listsvr_rpl *rpl = cplhdr(skb); + struct tid_info *t = cdev->lldi.tids; + unsigned int stid = GET_TID(rpl); + struct cxgbit_np *cnp = lookup_stid(t, stid); + + pr_debug("%s: cnp = %p; stid = %u; status = %d\n", + __func__, cnp, stid, rpl->status); + + if (!cnp) { + pr_info("%s stid %d lookup failure\n", __func__, stid); + return; + } + + cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status); + cxgbit_put_cnp(cnp); +} + +static void +cxgbit_pass_establish(struct cxgbit_device *cdev, struct sk_buff *skb) +{ + struct cpl_pass_establish *req = cplhdr(skb); + struct tid_info *t = cdev->lldi.tids; + unsigned int tid = GET_TID(req); + struct cxgbit_sock *csk; + struct cxgbit_np *cnp; + u16 tcp_opt = be16_to_cpu(req->tcp_opt); + u32 snd_isn = be32_to_cpu(req->snd_isn); + u32 rcv_isn = be32_to_cpu(req->rcv_isn); + + csk = lookup_tid(t, tid); + if (unlikely(!csk)) { + pr_err("can't find connection for tid %u.\n", tid); + goto rel_skb; + } + cnp = csk->cnp; + + pr_debug("%s: csk %p; tid %u; cnp %p\n", + __func__, csk, tid, cnp); + + csk->write_seq = snd_isn; + csk->snd_una = snd_isn; + csk->snd_nxt = snd_isn; + + csk->rcv_nxt = rcv_isn; + + if (csk->rcv_win > (RCV_BUFSIZ_M << 10)) + csk->rx_credits = (csk->rcv_win - (RCV_BUFSIZ_M << 10)); + + csk->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt); + cxgbit_set_emss(csk, tcp_opt); + dst_confirm(csk->dst); + csk->com.state = CSK_STATE_ESTABLISHED; + spin_lock_bh(&cnp->np_accept_lock); + list_add_tail(&csk->accept_node, &cnp->np_accept_list); + spin_unlock_bh(&cnp->np_accept_lock); + complete(&cnp->accept_comp); +rel_skb: + __kfree_skb(skb); +} + +static void cxgbit_queue_rx_skb(struct cxgbit_sock *csk, struct 
sk_buff *skb) +{ + cxgbit_skcb_flags(skb) = 0; + spin_lock_bh(&csk->rxq.lock); + __skb_queue_tail(&csk->rxq, skb); + spin_unlock_bh(&csk->rxq.lock); + wake_up(&csk->waitq); +} + +static void cxgbit_peer_close(struct cxgbit_sock *csk, struct sk_buff *skb) +{ + pr_debug("%s: csk %p; tid %u; state %d\n", + __func__, csk, csk->tid, csk->com.state); + + switch (csk->com.state) { + case CSK_STATE_ESTABLISHED: + csk->com.state = CSK_STATE_CLOSING; + cxgbit_queue_rx_skb(csk, skb); + return; + case CSK_STATE_CLOSING: + /* simultaneous close */ + csk->com.state = CSK_STATE_MORIBUND; + break; + case CSK_STATE_MORIBUND: + csk->com.state = CSK_STATE_DEAD; + cxgbit_put_csk(csk); + break; + case CSK_STATE_ABORTING: + break; + default: + pr_info("%s: cpl_peer_close in bad state %d\n", + __func__, csk->com.state); + } + + __kfree_skb(skb); +} + +static void cxgbit_close_con_rpl(struct cxgbit_sock *csk, struct sk_buff *skb) +{ + pr_debug("%s: csk %p; tid %u; state %d\n", + __func__, csk, csk->tid, csk->com.state); + + switch (csk->com.state) { + case CSK_STATE_CLOSING: + csk->com.state = CSK_STATE_MORIBUND; + break; + case CSK_STATE_MORIBUND: + csk->com.state = CSK_STATE_DEAD; + cxgbit_put_csk(csk); + break; + case CSK_STATE_ABORTING: + case CSK_STATE_DEAD: + break; + default: + pr_info("%s: cpl_close_con_rpl in bad state %d\n", + __func__, csk->com.state); + } + + __kfree_skb(skb); +} + +static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb) +{ + struct cpl_abort_req_rss *hdr = cplhdr(skb); + unsigned int tid = GET_TID(hdr); + struct cpl_abort_rpl *rpl; + struct sk_buff *rpl_skb; + bool release = false; + bool wakeup_thread = false; + unsigned int len = roundup(sizeof(*rpl), 16); + + pr_debug("%s: csk %p; tid %u; state %d\n", + __func__, csk, tid, csk->com.state); + + if (cxgbit_is_neg_adv(hdr->status)) { + pr_err("%s: got neg advise %d on tid %u\n", + __func__, hdr->status, tid); + goto rel_skb; + } + + switch (csk->com.state) { + case CSK_STATE_CONNECTING: + case CSK_STATE_MORIBUND: + csk->com.state = CSK_STATE_DEAD; + release = true; + break; + case CSK_STATE_ESTABLISHED: + csk->com.state = CSK_STATE_DEAD; + wakeup_thread = true; + break; + case CSK_STATE_CLOSING: + csk->com.state = CSK_STATE_DEAD; + if (!csk->conn) + release = true; + break; + case CSK_STATE_ABORTING: + break; + default: + pr_info("%s: cpl_abort_req_rss in bad state %d\n", + __func__, csk->com.state); + csk->com.state = CSK_STATE_DEAD; + } + + __skb_queue_purge(&csk->txq); + + if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags)) + cxgbit_send_tx_flowc_wr(csk); + + rpl_skb = __skb_dequeue(&csk->skbq); + set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx); + + rpl = (struct cpl_abort_rpl *)__skb_put(rpl_skb, len); + memset(rpl, 0, len); + + INIT_TP_WR(rpl, csk->tid); + OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid)); + rpl->cmd = CPL_ABORT_NO_RST; + cxgbit_ofld_send(csk->com.cdev, rpl_skb); + + if (wakeup_thread) { + cxgbit_queue_rx_skb(csk, skb); + return; + } + + if (release) + cxgbit_put_csk(csk); +rel_skb: + __kfree_skb(skb); +} + +static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb) +{ + pr_debug("%s: csk %p; tid %u; state %d\n", + __func__, csk, csk->tid, csk->com.state); + + switch (csk->com.state) { + case CSK_STATE_ABORTING: + csk->com.state = CSK_STATE_DEAD; + cxgbit_put_csk(csk); + break; + default: + pr_info("%s: cpl_abort_rpl_rss in state %d\n", + __func__, csk->com.state); + } + + __kfree_skb(skb); +} + +static bool cxgbit_credit_err(const struct 
cxgbit_sock *csk) +{ + const struct sk_buff *skb = csk->wr_pending_head; + u32 credit = 0; + + if (unlikely(csk->wr_cred > csk->wr_max_cred)) { + pr_err("csk 0x%p, tid %u, credit %u > %u\n", + csk, csk->tid, csk->wr_cred, csk->wr_max_cred); + return true; + } + + while (skb) { + credit += skb->csum; + skb = cxgbit_skcb_tx_wr_next(skb); + } + + if (unlikely((csk->wr_cred + credit) != csk->wr_max_cred)) { + pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n", + csk, csk->tid, csk->wr_cred, + credit, csk->wr_max_cred); + + return true; + } + + return false; +} + +static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb) +{ + struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)cplhdr(skb); + u32 credits = rpl->credits; + u32 snd_una = ntohl(rpl->snd_una); + + csk->wr_cred += credits; + if (csk->wr_una_cred > (csk->wr_max_cred - csk->wr_cred)) + csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred; + + while (credits) { + struct sk_buff *p = cxgbit_sock_peek_wr(csk); + + if (unlikely(!p)) { + pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n", + csk, csk->tid, credits, + csk->wr_cred, csk->wr_una_cred); + break; + } + + if (unlikely(credits < p->csum)) { + pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n", + csk, csk->tid, + credits, csk->wr_cred, csk->wr_una_cred, + p->csum); + p->csum -= credits; + break; + } + + cxgbit_sock_dequeue_wr(csk); + credits -= p->csum; + kfree_skb(p); + } + + if (unlikely(cxgbit_credit_err(csk))) { + cxgbit_queue_rx_skb(csk, skb); + return; + } + + if (rpl->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) { + if (unlikely(before(snd_una, csk->snd_una))) { + pr_warn("csk 0x%p,%u, snd_una %u/%u.", + csk, csk->tid, snd_una, + csk->snd_una); + goto rel_skb; + } + + if (csk->snd_una != snd_una) { + csk->snd_una = snd_una; + dst_confirm(csk->dst); + wake_up(&csk->ack_waitq); + } + } + + if (skb_queue_len(&csk->txq)) + cxgbit_push_tx_frames(csk); + +rel_skb: + __kfree_skb(skb); +} + +static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb) +{ + struct cxgbit_sock *csk; + struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data; + unsigned int tid = GET_TID(rpl); + struct cxgb4_lld_info *lldi = &cdev->lldi; + struct tid_info *t = lldi->tids; + + csk = lookup_tid(t, tid); + if (unlikely(!csk)) + pr_err("can't find connection for tid %u.\n", tid); + else + cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status); + + cxgbit_put_csk(csk); +} + +static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb) +{ + struct cxgbit_sock *csk; + struct cpl_rx_data *cpl = cplhdr(skb); + unsigned int tid = GET_TID(cpl); + struct cxgb4_lld_info *lldi = &cdev->lldi; + struct tid_info *t = lldi->tids; + + csk = lookup_tid(t, tid); + if (unlikely(!csk)) { + pr_err("can't find conn. 
for tid %u.\n", tid); + goto rel_skb; + } + + cxgbit_queue_rx_skb(csk, skb); + return; +rel_skb: + __kfree_skb(skb); +} + +static void +__cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb) +{ + spin_lock(&csk->lock); + if (csk->lock_owner) { + __skb_queue_tail(&csk->backlogq, skb); + spin_unlock(&csk->lock); + return; + } + + cxgbit_skcb_rx_backlog_fn(skb)(csk, skb); + spin_unlock(&csk->lock); +} + +static void cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb) +{ + cxgbit_get_csk(csk); + __cxgbit_process_rx_cpl(csk, skb); + cxgbit_put_csk(csk); +} + +static void cxgbit_rx_cpl(struct cxgbit_device *cdev, struct sk_buff *skb) +{ + struct cxgbit_sock *csk; + struct cpl_tx_data *cpl = cplhdr(skb); + struct cxgb4_lld_info *lldi = &cdev->lldi; + struct tid_info *t = lldi->tids; + unsigned int tid = GET_TID(cpl); + u8 opcode = cxgbit_skcb_rx_opcode(skb); + bool ref = true; + + switch (opcode) { + case CPL_FW4_ACK: + cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_fw4_ack; + ref = false; + break; + case CPL_PEER_CLOSE: + cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_peer_close; + break; + case CPL_CLOSE_CON_RPL: + cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_close_con_rpl; + break; + case CPL_ABORT_REQ_RSS: + cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_req_rss; + break; + case CPL_ABORT_RPL_RSS: + cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_rpl_rss; + break; + default: + goto rel_skb; + } + + csk = lookup_tid(t, tid); + if (unlikely(!csk)) { + pr_err("can't find conn. for tid %u.\n", tid); + goto rel_skb; + } + + if (ref) + cxgbit_process_rx_cpl(csk, skb); + else + __cxgbit_process_rx_cpl(csk, skb); + + return; +rel_skb: + __kfree_skb(skb); +} + +cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS] = { + [CPL_PASS_OPEN_RPL] = cxgbit_pass_open_rpl, + [CPL_CLOSE_LISTSRV_RPL] = cxgbit_close_listsrv_rpl, + [CPL_PASS_ACCEPT_REQ] = cxgbit_pass_accept_req, + [CPL_PASS_ESTABLISH] = cxgbit_pass_establish, + [CPL_SET_TCB_RPL] = cxgbit_set_tcb_rpl, + [CPL_RX_DATA] = cxgbit_rx_data, + [CPL_FW4_ACK] = cxgbit_rx_cpl, + [CPL_PEER_CLOSE] = cxgbit_rx_cpl, + [CPL_CLOSE_CON_RPL] = cxgbit_rx_cpl, + [CPL_ABORT_REQ_RSS] = cxgbit_rx_cpl, + [CPL_ABORT_RPL_RSS] = cxgbit_rx_cpl, +}; diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c new file mode 100644 index 000000000000..5d78bdb7fc64 --- /dev/null +++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c @@ -0,0 +1,325 @@ +/* + * Copyright (c) 2016 Chelsio Communications, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include "cxgbit.h" + +static void +cxgbit_set_one_ppod(struct cxgbi_pagepod *ppod, + struct cxgbi_task_tag_info *ttinfo, + struct scatterlist **sg_pp, unsigned int *sg_off) +{ + struct scatterlist *sg = sg_pp ? *sg_pp : NULL; + unsigned int offset = sg_off ? 
*sg_off : 0; + dma_addr_t addr = 0UL; + unsigned int len = 0; + int i; + + memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr)); + + if (sg) { + addr = sg_dma_address(sg); + len = sg_dma_len(sg); + } + + for (i = 0; i < PPOD_PAGES_MAX; i++) { + if (sg) { + ppod->addr[i] = cpu_to_be64(addr + offset); + offset += PAGE_SIZE; + if (offset == (len + sg->offset)) { + offset = 0; + sg = sg_next(sg); + if (sg) { + addr = sg_dma_address(sg); + len = sg_dma_len(sg); + } + } + } else { + ppod->addr[i] = 0ULL; + } + } + + /* + * the fifth address needs to be repeated in the next ppod, so do + * not move sg + */ + if (sg_pp) { + *sg_pp = sg; + *sg_off = offset; + } + + if (offset == len) { + offset = 0; + if (sg) { + sg = sg_next(sg); + if (sg) + addr = sg_dma_address(sg); + } + } + ppod->addr[i] = sg ? cpu_to_be64(addr + offset) : 0ULL; +} + +static struct sk_buff * +cxgbit_ppod_init_idata(struct cxgbit_device *cdev, struct cxgbi_ppm *ppm, + unsigned int idx, unsigned int npods, unsigned int tid) +{ + struct ulp_mem_io *req; + struct ulptx_idata *idata; + unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit; + unsigned int dlen = npods << PPOD_SIZE_SHIFT; + unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) + + sizeof(struct ulptx_idata) + dlen, 16); + struct sk_buff *skb; + + skb = alloc_skb(wr_len, GFP_KERNEL); + if (!skb) + return NULL; + + req = (struct ulp_mem_io *)__skb_put(skb, wr_len); + INIT_ULPTX_WR(req, wr_len, 0, tid); + req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) | + FW_WR_ATOMIC_V(0)); + req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) | + ULP_MEMIO_ORDER_V(0) | + T5_ULP_MEMIO_IMM_V(1)); + req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5)); + req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5)); + req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16)); + + idata = (struct ulptx_idata *)(req + 1); + idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM)); + idata->len = htonl(dlen); + + return skb; +} + +static int +cxgbit_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk, + struct cxgbi_task_tag_info *ttinfo, unsigned int idx, + unsigned int npods, struct scatterlist **sg_pp, + unsigned int *sg_off) +{ + struct cxgbit_device *cdev = csk->com.cdev; + struct sk_buff *skb; + struct ulp_mem_io *req; + struct ulptx_idata *idata; + struct cxgbi_pagepod *ppod; + unsigned int i; + + skb = cxgbit_ppod_init_idata(cdev, ppm, idx, npods, csk->tid); + if (!skb) + return -ENOMEM; + + req = (struct ulp_mem_io *)skb->data; + idata = (struct ulptx_idata *)(req + 1); + ppod = (struct cxgbi_pagepod *)(idata + 1); + + for (i = 0; i < npods; i++, ppod++) + cxgbit_set_one_ppod(ppod, ttinfo, sg_pp, sg_off); + + __skb_queue_tail(&csk->ppodq, skb); + + return 0; +} + +static int +cxgbit_ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk, + struct cxgbi_task_tag_info *ttinfo) +{ + unsigned int pidx = ttinfo->idx; + unsigned int npods = ttinfo->npods; + unsigned int i, cnt; + struct scatterlist *sg = ttinfo->sgl; + unsigned int offset = 0; + int ret = 0; + + for (i = 0; i < npods; i += cnt, pidx += cnt) { + cnt = npods - i; + + if (cnt > ULPMEM_IDATA_MAX_NPPODS) + cnt = ULPMEM_IDATA_MAX_NPPODS; + + ret = cxgbit_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt, + &sg, &offset); + if (ret < 0) + break; + } + + return ret; +} + +static int cxgbit_ddp_sgl_check(struct scatterlist *sg, + unsigned int nents) +{ + unsigned int last_sgidx = nents - 1; + unsigned int i; + + for (i = 0; i < nents; i++, sg = sg_next(sg)) { + unsigned int len = sg->length + sg->offset; + + if 
((sg->offset & 0x3) || (i && sg->offset) || + ((i != last_sgidx) && (len != PAGE_SIZE))) { + return -EINVAL; + } + } + + return 0; +} + +static int +cxgbit_ddp_reserve(struct cxgbit_sock *csk, struct cxgbi_task_tag_info *ttinfo, + unsigned int xferlen) +{ + struct cxgbit_device *cdev = csk->com.cdev; + struct cxgbi_ppm *ppm = cdev2ppm(cdev); + struct scatterlist *sgl = ttinfo->sgl; + unsigned int sgcnt = ttinfo->nents; + unsigned int sg_offset = sgl->offset; + int ret; + + if ((xferlen < DDP_THRESHOLD) || (!sgcnt)) { + pr_debug("ppm 0x%p, pgidx %u, xfer %u, sgcnt %u, NO ddp.\n", + ppm, ppm->tformat.pgsz_idx_dflt, + xferlen, ttinfo->nents); + return -EINVAL; + } + + if (cxgbit_ddp_sgl_check(sgl, sgcnt) < 0) + return -EINVAL; + + ttinfo->nr_pages = (xferlen + sgl->offset + + (1 << PAGE_SHIFT) - 1) >> PAGE_SHIFT; + + /* + * the ddp tag will be used for the ttt in the outgoing r2t pdu + */ + ret = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx, + &ttinfo->tag, 0); + if (ret < 0) + return ret; + ttinfo->npods = ret; + + sgl->offset = 0; + ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE); + sgl->offset = sg_offset; + if (!ret) { + pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n", + __func__, 0, xferlen, sgcnt); + goto rel_ppods; + } + + cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset, + xferlen, &ttinfo->hdr); + + ret = cxgbit_ddp_set_map(ppm, csk, ttinfo); + if (ret < 0) { + __skb_queue_purge(&csk->ppodq); + dma_unmap_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE); + goto rel_ppods; + } + + return 0; + +rel_ppods: + cxgbi_ppm_ppod_release(ppm, ttinfo->idx); + return -EINVAL; +} + +void +cxgbit_get_r2t_ttt(struct iscsi_conn *conn, struct iscsi_cmd *cmd, + struct iscsi_r2t *r2t) +{ + struct cxgbit_sock *csk = conn->context; + struct cxgbit_device *cdev = csk->com.cdev; + struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd); + struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo; + int ret = -EINVAL; + + if ((!ccmd->setup_ddp) || + (!test_bit(CSK_DDP_ENABLE, &csk->com.flags))) + goto out; + + ccmd->setup_ddp = false; + + ttinfo->sgl = cmd->se_cmd.t_data_sg; + ttinfo->nents = cmd->se_cmd.t_data_nents; + + ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length); + if (ret < 0) { + pr_info("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n", + csk, cmd, cmd->se_cmd.data_length, ttinfo->nents); + + ttinfo->sgl = NULL; + ttinfo->nents = 0; + } else { + ccmd->release = true; + } +out: + pr_debug("cdev 0x%p, cmd 0x%p, tag 0x%x\n", cdev, cmd, ttinfo->tag); + r2t->targ_xfer_tag = ttinfo->tag; +} + +void cxgbit_release_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd) +{ + struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd); + + if (ccmd->release) { + struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo; + + if (ttinfo->sgl) { + struct cxgbit_sock *csk = conn->context; + struct cxgbit_device *cdev = csk->com.cdev; + struct cxgbi_ppm *ppm = cdev2ppm(cdev); + + cxgbi_ppm_ppod_release(ppm, ttinfo->idx); + + dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl, + ttinfo->nents, DMA_FROM_DEVICE); + } else { + put_page(sg_page(&ccmd->sg)); + } + + ccmd->release = false; + } +} + +int cxgbit_ddp_init(struct cxgbit_device *cdev) +{ + struct cxgb4_lld_info *lldi = &cdev->lldi; + struct net_device *ndev = cdev->lldi.ports[0]; + struct cxgbi_tag_format tformat; + unsigned int ppmax; + int ret, i; + + if (!lldi->vr->iscsi.size) { + pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name); + return -EACCES; + } + + ppmax = lldi->vr->iscsi.size >> PPOD_SIZE_SHIFT; + + 
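/* derive the tag format from the four page-size orders that cxgb4 + * packs one per byte in lldi->iscsi_pgsz_order + */ +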
memset(&tformat, 0, sizeof(struct cxgbi_tag_format)); + for (i = 0; i < 4; i++) + tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3)) + & 0xF; + cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat); + + ret = cxgbi_ppm_init(lldi->iscsi_ppm, cdev->lldi.ports[0], + cdev->lldi.pdev, &cdev->lldi, &tformat, + ppmax, lldi->iscsi_llimit, + lldi->vr->iscsi.start, 2); + if (ret >= 0) { + struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*lldi->iscsi_ppm); + + if ((ppm->tformat.pgsz_idx_dflt < DDP_PGIDX_MAX) && + (ppm->ppmax >= 1024)) + set_bit(CDEV_DDP_ENABLE, &cdev->flags); + ret = 0; + } + + return ret; +} diff --git a/drivers/target/iscsi/cxgbit/cxgbit_lro.h b/drivers/target/iscsi/cxgbit/cxgbit_lro.h new file mode 100644 index 000000000000..28c11bd1b930 --- /dev/null +++ b/drivers/target/iscsi/cxgbit/cxgbit_lro.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2016 Chelsio Communications, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + */ + +#ifndef __CXGBIT_LRO_H__ +#define __CXGBIT_LRO_H__ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/skbuff.h> + +#define LRO_FLUSH_LEN_MAX 65535 + +struct cxgbit_lro_cb { + struct cxgbit_sock *csk; + u32 pdu_totallen; + u32 offset; + u8 pdu_idx; + bool complete; +}; + +enum cxgbit_pducb_flags { + PDUCBF_RX_HDR = (1 << 0), /* received pdu header */ + PDUCBF_RX_DATA = (1 << 1), /* received pdu payload */ + PDUCBF_RX_STATUS = (1 << 2), /* received ddp status */ + PDUCBF_RX_DATA_DDPD = (1 << 3), /* pdu payload ddp'd */ + PDUCBF_RX_HCRC_ERR = (1 << 4), /* header digest error */ + PDUCBF_RX_DCRC_ERR = (1 << 5), /* data digest error */ +}; + +struct cxgbit_lro_pdu_cb { + u8 flags; + u8 frags; + u8 hfrag_idx; + u8 nr_dfrags; + u8 dfrag_idx; + bool complete; + u32 seq; + u32 pdulen; + u32 hlen; + u32 dlen; + u32 doffset; + u32 ddigest; + void *hdr; +}; + +#define LRO_SKB_MAX_HEADROOM \ + (sizeof(struct cxgbit_lro_cb) + \ + (MAX_SKB_FRAGS * sizeof(struct cxgbit_lro_pdu_cb))) + +#define LRO_SKB_MIN_HEADROOM \ + (sizeof(struct cxgbit_lro_cb) + \ + sizeof(struct cxgbit_lro_pdu_cb)) + +#define cxgbit_skb_lro_cb(skb) ((struct cxgbit_lro_cb *)skb->data) +#define cxgbit_skb_lro_pdu_cb(skb, i) \ + ((struct cxgbit_lro_pdu_cb *)(skb->data + sizeof(struct cxgbit_lro_cb) \ + + (i * sizeof(struct cxgbit_lro_pdu_cb)))) + +#define CPL_RX_ISCSI_DDP_STATUS_DDP_SHIFT 16 /* ddp'able */ +#define CPL_RX_ISCSI_DDP_STATUS_PAD_SHIFT 19 /* pad error */ +#define CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT 20 /* hcrc error */ +#define CPL_RX_ISCSI_DDP_STATUS_DCRC_SHIFT 21 /* dcrc error */ + +#endif /*__CXGBIT_LRO_H_*/ diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c new file mode 100644 index 000000000000..60dccd02bd85 --- /dev/null +++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c @@ -0,0 +1,702 @@ +/* + * Copyright (c) 2016 Chelsio Communications, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#define DRV_NAME "cxgbit" +#define DRV_VERSION "1.0.0-ko" +#define pr_fmt(fmt) DRV_NAME ": " fmt + +#include "cxgbit.h" + +#ifdef CONFIG_CHELSIO_T4_DCB +#include <net/dcbevent.h> +#include "cxgb4_dcb.h" +#endif + +LIST_HEAD(cdev_list_head); +/* cdev list lock */ +DEFINE_MUTEX(cdev_list_lock); + +void _cxgbit_free_cdev(struct kref *kref) +{ + struct cxgbit_device *cdev; + + cdev = container_of(kref, struct cxgbit_device, kref); + kfree(cdev); +} + +static void cxgbit_set_mdsl(struct cxgbit_device *cdev) +{ + struct cxgb4_lld_info *lldi = &cdev->lldi; + u32 mdsl; + +#define ULP2_MAX_PKT_LEN 16224 +#define ISCSI_PDU_NONPAYLOAD_LEN 312 + mdsl = min_t(u32, lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN, + ULP2_MAX_PKT_LEN - ISCSI_PDU_NONPAYLOAD_LEN); + mdsl = min_t(u32, mdsl, 8192); + mdsl = min_t(u32, mdsl, (MAX_SKB_FRAGS - 1) * PAGE_SIZE); + + cdev->mdsl = mdsl; +} + +static void *cxgbit_uld_add(const struct cxgb4_lld_info *lldi) +{ + struct cxgbit_device *cdev; + + if (is_t4(lldi->adapter_type)) + return ERR_PTR(-ENODEV); + + cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); + if (!cdev) + return ERR_PTR(-ENOMEM); + + kref_init(&cdev->kref); + + cdev->lldi = *lldi; + + cxgbit_set_mdsl(cdev); + + if (cxgbit_ddp_init(cdev) < 0) { + kfree(cdev); + return ERR_PTR(-EINVAL); + } + + if (!test_bit(CDEV_DDP_ENABLE, &cdev->flags)) + pr_info("cdev %s ddp init failed\n", + pci_name(lldi->pdev)); + + if (lldi->fw_vers >= 0x10d2b00) + set_bit(CDEV_ISO_ENABLE, &cdev->flags); + + spin_lock_init(&cdev->cskq.lock); + INIT_LIST_HEAD(&cdev->cskq.list); + + mutex_lock(&cdev_list_lock); + list_add_tail(&cdev->list, &cdev_list_head); + mutex_unlock(&cdev_list_lock); + + pr_info("cdev %s added for iSCSI target transport\n", + pci_name(lldi->pdev)); + + return cdev; +} + +static void cxgbit_close_conn(struct cxgbit_device *cdev) +{ + struct cxgbit_sock *csk; + struct sk_buff *skb; + bool wakeup_thread = false; + + spin_lock_bh(&cdev->cskq.lock); + list_for_each_entry(csk, &cdev->cskq.list, list) { + skb = alloc_skb(0, GFP_ATOMIC); + if (!skb) + continue; + + spin_lock_bh(&csk->rxq.lock); + __skb_queue_tail(&csk->rxq, skb); + if (skb_queue_len(&csk->rxq) == 1) + wakeup_thread = true; + spin_unlock_bh(&csk->rxq.lock); + + if (wakeup_thread) { + wake_up(&csk->waitq); + wakeup_thread = false; + } + } + spin_unlock_bh(&cdev->cskq.lock); +} + +static void cxgbit_detach_cdev(struct cxgbit_device *cdev) +{ + bool free_cdev = false; + + spin_lock_bh(&cdev->cskq.lock); + if (list_empty(&cdev->cskq.list)) + free_cdev = true; + spin_unlock_bh(&cdev->cskq.lock); + + if (free_cdev) { + mutex_lock(&cdev_list_lock); + list_del(&cdev->list); + mutex_unlock(&cdev_list_lock); + + cxgbit_put_cdev(cdev); + } else { + cxgbit_close_conn(cdev); + } +} + +static int cxgbit_uld_state_change(void *handle, enum cxgb4_state state) +{ + struct cxgbit_device *cdev = handle; + + switch (state) { + case CXGB4_STATE_UP: + set_bit(CDEV_STATE_UP, &cdev->flags); + pr_info("cdev %s state UP.\n", pci_name(cdev->lldi.pdev)); + break; + case CXGB4_STATE_START_RECOVERY: + clear_bit(CDEV_STATE_UP, &cdev->flags); + cxgbit_close_conn(cdev); + pr_info("cdev %s state RECOVERY.\n", pci_name(cdev->lldi.pdev)); + break; + case CXGB4_STATE_DOWN: + pr_info("cdev %s state DOWN.\n", pci_name(cdev->lldi.pdev)); + break; + case CXGB4_STATE_DETACH: + clear_bit(CDEV_STATE_UP, &cdev->flags); + pr_info("cdev %s state DETACH.\n", pci_name(cdev->lldi.pdev)); + cxgbit_detach_cdev(cdev); + break; + default: + pr_info("cdev %s unknown state %d.\n", + pci_name(cdev->lldi.pdev), 
state); + break; + } + return 0; +} + +static void +cxgbit_proc_ddp_status(unsigned int tid, struct cpl_rx_data_ddp *cpl, + struct cxgbit_lro_pdu_cb *pdu_cb) +{ + unsigned int status = ntohl(cpl->ddpvld); + + pdu_cb->flags |= PDUCBF_RX_STATUS; + pdu_cb->ddigest = ntohl(cpl->ulp_crc); + pdu_cb->pdulen = ntohs(cpl->len); + + if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT)) { + pr_info("tid 0x%x, status 0x%x, hcrc bad.\n", tid, status); + pdu_cb->flags |= PDUCBF_RX_HCRC_ERR; + } + + if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_DCRC_SHIFT)) { + pr_info("tid 0x%x, status 0x%x, dcrc bad.\n", tid, status); + pdu_cb->flags |= PDUCBF_RX_DCRC_ERR; + } + + if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_PAD_SHIFT)) + pr_info("tid 0x%x, status 0x%x, pad bad.\n", tid, status); + + if ((status & (1 << CPL_RX_ISCSI_DDP_STATUS_DDP_SHIFT)) && + (!(pdu_cb->flags & PDUCBF_RX_DATA))) { + pdu_cb->flags |= PDUCBF_RX_DATA_DDPD; + } +} + +static void +cxgbit_lro_add_packet_rsp(struct sk_buff *skb, u8 op, const __be64 *rsp) +{ + struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb); + struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, + lro_cb->pdu_idx); + struct cpl_rx_iscsi_ddp *cpl = (struct cpl_rx_iscsi_ddp *)(rsp + 1); + + cxgbit_proc_ddp_status(lro_cb->csk->tid, cpl, pdu_cb); + + if (pdu_cb->flags & PDUCBF_RX_HDR) + pdu_cb->complete = true; + + lro_cb->complete = true; + lro_cb->pdu_totallen += pdu_cb->pdulen; + lro_cb->pdu_idx++; +} + +static void +cxgbit_copy_frags(struct sk_buff *skb, const struct pkt_gl *gl, + unsigned int offset) +{ + u8 skb_frag_idx = skb_shinfo(skb)->nr_frags; + u8 i; + + /* usually there's just one frag */ + __skb_fill_page_desc(skb, skb_frag_idx, gl->frags[0].page, + gl->frags[0].offset + offset, + gl->frags[0].size - offset); + for (i = 1; i < gl->nfrags; i++) + __skb_fill_page_desc(skb, skb_frag_idx + i, + gl->frags[i].page, + gl->frags[i].offset, + gl->frags[i].size); + + skb_shinfo(skb)->nr_frags += gl->nfrags; + + /* get a reference to the last page, we don't own it */ + get_page(gl->frags[gl->nfrags - 1].page); +} + +static void +cxgbit_lro_add_packet_gl(struct sk_buff *skb, u8 op, const struct pkt_gl *gl) +{ + struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb); + struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, + lro_cb->pdu_idx); + u32 len, offset; + + if (op == CPL_ISCSI_HDR) { + struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)gl->va; + + offset = sizeof(struct cpl_iscsi_hdr); + pdu_cb->flags |= PDUCBF_RX_HDR; + pdu_cb->seq = ntohl(cpl->seq); + len = ntohs(cpl->len); + pdu_cb->hdr = gl->va + offset; + pdu_cb->hlen = len; + pdu_cb->hfrag_idx = skb_shinfo(skb)->nr_frags; + + if (unlikely(gl->nfrags > 1)) + cxgbit_skcb_flags(skb) = 0; + + lro_cb->complete = false; + } else { + struct cpl_iscsi_data *cpl = (struct cpl_iscsi_data *)gl->va; + + offset = sizeof(struct cpl_iscsi_data); + pdu_cb->flags |= PDUCBF_RX_DATA; + len = ntohs(cpl->len); + pdu_cb->dlen = len; + pdu_cb->doffset = lro_cb->offset; + pdu_cb->nr_dfrags = gl->nfrags; + pdu_cb->dfrag_idx = skb_shinfo(skb)->nr_frags; + } + + cxgbit_copy_frags(skb, gl, offset); + + pdu_cb->frags += gl->nfrags; + lro_cb->offset += len; + skb->len += len; + skb->data_len += len; + skb->truesize += len; +} + +static struct sk_buff * +cxgbit_lro_init_skb(struct cxgbit_sock *csk, u8 op, const struct pkt_gl *gl, + const __be64 *rsp, struct napi_struct *napi) +{ + struct sk_buff *skb; + struct cxgbit_lro_cb *lro_cb; + + skb = napi_alloc_skb(napi, LRO_SKB_MAX_HEADROOM); + + if (unlikely(!skb)) + return NULL; + 
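/* the headroom sized by LRO_SKB_MAX_HEADROOM holds the lro_cb followed + * by one pdu_cb per possible frag; clear it all before the cbs are used + */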
+ memset(skb->data, 0, LRO_SKB_MAX_HEADROOM); + + cxgbit_skcb_flags(skb) |= SKCBF_RX_LRO; + + lro_cb = cxgbit_skb_lro_cb(skb); + + cxgbit_get_csk(csk); + + lro_cb->csk = csk; + + return skb; +} + +static void cxgbit_queue_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb) +{ + bool wakeup_thread = false; + + spin_lock(&csk->rxq.lock); + __skb_queue_tail(&csk->rxq, skb); + if (skb_queue_len(&csk->rxq) == 1) + wakeup_thread = true; + spin_unlock(&csk->rxq.lock); + + if (wakeup_thread) + wake_up(&csk->waitq); +} + +static void cxgbit_lro_flush(struct t4_lro_mgr *lro_mgr, struct sk_buff *skb) +{ + struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb); + struct cxgbit_sock *csk = lro_cb->csk; + + csk->lro_skb = NULL; + + __skb_unlink(skb, &lro_mgr->lroq); + cxgbit_queue_lro_skb(csk, skb); + + cxgbit_put_csk(csk); + + lro_mgr->lro_pkts++; + lro_mgr->lro_session_cnt--; +} + +static void cxgbit_uld_lro_flush(struct t4_lro_mgr *lro_mgr) +{ + struct sk_buff *skb; + + while ((skb = skb_peek(&lro_mgr->lroq))) + cxgbit_lro_flush(lro_mgr, skb); +} + +static int +cxgbit_lro_receive(struct cxgbit_sock *csk, u8 op, const __be64 *rsp, + const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr, + struct napi_struct *napi) +{ + struct sk_buff *skb; + struct cxgbit_lro_cb *lro_cb; + + if (!csk) { + pr_err("%s: csk NULL, op 0x%x.\n", __func__, op); + goto out; + } + + if (csk->lro_skb) + goto add_packet; + +start_lro: + if (lro_mgr->lro_session_cnt >= MAX_LRO_SESSIONS) { + cxgbit_uld_lro_flush(lro_mgr); + goto start_lro; + } + + skb = cxgbit_lro_init_skb(csk, op, gl, rsp, napi); + if (unlikely(!skb)) + goto out; + + csk->lro_skb = skb; + + __skb_queue_tail(&lro_mgr->lroq, skb); + lro_mgr->lro_session_cnt++; + +add_packet: + skb = csk->lro_skb; + lro_cb = cxgbit_skb_lro_cb(skb); + + if ((gl && (((skb_shinfo(skb)->nr_frags + gl->nfrags) > + MAX_SKB_FRAGS) || (lro_cb->pdu_totallen >= LRO_FLUSH_LEN_MAX))) || + (lro_cb->pdu_idx >= MAX_SKB_FRAGS)) { + cxgbit_lro_flush(lro_mgr, skb); + goto start_lro; + } + + if (gl) + cxgbit_lro_add_packet_gl(skb, op, gl); + else + cxgbit_lro_add_packet_rsp(skb, op, rsp); + + lro_mgr->lro_merged++; + + return 0; + +out: + return -1; +} + +static int +cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp, + const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr, + struct napi_struct *napi) +{ + struct cxgbit_device *cdev = hndl; + struct cxgb4_lld_info *lldi = &cdev->lldi; + struct cpl_tx_data *rpl = NULL; + struct cxgbit_sock *csk = NULL; + unsigned int tid = 0; + struct sk_buff *skb; + unsigned int op = *(u8 *)rsp; + bool lro_flush = true; + + switch (op) { + case CPL_ISCSI_HDR: + case CPL_ISCSI_DATA: + case CPL_RX_ISCSI_DDP: + case CPL_FW4_ACK: + lro_flush = false; + case CPL_ABORT_RPL_RSS: + case CPL_PASS_ESTABLISH: + case CPL_PEER_CLOSE: + case CPL_CLOSE_CON_RPL: + case CPL_ABORT_REQ_RSS: + case CPL_SET_TCB_RPL: + case CPL_RX_DATA: + rpl = gl ? (struct cpl_tx_data *)gl->va : + (struct cpl_tx_data *)(rsp + 1); + tid = GET_TID(rpl); + csk = lookup_tid(lldi->tids, tid); + break; + default: + break; + } + + if (csk && csk->lro_skb && lro_flush) + cxgbit_lro_flush(lro_mgr, csk->lro_skb); + + if (!gl) { + unsigned int len; + + if (op == CPL_RX_ISCSI_DDP) { + if (!cxgbit_lro_receive(csk, op, rsp, NULL, lro_mgr, + napi)) + return 0; + } + + len = 64 - sizeof(struct rsp_ctrl) - 8; + skb = napi_alloc_skb(napi, len); + if (!skb) + goto nomem; + __skb_put(skb, len); + skb_copy_to_linear_data(skb, &rsp[1], len); + } else { + if (unlikely(op != *(u8 *)gl->va)) { + pr_info("? 
FL 0x%p,RSS%#llx,FL %#llx,len %u.\n", + gl->va, be64_to_cpu(*rsp), + be64_to_cpu(*(u64 *)gl->va), + gl->tot_len); + return 0; + } + + if (op == CPL_ISCSI_HDR || op == CPL_ISCSI_DATA) { + if (!cxgbit_lro_receive(csk, op, rsp, gl, lro_mgr, + napi)) + return 0; + } + +#define RX_PULL_LEN 128 + skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN); + if (unlikely(!skb)) + goto nomem; + } + + rpl = (struct cpl_tx_data *)skb->data; + op = rpl->ot.opcode; + cxgbit_skcb_rx_opcode(skb) = op; + + pr_debug("cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n", + cdev, op, rpl->ot.opcode_tid, + ntohl(rpl->ot.opcode_tid), skb); + + if (op < NUM_CPL_CMDS && cxgbit_cplhandlers[op]) { + cxgbit_cplhandlers[op](cdev, skb); + } else { + pr_err("No handler for opcode 0x%x.\n", op); + __kfree_skb(skb); + } + return 0; +nomem: + pr_err("%s OOM bailing out.\n", __func__); + return 1; +} + +#ifdef CONFIG_CHELSIO_T4_DCB +struct cxgbit_dcb_work { + struct dcb_app_type dcb_app; + struct work_struct work; +}; + +static void +cxgbit_update_dcb_priority(struct cxgbit_device *cdev, u8 port_id, + u8 dcb_priority, u16 port_num) +{ + struct cxgbit_sock *csk; + struct sk_buff *skb; + u16 local_port; + bool wakeup_thread = false; + + spin_lock_bh(&cdev->cskq.lock); + list_for_each_entry(csk, &cdev->cskq.list, list) { + if (csk->port_id != port_id) + continue; + + if (csk->com.local_addr.ss_family == AF_INET6) { + struct sockaddr_in6 *sock_in6; + + sock_in6 = (struct sockaddr_in6 *)&csk->com.local_addr; + local_port = ntohs(sock_in6->sin6_port); + } else { + struct sockaddr_in *sock_in; + + sock_in = (struct sockaddr_in *)&csk->com.local_addr; + local_port = ntohs(sock_in->sin_port); + } + + if (local_port != port_num) + continue; + + if (csk->dcb_priority == dcb_priority) + continue; + + skb = alloc_skb(0, GFP_ATOMIC); + if (!skb) + continue; + + spin_lock(&csk->rxq.lock); + __skb_queue_tail(&csk->rxq, skb); + if (skb_queue_len(&csk->rxq) == 1) + wakeup_thread = true; + spin_unlock(&csk->rxq.lock); + + if (wakeup_thread) { + wake_up(&csk->waitq); + wakeup_thread = false; + } + } + spin_unlock_bh(&cdev->cskq.lock); +} + +static void cxgbit_dcb_workfn(struct work_struct *work) +{ + struct cxgbit_dcb_work *dcb_work; + struct net_device *ndev; + struct cxgbit_device *cdev = NULL; + struct dcb_app_type *iscsi_app; + u8 priority, port_id = 0xff; + + dcb_work = container_of(work, struct cxgbit_dcb_work, work); + iscsi_app = &dcb_work->dcb_app; + + if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) { + if (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY) + goto out; + + priority = iscsi_app->app.priority; + + } else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) { + if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM) + goto out; + + if (!iscsi_app->app.priority) + goto out; + + priority = ffs(iscsi_app->app.priority) - 1; + } else { + goto out; + } + + pr_debug("priority for ifid %d is %u\n", + iscsi_app->ifindex, priority); + + ndev = dev_get_by_index(&init_net, iscsi_app->ifindex); + + if (!ndev) + goto out; + + mutex_lock(&cdev_list_lock); + cdev = cxgbit_find_device(ndev, &port_id); + + dev_put(ndev); + + if (!cdev) { + mutex_unlock(&cdev_list_lock); + goto out; + } + + cxgbit_update_dcb_priority(cdev, port_id, priority, + iscsi_app->app.protocol); + mutex_unlock(&cdev_list_lock); +out: + kfree(dcb_work); +} + +static int +cxgbit_dcbevent_notify(struct notifier_block *nb, unsigned long action, + void *data) +{ + struct cxgbit_dcb_work *dcb_work; + struct dcb_app_type *dcb_app = data; + + dcb_work = kzalloc(sizeof(*dcb_work), GFP_ATOMIC); 
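+ /*
+ * The dcbevent notifier may be invoked in atomic context
+ * (hence GFP_ATOMIC above); the priority update itself takes
+ * mutexes, so it is deferred to a workqueue.
+ */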
+ if (!dcb_work) + return NOTIFY_DONE; + + dcb_work->dcb_app = *dcb_app; + INIT_WORK(&dcb_work->work, cxgbit_dcb_workfn); + schedule_work(&dcb_work->work); + return NOTIFY_OK; +} +#endif + +static enum target_prot_op cxgbit_get_sup_prot_ops(struct iscsi_conn *conn) +{ + return TARGET_PROT_NORMAL; +} + +static struct iscsit_transport cxgbit_transport = { + .name = DRV_NAME, + .transport_type = ISCSI_CXGBIT, + .rdma_shutdown = false, + .priv_size = sizeof(struct cxgbit_cmd), + .owner = THIS_MODULE, + .iscsit_setup_np = cxgbit_setup_np, + .iscsit_accept_np = cxgbit_accept_np, + .iscsit_free_np = cxgbit_free_np, + .iscsit_free_conn = cxgbit_free_conn, + .iscsit_get_login_rx = cxgbit_get_login_rx, + .iscsit_put_login_tx = cxgbit_put_login_tx, + .iscsit_immediate_queue = iscsit_immediate_queue, + .iscsit_response_queue = iscsit_response_queue, + .iscsit_get_dataout = iscsit_build_r2ts_for_cmd, + .iscsit_queue_data_in = iscsit_queue_rsp, + .iscsit_queue_status = iscsit_queue_rsp, + .iscsit_xmit_pdu = cxgbit_xmit_pdu, + .iscsit_get_r2t_ttt = cxgbit_get_r2t_ttt, + .iscsit_get_rx_pdu = cxgbit_get_rx_pdu, + .iscsit_validate_params = cxgbit_validate_params, + .iscsit_release_cmd = cxgbit_release_cmd, + .iscsit_aborted_task = iscsit_aborted_task, + .iscsit_get_sup_prot_ops = cxgbit_get_sup_prot_ops, +}; + +static struct cxgb4_uld_info cxgbit_uld_info = { + .name = DRV_NAME, + .add = cxgbit_uld_add, + .state_change = cxgbit_uld_state_change, + .lro_rx_handler = cxgbit_uld_lro_rx_handler, + .lro_flush = cxgbit_uld_lro_flush, +}; + +#ifdef CONFIG_CHELSIO_T4_DCB +static struct notifier_block cxgbit_dcbevent_nb = { + .notifier_call = cxgbit_dcbevent_notify, +}; +#endif + +static int __init cxgbit_init(void) +{ + cxgb4_register_uld(CXGB4_ULD_ISCSIT, &cxgbit_uld_info); + iscsit_register_transport(&cxgbit_transport); + +#ifdef CONFIG_CHELSIO_T4_DCB + pr_info("%s dcb enabled.\n", DRV_NAME); + register_dcbevent_notifier(&cxgbit_dcbevent_nb); +#endif + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) < + sizeof(union cxgbit_skb_cb)); + return 0; +} + +static void __exit cxgbit_exit(void) +{ + struct cxgbit_device *cdev, *tmp; + +#ifdef CONFIG_CHELSIO_T4_DCB + unregister_dcbevent_notifier(&cxgbit_dcbevent_nb); +#endif + mutex_lock(&cdev_list_lock); + list_for_each_entry_safe(cdev, tmp, &cdev_list_head, list) { + list_del(&cdev->list); + cxgbit_put_cdev(cdev); + } + mutex_unlock(&cdev_list_lock); + iscsit_unregister_transport(&cxgbit_transport); + cxgb4_unregister_uld(CXGB4_ULD_ISCSIT); +} + +module_init(cxgbit_init); +module_exit(cxgbit_exit); + +MODULE_DESCRIPTION("Chelsio iSCSI target offload driver"); +MODULE_AUTHOR("Chelsio Communications"); +MODULE_VERSION(DRV_VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c new file mode 100644 index 000000000000..d02bf58aea6d --- /dev/null +++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c @@ -0,0 +1,1561 @@ +/* + * Copyright (c) 2016 Chelsio Communications, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/workqueue.h> +#include <linux/kthread.h> +#include <asm/unaligned.h> +#include <target/target_core_base.h> +#include <target/target_core_fabric.h> +#include "cxgbit.h" + +struct sge_opaque_hdr { + void *dev; + dma_addr_t addr[MAX_SKB_FRAGS + 1]; +}; + +static const u8 cxgbit_digest_len[] = {0, 4, 4, 8}; + +#define TX_HDR_LEN (sizeof(struct sge_opaque_hdr) + \ + sizeof(struct fw_ofld_tx_data_wr)) + +static struct sk_buff * +__cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len, bool iso) +{ + struct sk_buff *skb = NULL; + u8 submode = 0; + int errcode; + static const u32 hdr_len = TX_HDR_LEN + ISCSI_HDR_LEN; + + if (len) { + skb = alloc_skb_with_frags(hdr_len, len, + 0, &errcode, + GFP_KERNEL); + if (!skb) + return NULL; + + skb_reserve(skb, TX_HDR_LEN); + skb_reset_transport_header(skb); + __skb_put(skb, ISCSI_HDR_LEN); + skb->data_len = len; + skb->len += len; + submode |= (csk->submode & CXGBIT_SUBMODE_DCRC); + + } else { + u32 iso_len = iso ? sizeof(struct cpl_tx_data_iso) : 0; + + skb = alloc_skb(hdr_len + iso_len, GFP_KERNEL); + if (!skb) + return NULL; + + skb_reserve(skb, TX_HDR_LEN + iso_len); + skb_reset_transport_header(skb); + __skb_put(skb, ISCSI_HDR_LEN); + } + + submode |= (csk->submode & CXGBIT_SUBMODE_HCRC); + cxgbit_skcb_submode(skb) = submode; + cxgbit_skcb_tx_extralen(skb) = cxgbit_digest_len[submode]; + cxgbit_skcb_flags(skb) |= SKCBF_TX_NEED_HDR; + return skb; +} + +static struct sk_buff *cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len) +{ + return __cxgbit_alloc_skb(csk, len, false); +} + +/* + * cxgbit_is_ofld_imm - check whether a packet can be sent as immediate data + * @skb: the packet + * + * Returns true if a packet can be sent as an offload WR with immediate + * data. We currently use the same limit as for Ethernet packets. + */ +static int cxgbit_is_ofld_imm(const struct sk_buff *skb) +{ + int length = skb->len; + + if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR)) + length += sizeof(struct fw_ofld_tx_data_wr); + + if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_ISO)) + length += sizeof(struct cpl_tx_data_iso); + +#define MAX_IMM_TX_PKT_LEN 256 + return length <= MAX_IMM_TX_PKT_LEN; +} + +/* + * cxgbit_sgl_len - calculates the size of an SGL of the given capacity + * @n: the number of SGL entries + * Calculates the number of flits needed for a scatter/gather list that + * can hold the given number of entries. + */ +static inline unsigned int cxgbit_sgl_len(unsigned int n) +{ + n--; + return (3 * n) / 2 + (n & 1) + 2; +} + +/* + * cxgbit_calc_tx_flits_ofld - calculate # of flits for an offload packet + * @skb: the packet + * + * Returns the number of flits needed for the given offload packet. + * These packets are already fully constructed and no additional headers + * will be added. 
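+ *
+ * Worked example, assuming the usual ulptx DSGL layout (first
+ * address/length pair in 2 flits, each further pair of entries
+ * in 3 flits): an skb with an 8-byte WR header and three gather
+ * entries needs 1 + cxgbit_sgl_len(3) = 1 + 5 = 6 flits.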
+ */ +static unsigned int cxgbit_calc_tx_flits_ofld(const struct sk_buff *skb) +{ + unsigned int flits, cnt; + + if (cxgbit_is_ofld_imm(skb)) + return DIV_ROUND_UP(skb->len, 8); + flits = skb_transport_offset(skb) / 8; + cnt = skb_shinfo(skb)->nr_frags; + if (skb_tail_pointer(skb) != skb_transport_header(skb)) + cnt++; + return flits + cxgbit_sgl_len(cnt); +} + +#define CXGBIT_ISO_FSLICE 0x1 +#define CXGBIT_ISO_LSLICE 0x2 +static void +cxgbit_cpl_tx_data_iso(struct sk_buff *skb, struct cxgbit_iso_info *iso_info) +{ + struct cpl_tx_data_iso *cpl; + unsigned int submode = cxgbit_skcb_submode(skb); + unsigned int fslice = !!(iso_info->flags & CXGBIT_ISO_FSLICE); + unsigned int lslice = !!(iso_info->flags & CXGBIT_ISO_LSLICE); + + cpl = (struct cpl_tx_data_iso *)__skb_push(skb, sizeof(*cpl)); + + cpl->op_to_scsi = htonl(CPL_TX_DATA_ISO_OP_V(CPL_TX_DATA_ISO) | + CPL_TX_DATA_ISO_FIRST_V(fslice) | + CPL_TX_DATA_ISO_LAST_V(lslice) | + CPL_TX_DATA_ISO_CPLHDRLEN_V(0) | + CPL_TX_DATA_ISO_HDRCRC_V(submode & 1) | + CPL_TX_DATA_ISO_PLDCRC_V(((submode >> 1) & 1)) | + CPL_TX_DATA_ISO_IMMEDIATE_V(0) | + CPL_TX_DATA_ISO_SCSI_V(2)); + + cpl->ahs_len = 0; + cpl->mpdu = htons(DIV_ROUND_UP(iso_info->mpdu, 4)); + cpl->burst_size = htonl(DIV_ROUND_UP(iso_info->burst_len, 4)); + cpl->len = htonl(iso_info->len); + cpl->reserved2_seglen_offset = htonl(0); + cpl->datasn_offset = htonl(0); + cpl->buffer_offset = htonl(0); + cpl->reserved3 = 0; + + __skb_pull(skb, sizeof(*cpl)); +} + +static void +cxgbit_tx_data_wr(struct cxgbit_sock *csk, struct sk_buff *skb, u32 dlen, + u32 len, u32 credits, u32 compl) +{ + struct fw_ofld_tx_data_wr *req; + u32 submode = cxgbit_skcb_submode(skb); + u32 wr_ulp_mode = 0; + u32 hdr_size = sizeof(*req); + u32 opcode = FW_OFLD_TX_DATA_WR; + u32 immlen = 0; + u32 force = TX_FORCE_V(!submode); + + if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO) { + opcode = FW_ISCSI_TX_DATA_WR; + immlen += sizeof(struct cpl_tx_data_iso); + hdr_size += sizeof(struct cpl_tx_data_iso); + submode |= 8; + } + + if (cxgbit_is_ofld_imm(skb)) + immlen += dlen; + + req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, + hdr_size); + req->op_to_immdlen = cpu_to_be32(FW_WR_OP_V(opcode) | + FW_WR_COMPL_V(compl) | + FW_WR_IMMDLEN_V(immlen)); + req->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(csk->tid) | + FW_WR_LEN16_V(credits)); + req->plen = htonl(len); + wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP_MODE_ISCSI) | + FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode); + + req->tunnel_to_proxy = htonl((wr_ulp_mode) | force | + FW_OFLD_TX_DATA_WR_SHOVE_V(skb_peek(&csk->txq) ? 
0 : 1)); +} + +static void cxgbit_arp_failure_skb_discard(void *handle, struct sk_buff *skb) +{ + kfree_skb(skb); +} + +void cxgbit_push_tx_frames(struct cxgbit_sock *csk) +{ + struct sk_buff *skb; + + while (csk->wr_cred && ((skb = skb_peek(&csk->txq)) != NULL)) { + u32 dlen = skb->len; + u32 len = skb->len; + u32 credits_needed; + u32 compl = 0; + u32 flowclen16 = 0; + u32 iso_cpl_len = 0; + + if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO) + iso_cpl_len = sizeof(struct cpl_tx_data_iso); + + if (cxgbit_is_ofld_imm(skb)) + credits_needed = DIV_ROUND_UP(dlen + iso_cpl_len, 16); + else + credits_needed = DIV_ROUND_UP((8 * + cxgbit_calc_tx_flits_ofld(skb)) + + iso_cpl_len, 16); + + if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR)) + credits_needed += DIV_ROUND_UP( + sizeof(struct fw_ofld_tx_data_wr), 16); + /* + * Assumes the initial credits is large enough to support + * fw_flowc_wr plus largest possible first payload + */ + + if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags)) { + flowclen16 = cxgbit_send_tx_flowc_wr(csk); + csk->wr_cred -= flowclen16; + csk->wr_una_cred += flowclen16; + } + + if (csk->wr_cred < credits_needed) { + pr_debug("csk 0x%p, skb %u/%u, wr %d < %u.\n", + csk, skb->len, skb->data_len, + credits_needed, csk->wr_cred); + break; + } + __skb_unlink(skb, &csk->txq); + set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx); + skb->csum = credits_needed + flowclen16; + csk->wr_cred -= credits_needed; + csk->wr_una_cred += credits_needed; + + pr_debug("csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n", + csk, skb->len, skb->data_len, credits_needed, + csk->wr_cred, csk->wr_una_cred); + + if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR)) { + len += cxgbit_skcb_tx_extralen(skb); + + if ((csk->wr_una_cred >= (csk->wr_max_cred / 2)) || + (!before(csk->write_seq, + csk->snd_una + csk->snd_win))) { + compl = 1; + csk->wr_una_cred = 0; + } + + cxgbit_tx_data_wr(csk, skb, dlen, len, credits_needed, + compl); + csk->snd_nxt += len; + + } else if ((cxgbit_skcb_flags(skb) & SKCBF_TX_FLAG_COMPL) || + (csk->wr_una_cred >= (csk->wr_max_cred / 2))) { + struct cpl_close_con_req *req = + (struct cpl_close_con_req *)skb->data; + req->wr.wr_hi |= htonl(FW_WR_COMPL_F); + csk->wr_una_cred = 0; + } + + cxgbit_sock_enqueue_wr(csk, skb); + t4_set_arp_err_handler(skb, csk, + cxgbit_arp_failure_skb_discard); + + pr_debug("csk 0x%p,%u, skb 0x%p, %u.\n", + csk, csk->tid, skb, len); + + cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t); + } +} + +static bool cxgbit_lock_sock(struct cxgbit_sock *csk) +{ + spin_lock_bh(&csk->lock); + + if (before(csk->write_seq, csk->snd_una + csk->snd_win)) + csk->lock_owner = true; + + spin_unlock_bh(&csk->lock); + + return csk->lock_owner; +} + +static void cxgbit_unlock_sock(struct cxgbit_sock *csk) +{ + struct sk_buff_head backlogq; + struct sk_buff *skb; + void (*fn)(struct cxgbit_sock *, struct sk_buff *); + + skb_queue_head_init(&backlogq); + + spin_lock_bh(&csk->lock); + while (skb_queue_len(&csk->backlogq)) { + skb_queue_splice_init(&csk->backlogq, &backlogq); + spin_unlock_bh(&csk->lock); + + while ((skb = __skb_dequeue(&backlogq))) { + fn = cxgbit_skcb_rx_backlog_fn(skb); + fn(csk, skb); + } + + spin_lock_bh(&csk->lock); + } + + csk->lock_owner = false; + spin_unlock_bh(&csk->lock); +} + +static int cxgbit_queue_skb(struct cxgbit_sock *csk, struct sk_buff *skb) +{ + int ret = 0; + + wait_event_interruptible(csk->ack_waitq, cxgbit_lock_sock(csk)); + + if (unlikely((csk->com.state != CSK_STATE_ESTABLISHED) || + signal_pending(current))) { + 
__kfree_skb(skb); + __skb_queue_purge(&csk->ppodq); + ret = -1; + spin_lock_bh(&csk->lock); + if (csk->lock_owner) { + spin_unlock_bh(&csk->lock); + goto unlock; + } + spin_unlock_bh(&csk->lock); + return ret; + } + + csk->write_seq += skb->len + + cxgbit_skcb_tx_extralen(skb); + + skb_queue_splice_tail_init(&csk->ppodq, &csk->txq); + __skb_queue_tail(&csk->txq, skb); + cxgbit_push_tx_frames(csk); + +unlock: + cxgbit_unlock_sock(csk); + return ret; +} + +static int +cxgbit_map_skb(struct iscsi_cmd *cmd, struct sk_buff *skb, u32 data_offset, + u32 data_length) +{ + u32 i = 0, nr_frags = MAX_SKB_FRAGS; + u32 padding = ((-data_length) & 3); + struct scatterlist *sg; + struct page *page; + unsigned int page_off; + + if (padding) + nr_frags--; + + /* + * We know each entry in t_data_sg contains a page. + */ + sg = &cmd->se_cmd.t_data_sg[data_offset / PAGE_SIZE]; + page_off = (data_offset % PAGE_SIZE); + + while (data_length && (i < nr_frags)) { + u32 cur_len = min_t(u32, data_length, sg->length - page_off); + + page = sg_page(sg); + + get_page(page); + skb_fill_page_desc(skb, i, page, sg->offset + page_off, + cur_len); + skb->data_len += cur_len; + skb->len += cur_len; + skb->truesize += cur_len; + + data_length -= cur_len; + page_off = 0; + sg = sg_next(sg); + i++; + } + + if (data_length) + return -1; + + if (padding) { + page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!page) + return -1; + skb_fill_page_desc(skb, i, page, 0, padding); + skb->data_len += padding; + skb->len += padding; + skb->truesize += padding; + } + + return 0; +} + +static int +cxgbit_tx_datain_iso(struct cxgbit_sock *csk, struct iscsi_cmd *cmd, + struct iscsi_datain_req *dr) +{ + struct iscsi_conn *conn = csk->conn; + struct sk_buff *skb; + struct iscsi_datain datain; + struct cxgbit_iso_info iso_info; + u32 data_length = cmd->se_cmd.data_length; + u32 mrdsl = conn->conn_ops->MaxRecvDataSegmentLength; + u32 num_pdu, plen, tx_data = 0; + bool task_sense = !!(cmd->se_cmd.se_cmd_flags & + SCF_TRANSPORT_TASK_SENSE); + bool set_statsn = false; + int ret = -1; + + while (data_length) { + num_pdu = (data_length + mrdsl - 1) / mrdsl; + if (num_pdu > csk->max_iso_npdu) + num_pdu = csk->max_iso_npdu; + + plen = num_pdu * mrdsl; + if (plen > data_length) + plen = data_length; + + skb = __cxgbit_alloc_skb(csk, 0, true); + if (unlikely(!skb)) + return -ENOMEM; + + memset(skb->data, 0, ISCSI_HDR_LEN); + cxgbit_skcb_flags(skb) |= SKCBF_TX_ISO; + cxgbit_skcb_submode(skb) |= (csk->submode & + CXGBIT_SUBMODE_DCRC); + cxgbit_skcb_tx_extralen(skb) = (num_pdu * + cxgbit_digest_len[cxgbit_skcb_submode(skb)]) + + ((num_pdu - 1) * ISCSI_HDR_LEN); + + memset(&datain, 0, sizeof(struct iscsi_datain)); + memset(&iso_info, 0, sizeof(iso_info)); + + if (!tx_data) + iso_info.flags |= CXGBIT_ISO_FSLICE; + + if (!(data_length - plen)) { + iso_info.flags |= CXGBIT_ISO_LSLICE; + if (!task_sense) { + datain.flags = ISCSI_FLAG_DATA_STATUS; + iscsit_increment_maxcmdsn(cmd, conn->sess); + cmd->stat_sn = conn->stat_sn++; + set_statsn = true; + } + } + + iso_info.burst_len = num_pdu * mrdsl; + iso_info.mpdu = mrdsl; + iso_info.len = ISCSI_HDR_LEN + plen; + + cxgbit_cpl_tx_data_iso(skb, &iso_info); + + datain.offset = tx_data; + datain.data_sn = cmd->data_sn - 1; + + iscsit_build_datain_pdu(cmd, conn, &datain, + (struct iscsi_data_rsp *)skb->data, + set_statsn); + + ret = cxgbit_map_skb(cmd, skb, tx_data, plen); + if (unlikely(ret)) { + __kfree_skb(skb); + goto out; + } + + ret = cxgbit_queue_skb(csk, skb); + if (unlikely(ret)) + goto out; + + tx_data += plen; 
+ data_length -= plen; + + cmd->read_data_done += plen; + cmd->data_sn += num_pdu; + } + + dr->dr_complete = DATAIN_COMPLETE_NORMAL; + + return 0; + +out: + return ret; +} + +static int +cxgbit_tx_datain(struct cxgbit_sock *csk, struct iscsi_cmd *cmd, + const struct iscsi_datain *datain) +{ + struct sk_buff *skb; + int ret = 0; + + skb = cxgbit_alloc_skb(csk, 0); + if (unlikely(!skb)) + return -ENOMEM; + + memcpy(skb->data, cmd->pdu, ISCSI_HDR_LEN); + + if (datain->length) { + cxgbit_skcb_submode(skb) |= (csk->submode & + CXGBIT_SUBMODE_DCRC); + cxgbit_skcb_tx_extralen(skb) = + cxgbit_digest_len[cxgbit_skcb_submode(skb)]; + } + + ret = cxgbit_map_skb(cmd, skb, datain->offset, datain->length); + if (ret < 0) { + __kfree_skb(skb); + return ret; + } + + return cxgbit_queue_skb(csk, skb); +} + +static int +cxgbit_xmit_datain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd, + struct iscsi_datain_req *dr, + const struct iscsi_datain *datain) +{ + struct cxgbit_sock *csk = conn->context; + u32 data_length = cmd->se_cmd.data_length; + u32 padding = ((-data_length) & 3); + u32 mrdsl = conn->conn_ops->MaxRecvDataSegmentLength; + + if ((data_length > mrdsl) && (!dr->recovery) && + (!padding) && (!datain->offset) && csk->max_iso_npdu) { + atomic_long_add(data_length - datain->length, + &conn->sess->tx_data_octets); + return cxgbit_tx_datain_iso(csk, cmd, dr); + } + + return cxgbit_tx_datain(csk, cmd, datain); +} + +static int +cxgbit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd, + const void *data_buf, u32 data_buf_len) +{ + struct cxgbit_sock *csk = conn->context; + struct sk_buff *skb; + u32 padding = ((-data_buf_len) & 3); + + skb = cxgbit_alloc_skb(csk, data_buf_len + padding); + if (unlikely(!skb)) + return -ENOMEM; + + memcpy(skb->data, cmd->pdu, ISCSI_HDR_LEN); + + if (data_buf_len) { + u32 pad_bytes = 0; + + skb_store_bits(skb, ISCSI_HDR_LEN, data_buf, data_buf_len); + + if (padding) + skb_store_bits(skb, ISCSI_HDR_LEN + data_buf_len, + &pad_bytes, padding); + } + + cxgbit_skcb_tx_extralen(skb) = cxgbit_digest_len[ + cxgbit_skcb_submode(skb)]; + + return cxgbit_queue_skb(csk, skb); +} + +int +cxgbit_xmit_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd, + struct iscsi_datain_req *dr, const void *buf, u32 buf_len) +{ + if (dr) + return cxgbit_xmit_datain_pdu(conn, cmd, dr, buf); + else + return cxgbit_xmit_nondatain_pdu(conn, cmd, buf, buf_len); +} + +int cxgbit_validate_params(struct iscsi_conn *conn) +{ + struct cxgbit_sock *csk = conn->context; + struct cxgbit_device *cdev = csk->com.cdev; + struct iscsi_param *param; + u32 max_xmitdsl; + + param = iscsi_find_param_from_key(MAXXMITDATASEGMENTLENGTH, + conn->param_list); + if (!param) + return -1; + + if (kstrtou32(param->value, 0, &max_xmitdsl) < 0) + return -1; + + if (max_xmitdsl > cdev->mdsl) { + if (iscsi_change_param_sprintf( + conn, "MaxXmitDataSegmentLength=%u", cdev->mdsl)) + return -1; + } + + return 0; +} + +static int cxgbit_set_digest(struct cxgbit_sock *csk) +{ + struct iscsi_conn *conn = csk->conn; + struct iscsi_param *param; + + param = iscsi_find_param_from_key(HEADERDIGEST, conn->param_list); + if (!param) { + pr_err("param not found key %s\n", HEADERDIGEST); + return -1; + } + + if (!strcmp(param->value, CRC32C)) + csk->submode |= CXGBIT_SUBMODE_HCRC; + + param = iscsi_find_param_from_key(DATADIGEST, conn->param_list); + if (!param) { + csk->submode = 0; + pr_err("param not found key %s\n", DATADIGEST); + return -1; + } + + if (!strcmp(param->value, CRC32C)) + csk->submode |= CXGBIT_SUBMODE_DCRC; 
+ + if (cxgbit_setup_conn_digest(csk)) { + csk->submode = 0; + return -1; + } + + return 0; +} + +static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk) +{ + struct iscsi_conn *conn = csk->conn; + struct iscsi_conn_ops *conn_ops = conn->conn_ops; + struct iscsi_param *param; + u32 mrdsl, mbl; + u32 max_npdu, max_iso_npdu; + + if (conn->login->leading_connection) { + param = iscsi_find_param_from_key(DATASEQUENCEINORDER, + conn->param_list); + if (!param) { + pr_err("param not found key %s\n", DATASEQUENCEINORDER); + return -1; + } + + if (strcmp(param->value, YES)) + return 0; + + param = iscsi_find_param_from_key(DATAPDUINORDER, + conn->param_list); + if (!param) { + pr_err("param not found key %s\n", DATAPDUINORDER); + return -1; + } + + if (strcmp(param->value, YES)) + return 0; + + param = iscsi_find_param_from_key(MAXBURSTLENGTH, + conn->param_list); + if (!param) { + pr_err("param not found key %s\n", MAXBURSTLENGTH); + return -1; + } + + if (kstrtou32(param->value, 0, &mbl) < 0) + return -1; + } else { + if (!conn->sess->sess_ops->DataSequenceInOrder) + return 0; + if (!conn->sess->sess_ops->DataPDUInOrder) + return 0; + + mbl = conn->sess->sess_ops->MaxBurstLength; + } + + mrdsl = conn_ops->MaxRecvDataSegmentLength; + max_npdu = mbl / mrdsl; + + max_iso_npdu = CXGBIT_MAX_ISO_PAYLOAD / + (ISCSI_HDR_LEN + mrdsl + + cxgbit_digest_len[csk->submode]); + + csk->max_iso_npdu = min(max_npdu, max_iso_npdu); + + if (csk->max_iso_npdu <= 1) + csk->max_iso_npdu = 0; + + return 0; +} + +static int cxgbit_set_params(struct iscsi_conn *conn) +{ + struct cxgbit_sock *csk = conn->context; + struct cxgbit_device *cdev = csk->com.cdev; + struct cxgbi_ppm *ppm = *csk->com.cdev->lldi.iscsi_ppm; + struct iscsi_conn_ops *conn_ops = conn->conn_ops; + struct iscsi_param *param; + u8 erl; + + if (conn_ops->MaxRecvDataSegmentLength > cdev->mdsl) + conn_ops->MaxRecvDataSegmentLength = cdev->mdsl; + + if (conn->login->leading_connection) { + param = iscsi_find_param_from_key(ERRORRECOVERYLEVEL, + conn->param_list); + if (!param) { + pr_err("param not found key %s\n", ERRORRECOVERYLEVEL); + return -1; + } + if (kstrtou8(param->value, 0, &erl) < 0) + return -1; + } else { + erl = conn->sess->sess_ops->ErrorRecoveryLevel; + } + + if (!erl) { + if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) { + if (cxgbit_set_iso_npdu(csk)) + return -1; + } + + if (test_bit(CDEV_DDP_ENABLE, &cdev->flags)) { + if (cxgbit_setup_conn_pgidx(csk, + ppm->tformat.pgsz_idx_dflt)) + return -1; + set_bit(CSK_DDP_ENABLE, &csk->com.flags); + } + } + + if (cxgbit_set_digest(csk)) + return -1; + + return 0; +} + +int +cxgbit_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, + u32 length) +{ + struct cxgbit_sock *csk = conn->context; + struct sk_buff *skb; + u32 padding_buf = 0; + u8 padding = ((-length) & 3); + + skb = cxgbit_alloc_skb(csk, length + padding); + if (!skb) + return -ENOMEM; + skb_store_bits(skb, 0, login->rsp, ISCSI_HDR_LEN); + skb_store_bits(skb, ISCSI_HDR_LEN, login->rsp_buf, length); + + if (padding) + skb_store_bits(skb, ISCSI_HDR_LEN + length, + &padding_buf, padding); + + if (login->login_complete) { + if (cxgbit_set_params(conn)) { + kfree_skb(skb); + return -1; + } + + set_bit(CSK_LOGIN_DONE, &csk->com.flags); + } + + if (cxgbit_queue_skb(csk, skb)) + return -1; + + if ((!login->login_complete) && (!login->login_failed)) + schedule_delayed_work(&conn->login_work, 0); + + return 0; +} + +static void +cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg, + unsigned int nents) +{ + struct 
skb_seq_state st; + const u8 *buf; + unsigned int consumed = 0, buf_len; + struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(skb); + + skb_prepare_seq_read(skb, pdu_cb->doffset, + pdu_cb->doffset + pdu_cb->dlen, + &st); + + while (true) { + buf_len = skb_seq_read(consumed, &buf, &st); + if (!buf_len) { + skb_abort_seq_read(&st); + break; + } + + consumed += sg_pcopy_from_buffer(sg, nents, (void *)buf, + buf_len, consumed); + } +} + +static struct iscsi_cmd *cxgbit_allocate_cmd(struct cxgbit_sock *csk) +{ + struct iscsi_conn *conn = csk->conn; + struct cxgbi_ppm *ppm = cdev2ppm(csk->com.cdev); + struct cxgbit_cmd *ccmd; + struct iscsi_cmd *cmd; + + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); + if (!cmd) { + pr_err("Unable to allocate iscsi_cmd + cxgbit_cmd\n"); + return NULL; + } + + ccmd = iscsit_priv_cmd(cmd); + ccmd->ttinfo.tag = ppm->tformat.no_ddp_mask; + ccmd->setup_ddp = true; + + return cmd; +} + +static int +cxgbit_handle_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr, + u32 length) +{ + struct iscsi_conn *conn = cmd->conn; + struct cxgbit_sock *csk = conn->context; + struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb); + + if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) { + pr_err("ImmediateData CRC32C DataDigest error\n"); + if (!conn->sess->sess_ops->ErrorRecoveryLevel) { + pr_err("Unable to recover from" + " Immediate Data digest failure while" + " in ERL=0.\n"); + iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR, + (unsigned char *)hdr); + return IMMEDIATE_DATA_CANNOT_RECOVER; + } + + iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR, + (unsigned char *)hdr); + return IMMEDIATE_DATA_ERL1_CRC_FAILURE; + } + + if (cmd->se_cmd.se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { + struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd); + struct skb_shared_info *ssi = skb_shinfo(csk->skb); + skb_frag_t *dfrag = &ssi->frags[pdu_cb->dfrag_idx]; + + sg_init_table(&ccmd->sg, 1); + sg_set_page(&ccmd->sg, dfrag->page.p, skb_frag_size(dfrag), + dfrag->page_offset); + get_page(dfrag->page.p); + + cmd->se_cmd.t_data_sg = &ccmd->sg; + cmd->se_cmd.t_data_nents = 1; + + ccmd->release = true; + } else { + struct scatterlist *sg = &cmd->se_cmd.t_data_sg[0]; + u32 sg_nents = max(1UL, DIV_ROUND_UP(pdu_cb->dlen, PAGE_SIZE)); + + cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents); + } + + cmd->write_data_done += pdu_cb->dlen; + + if (cmd->write_data_done == cmd->se_cmd.data_length) { + spin_lock_bh(&cmd->istate_lock); + cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; + cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; + spin_unlock_bh(&cmd->istate_lock); + } + + return IMMEDIATE_DATA_NORMAL_OPERATION; +} + +static int +cxgbit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr, + bool dump_payload) +{ + struct iscsi_conn *conn = cmd->conn; + int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION; + /* + * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes. + */ + if (dump_payload) + goto after_immediate_data; + + immed_ret = cxgbit_handle_immediate_data(cmd, hdr, + cmd->first_burst_len); +after_immediate_data: + if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) { + /* + * A PDU/CmdSN carrying Immediate Data passed + * DataCRC, check against ExpCmdSN/MaxCmdSN if + * Immediate Bit is not set. 
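+ * (iscsit_sequence_cmd() below performs that ExpCmdSN/MaxCmdSN
+ * window check; CMDSN_LOWER_THAN_EXP marks a retransmitted,
+ * already-received CmdSN.)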
+ */ + cmdsn_ret = iscsit_sequence_cmd(conn, cmd, + (unsigned char *)hdr, + hdr->cmdsn); + if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) + return -1; + + if (cmd->sense_reason || cmdsn_ret == CMDSN_LOWER_THAN_EXP) { + target_put_sess_cmd(&cmd->se_cmd); + return 0; + } else if (cmd->unsolicited_data) { + iscsit_set_unsoliticed_dataout(cmd); + } + + } else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) { + /* + * Immediate Data failed DataCRC and ERL>=1, + * silently drop this PDU and let the initiator + * plug the CmdSN gap. + * + * FIXME: Send Unsolicited NOPIN with reserved + * TTT here to help the initiator figure out + * the missing CmdSN, although they should be + * intelligent enough to determine the missing + * CmdSN and issue a retry to plug the sequence. + */ + cmd->i_state = ISTATE_REMOVE; + iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state); + } else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */ + return -1; + + return 0; +} + +static int +cxgbit_handle_scsi_cmd(struct cxgbit_sock *csk, struct iscsi_cmd *cmd) +{ + struct iscsi_conn *conn = csk->conn; + struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb); + struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)pdu_cb->hdr; + int rc; + bool dump_payload = false; + + rc = iscsit_setup_scsi_cmd(conn, cmd, (unsigned char *)hdr); + if (rc < 0) + return rc; + + if (pdu_cb->dlen && (pdu_cb->dlen == cmd->se_cmd.data_length) && + (pdu_cb->nr_dfrags == 1)) + cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; + + rc = iscsit_process_scsi_cmd(conn, cmd, hdr); + if (rc < 0) + return 0; + else if (rc > 0) + dump_payload = true; + + if (!pdu_cb->dlen) + return 0; + + return cxgbit_get_immediate_data(cmd, hdr, dump_payload); +} + +static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk) +{ + struct scatterlist *sg_start; + struct iscsi_conn *conn = csk->conn; + struct iscsi_cmd *cmd = NULL; + struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb); + struct iscsi_data *hdr = (struct iscsi_data *)pdu_cb->hdr; + u32 data_offset = be32_to_cpu(hdr->offset); + u32 data_len = pdu_cb->dlen; + int rc, sg_nents, sg_off; + bool dcrc_err = false; + + rc = iscsit_check_dataout_hdr(conn, (unsigned char *)hdr, &cmd); + if (rc < 0) + return rc; + else if (!cmd) + return 0; + + if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) { + pr_err("ITT: 0x%08x, Offset: %u, Length: %u," + " DataSN: 0x%08x\n", + hdr->itt, hdr->offset, data_len, + hdr->datasn); + + dcrc_err = true; + goto check_payload; + } + + pr_debug("DataOut data_len: %u, " + "write_data_done: %u, data_length: %u\n", + data_len, cmd->write_data_done, + cmd->se_cmd.data_length); + + if (!(pdu_cb->flags & PDUCBF_RX_DATA_DDPD)) { + sg_off = data_offset / PAGE_SIZE; + sg_start = &cmd->se_cmd.t_data_sg[sg_off]; + sg_nents = max(1UL, DIV_ROUND_UP(data_len, PAGE_SIZE)); + + cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents); + } + +check_payload: + + rc = iscsit_check_dataout_payload(cmd, hdr, dcrc_err); + if (rc < 0) + return rc; + + return 0; +} + +static int cxgbit_handle_nop_out(struct cxgbit_sock *csk, struct iscsi_cmd *cmd) +{ + struct iscsi_conn *conn = csk->conn; + struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb); + struct iscsi_nopout *hdr = (struct iscsi_nopout *)pdu_cb->hdr; + unsigned char *ping_data = NULL; + u32 payload_length = pdu_cb->dlen; + int ret; + + ret = iscsit_setup_nop_out(conn, cmd, hdr); + if (ret < 0) + return 0; + + if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) { + if (!conn->sess->sess_ops->ErrorRecoveryLevel) { + pr_err("Unable to 
recover from" + " NOPOUT Ping DataCRC failure while in" + " ERL=0.\n"); + ret = -1; + goto out; + } else { + /* + * drop this PDU and let the + * initiator plug the CmdSN gap. + */ + pr_info("Dropping NOPOUT" + " Command CmdSN: 0x%08x due to" + " DataCRC error.\n", hdr->cmdsn); + ret = 0; + goto out; + } + } + + /* + * Handle NOP-OUT payload for traditional iSCSI sockets + */ + if (payload_length && hdr->ttt == cpu_to_be32(0xFFFFFFFF)) { + ping_data = kzalloc(payload_length + 1, GFP_KERNEL); + if (!ping_data) { + pr_err("Unable to allocate memory for" + " NOPOUT ping data.\n"); + ret = -1; + goto out; + } + + skb_copy_bits(csk->skb, pdu_cb->doffset, + ping_data, payload_length); + + ping_data[payload_length] = '\0'; + /* + * Attach ping data to struct iscsi_cmd->buf_ptr. + */ + cmd->buf_ptr = ping_data; + cmd->buf_ptr_size = payload_length; + + pr_debug("Got %u bytes of NOPOUT ping" + " data.\n", payload_length); + pr_debug("Ping Data: \"%s\"\n", ping_data); + } + + return iscsit_process_nop_out(conn, cmd, hdr); +out: + if (cmd) + iscsit_free_cmd(cmd, false); + return ret; +} + +static int +cxgbit_handle_text_cmd(struct cxgbit_sock *csk, struct iscsi_cmd *cmd) +{ + struct iscsi_conn *conn = csk->conn; + struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb); + struct iscsi_text *hdr = (struct iscsi_text *)pdu_cb->hdr; + u32 payload_length = pdu_cb->dlen; + int rc; + unsigned char *text_in = NULL; + + rc = iscsit_setup_text_cmd(conn, cmd, hdr); + if (rc < 0) + return rc; + + if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) { + if (!conn->sess->sess_ops->ErrorRecoveryLevel) { + pr_err("Unable to recover from" + " Text Data digest failure while in" + " ERL=0.\n"); + goto reject; + } else { + /* + * drop this PDU and let the + * initiator plug the CmdSN gap. 
+ */
+ pr_info("Dropping Text"
+ " Command CmdSN: 0x%08x due to"
+ " DataCRC error.\n", hdr->cmdsn);
+ return 0;
+ }
+ }
+
+ if (payload_length) {
+ text_in = kzalloc(payload_length, GFP_KERNEL);
+ if (!text_in) {
+ pr_err("Unable to allocate text_in of payload_length: %u\n",
+ payload_length);
+ return -ENOMEM;
+ }
+ skb_copy_bits(csk->skb, pdu_cb->doffset,
+ text_in, payload_length);
+
+ text_in[payload_length - 1] = '\0';
+
+ cmd->text_in_ptr = text_in;
+ }
+
+ return iscsit_process_text_cmd(conn, cmd, hdr);
+
+reject:
+ return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
+ pdu_cb->hdr);
+}
+
+static int cxgbit_target_rx_opcode(struct cxgbit_sock *csk)
+{
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+ struct iscsi_hdr *hdr = (struct iscsi_hdr *)pdu_cb->hdr;
+ struct iscsi_conn *conn = csk->conn;
+ struct iscsi_cmd *cmd = NULL;
+ u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
+ int ret = -EINVAL;
+
+ switch (opcode) {
+ case ISCSI_OP_SCSI_CMD:
+ cmd = cxgbit_allocate_cmd(csk);
+ if (!cmd)
+ goto reject;
+
+ ret = cxgbit_handle_scsi_cmd(csk, cmd);
+ break;
+ case ISCSI_OP_SCSI_DATA_OUT:
+ ret = cxgbit_handle_iscsi_dataout(csk);
+ break;
+ case ISCSI_OP_NOOP_OUT:
+ if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
+ cmd = cxgbit_allocate_cmd(csk);
+ if (!cmd)
+ goto reject;
+ }
+
+ ret = cxgbit_handle_nop_out(csk, cmd);
+ break;
+ case ISCSI_OP_SCSI_TMFUNC:
+ cmd = cxgbit_allocate_cmd(csk);
+ if (!cmd)
+ goto reject;
+
+ ret = iscsit_handle_task_mgt_cmd(conn, cmd,
+ (unsigned char *)hdr);
+ break;
+ case ISCSI_OP_TEXT:
+ if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
+ cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
+ if (!cmd)
+ goto reject;
+ } else {
+ cmd = cxgbit_allocate_cmd(csk);
+ if (!cmd)
+ goto reject;
+ }
+
+ ret = cxgbit_handle_text_cmd(csk, cmd);
+ break;
+ case ISCSI_OP_LOGOUT:
+ cmd = cxgbit_allocate_cmd(csk);
+ if (!cmd)
+ goto reject;
+
+ ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
+ if (ret > 0)
+ wait_for_completion_timeout(&conn->conn_logout_comp,
+ SECONDS_FOR_LOGOUT_COMP
+ * HZ);
+ break;
+ case ISCSI_OP_SNACK:
+ ret = iscsit_handle_snack(conn, (unsigned char *)hdr);
+ break;
+ default:
+ pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
+ dump_stack();
+ break;
+ }
+
+ return ret;
+
+reject:
+ return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ (unsigned char *)hdr);
+}
+
+static int cxgbit_rx_opcode(struct cxgbit_sock *csk)
+{
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+ struct iscsi_conn *conn = csk->conn;
+ struct iscsi_hdr *hdr = pdu_cb->hdr;
+ u8 opcode;
+
+ if (pdu_cb->flags & PDUCBF_RX_HCRC_ERR) {
+ atomic_long_inc(&conn->sess->conn_digest_errors);
+ goto transport_err;
+ }
+
+ if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
+ goto transport_err;
+
+ opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+
+ if (conn->sess->sess_ops->SessionType &&
+ ((!(opcode & ISCSI_OP_TEXT)) ||
+ (!(opcode & ISCSI_OP_LOGOUT)))) {
+ pr_err("Received illegal iSCSI Opcode: 0x%02x"
+ " while in Discovery Session, rejecting.\n", opcode);
+ iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+ (unsigned char *)hdr);
+ goto transport_err;
+ }
+
+ if (cxgbit_target_rx_opcode(csk) < 0)
+ goto transport_err;
+
+ return 0;
+
+transport_err:
+ return -1;
+}
+
+static int cxgbit_rx_login_pdu(struct cxgbit_sock *csk)
+{
+ struct iscsi_conn *conn = csk->conn;
+ struct iscsi_login *login = conn->login;
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+ struct iscsi_login_req *login_req;
+
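+ /*
+ * Login PDUs arrive through the same LRO receive path as
+ * full-feature traffic, so the login request BHS is taken from
+ * the per-PDU control block rather than read off a socket.
+ */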
+ login_req = (struct iscsi_login_req *)login->req; + memcpy(login_req, pdu_cb->hdr, sizeof(*login_req)); + + pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x," + " CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n", + login_req->flags, login_req->itt, login_req->cmdsn, + login_req->exp_statsn, login_req->cid, pdu_cb->dlen); + /* + * Setup the initial iscsi_login values from the leading + * login request PDU. + */ + if (login->first_request) { + login_req = (struct iscsi_login_req *)login->req; + login->leading_connection = (!login_req->tsih) ? 1 : 0; + login->current_stage = ISCSI_LOGIN_CURRENT_STAGE( + login_req->flags); + login->version_min = login_req->min_version; + login->version_max = login_req->max_version; + memcpy(login->isid, login_req->isid, 6); + login->cmd_sn = be32_to_cpu(login_req->cmdsn); + login->init_task_tag = login_req->itt; + login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn); + login->cid = be16_to_cpu(login_req->cid); + login->tsih = be16_to_cpu(login_req->tsih); + } + + if (iscsi_target_check_login_request(conn, login) < 0) + return -1; + + memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS); + skb_copy_bits(csk->skb, pdu_cb->doffset, login->req_buf, pdu_cb->dlen); + + return 0; +} + +static int +cxgbit_process_iscsi_pdu(struct cxgbit_sock *csk, struct sk_buff *skb, int idx) +{ + struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, idx); + int ret; + + cxgbit_rx_pdu_cb(skb) = pdu_cb; + + csk->skb = skb; + + if (!test_bit(CSK_LOGIN_DONE, &csk->com.flags)) { + ret = cxgbit_rx_login_pdu(csk); + set_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags); + } else { + ret = cxgbit_rx_opcode(csk); + } + + return ret; +} + +static void cxgbit_lro_skb_dump(struct sk_buff *skb) +{ + struct skb_shared_info *ssi = skb_shinfo(skb); + struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb); + struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0); + u8 i; + + pr_info("skb 0x%p, head 0x%p, 0x%p, len %u,%u, frags %u.\n", + skb, skb->head, skb->data, skb->len, skb->data_len, + ssi->nr_frags); + pr_info("skb 0x%p, lro_cb, csk 0x%p, pdu %u, %u.\n", + skb, lro_cb->csk, lro_cb->pdu_idx, lro_cb->pdu_totallen); + + for (i = 0; i < lro_cb->pdu_idx; i++, pdu_cb++) + pr_info("skb 0x%p, pdu %d, %u, f 0x%x, seq 0x%x, dcrc 0x%x, " + "frags %u.\n", + skb, i, pdu_cb->pdulen, pdu_cb->flags, pdu_cb->seq, + pdu_cb->ddigest, pdu_cb->frags); + for (i = 0; i < ssi->nr_frags; i++) + pr_info("skb 0x%p, frag %d, off %u, sz %u.\n", + skb, i, ssi->frags[i].page_offset, ssi->frags[i].size); +} + +static void cxgbit_lro_hskb_reset(struct cxgbit_sock *csk) +{ + struct sk_buff *skb = csk->lro_hskb; + struct skb_shared_info *ssi = skb_shinfo(skb); + u8 i; + + memset(skb->data, 0, LRO_SKB_MIN_HEADROOM); + for (i = 0; i < ssi->nr_frags; i++) + put_page(skb_frag_page(&ssi->frags[i])); + ssi->nr_frags = 0; +} + +static void +cxgbit_lro_skb_merge(struct cxgbit_sock *csk, struct sk_buff *skb, u8 pdu_idx) +{ + struct sk_buff *hskb = csk->lro_hskb; + struct cxgbit_lro_pdu_cb *hpdu_cb = cxgbit_skb_lro_pdu_cb(hskb, 0); + struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, pdu_idx); + struct skb_shared_info *hssi = skb_shinfo(hskb); + struct skb_shared_info *ssi = skb_shinfo(skb); + unsigned int len = 0; + + if (pdu_cb->flags & PDUCBF_RX_HDR) { + hpdu_cb->flags = pdu_cb->flags; + hpdu_cb->seq = pdu_cb->seq; + hpdu_cb->hdr = pdu_cb->hdr; + hpdu_cb->hlen = pdu_cb->hlen; + + memcpy(&hssi->frags[0], &ssi->frags[pdu_cb->hfrag_idx], + sizeof(skb_frag_t)); + + 
get_page(skb_frag_page(&hssi->frags[0])); + hssi->nr_frags = 1; + hpdu_cb->frags = 1; + hpdu_cb->hfrag_idx = 0; + + len = hssi->frags[0].size; + hskb->len = len; + hskb->data_len = len; + hskb->truesize = len; + } + + if (pdu_cb->flags & PDUCBF_RX_DATA) { + u8 hfrag_idx = 1, i; + + hpdu_cb->flags |= pdu_cb->flags; + + len = 0; + for (i = 0; i < pdu_cb->nr_dfrags; hfrag_idx++, i++) { + memcpy(&hssi->frags[hfrag_idx], + &ssi->frags[pdu_cb->dfrag_idx + i], + sizeof(skb_frag_t)); + + get_page(skb_frag_page(&hssi->frags[hfrag_idx])); + + len += hssi->frags[hfrag_idx].size; + + hssi->nr_frags++; + hpdu_cb->frags++; + } + + hpdu_cb->dlen = pdu_cb->dlen; + hpdu_cb->doffset = hpdu_cb->hlen; + hpdu_cb->nr_dfrags = pdu_cb->nr_dfrags; + hpdu_cb->dfrag_idx = 1; + hskb->len += len; + hskb->data_len += len; + hskb->truesize += len; + } + + if (pdu_cb->flags & PDUCBF_RX_STATUS) { + hpdu_cb->flags |= pdu_cb->flags; + + if (hpdu_cb->flags & PDUCBF_RX_DATA) + hpdu_cb->flags &= ~PDUCBF_RX_DATA_DDPD; + + hpdu_cb->ddigest = pdu_cb->ddigest; + hpdu_cb->pdulen = pdu_cb->pdulen; + } +} + +static int cxgbit_process_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb) +{ + struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb); + struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0); + u8 pdu_idx = 0, last_idx = 0; + int ret = 0; + + if (!pdu_cb->complete) { + cxgbit_lro_skb_merge(csk, skb, 0); + + if (pdu_cb->flags & PDUCBF_RX_STATUS) { + struct sk_buff *hskb = csk->lro_hskb; + + ret = cxgbit_process_iscsi_pdu(csk, hskb, 0); + + cxgbit_lro_hskb_reset(csk); + + if (ret < 0) + goto out; + } + + pdu_idx = 1; + } + + if (lro_cb->pdu_idx) + last_idx = lro_cb->pdu_idx - 1; + + for (; pdu_idx <= last_idx; pdu_idx++) { + ret = cxgbit_process_iscsi_pdu(csk, skb, pdu_idx); + if (ret < 0) + goto out; + } + + if ((!lro_cb->complete) && lro_cb->pdu_idx) + cxgbit_lro_skb_merge(csk, skb, lro_cb->pdu_idx); + +out: + return ret; +} + +static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb) +{ + struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb); + struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0); + int ret = -1; + + if ((pdu_cb->flags & PDUCBF_RX_HDR) && + (pdu_cb->seq != csk->rcv_nxt)) { + pr_info("csk 0x%p, tid 0x%x, seq 0x%x != 0x%x.\n", + csk, csk->tid, pdu_cb->seq, csk->rcv_nxt); + cxgbit_lro_skb_dump(skb); + return ret; + } + + csk->rcv_nxt += lro_cb->pdu_totallen; + + ret = cxgbit_process_lro_skb(csk, skb); + + csk->rx_credits += lro_cb->pdu_totallen; + + if (csk->rx_credits >= (csk->rcv_win / 4)) + cxgbit_rx_data_ack(csk); + + return ret; +} + +static int cxgbit_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb) +{ + int ret = -1; + + if (likely(cxgbit_skcb_flags(skb) & SKCBF_RX_LRO)) + ret = cxgbit_rx_lro_skb(csk, skb); + + __kfree_skb(skb); + return ret; +} + +static bool cxgbit_rxq_len(struct cxgbit_sock *csk, struct sk_buff_head *rxq) +{ + spin_lock_bh(&csk->rxq.lock); + if (skb_queue_len(&csk->rxq)) { + skb_queue_splice_init(&csk->rxq, rxq); + spin_unlock_bh(&csk->rxq.lock); + return true; + } + spin_unlock_bh(&csk->rxq.lock); + return false; +} + +static int cxgbit_wait_rxq(struct cxgbit_sock *csk) +{ + struct sk_buff *skb; + struct sk_buff_head rxq; + + skb_queue_head_init(&rxq); + + wait_event_interruptible(csk->waitq, cxgbit_rxq_len(csk, &rxq)); + + if (signal_pending(current)) + goto out; + + while ((skb = __skb_dequeue(&rxq))) { + if (cxgbit_rx_skb(csk, skb)) + goto out; + } + + return 0; +out: + __skb_queue_purge(&rxq); + return -1; +} + +int 
cxgbit_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login) +{ + struct cxgbit_sock *csk = conn->context; + int ret = -1; + + while (!test_and_clear_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags)) { + ret = cxgbit_wait_rxq(csk); + if (ret) { + clear_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags); + break; + } + } + + return ret; +} + +void cxgbit_get_rx_pdu(struct iscsi_conn *conn) +{ + struct cxgbit_sock *csk = conn->context; + + while (!kthread_should_stop()) { + iscsit_thread_check_cpumask(conn, current, 0); + if (cxgbit_wait_rxq(csk)) + return; + } +} diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 961202f4e9aa..50f3d3a0dd7b 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -478,16 +478,16 @@ int iscsit_del_np(struct iscsi_np *np) return 0; } -static int iscsit_immediate_queue(struct iscsi_conn *, struct iscsi_cmd *, int); -static int iscsit_response_queue(struct iscsi_conn *, struct iscsi_cmd *, int); +static void iscsit_get_rx_pdu(struct iscsi_conn *); -static int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd) +int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd) { iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); return 0; } +EXPORT_SYMBOL(iscsit_queue_rsp); -static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) +void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) { bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD); @@ -498,6 +498,169 @@ static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) __iscsit_free_cmd(cmd, scsi_cmd, true); } +EXPORT_SYMBOL(iscsit_aborted_task); + +static void iscsit_do_crypto_hash_buf(struct ahash_request *, const void *, + u32, u32, u8 *, u8 *); +static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *); + +static int +iscsit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd, + const void *data_buf, u32 data_buf_len) +{ + struct iscsi_hdr *hdr = (struct iscsi_hdr *)cmd->pdu; + struct kvec *iov; + u32 niov = 0, tx_size = ISCSI_HDR_LEN; + int ret; + + iov = &cmd->iov_misc[0]; + iov[niov].iov_base = cmd->pdu; + iov[niov++].iov_len = ISCSI_HDR_LEN; + + if (conn->conn_ops->HeaderDigest) { + u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; + + iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr, + ISCSI_HDR_LEN, 0, NULL, + (u8 *)header_digest); + + iov[0].iov_len += ISCSI_CRC_LEN; + tx_size += ISCSI_CRC_LEN; + pr_debug("Attaching CRC32C HeaderDigest" + " to opcode 0x%x 0x%08x\n", + hdr->opcode, *header_digest); + } + + if (data_buf_len) { + u32 padding = ((-data_buf_len) & 3); + + iov[niov].iov_base = (void *)data_buf; + iov[niov++].iov_len = data_buf_len; + tx_size += data_buf_len; + + if (padding != 0) { + iov[niov].iov_base = &cmd->pad_bytes; + iov[niov++].iov_len = padding; + tx_size += padding; + pr_debug("Attaching %u additional" + " padding bytes.\n", padding); + } + + if (conn->conn_ops->DataDigest) { + iscsit_do_crypto_hash_buf(conn->conn_tx_hash, + data_buf, data_buf_len, + padding, + (u8 *)&cmd->pad_bytes, + (u8 *)&cmd->data_crc); + + iov[niov].iov_base = &cmd->data_crc; + iov[niov++].iov_len = ISCSI_CRC_LEN; + tx_size += ISCSI_CRC_LEN; + pr_debug("Attached DataDigest for %u" + " bytes opcode 0x%x, CRC 0x%08x\n", + data_buf_len, hdr->opcode, cmd->data_crc); + } + } + + cmd->iov_misc_count = niov; + cmd->tx_size = tx_size; + + ret = iscsit_send_tx_data(cmd, conn, 1); + if (ret < 0) { + 
iscsit_tx_thread_wait_for_tcp(conn); + return ret; + } + + return 0; +} + +static int iscsit_map_iovec(struct iscsi_cmd *, struct kvec *, u32, u32); +static void iscsit_unmap_iovec(struct iscsi_cmd *); +static u32 iscsit_do_crypto_hash_sg(struct ahash_request *, struct iscsi_cmd *, + u32, u32, u32, u8 *); +static int +iscsit_xmit_datain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd, + const struct iscsi_datain *datain) +{ + struct kvec *iov; + u32 iov_count = 0, tx_size = 0; + int ret, iov_ret; + + iov = &cmd->iov_data[0]; + iov[iov_count].iov_base = cmd->pdu; + iov[iov_count++].iov_len = ISCSI_HDR_LEN; + tx_size += ISCSI_HDR_LEN; + + if (conn->conn_ops->HeaderDigest) { + u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; + + iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu, + ISCSI_HDR_LEN, 0, NULL, + (u8 *)header_digest); + + iov[0].iov_len += ISCSI_CRC_LEN; + tx_size += ISCSI_CRC_LEN; + + pr_debug("Attaching CRC32 HeaderDigest for DataIN PDU 0x%08x\n", + *header_digest); + } + + iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1], + datain->offset, datain->length); + if (iov_ret < 0) + return -1; + + iov_count += iov_ret; + tx_size += datain->length; + + cmd->padding = ((-datain->length) & 3); + if (cmd->padding) { + iov[iov_count].iov_base = cmd->pad_bytes; + iov[iov_count++].iov_len = cmd->padding; + tx_size += cmd->padding; + + pr_debug("Attaching %u padding bytes\n", cmd->padding); + } + + if (conn->conn_ops->DataDigest) { + cmd->data_crc = iscsit_do_crypto_hash_sg(conn->conn_tx_hash, + cmd, datain->offset, + datain->length, + cmd->padding, + cmd->pad_bytes); + + iov[iov_count].iov_base = &cmd->data_crc; + iov[iov_count++].iov_len = ISCSI_CRC_LEN; + tx_size += ISCSI_CRC_LEN; + + pr_debug("Attached CRC32C DataDigest %d bytes, crc 0x%08x\n", + datain->length + cmd->padding, cmd->data_crc); + } + + cmd->iov_data_count = iov_count; + cmd->tx_size = tx_size; + + ret = iscsit_fe_sendpage_sg(cmd, conn); + + iscsit_unmap_iovec(cmd); + + if (ret < 0) { + iscsit_tx_thread_wait_for_tcp(conn); + return ret; + } + + return 0; +} + +static int iscsit_xmit_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd, + struct iscsi_datain_req *dr, const void *buf, + u32 buf_len) +{ + if (dr) + return iscsit_xmit_datain_pdu(conn, cmd, buf); + else + return iscsit_xmit_nondatain_pdu(conn, cmd, buf, buf_len); +} static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsi_conn *conn) { @@ -507,6 +670,7 @@ static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsi_conn *conn) static struct iscsit_transport iscsi_target_transport = { .name = "iSCSI/TCP", .transport_type = ISCSI_TCP, + .rdma_shutdown = false, .owner = NULL, .iscsit_setup_np = iscsit_setup_np, .iscsit_accept_np = iscsit_accept_np, @@ -519,6 +683,8 @@ static struct iscsit_transport iscsi_target_transport = { .iscsit_queue_data_in = iscsit_queue_rsp, .iscsit_queue_status = iscsit_queue_rsp, .iscsit_aborted_task = iscsit_aborted_task, + .iscsit_xmit_pdu = iscsit_xmit_pdu, + .iscsit_get_rx_pdu = iscsit_get_rx_pdu, .iscsit_get_sup_prot_ops = iscsit_get_sup_prot_ops, }; @@ -634,7 +800,7 @@ static void __exit iscsi_target_cleanup_module(void) kfree(iscsit_global); } -static int iscsit_add_reject( +int iscsit_add_reject( struct iscsi_conn *conn, u8 reason, unsigned char *buf) @@ -664,6 +830,7 @@ static int iscsit_add_reject( return -1; } +EXPORT_SYMBOL(iscsit_add_reject); static int iscsit_add_reject_from_cmd( struct iscsi_cmd *cmd, @@ -719,6 +886,7 @@ int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8 reason, unsigned char *buf) { return 
iscsit_add_reject_from_cmd(cmd, reason, false, buf); } +EXPORT_SYMBOL(iscsit_reject_cmd); /* * Map some portion of the allocated scatterlist to an iovec, suitable for @@ -737,7 +905,14 @@ static int iscsit_map_iovec( /* * We know each entry in t_data_sg contains a page. */ - sg = &cmd->se_cmd.t_data_sg[data_offset / PAGE_SIZE]; + u32 ent = data_offset / PAGE_SIZE; + + if (ent >= cmd->se_cmd.t_data_nents) { + pr_err("Initial page entry out-of-bounds\n"); + return -1; + } + + sg = &cmd->se_cmd.t_data_sg[ent]; page_off = (data_offset % PAGE_SIZE); cmd->first_data_sg = sg; @@ -2335,7 +2510,7 @@ iscsit_handle_logout_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, } EXPORT_SYMBOL(iscsit_handle_logout_cmd); -static int iscsit_handle_snack( +int iscsit_handle_snack( struct iscsi_conn *conn, unsigned char *buf) { @@ -2388,6 +2563,7 @@ static int iscsit_handle_snack( return 0; } +EXPORT_SYMBOL(iscsit_handle_snack); static void iscsit_rx_thread_wait_for_tcp(struct iscsi_conn *conn) { @@ -2534,7 +2710,6 @@ static int iscsit_send_conn_drop_async_message( { struct iscsi_async *hdr; - cmd->tx_size = ISCSI_HDR_LEN; cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT; hdr = (struct iscsi_async *) cmd->pdu; @@ -2552,25 +2727,11 @@ static int iscsit_send_conn_drop_async_message( hdr->param2 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait); hdr->param3 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Retain); - if (conn->conn_ops->HeaderDigest) { - u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - - iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr, - ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); - - cmd->tx_size += ISCSI_CRC_LEN; - pr_debug("Attaching CRC32C HeaderDigest to" - " Async Message 0x%08x\n", *header_digest); - } - - cmd->iov_misc[0].iov_base = cmd->pdu; - cmd->iov_misc[0].iov_len = cmd->tx_size; - cmd->iov_misc_count = 1; - pr_debug("Sending Connection Dropped Async Message StatSN:" " 0x%08x, for CID: %hu on CID: %hu\n", cmd->stat_sn, cmd->logout_cid, conn->cid); - return 0; + + return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0); } static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn) @@ -2583,7 +2744,7 @@ static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn) } } -static void +void iscsit_build_datain_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn, struct iscsi_datain *datain, struct iscsi_data_rsp *hdr, bool set_statsn) @@ -2627,15 +2788,14 @@ iscsit_build_datain_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn, cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn), ntohl(hdr->offset), datain->length, conn->cid); } +EXPORT_SYMBOL(iscsit_build_datain_pdu); static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn) { struct iscsi_data_rsp *hdr = (struct iscsi_data_rsp *)&cmd->pdu[0]; struct iscsi_datain datain; struct iscsi_datain_req *dr; - struct kvec *iov; - u32 iov_count = 0, tx_size = 0; - int eodr = 0, ret, iov_ret; + int eodr = 0, ret; bool set_statsn = false; memset(&datain, 0, sizeof(struct iscsi_datain)); @@ -2677,64 +2837,9 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn) iscsit_build_datain_pdu(cmd, conn, &datain, hdr, set_statsn); - iov = &cmd->iov_data[0]; - iov[iov_count].iov_base = cmd->pdu; - iov[iov_count++].iov_len = ISCSI_HDR_LEN; - tx_size += ISCSI_HDR_LEN; - - if (conn->conn_ops->HeaderDigest) { - u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - - iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu, - ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); - - 
iov[0].iov_len += ISCSI_CRC_LEN; - tx_size += ISCSI_CRC_LEN; - - pr_debug("Attaching CRC32 HeaderDigest" - " for DataIN PDU 0x%08x\n", *header_digest); - } - - iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1], - datain.offset, datain.length); - if (iov_ret < 0) - return -1; - - iov_count += iov_ret; - tx_size += datain.length; - - cmd->padding = ((-datain.length) & 3); - if (cmd->padding) { - iov[iov_count].iov_base = cmd->pad_bytes; - iov[iov_count++].iov_len = cmd->padding; - tx_size += cmd->padding; - - pr_debug("Attaching %u padding bytes\n", - cmd->padding); - } - if (conn->conn_ops->DataDigest) { - cmd->data_crc = iscsit_do_crypto_hash_sg(conn->conn_tx_hash, cmd, - datain.offset, datain.length, cmd->padding, cmd->pad_bytes); - - iov[iov_count].iov_base = &cmd->data_crc; - iov[iov_count++].iov_len = ISCSI_CRC_LEN; - tx_size += ISCSI_CRC_LEN; - - pr_debug("Attached CRC32C DataDigest %d bytes, crc" - " 0x%08x\n", datain.length+cmd->padding, cmd->data_crc); - } - - cmd->iov_data_count = iov_count; - cmd->tx_size = tx_size; - - ret = iscsit_fe_sendpage_sg(cmd, conn); - - iscsit_unmap_iovec(cmd); - - if (ret < 0) { - iscsit_tx_thread_wait_for_tcp(conn); + ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, dr, &datain, 0); + if (ret < 0) return ret; - } if (dr->dr_complete) { eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ? @@ -2843,34 +2948,14 @@ EXPORT_SYMBOL(iscsit_build_logout_rsp); static int iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn) { - struct kvec *iov; - int niov = 0, tx_size, rc; + int rc; rc = iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)&cmd->pdu[0]); if (rc < 0) return rc; - tx_size = ISCSI_HDR_LEN; - iov = &cmd->iov_misc[0]; - iov[niov].iov_base = cmd->pdu; - iov[niov++].iov_len = ISCSI_HDR_LEN; - - if (conn->conn_ops->HeaderDigest) { - u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - - iscsit_do_crypto_hash_buf(conn->conn_tx_hash, &cmd->pdu[0], - ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); - - iov[0].iov_len += ISCSI_CRC_LEN; - tx_size += ISCSI_CRC_LEN; - pr_debug("Attaching CRC32C HeaderDigest to" - " Logout Response 0x%08x\n", *header_digest); - } - cmd->iov_misc_count = niov; - cmd->tx_size = tx_size; - - return 0; + return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0); } void @@ -2910,34 +2995,16 @@ static int iscsit_send_unsolicited_nopin( int want_response) { struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0]; - int tx_size = ISCSI_HDR_LEN, ret; + int ret; iscsit_build_nopin_rsp(cmd, conn, hdr, false); - if (conn->conn_ops->HeaderDigest) { - u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - - iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr, - ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); - - tx_size += ISCSI_CRC_LEN; - pr_debug("Attaching CRC32C HeaderDigest to" - " NopIN 0x%08x\n", *header_digest); - } - - cmd->iov_misc[0].iov_base = cmd->pdu; - cmd->iov_misc[0].iov_len = tx_size; - cmd->iov_misc_count = 1; - cmd->tx_size = tx_size; - pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:" " 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid); - ret = iscsit_send_tx_data(cmd, conn, 1); - if (ret < 0) { - iscsit_tx_thread_wait_for_tcp(conn); + ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0); + if (ret < 0) return ret; - } spin_lock_bh(&cmd->istate_lock); cmd->i_state = want_response ? 
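
/*
 * [Annotation] iscsit_xmit_datain_pdu() (added above) preserves the
 * exact wire layout the removed inline code used to build; it only
 * moves it behind the transport hook. For the TCP transport, with both
 * digests negotiated, the kvec array handed to iscsit_fe_sendpage_sg()
 * is:
 *
 *	iov[0]       48-byte BHS, +4 bytes when HeaderDigest is on
 *	             (CRC32C of the header, appended in place)
 *	iov[1..n]    payload pages mapped by iscsit_map_iovec()
 *	iov[n+1]     0-3 pad bytes up to the next 4-byte boundary
 *	iov[n+2]     4-byte CRC32C DataDigest over payload + pad
 */
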
@@ -2951,75 +3018,24 @@ static int iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn) { struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0]; - struct kvec *iov; - u32 padding = 0; - int niov = 0, tx_size; iscsit_build_nopin_rsp(cmd, conn, hdr, true); - tx_size = ISCSI_HDR_LEN; - iov = &cmd->iov_misc[0]; - iov[niov].iov_base = cmd->pdu; - iov[niov++].iov_len = ISCSI_HDR_LEN; - - if (conn->conn_ops->HeaderDigest) { - u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - - iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr, - ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); - - iov[0].iov_len += ISCSI_CRC_LEN; - tx_size += ISCSI_CRC_LEN; - pr_debug("Attaching CRC32C HeaderDigest" - " to NopIn 0x%08x\n", *header_digest); - } - /* * NOPOUT Ping Data is attached to struct iscsi_cmd->buf_ptr. * NOPOUT DataSegmentLength is at struct iscsi_cmd->buf_ptr_size. */ - if (cmd->buf_ptr_size) { - iov[niov].iov_base = cmd->buf_ptr; - iov[niov++].iov_len = cmd->buf_ptr_size; - tx_size += cmd->buf_ptr_size; - - pr_debug("Echoing back %u bytes of ping" - " data.\n", cmd->buf_ptr_size); - - padding = ((-cmd->buf_ptr_size) & 3); - if (padding != 0) { - iov[niov].iov_base = &cmd->pad_bytes; - iov[niov++].iov_len = padding; - tx_size += padding; - pr_debug("Attaching %u additional" - " padding bytes.\n", padding); - } - if (conn->conn_ops->DataDigest) { - iscsit_do_crypto_hash_buf(conn->conn_tx_hash, - cmd->buf_ptr, cmd->buf_ptr_size, - padding, (u8 *)&cmd->pad_bytes, - (u8 *)&cmd->data_crc); - - iov[niov].iov_base = &cmd->data_crc; - iov[niov++].iov_len = ISCSI_CRC_LEN; - tx_size += ISCSI_CRC_LEN; - pr_debug("Attached DataDigest for %u" - " bytes of ping data, CRC 0x%08x\n", - cmd->buf_ptr_size, cmd->data_crc); - } - } + pr_debug("Echoing back %u bytes of ping data.\n", cmd->buf_ptr_size); - cmd->iov_misc_count = niov; - cmd->tx_size = tx_size; - - return 0; + return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, + cmd->buf_ptr, + cmd->buf_ptr_size); } static int iscsit_send_r2t( struct iscsi_cmd *cmd, struct iscsi_conn *conn) { - int tx_size = 0; struct iscsi_r2t *r2t; struct iscsi_r2t_rsp *hdr; int ret; @@ -3035,7 +3051,10 @@ static int iscsit_send_r2t( int_to_scsilun(cmd->se_cmd.orig_fe_lun, (struct scsi_lun *)&hdr->lun); hdr->itt = cmd->init_task_tag; - r2t->targ_xfer_tag = session_get_next_ttt(conn->sess); + if (conn->conn_transport->iscsit_get_r2t_ttt) + conn->conn_transport->iscsit_get_r2t_ttt(conn, cmd, r2t); + else + r2t->targ_xfer_tag = session_get_next_ttt(conn->sess); hdr->ttt = cpu_to_be32(r2t->targ_xfer_tag); hdr->statsn = cpu_to_be32(conn->stat_sn); hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); @@ -3044,38 +3063,18 @@ static int iscsit_send_r2t( hdr->data_offset = cpu_to_be32(r2t->offset); hdr->data_length = cpu_to_be32(r2t->xfer_len); - cmd->iov_misc[0].iov_base = cmd->pdu; - cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN; - tx_size += ISCSI_HDR_LEN; - - if (conn->conn_ops->HeaderDigest) { - u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - - iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr, - ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); - - cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN; - tx_size += ISCSI_CRC_LEN; - pr_debug("Attaching CRC32 HeaderDigest for R2T" - " PDU 0x%08x\n", *header_digest); - } - pr_debug("Built %sR2T, ITT: 0x%08x, TTT: 0x%08x, StatSN:" " 0x%08x, R2TSN: 0x%08x, Offset: %u, DDTL: %u, CID: %hu\n", (!r2t->recovery_r2t) ? 
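
/*
 * [Illustrative sketch] iscsit_get_r2t_ttt() is an optional hook with a
 * fallback to the session-wide counter, so existing transports need no
 * change. A transport that must own the tag space (e.g. so hardware can
 * steer the matching Data-Out) might implement it as below;
 * foo_hw_alloc_tag() and the void return type are assumptions, the
 * argument list follows the call site in the patch.
 */
static u32 foo_hw_alloc_tag(struct iscsi_conn *conn);	/* hypothetical */

static void foo_get_r2t_ttt(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
			    struct iscsi_r2t *r2t)
{
	/* encode a hardware queue handle into the target transfer tag */
	r2t->targ_xfer_tag = foo_hw_alloc_tag(conn);
}
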
"" : "Recovery ", cmd->init_task_tag, r2t->targ_xfer_tag, ntohl(hdr->statsn), r2t->r2t_sn, r2t->offset, r2t->xfer_len, conn->cid); - cmd->iov_misc_count = 1; - cmd->tx_size = tx_size; - spin_lock_bh(&cmd->r2t_lock); r2t->sent_r2t = 1; spin_unlock_bh(&cmd->r2t_lock); - ret = iscsit_send_tx_data(cmd, conn, 1); + ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0); if (ret < 0) { - iscsit_tx_thread_wait_for_tcp(conn); return ret; } @@ -3166,6 +3165,7 @@ int iscsit_build_r2ts_for_cmd( return 0; } +EXPORT_SYMBOL(iscsit_build_r2ts_for_cmd); void iscsit_build_rsp_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn, bool inc_stat_sn, struct iscsi_scsi_rsp *hdr) @@ -3204,18 +3204,12 @@ EXPORT_SYMBOL(iscsit_build_rsp_pdu); static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn) { struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)&cmd->pdu[0]; - struct kvec *iov; - u32 padding = 0, tx_size = 0; - int iov_count = 0; bool inc_stat_sn = (cmd->i_state == ISTATE_SEND_STATUS); + void *data_buf = NULL; + u32 padding = 0, data_buf_len = 0; iscsit_build_rsp_pdu(cmd, conn, inc_stat_sn, hdr); - iov = &cmd->iov_misc[0]; - iov[iov_count].iov_base = cmd->pdu; - iov[iov_count++].iov_len = ISCSI_HDR_LEN; - tx_size += ISCSI_HDR_LEN; - /* * Attach SENSE DATA payload to iSCSI Response PDU */ @@ -3227,56 +3221,23 @@ static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn) padding = -(cmd->se_cmd.scsi_sense_length) & 3; hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length); - iov[iov_count].iov_base = cmd->sense_buffer; - iov[iov_count++].iov_len = - (cmd->se_cmd.scsi_sense_length + padding); - tx_size += cmd->se_cmd.scsi_sense_length; + data_buf = cmd->sense_buffer; + data_buf_len = cmd->se_cmd.scsi_sense_length + padding; if (padding) { memset(cmd->sense_buffer + cmd->se_cmd.scsi_sense_length, 0, padding); - tx_size += padding; pr_debug("Adding %u bytes of padding to" " SENSE.\n", padding); } - if (conn->conn_ops->DataDigest) { - iscsit_do_crypto_hash_buf(conn->conn_tx_hash, - cmd->sense_buffer, - (cmd->se_cmd.scsi_sense_length + padding), - 0, NULL, (u8 *)&cmd->data_crc); - - iov[iov_count].iov_base = &cmd->data_crc; - iov[iov_count++].iov_len = ISCSI_CRC_LEN; - tx_size += ISCSI_CRC_LEN; - - pr_debug("Attaching CRC32 DataDigest for" - " SENSE, %u bytes CRC 0x%08x\n", - (cmd->se_cmd.scsi_sense_length + padding), - cmd->data_crc); - } - pr_debug("Attaching SENSE DATA: %u bytes to iSCSI" " Response PDU\n", cmd->se_cmd.scsi_sense_length); } - if (conn->conn_ops->HeaderDigest) { - u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - - iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu, - ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); - - iov[0].iov_len += ISCSI_CRC_LEN; - tx_size += ISCSI_CRC_LEN; - pr_debug("Attaching CRC32 HeaderDigest for Response" - " PDU 0x%08x\n", *header_digest); - } - - cmd->iov_misc_count = iov_count; - cmd->tx_size = tx_size; - - return 0; + return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, data_buf, + data_buf_len); } static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr) @@ -3323,30 +3284,10 @@ static int iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) { struct iscsi_tm_rsp *hdr = (struct iscsi_tm_rsp *)&cmd->pdu[0]; - u32 tx_size = 0; iscsit_build_task_mgt_rsp(cmd, conn, hdr); - cmd->iov_misc[0].iov_base = cmd->pdu; - cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN; - tx_size += ISCSI_HDR_LEN; - - if (conn->conn_ops->HeaderDigest) { - u32 *header_digest = (u32 
*)&cmd->pdu[ISCSI_HDR_LEN]; - - iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr, - ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); - - cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN; - tx_size += ISCSI_CRC_LEN; - pr_debug("Attaching CRC32 HeaderDigest for Task" - " Mgmt Response PDU 0x%08x\n", *header_digest); - } - - cmd->iov_misc_count = 1; - cmd->tx_size = tx_size; - - return 0; + return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0); } static bool iscsit_check_inaddr_any(struct iscsi_np *np) @@ -3583,53 +3524,16 @@ static int iscsit_send_text_rsp( struct iscsi_conn *conn) { struct iscsi_text_rsp *hdr = (struct iscsi_text_rsp *)cmd->pdu; - struct kvec *iov; - u32 tx_size = 0; - int text_length, iov_count = 0, rc; - - rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_TCP); - if (rc < 0) - return rc; - - text_length = rc; - iov = &cmd->iov_misc[0]; - iov[iov_count].iov_base = cmd->pdu; - iov[iov_count++].iov_len = ISCSI_HDR_LEN; - iov[iov_count].iov_base = cmd->buf_ptr; - iov[iov_count++].iov_len = text_length; - - tx_size += (ISCSI_HDR_LEN + text_length); - - if (conn->conn_ops->HeaderDigest) { - u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - - iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr, - ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); - - iov[0].iov_len += ISCSI_CRC_LEN; - tx_size += ISCSI_CRC_LEN; - pr_debug("Attaching CRC32 HeaderDigest for" - " Text Response PDU 0x%08x\n", *header_digest); - } - - if (conn->conn_ops->DataDigest) { - iscsit_do_crypto_hash_buf(conn->conn_tx_hash, - cmd->buf_ptr, text_length, - 0, NULL, (u8 *)&cmd->data_crc); - - iov[iov_count].iov_base = &cmd->data_crc; - iov[iov_count++].iov_len = ISCSI_CRC_LEN; - tx_size += ISCSI_CRC_LEN; - - pr_debug("Attaching DataDigest for %u bytes of text" - " data, CRC 0x%08x\n", text_length, - cmd->data_crc); - } + int text_length; - cmd->iov_misc_count = iov_count; - cmd->tx_size = tx_size; + text_length = iscsit_build_text_rsp(cmd, conn, hdr, + conn->conn_transport->transport_type); + if (text_length < 0) + return text_length; - return 0; + return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, + cmd->buf_ptr, + text_length); } void @@ -3654,49 +3558,15 @@ static int iscsit_send_reject( struct iscsi_conn *conn) { struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0]; - struct kvec *iov; - u32 iov_count = 0, tx_size; iscsit_build_reject(cmd, conn, hdr); - iov = &cmd->iov_misc[0]; - iov[iov_count].iov_base = cmd->pdu; - iov[iov_count++].iov_len = ISCSI_HDR_LEN; - iov[iov_count].iov_base = cmd->buf_ptr; - iov[iov_count++].iov_len = ISCSI_HDR_LEN; - - tx_size = (ISCSI_HDR_LEN + ISCSI_HDR_LEN); - - if (conn->conn_ops->HeaderDigest) { - u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - - iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr, - ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); - - iov[0].iov_len += ISCSI_CRC_LEN; - tx_size += ISCSI_CRC_LEN; - pr_debug("Attaching CRC32 HeaderDigest for" - " REJECT PDU 0x%08x\n", *header_digest); - } - - if (conn->conn_ops->DataDigest) { - iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->buf_ptr, - ISCSI_HDR_LEN, 0, NULL, (u8 *)&cmd->data_crc); - - iov[iov_count].iov_base = &cmd->data_crc; - iov[iov_count++].iov_len = ISCSI_CRC_LEN; - tx_size += ISCSI_CRC_LEN; - pr_debug("Attaching CRC32 DataDigest for REJECT" - " PDU 0x%08x\n", cmd->data_crc); - } - - cmd->iov_misc_count = iov_count; - cmd->tx_size = tx_size; - pr_debug("Built Reject PDU StatSN: 0x%08x, Reason: 0x%02x," " CID: %hu\n", ntohl(hdr->statsn), hdr->reason, conn->cid); - return 0; 
+ return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, + cmd->buf_ptr, + ISCSI_HDR_LEN); } void iscsit_thread_get_cpumask(struct iscsi_conn *conn) @@ -3724,33 +3594,7 @@ void iscsit_thread_get_cpumask(struct iscsi_conn *conn) cpumask_setall(conn->conn_cpumask); } -static inline void iscsit_thread_check_cpumask( - struct iscsi_conn *conn, - struct task_struct *p, - int mode) -{ - /* - * mode == 1 signals iscsi_target_tx_thread() usage. - * mode == 0 signals iscsi_target_rx_thread() usage. - */ - if (mode == 1) { - if (!conn->conn_tx_reset_cpumask) - return; - conn->conn_tx_reset_cpumask = 0; - } else { - if (!conn->conn_rx_reset_cpumask) - return; - conn->conn_rx_reset_cpumask = 0; - } - /* - * Update the CPU mask for this single kthread so that - * both TX and RX kthreads are scheduled to run on the - * same CPU. - */ - set_cpus_allowed_ptr(p, conn->conn_cpumask); -} - -static int +int iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) { int ret; @@ -3792,6 +3636,7 @@ iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state err: return -1; } +EXPORT_SYMBOL(iscsit_immediate_queue); static int iscsit_handle_immediate_queue(struct iscsi_conn *conn) @@ -3816,7 +3661,7 @@ iscsit_handle_immediate_queue(struct iscsi_conn *conn) return 0; } -static int +int iscsit_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) { int ret; @@ -3889,13 +3734,6 @@ check_rsp_state: if (ret < 0) goto err; - if (iscsit_send_tx_data(cmd, conn, 1) < 0) { - iscsit_tx_thread_wait_for_tcp(conn); - iscsit_unmap_iovec(cmd); - goto err; - } - iscsit_unmap_iovec(cmd); - switch (state) { case ISTATE_SEND_LOGOUTRSP: if (!iscsit_logout_post_handler(cmd, conn)) @@ -3928,6 +3766,7 @@ check_rsp_state: err: return -1; } +EXPORT_SYMBOL(iscsit_response_queue); static int iscsit_handle_response_queue(struct iscsi_conn *conn) { @@ -4087,36 +3926,12 @@ static bool iscsi_target_check_conn_state(struct iscsi_conn *conn) return ret; } -int iscsi_target_rx_thread(void *arg) +static void iscsit_get_rx_pdu(struct iscsi_conn *conn) { - int ret, rc; + int ret; u8 buffer[ISCSI_HDR_LEN], opcode; u32 checksum = 0, digest = 0; - struct iscsi_conn *conn = arg; struct kvec iov; - /* - * Allow ourselves to be interrupted by SIGINT so that a - * connection recovery / failure event can be triggered externally. - */ - allow_signal(SIGINT); - /* - * Wait for iscsi_post_login_handler() to complete before allowing - * incoming iscsi/tcp socket I/O, and/or failing the connection. 
- */ - rc = wait_for_completion_interruptible(&conn->rx_login_comp); - if (rc < 0 || iscsi_target_check_conn_state(conn)) - return 0; - - if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { - struct completion comp; - - init_completion(&comp); - rc = wait_for_completion_interruptible(&comp); - if (rc < 0) - goto transport_err; - - goto transport_err; - } while (!kthread_should_stop()) { /* @@ -4134,7 +3949,7 @@ int iscsi_target_rx_thread(void *arg) ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN); if (ret != ISCSI_HDR_LEN) { iscsit_rx_thread_wait_for_tcp(conn); - goto transport_err; + return; } if (conn->conn_ops->HeaderDigest) { @@ -4144,7 +3959,7 @@ int iscsi_target_rx_thread(void *arg) ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN); if (ret != ISCSI_CRC_LEN) { iscsit_rx_thread_wait_for_tcp(conn); - goto transport_err; + return; } iscsit_do_crypto_hash_buf(conn->conn_rx_hash, @@ -4168,7 +3983,7 @@ int iscsi_target_rx_thread(void *arg) } if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) - goto transport_err; + return; opcode = buffer[0] & ISCSI_OPCODE_MASK; @@ -4179,15 +3994,38 @@ int iscsi_target_rx_thread(void *arg) " while in Discovery Session, rejecting.\n", opcode); iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, buffer); - goto transport_err; + return; } ret = iscsi_target_rx_opcode(conn, buffer); if (ret < 0) - goto transport_err; + return; } +} + +int iscsi_target_rx_thread(void *arg) +{ + int rc; + struct iscsi_conn *conn = arg; + + /* + * Allow ourselves to be interrupted by SIGINT so that a + * connection recovery / failure event can be triggered externally. + */ + allow_signal(SIGINT); + /* + * Wait for iscsi_post_login_handler() to complete before allowing + * incoming iscsi/tcp socket I/O, and/or failing the connection. + */ + rc = wait_for_completion_interruptible(&conn->rx_login_comp); + if (rc < 0 || iscsi_target_check_conn_state(conn)) + return 0; + + if (!conn->conn_transport->iscsit_get_rx_pdu) + return 0; + + conn->conn_transport->iscsit_get_rx_pdu(conn); -transport_err: if (!signal_pending(current)) atomic_set(&conn->transport_failed, 1); iscsit_take_action_for_connection_exit(conn); @@ -4240,16 +4078,17 @@ int iscsit_close_connection( pr_debug("Closing iSCSI connection CID %hu on SID:" " %u\n", conn->cid, sess->sid); /* - * Always up conn_logout_comp for the traditional TCP case just in case - * the RX Thread in iscsi_target_rx_opcode() is sleeping and the logout - * response never got sent because the connection failed. + * Always up conn_logout_comp for the traditional TCP and HW_OFFLOAD + * case just in case the RX Thread in iscsi_target_rx_opcode() is + * sleeping and the logout response never got sent because the + * connection failed. * * However for iser-target, isert_wait4logout() is using conn_logout_comp * to signal logout response TX interrupt completion. Go ahead and skip * this for iser since isert_rx_opcode() does not wait on logout failure, * and to avoid iscsi_conn pointer dereference in iser-target code. 
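
/*
 * [Annotation] With PDU reception factored out behind
 * iscsit_get_rx_pdu(), the rx kthread becomes transport-neutral: the
 * TCP transport supplies the socket read loop extracted above, while a
 * transport whose PDUs arrive from interrupt/RDMA context can either
 * leave the callback unset (the thread then exits right after login)
 * or supply one that simply parks the thread. Either way the
 * hard-coded ISCSI_INFINIBAND sleep-forever special case disappears
 * from the generic path.
 */
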
*/ - if (conn->conn_transport->transport_type == ISCSI_TCP) + if (!conn->conn_transport->rdma_shutdown) complete(&conn->conn_logout_comp); if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) { @@ -4438,7 +4277,7 @@ int iscsit_close_connection( if (!atomic_read(&sess->session_reinstatement) && atomic_read(&sess->session_fall_back_to_erl0)) { spin_unlock_bh(&sess->conn_lock); - target_put_session(sess->se_sess); + iscsit_close_session(sess); return 0; } else if (atomic_read(&sess->session_logout)) { @@ -4467,6 +4306,10 @@ int iscsit_close_connection( } } +/* + * If the iSCSI Session for the iSCSI Initiator Node exists, + * forcefully shutdown the iSCSI NEXUS. + */ int iscsit_close_session(struct iscsi_session *sess) { struct iscsi_portal_group *tpg = sess->tpg; @@ -4556,7 +4399,7 @@ static void iscsit_logout_post_handler_closesession( * always sleep waiting for RX/TX thread shutdown to complete * within iscsit_close_connection(). */ - if (conn->conn_transport->transport_type == ISCSI_TCP) + if (!conn->conn_transport->rdma_shutdown) sleep = cmpxchg(&conn->tx_thread_active, true, false); atomic_set(&conn->conn_logout_remove, 0); @@ -4565,7 +4408,7 @@ static void iscsit_logout_post_handler_closesession( iscsit_dec_conn_usage_count(conn); iscsit_stop_session(sess, sleep, sleep); iscsit_dec_session_usage_count(sess); - target_put_session(sess->se_sess); + iscsit_close_session(sess); } static void iscsit_logout_post_handler_samecid( @@ -4573,7 +4416,7 @@ static void iscsit_logout_post_handler_samecid( { int sleep = 1; - if (conn->conn_transport->transport_type == ISCSI_TCP) + if (!conn->conn_transport->rdma_shutdown) sleep = cmpxchg(&conn->tx_thread_active, true, false); atomic_set(&conn->conn_logout_remove, 0); @@ -4736,7 +4579,7 @@ int iscsit_free_session(struct iscsi_session *sess) } else spin_unlock_bh(&sess->conn_lock); - target_put_session(sess->se_sess); + iscsit_close_session(sess); return 0; } diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index 97e5b69e0668..923c032f0b95 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -43,14 +43,15 @@ static inline struct iscsi_tpg_np *to_iscsi_tpg_np(struct config_item *item) return container_of(to_tpg_np(item), struct iscsi_tpg_np, se_tpg_np); } -static ssize_t lio_target_np_sctp_show(struct config_item *item, char *page) +static ssize_t lio_target_np_driver_show(struct config_item *item, char *page, + enum iscsit_transport_type type) { struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item); - struct iscsi_tpg_np *tpg_np_sctp; + struct iscsi_tpg_np *tpg_np_new; ssize_t rb; - tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP); - if (tpg_np_sctp) + tpg_np_new = iscsit_tpg_locate_child_np(tpg_np, type); + if (tpg_np_new) rb = sprintf(page, "1\n"); else rb = sprintf(page, "0\n"); @@ -58,19 +59,20 @@ static ssize_t lio_target_np_sctp_show(struct config_item *item, char *page) return rb; } -static ssize_t lio_target_np_sctp_store(struct config_item *item, - const char *page, size_t count) +static ssize_t lio_target_np_driver_store(struct config_item *item, + const char *page, size_t count, enum iscsit_transport_type type, + const char *mod_name) { struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item); struct iscsi_np *np; struct iscsi_portal_group *tpg; - struct iscsi_tpg_np *tpg_np_sctp = NULL; + struct iscsi_tpg_np *tpg_np_new = NULL; u32 op; - int ret; + int rc; - ret = kstrtou32(page, 0, &op); - if (ret) - return ret; + rc = 
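
/*
 * [Annotation] rdma_shutdown turns a transport-type comparison into a
 * capability flag: every "transport_type == ISCSI_TCP" test in the
 * logout/shutdown paths becomes "!rdma_shutdown", so a new
 * TCP-semantics transport (such as the cxgbit offload wired up below)
 * inherits the conn_logout_comp and tx_thread_active handling just by
 * leaving the flag false, while an RDMA transport such as iser is
 * expected to set it and keep its own logout completion handling.
 */
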
kstrtou32(page, 0, &op); + if (rc) + return rc; if ((op != 1) && (op != 0)) { pr_err("Illegal value for tpg_enable: %u\n", op); return -EINVAL; @@ -87,107 +89,64 @@ static ssize_t lio_target_np_sctp_store(struct config_item *item, return -EINVAL; if (op) { - /* - * Use existing np->np_sockaddr for SCTP network portal reference - */ - tpg_np_sctp = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr, - tpg_np, ISCSI_SCTP_TCP); - if (!tpg_np_sctp || IS_ERR(tpg_np_sctp)) - goto out; - } else { - tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP); - if (!tpg_np_sctp) - goto out; + if (strlen(mod_name)) { + rc = request_module(mod_name); + if (rc != 0) { + pr_warn("Unable to request_module for %s\n", + mod_name); + rc = 0; + } + } - ret = iscsit_tpg_del_network_portal(tpg, tpg_np_sctp); - if (ret < 0) + tpg_np_new = iscsit_tpg_add_network_portal(tpg, + &np->np_sockaddr, tpg_np, type); + if (IS_ERR(tpg_np_new)) goto out; + } else { + tpg_np_new = iscsit_tpg_locate_child_np(tpg_np, type); + if (tpg_np_new) { + rc = iscsit_tpg_del_network_portal(tpg, tpg_np_new); + if (rc < 0) + goto out; + } } iscsit_put_tpg(tpg); return count; out: iscsit_put_tpg(tpg); - return -EINVAL; + return rc; } static ssize_t lio_target_np_iser_show(struct config_item *item, char *page) { - struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item); - struct iscsi_tpg_np *tpg_np_iser; - ssize_t rb; - - tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND); - if (tpg_np_iser) - rb = sprintf(page, "1\n"); - else - rb = sprintf(page, "0\n"); - - return rb; + return lio_target_np_driver_show(item, page, ISCSI_INFINIBAND); } static ssize_t lio_target_np_iser_store(struct config_item *item, - const char *page, size_t count) + const char *page, size_t count) { - struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item); - struct iscsi_np *np; - struct iscsi_portal_group *tpg; - struct iscsi_tpg_np *tpg_np_iser = NULL; - char *endptr; - u32 op; - int rc = 0; - - op = simple_strtoul(page, &endptr, 0); - if ((op != 1) && (op != 0)) { - pr_err("Illegal value for tpg_enable: %u\n", op); - return -EINVAL; - } - np = tpg_np->tpg_np; - if (!np) { - pr_err("Unable to locate struct iscsi_np from" - " struct iscsi_tpg_np\n"); - return -EINVAL; - } - - tpg = tpg_np->tpg; - if (iscsit_get_tpg(tpg) < 0) - return -EINVAL; - - if (op) { - rc = request_module("ib_isert"); - if (rc != 0) { - pr_warn("Unable to request_module for ib_isert\n"); - rc = 0; - } - - tpg_np_iser = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr, - tpg_np, ISCSI_INFINIBAND); - if (IS_ERR(tpg_np_iser)) { - rc = PTR_ERR(tpg_np_iser); - goto out; - } - } else { - tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND); - if (tpg_np_iser) { - rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser); - if (rc < 0) - goto out; - } - } + return lio_target_np_driver_store(item, page, count, + ISCSI_INFINIBAND, "ib_isert"); +} +CONFIGFS_ATTR(lio_target_np_, iser); - iscsit_put_tpg(tpg); - return count; -out: - iscsit_put_tpg(tpg); - return rc; +static ssize_t lio_target_np_cxgbit_show(struct config_item *item, char *page) +{ + return lio_target_np_driver_show(item, page, ISCSI_CXGBIT); } -CONFIGFS_ATTR(lio_target_np_, sctp); -CONFIGFS_ATTR(lio_target_np_, iser); +static ssize_t lio_target_np_cxgbit_store(struct config_item *item, + const char *page, size_t count) +{ + return lio_target_np_driver_store(item, page, count, + ISCSI_CXGBIT, "cxgbit"); +} +CONFIGFS_ATTR(lio_target_np_, cxgbit); static struct configfs_attribute *lio_target_portal_attrs[] = { - 
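
/*
 * [Illustrative sketch] With the show/store logic generalized above,
 * exposing a portal attribute for a future transport reduces to two
 * thin wrappers plus CONFIGFS_ATTR(). "bar", ISCSI_BAR and the "bar"
 * module name are assumptions for illustration only:
 */
static ssize_t lio_target_np_bar_show(struct config_item *item, char *page)
{
	return lio_target_np_driver_show(item, page, ISCSI_BAR);
}

static ssize_t lio_target_np_bar_store(struct config_item *item,
		const char *page, size_t count)
{
	return lio_target_np_driver_store(item, page, count,
					  ISCSI_BAR, "bar");
}
CONFIGFS_ATTR(lio_target_np_, bar);
/* then add &lio_target_np_attr_bar to lio_target_portal_attrs[] */
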
&lio_target_np_attr_sctp, &lio_target_np_attr_iser, + &lio_target_np_attr_cxgbit, NULL, }; @@ -1554,7 +1513,7 @@ static int lio_tpg_check_prot_fabric_only( * This function calls iscsit_inc_session_usage_count() on the * struct iscsi_session in question. */ -static int lio_tpg_shutdown_session(struct se_session *se_sess) +static void lio_tpg_close_session(struct se_session *se_sess) { struct iscsi_session *sess = se_sess->fabric_sess_ptr; struct se_portal_group *se_tpg = &sess->tpg->tpg_se_tpg; @@ -1566,7 +1525,7 @@ static int lio_tpg_shutdown_session(struct se_session *se_sess) (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) { spin_unlock(&sess->conn_lock); spin_unlock_bh(&se_tpg->session_lock); - return 0; + return; } atomic_set(&sess->session_reinstatement, 1); spin_unlock(&sess->conn_lock); @@ -1575,20 +1534,6 @@ static int lio_tpg_shutdown_session(struct se_session *se_sess) spin_unlock_bh(&se_tpg->session_lock); iscsit_stop_session(sess, 1, 1); - return 1; -} - -/* - * Calls iscsit_dec_session_usage_count() as inverse of - * lio_tpg_shutdown_session() - */ -static void lio_tpg_close_session(struct se_session *se_sess) -{ - struct iscsi_session *sess = se_sess->fabric_sess_ptr; - /* - * If the iSCSI Session for the iSCSI Initiator Node exists, - * forcefully shutdown the iSCSI NEXUS. - */ iscsit_close_session(sess); } @@ -1640,7 +1585,6 @@ const struct target_core_fabric_ops iscsi_ops = { .tpg_get_inst_index = lio_tpg_get_inst_index, .check_stop_free = lio_check_stop_free, .release_cmd = lio_release_cmd, - .shutdown_session = lio_tpg_shutdown_session, .close_session = lio_tpg_close_session, .sess_get_index = lio_sess_get_index, .sess_get_initiator_sid = lio_sess_get_initiator_sid, diff --git a/drivers/target/iscsi/iscsi_target_datain_values.c b/drivers/target/iscsi/iscsi_target_datain_values.c index fb3b52b124ac..647d4a5dca52 100644 --- a/drivers/target/iscsi/iscsi_target_datain_values.c +++ b/drivers/target/iscsi/iscsi_target_datain_values.c @@ -524,3 +524,4 @@ struct iscsi_datain_req *iscsit_get_datain_values( return NULL; } +EXPORT_SYMBOL(iscsit_get_datain_values); diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c index 210f6e4830e3..b54e72c7ab0f 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.c +++ b/drivers/target/iscsi/iscsi_target_erl0.c @@ -786,7 +786,7 @@ static void iscsit_handle_time2retain_timeout(unsigned long data) } spin_unlock_bh(&se_tpg->session_lock); - target_put_session(sess->se_sess); + iscsit_close_session(sess); } void iscsit_start_time2retain_handler(struct iscsi_session *sess) diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 8436d56c5f0c..b5212f0f9571 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -228,7 +228,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn) if (sess->session_state == TARG_SESS_STATE_FAILED) { spin_unlock_bh(&sess->conn_lock); iscsit_dec_session_usage_count(sess); - target_put_session(sess->se_sess); + iscsit_close_session(sess); return 0; } spin_unlock_bh(&sess->conn_lock); @@ -236,7 +236,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn) iscsit_stop_session(sess, 1, 1); iscsit_dec_session_usage_count(sess); - target_put_session(sess->se_sess); + iscsit_close_session(sess); return 0; } @@ -258,7 +258,7 @@ static void iscsi_login_set_conn_values( mutex_unlock(&auth_id_lock); } -static __printf(2, 3) int iscsi_change_param_sprintf( 
+__printf(2, 3) int iscsi_change_param_sprintf( struct iscsi_conn *conn, const char *fmt, ...) { @@ -279,6 +279,7 @@ static __printf(2, 3) int iscsi_change_param_sprintf( return 0; } +EXPORT_SYMBOL(iscsi_change_param_sprintf); /* * This is the leading connection of a new session, @@ -1387,6 +1388,16 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) goto old_sess_out; } + if (conn->conn_transport->iscsit_validate_params) { + ret = conn->conn_transport->iscsit_validate_params(conn); + if (ret < 0) { + if (zero_tsih) + goto new_sess_out; + else + goto old_sess_out; + } + } + ret = iscsi_target_start_negotiation(login, conn); if (ret < 0) goto new_sess_out; diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 9fc9117d0f22..89d34bd6d87f 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -269,6 +269,7 @@ int iscsi_target_check_login_request( return 0; } +EXPORT_SYMBOL(iscsi_target_check_login_request); static int iscsi_target_check_first_request( struct iscsi_conn *conn, @@ -1246,16 +1247,16 @@ int iscsi_target_start_negotiation( { int ret; - ret = iscsi_target_do_login(conn, login); - if (!ret) { - if (conn->sock) { - struct sock *sk = conn->sock->sk; + if (conn->sock) { + struct sock *sk = conn->sock->sk; - write_lock_bh(&sk->sk_callback_lock); - set_bit(LOGIN_FLAGS_READY, &conn->login_flags); - write_unlock_bh(&sk->sk_callback_lock); - } - } else if (ret < 0) { + write_lock_bh(&sk->sk_callback_lock); + set_bit(LOGIN_FLAGS_READY, &conn->login_flags); + write_unlock_bh(&sk->sk_callback_lock); + } + + ret = iscsi_target_do_login(conn, login); + if (ret < 0) { cancel_delayed_work_sync(&conn->login_work); cancel_delayed_work_sync(&conn->login_cleanup_work); iscsi_target_restore_sock_callbacks(conn); diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index 3a1f9a7e6bb6..0efa80bb8962 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c @@ -680,6 +680,7 @@ struct iscsi_param *iscsi_find_param_from_key( pr_err("Unable to locate key \"%s\".\n", key); return NULL; } +EXPORT_SYMBOL(iscsi_find_param_from_key); int iscsi_extract_key_value(char *textbuf, char **key, char **value) { diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 57720385a751..1f38177207e0 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -514,6 +514,7 @@ void iscsit_add_cmd_to_immediate_queue( wake_up(&conn->queues_wq); } +EXPORT_SYMBOL(iscsit_add_cmd_to_immediate_queue); struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn) { @@ -725,6 +726,9 @@ void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd, iscsit_remove_cmd_from_immediate_queue(cmd, conn); iscsit_remove_cmd_from_response_queue(cmd, conn); } + + if (conn && conn->conn_transport->iscsit_release_cmd) + conn->conn_transport->iscsit_release_cmd(conn, cmd); } void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown) @@ -773,6 +777,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown) break; } } +EXPORT_SYMBOL(iscsit_free_cmd); int iscsit_check_session_usage_count(struct iscsi_session *sess) { diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 0ad5ac541a7f..5091b31b3e56 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ 
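
/*
 * [Illustrative sketch] The new iscsit_validate_params() login hook
 * lets a transport veto or clamp negotiated keys before negotiation
 * starts. A minimal implementation is sketched below: the two helpers
 * are real and exported by this patch, while the "foo" names and the
 * FOO_MAX_XMIT_DSL limit are assumptions.
 */
#define FOO_MAX_XMIT_DSL	8192	/* hypothetical hardware limit */

static int foo_validate_params(struct iscsi_conn *conn)
{
	struct iscsi_param *param;
	u32 xmitdsl;

	param = iscsi_find_param_from_key(MAXXMITDATASEGMENTLENGTH,
					  conn->param_list);
	if (!param)
		return -EINVAL;

	if (kstrtou32(param->value, 0, &xmitdsl) < 0)
		return -EINVAL;

	/* clamp MaxXmitDataSegmentLength to what the hardware can do */
	if (xmitdsl > FOO_MAX_XMIT_DSL)
		return iscsi_change_param_sprintf(conn,
				"MaxXmitDataSegmentLength=%u",
				FOO_MAX_XMIT_DSL);
	return 0;
}
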
-601,16 +601,6 @@ static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd) return tl_cmd->sc_cmd_state; } -static int tcm_loop_shutdown_session(struct se_session *se_sess) -{ - return 0; -} - -static void tcm_loop_close_session(struct se_session *se_sess) -{ - return; -}; - static int tcm_loop_write_pending(struct se_cmd *se_cmd) { /* @@ -1243,8 +1233,6 @@ static const struct target_core_fabric_ops loop_ops = { .tpg_get_inst_index = tcm_loop_get_inst_index, .check_stop_free = tcm_loop_check_stop_free, .release_cmd = tcm_loop_release_cmd, - .shutdown_session = tcm_loop_shutdown_session, - .close_session = tcm_loop_close_session, .sess_get_index = tcm_loop_sess_get_index, .write_pending = tcm_loop_write_pending, .write_pending_status = tcm_loop_write_pending_status, diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c index c57e7884973d..58bb6ed18185 100644 --- a/drivers/target/sbp/sbp_target.c +++ b/drivers/target/sbp/sbp_target.c @@ -1726,16 +1726,6 @@ static void sbp_release_cmd(struct se_cmd *se_cmd) sbp_free_request(req); } -static int sbp_shutdown_session(struct se_session *se_sess) -{ - return 0; -} - -static void sbp_close_session(struct se_session *se_sess) -{ - return; -} - static u32 sbp_sess_get_index(struct se_session *se_sess) { return 0; @@ -2349,8 +2339,6 @@ static const struct target_core_fabric_ops sbp_ops = { .tpg_check_prod_mode_write_protect = sbp_check_false, .tpg_get_inst_index = sbp_tpg_get_inst_index, .release_cmd = sbp_release_cmd, - .shutdown_session = sbp_shutdown_session, - .close_session = sbp_close_session, .sess_get_index = sbp_sess_get_index, .write_pending = sbp_write_pending, .write_pending_status = sbp_write_pending_status, diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 49aba4a31747..4c82bbe19003 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -932,7 +932,7 @@ static int core_alua_update_tpg_primary_metadata( tg_pt_gp->tg_pt_gp_alua_access_status); snprintf(path, ALUA_METADATA_PATH_LEN, - "/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0], + "%s/alua/tpgs_%s/%s", db_root, &wwn->unit_serial[0], config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item)); rc = core_alua_write_tpg_metadata(path, md_buf, len); @@ -1275,8 +1275,8 @@ static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun) atomic_read(&lun->lun_tg_pt_secondary_offline), lun->lun_tg_pt_secondary_stat); - snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%llu", - se_tpg->se_tpg_tfo->get_fabric_name(), wwn, + snprintf(path, ALUA_METADATA_PATH_LEN, "%s/alua/%s/%s/lun_%llu", + db_root, se_tpg->se_tpg_tfo->get_fabric_name(), wwn, lun->unpacked_lun); rc = core_alua_write_tpg_metadata(path, md_buf, len); diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index d498533f09ee..2001005bef45 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -99,6 +99,67 @@ static ssize_t target_core_item_version_show(struct config_item *item, CONFIGFS_ATTR_RO(target_core_item_, version); +char db_root[DB_ROOT_LEN] = DB_ROOT_DEFAULT; +static char db_root_stage[DB_ROOT_LEN]; + +static ssize_t target_core_item_dbroot_show(struct config_item *item, + char *page) +{ + return sprintf(page, "%s\n", db_root); +} + +static ssize_t target_core_item_dbroot_store(struct config_item *item, + const char *page, size_t count) +{ + ssize_t read_bytes; + struct file *fp; + + mutex_lock(&g_tf_lock); + if 
(!list_empty(&g_tf_list)) { + mutex_unlock(&g_tf_lock); + pr_err("db_root: cannot be changed: target drivers registered"); + return -EINVAL; + } + + if (count > (DB_ROOT_LEN - 1)) { + mutex_unlock(&g_tf_lock); + pr_err("db_root: count %d exceeds DB_ROOT_LEN-1: %u\n", + (int)count, DB_ROOT_LEN - 1); + return -EINVAL; + } + + read_bytes = snprintf(db_root_stage, DB_ROOT_LEN, "%s", page); + if (!read_bytes) { + mutex_unlock(&g_tf_lock); + return -EINVAL; + } + if (db_root_stage[read_bytes - 1] == '\n') + db_root_stage[read_bytes - 1] = '\0'; + + /* validate new db root before accepting it */ + fp = filp_open(db_root_stage, O_RDONLY, 0); + if (IS_ERR(fp)) { + mutex_unlock(&g_tf_lock); + pr_err("db_root: cannot open: %s\n", db_root_stage); + return -EINVAL; + } + if (!S_ISDIR(fp->f_inode->i_mode)) { + filp_close(fp, 0); + mutex_unlock(&g_tf_lock); + pr_err("db_root: not a directory: %s\n", db_root_stage); + return -EINVAL; + } + filp_close(fp, 0); + + strncpy(db_root, db_root_stage, read_bytes); + + mutex_unlock(&g_tf_lock); + + return read_bytes; +} + +CONFIGFS_ATTR(target_core_item_, dbroot); + static struct target_fabric_configfs *target_core_get_fabric( const char *name) { @@ -239,6 +300,7 @@ static struct configfs_group_operations target_core_fabric_group_ops = { */ static struct configfs_attribute *target_core_fabric_item_attrs[] = { &target_core_item_attr_version, + &target_core_item_attr_dbroot, NULL, }; @@ -323,14 +385,6 @@ static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo) pr_err("Missing tfo->release_cmd()\n"); return -EINVAL; } - if (!tfo->shutdown_session) { - pr_err("Missing tfo->shutdown_session()\n"); - return -EINVAL; - } - if (!tfo->close_session) { - pr_err("Missing tfo->close_session()\n"); - return -EINVAL; - } if (!tfo->sess_get_index) { pr_err("Missing tfo->sess_get_index()\n"); return -EINVAL; diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 86b4a8375628..fc91e85f54ba 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -155,4 +155,10 @@ void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *); /* target_core_xcopy.c */ extern struct se_portal_group xcopy_pt_tpg; +/* target_core_configfs.c */ +#define DB_ROOT_LEN 4096 +#define DB_ROOT_DEFAULT "/var/target" + +extern char db_root[]; + #endif /* TARGET_CORE_INTERNAL_H */ diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index b1795735eafc..47463c99c318 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -1985,7 +1985,7 @@ static int __core_scsi3_write_aptpl_to_file( return -EMSGSIZE; } - snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]); + snprintf(path, 512, "%s/pr/aptpl_%s", db_root, &wwn->unit_serial[0]); file = filp_open(path, flags, 0600); if (IS_ERR(file)) { pr_err("filp_open(%s) for APTPL metadata" diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 47a833f3a145..24b36fd785f1 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -403,7 +403,6 @@ static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read) struct se_device *se_dev = cmd->se_dev; struct rd_dev *dev = RD_DEV(se_dev); struct rd_dev_sg_table *prot_table; - bool need_to_release = false; struct scatterlist *prot_sg; u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size; u32 prot_offset, prot_page; @@ -432,9 +431,6 @@ static sense_reason_t rd_do_prot_rw(struct 
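
/*
 * [Annotation] db_root relocates all on-disk target metadata (the ALUA
 * state and PR/APTPL files whose paths are rewritten elsewhere in this
 * series) away from the hard-coded /var/target. The store handler only
 * accepts a change while no fabric driver is registered, and only if
 * the new path resolves to an existing directory. Typical use
 * (an assumption, not shown in the patch), with configfs mounted in
 * the usual place:
 *
 *	echo /etc/target > /sys/kernel/config/target/dbroot
 *
 * issued before any fabric module is loaded; the trailing newline from
 * echo is stripped by the handler.
 */
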
se_cmd *cmd, bool is_read) if (!rc) sbc_dif_copy_prot(cmd, sectors, is_read, prot_sg, prot_offset); - if (need_to_release) - kfree(prot_sg); - return rc; } diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index ddf046080dc3..d99752c6cd60 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -336,44 +336,39 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( return acl; } -void core_tpg_del_initiator_node_acl(struct se_node_acl *acl) +static void target_shutdown_sessions(struct se_node_acl *acl) { - struct se_portal_group *tpg = acl->se_tpg; - LIST_HEAD(sess_list); - struct se_session *sess, *sess_tmp; + struct se_session *sess; unsigned long flags; - int rc; - - mutex_lock(&tpg->acl_node_mutex); - if (acl->dynamic_node_acl) { - acl->dynamic_node_acl = 0; - } - list_del(&acl->acl_list); - mutex_unlock(&tpg->acl_node_mutex); +restart: spin_lock_irqsave(&acl->nacl_sess_lock, flags); - acl->acl_stop = 1; - - list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list, - sess_acl_list) { - if (sess->sess_tearing_down != 0) + list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) { + if (sess->sess_tearing_down) continue; - if (!target_get_session(sess)) - continue; - list_move(&sess->sess_acl_list, &sess_list); + list_del_init(&sess->sess_acl_list); + spin_unlock_irqrestore(&acl->nacl_sess_lock, flags); + + if (acl->se_tpg->se_tpg_tfo->close_session) + acl->se_tpg->se_tpg_tfo->close_session(sess); + goto restart; } spin_unlock_irqrestore(&acl->nacl_sess_lock, flags); +} - list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) { - list_del(&sess->sess_acl_list); +void core_tpg_del_initiator_node_acl(struct se_node_acl *acl) +{ + struct se_portal_group *tpg = acl->se_tpg; + + mutex_lock(&tpg->acl_node_mutex); + if (acl->dynamic_node_acl) + acl->dynamic_node_acl = 0; + list_del(&acl->acl_list); + mutex_unlock(&tpg->acl_node_mutex); + + target_shutdown_sessions(acl); - rc = tpg->se_tpg_tfo->shutdown_session(sess); - target_put_session(sess); - if (!rc) - continue; - target_put_session(sess); - } target_put_nacl(acl); /* * Wait for last target_put_nacl() to complete in target_complete_nacl() @@ -400,11 +395,7 @@ int core_tpg_set_initiator_node_queue_depth( struct se_node_acl *acl, u32 queue_depth) { - LIST_HEAD(sess_list); struct se_portal_group *tpg = acl->se_tpg; - struct se_session *sess, *sess_tmp; - unsigned long flags; - int rc; /* * User has requested to change the queue depth for a Initiator Node. @@ -413,30 +404,10 @@ int core_tpg_set_initiator_node_queue_depth( */ target_set_nacl_queue_depth(tpg, acl, queue_depth); - spin_lock_irqsave(&acl->nacl_sess_lock, flags); - list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list, - sess_acl_list) { - if (sess->sess_tearing_down != 0) - continue; - if (!target_get_session(sess)) - continue; - spin_unlock_irqrestore(&acl->nacl_sess_lock, flags); - - /* - * Finally call tpg->se_tpg_tfo->close_session() to force session - * reinstatement to occur if there is an active session for the - * $FABRIC_MOD Initiator Node in question. - */ - rc = tpg->se_tpg_tfo->shutdown_session(sess); - target_put_session(sess); - if (!rc) { - spin_lock_irqsave(&acl->nacl_sess_lock, flags); - continue; - } - target_put_session(sess); - spin_lock_irqsave(&acl->nacl_sess_lock, flags); - } - spin_unlock_irqrestore(&acl->nacl_sess_lock, flags); + /* + * Shutdown all pending sessions to force session reinstatement. 
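
/*
 * [Annotation] target_shutdown_sessions() uses the classic
 * drop-lock-and-rescan pattern: close_session() may sleep, so the
 * spinlock is released around the callback and the list walk restarts
 * from the head each time. Using list_del_init() rather than
 * list_del() doubles as the "already handled" marker: the configfs
 * deregister path below now tests
 * list_empty(&se_sess->sess_acl_list) instead of the removed acl_stop
 * flag, which is what makes the restart safe.
 */
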
+ */ + target_shutdown_sessions(acl); pr_debug("Successfully changed queue depth to: %d for Initiator" " Node: %s on %s Target Portal Group: %u\n", acl->queue_depth, diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 590384a2bf8b..5ab3967dda43 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -239,7 +239,6 @@ struct se_session *transport_init_session(enum target_prot_op sup_prot_ops) INIT_LIST_HEAD(&se_sess->sess_cmd_list); INIT_LIST_HEAD(&se_sess->sess_wait_list); spin_lock_init(&se_sess->sess_cmd_lock); - kref_init(&se_sess->sess_kref); se_sess->sup_prot_ops = sup_prot_ops; return se_sess; @@ -430,27 +429,6 @@ target_alloc_session(struct se_portal_group *tpg, } EXPORT_SYMBOL(target_alloc_session); -static void target_release_session(struct kref *kref) -{ - struct se_session *se_sess = container_of(kref, - struct se_session, sess_kref); - struct se_portal_group *se_tpg = se_sess->se_tpg; - - se_tpg->se_tpg_tfo->close_session(se_sess); -} - -int target_get_session(struct se_session *se_sess) -{ - return kref_get_unless_zero(&se_sess->sess_kref); -} -EXPORT_SYMBOL(target_get_session); - -void target_put_session(struct se_session *se_sess) -{ - kref_put(&se_sess->sess_kref, target_release_session); -} -EXPORT_SYMBOL(target_put_session); - ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page) { struct se_session *se_sess; @@ -499,8 +477,8 @@ void transport_deregister_session_configfs(struct se_session *se_sess) se_nacl = se_sess->se_node_acl; if (se_nacl) { spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); - if (se_nacl->acl_stop == 0) - list_del(&se_sess->sess_acl_list); + if (!list_empty(&se_sess->sess_acl_list)) + list_del_init(&se_sess->sess_acl_list); /* * If the session list is empty, then clear the pointer. * Otherwise, set the struct se_session pointer from the tail diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h index c30003bd4ff0..e28209b99b59 100644 --- a/drivers/target/tcm_fc/tcm_fc.h +++ b/drivers/target/tcm_fc/tcm_fc.h @@ -139,7 +139,6 @@ extern unsigned int ft_debug_logging; * Session ops. */ void ft_sess_put(struct ft_sess *); -int ft_sess_shutdown(struct se_session *); void ft_sess_close(struct se_session *); u32 ft_sess_get_index(struct se_session *); u32 ft_sess_get_port_name(struct se_session *, unsigned char *, u32); diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index 4d375e95841b..42ee91123dca 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c @@ -442,7 +442,6 @@ static const struct target_core_fabric_ops ft_fabric_ops = { .tpg_get_inst_index = ft_tpg_get_inst_index, .check_stop_free = ft_check_stop_free, .release_cmd = ft_release_cmd, - .shutdown_session = ft_sess_shutdown, .close_session = ft_sess_close, .sess_get_index = ft_sess_get_index, .sess_get_initiator_sid = NULL, diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c index d0c3e1894c61..f5186a744399 100644 --- a/drivers/target/tcm_fc/tfc_sess.c +++ b/drivers/target/tcm_fc/tfc_sess.c @@ -303,18 +303,6 @@ static void ft_sess_delete_all(struct ft_tport *tport) */ /* - * Determine whether session is allowed to be shutdown in the current context. - * Returns non-zero if the session should be shutdown. 
- */
-int ft_sess_shutdown(struct se_session *se_sess)
-{
-	struct ft_sess *sess = se_sess->fabric_sess_ptr;
-
-	pr_debug("port_id %x\n", sess->port_id);
-	return 1;
-}
-
-/*
 * Remove session and send PRLO.
 * This is called when the ACL is being deleted or queue depth is changing.
 */
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index d89d60c8b6cf..2d702ca6556f 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -260,16 +260,6 @@ config ARMADA_THERMAL
	  Enable this option if you want to have support for thermal management
	  controller present in Armada 370 and Armada XP SoC.
-config TEGRA_SOCTHERM
-	tristate "Tegra SOCTHERM thermal management"
-	depends on ARCH_TEGRA
-	help
-	  Enable this option for integrated thermal management support on NVIDIA
-	  Tegra124 systems-on-chip. The driver supports four thermal zones
-	  (CPU, GPU, MEM, PLLX). Cooling devices can be bound to the thermal
-	  zones to manage temperatures. This option is also required for the
-	  emergency thermal reset (thermtrip) feature to function.
-
config DB8500_CPUFREQ_COOLING
	tristate "DB8500 cpufreq cooling"
	depends on ARCH_U8500 || COMPILE_TEST
@@ -377,6 +367,17 @@ depends on ARCH_STI && OF
source "drivers/thermal/st/Kconfig"
endmenu
+config TANGO_THERMAL
+	tristate "Tango thermal management"
+	depends on ARCH_TANGO || COMPILE_TEST
+	help
+	  Enable the Tango thermal driver, which supports the primitive
+	  temperature sensor embedded in Tango chips since the SMP8758.
+	  This sensor only generates a 1-bit signal to indicate whether
+	  the die temperature exceeds a programmable threshold.
+
+source "drivers/thermal/tegra/Kconfig"
+
config QCOM_SPMI_TEMP_ALARM
	tristate "Qualcomm SPMI PMIC Temperature Alarm"
	depends on OF && SPMI && IIO
@@ -388,4 +389,14 @@ config QCOM_SPMI_TEMP_ALARM
	  real time die temperature if an ADC is present or an estimate of
	  the temperature based upon the over temperature stage value.
+config GENERIC_ADC_THERMAL
+	tristate "Generic ADC based thermal sensor"
+	depends on IIO
+	help
+	  This enables a thermal sysfs driver for a temperature sensor
+	  connected to a general-purpose ADC. The ADC channel is read via
+	  the IIO framework, and the channel information is provided to
+	  this driver. The driver reports the temperature by reading the
+	  ADC channel and converting the value to a temperature using a
+	  lookup table. 
+ endif diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile index 8e9cbc3b5679..10b07c14f8a9 100644 --- a/drivers/thermal/Makefile +++ b/drivers/thermal/Makefile @@ -35,6 +35,7 @@ obj-y += samsung/ obj-$(CONFIG_DOVE_THERMAL) += dove_thermal.o obj-$(CONFIG_DB8500_THERMAL) += db8500_thermal.o obj-$(CONFIG_ARMADA_THERMAL) += armada_thermal.o +obj-$(CONFIG_TANGO_THERMAL) += tango_thermal.o obj-$(CONFIG_IMX_THERMAL) += imx_thermal.o obj-$(CONFIG_DB8500_CPUFREQ_COOLING) += db8500_cpufreq_cooling.o obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o @@ -46,6 +47,7 @@ obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/ obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal/ obj-$(CONFIG_INTEL_PCH_THERMAL) += intel_pch_thermal.o obj-$(CONFIG_ST_THERMAL) += st/ -obj-$(CONFIG_TEGRA_SOCTHERM) += tegra_soctherm.o +obj-$(CONFIG_TEGRA_SOCTHERM) += tegra/ obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o obj-$(CONFIG_MTK_THERMAL) += mtk_thermal.o +obj-$(CONFIG_GENERIC_ADC_THERMAL) += thermal-generic-adc.o diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c index 70836c5b89bc..fc52016d4e85 100644 --- a/drivers/thermal/gov_bang_bang.c +++ b/drivers/thermal/gov_bang_bang.c @@ -29,7 +29,13 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip) struct thermal_instance *instance; tz->ops->get_trip_temp(tz, trip, &trip_temp); - tz->ops->get_trip_hyst(tz, trip, &trip_hyst); + + if (!tz->ops->get_trip_hyst) { + pr_warn_once("Undefined get_trip_hyst for thermal zone %s - " + "running with default hysteresis zero\n", tz->type); + trip_hyst = 0; + } else + tz->ops->get_trip_hyst(tz, trip, &trip_hyst); dev_dbg(&tz->device, "Trip%d[temp=%d]:temp=%d:hyst=%d\n", trip, trip_temp, tz->temperature, diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c index 5e820b541506..97fad8f51e1c 100644 --- a/drivers/thermal/hisi_thermal.c +++ b/drivers/thermal/hisi_thermal.c @@ -160,7 +160,7 @@ static int hisi_thermal_get_temp(void *_sensor, int *temp) struct hisi_thermal_sensor *sensor = _sensor; struct hisi_thermal_data *data = sensor->thermal; - int sensor_id = 0, i; + int sensor_id = -1, i; long max_temp = 0; *temp = hisi_thermal_get_sensor_temp(data, sensor); @@ -168,12 +168,19 @@ static int hisi_thermal_get_temp(void *_sensor, int *temp) sensor->sensor_temp = *temp; for (i = 0; i < HISI_MAX_SENSORS; i++) { + if (!data->sensors[i].tzd) + continue; + if (data->sensors[i].sensor_temp >= max_temp) { max_temp = data->sensors[i].sensor_temp; sensor_id = i; } } + /* If no sensor has been enabled, then skip to enable irq */ + if (sensor_id == -1) + return 0; + mutex_lock(&data->thermal_lock); data->irq_bind_sensor = sensor_id; mutex_unlock(&data->thermal_lock); @@ -226,8 +233,12 @@ static irqreturn_t hisi_thermal_alarm_irq_thread(int irq, void *dev) sensor->thres_temp / 1000); mutex_unlock(&data->thermal_lock); - for (i = 0; i < HISI_MAX_SENSORS; i++) + for (i = 0; i < HISI_MAX_SENSORS; i++) { + if (!data->sensors[i].tzd) + continue; + thermal_zone_device_update(data->sensors[i].tzd); + } return IRQ_HANDLED; } @@ -243,10 +254,11 @@ static int hisi_thermal_register_sensor(struct platform_device *pdev, sensor->id = index; sensor->thermal = data; - sensor->tzd = thermal_zone_of_sensor_register(&pdev->dev, sensor->id, - sensor, &hisi_of_thermal_ops); + sensor->tzd = devm_thermal_zone_of_sensor_register(&pdev->dev, + sensor->id, sensor, &hisi_of_thermal_ops); if (IS_ERR(sensor->tzd)) { ret = PTR_ERR(sensor->tzd); + sensor->tzd = NULL; dev_err(&pdev->dev, "failed to 
register sensor id %d: %d\n", sensor->id, ret); return ret; @@ -331,28 +343,21 @@ static int hisi_thermal_probe(struct platform_device *pdev) return ret; } + hisi_thermal_enable_bind_irq_sensor(data); + irq_get_irqchip_state(data->irq, IRQCHIP_STATE_MASKED, + &data->irq_enabled); + for (i = 0; i < HISI_MAX_SENSORS; ++i) { ret = hisi_thermal_register_sensor(pdev, data, &data->sensors[i], i); - if (ret) { + if (ret) dev_err(&pdev->dev, "failed to register thermal sensor: %d\n", ret); - goto err_get_sensor_data; - } + else + hisi_thermal_toggle_sensor(&data->sensors[i], true); } - hisi_thermal_enable_bind_irq_sensor(data); - data->irq_enabled = true; - - for (i = 0; i < HISI_MAX_SENSORS; i++) - hisi_thermal_toggle_sensor(&data->sensors[i], true); - return 0; - -err_get_sensor_data: - clk_disable_unprepare(data->clk); - - return ret; } static int hisi_thermal_remove(struct platform_device *pdev) @@ -363,8 +368,10 @@ static int hisi_thermal_remove(struct platform_device *pdev) for (i = 0; i < HISI_MAX_SENSORS; i++) { struct hisi_thermal_sensor *sensor = &data->sensors[i]; + if (!sensor->tzd) + continue; + hisi_thermal_toggle_sensor(sensor, false); - thermal_zone_of_sensor_unregister(&pdev->dev, sensor->tzd); } hisi_thermal_disable_sensor(data); diff --git a/drivers/thermal/int340x_thermal/int3406_thermal.c b/drivers/thermal/int340x_thermal/int3406_thermal.c index 13d431cbd29e..a578cd257db4 100644 --- a/drivers/thermal/int340x_thermal/int3406_thermal.c +++ b/drivers/thermal/int340x_thermal/int3406_thermal.c @@ -177,7 +177,7 @@ static int int3406_thermal_probe(struct platform_device *pdev) return -ENODEV; d->raw_bd = bd; - ret = acpi_video_get_levels(ACPI_COMPANION(&pdev->dev), &d->br); + ret = acpi_video_get_levels(ACPI_COMPANION(&pdev->dev), &d->br, NULL); if (ret) return ret; diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c index 36fa724a36c8..42c1ac057bad 100644 --- a/drivers/thermal/int340x_thermal/processor_thermal_device.c +++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c @@ -198,49 +198,33 @@ static struct thermal_zone_device_ops proc_thermal_local_ops = { .get_temp = proc_thermal_get_zone_temp, }; -static int proc_thermal_add(struct device *dev, - struct proc_thermal_device **priv) +static int proc_thermal_read_ppcc(struct proc_thermal_device *proc_priv) { - struct proc_thermal_device *proc_priv; - struct acpi_device *adev; + int i; acpi_status status; struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *elements, *ppcc; union acpi_object *p; - unsigned long long tmp; - struct thermal_zone_device_ops *ops = NULL; - int i; - int ret; - - adev = ACPI_COMPANION(dev); - if (!adev) - return -ENODEV; + int ret = 0; - status = acpi_evaluate_object(adev->handle, "PPCC", NULL, &buf); + status = acpi_evaluate_object(proc_priv->adev->handle, "PPCC", + NULL, &buf); if (ACPI_FAILURE(status)) return -ENODEV; p = buf.pointer; if (!p || (p->type != ACPI_TYPE_PACKAGE)) { - dev_err(dev, "Invalid PPCC data\n"); + dev_err(proc_priv->dev, "Invalid PPCC data\n"); ret = -EFAULT; goto free_buffer; } + if (!p->package.count) { - dev_err(dev, "Invalid PPCC package size\n"); + dev_err(proc_priv->dev, "Invalid PPCC package size\n"); ret = -EFAULT; goto free_buffer; } - proc_priv = devm_kzalloc(dev, sizeof(*proc_priv), GFP_KERNEL); - if (!proc_priv) { - ret = -ENOMEM; - goto free_buffer; - } - - proc_priv->dev = dev; - proc_priv->adev = adev; - for (i = 0; i < min((int)p->package.count - 1, 2); 
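
/*
 * [Annotation] The hisi_thermal rework above makes partially probed
 * devices safe: sensors are registered with the devm_ variant (so
 * remove() no longer unregisters them by hand) and sensor->tzd is
 * reset to NULL on registration failure. That NULL then acts as the
 * sentinel which every loop (get_temp, the alarm IRQ thread, remove())
 * checks before touching a sensor, so one failed zone no longer aborts
 * the whole probe.
 */
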
++i) { elements = &(p->package.elements[i+1]); if (elements->type != ACPI_TYPE_PACKAGE || @@ -257,12 +241,62 @@ static int proc_thermal_add(struct device *dev, proc_priv->power_limits[i].step_uw = ppcc[5].integer.value; } +free_buffer: + kfree(buf.pointer); + + return ret; +} + +#define PROC_POWER_CAPABILITY_CHANGED 0x83 +static void proc_thermal_notify(acpi_handle handle, u32 event, void *data) +{ + struct proc_thermal_device *proc_priv = data; + + if (!proc_priv) + return; + + switch (event) { + case PROC_POWER_CAPABILITY_CHANGED: + proc_thermal_read_ppcc(proc_priv); + int340x_thermal_zone_device_update(proc_priv->int340x_zone); + break; + default: + dev_err(proc_priv->dev, "Unsupported event [0x%x]\n", event); + break; + } +} + + +static int proc_thermal_add(struct device *dev, + struct proc_thermal_device **priv) +{ + struct proc_thermal_device *proc_priv; + struct acpi_device *adev; + acpi_status status; + unsigned long long tmp; + struct thermal_zone_device_ops *ops = NULL; + int ret; + + adev = ACPI_COMPANION(dev); + if (!adev) + return -ENODEV; + + proc_priv = devm_kzalloc(dev, sizeof(*proc_priv), GFP_KERNEL); + if (!proc_priv) + return -ENOMEM; + + proc_priv->dev = dev; + proc_priv->adev = adev; *priv = proc_priv; - ret = sysfs_create_group(&dev->kobj, - &power_limit_attribute_group); + ret = proc_thermal_read_ppcc(proc_priv); + if (!ret) { + ret = sysfs_create_group(&dev->kobj, + &power_limit_attribute_group); + + } if (ret) - goto free_buffer; + return ret; status = acpi_evaluate_integer(adev->handle, "_TMP", NULL, &tmp); if (ACPI_FAILURE(status)) { @@ -274,20 +308,32 @@ static int proc_thermal_add(struct device *dev, proc_priv->int340x_zone = int340x_thermal_zone_add(adev, ops); if (IS_ERR(proc_priv->int340x_zone)) { - sysfs_remove_group(&proc_priv->dev->kobj, - &power_limit_attribute_group); ret = PTR_ERR(proc_priv->int340x_zone); + goto remove_group; } else ret = 0; -free_buffer: - kfree(buf.pointer); + ret = acpi_install_notify_handler(adev->handle, ACPI_DEVICE_NOTIFY, + proc_thermal_notify, + (void *)proc_priv); + if (ret) + goto remove_zone; + + return 0; + +remove_zone: + int340x_thermal_zone_remove(proc_priv->int340x_zone); +remove_group: + sysfs_remove_group(&proc_priv->dev->kobj, + &power_limit_attribute_group); return ret; } static void proc_thermal_remove(struct proc_thermal_device *proc_priv) { + acpi_remove_notify_handler(proc_priv->adev->handle, + ACPI_DEVICE_NOTIFY, proc_thermal_notify); int340x_thermal_zone_remove(proc_priv->int340x_zone); sysfs_remove_group(&proc_priv->dev->kobj, &power_limit_attribute_group); diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c index 6c79588251d5..015ce2eb6eb7 100644 --- a/drivers/thermal/intel_powerclamp.c +++ b/drivers/thermal/intel_powerclamp.c @@ -510,12 +510,6 @@ static int start_power_clamp(void) unsigned long cpu; struct task_struct *thread; - /* check if pkg cstate counter is completely 0, abort in this case */ - if (!has_pkg_state_counter()) { - pr_err("pkg cstate counter not functional, abort\n"); - return -EINVAL; - } - set_target_ratio = clamp(set_target_ratio, 0U, MAX_TARGET_RATIO - 1); /* prevent cpu hotplug */ get_online_cpus(); @@ -672,35 +666,11 @@ static struct thermal_cooling_device_ops powerclamp_cooling_ops = { .set_cur_state = powerclamp_set_cur_state, }; -/* runs on Nehalem and later */ static const struct x86_cpu_id intel_powerclamp_ids[] __initconst = { - { X86_VENDOR_INTEL, 6, 0x1a}, - { X86_VENDOR_INTEL, 6, 0x1c}, - { X86_VENDOR_INTEL, 6, 0x1e}, - { X86_VENDOR_INTEL, 
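/*
 * [Editor's note] Each x86_cpu_id entry in the table below is matched
 * independently, so the new table matches when *any* one of the four
 * features is present, not all of them as the old boot_cpu_has() chain
 * required; the real capability gate moves to has_pkg_state_counter()
 * in powerclamp_probe(). A minimal sketch of the match call, using the
 * table name from this driver:
 */
#include <asm/cpu_device_id.h>

static int __init example_powerclamp_match(void)
{
	/* x86_match_cpu() returns NULL when no entry matches the boot CPU */
	if (!x86_match_cpu(intel_powerclamp_ids))
		return -ENODEV;
	return 0;
}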
6, 0x1f}, - { X86_VENDOR_INTEL, 6, 0x25}, - { X86_VENDOR_INTEL, 6, 0x26}, - { X86_VENDOR_INTEL, 6, 0x2a}, - { X86_VENDOR_INTEL, 6, 0x2c}, - { X86_VENDOR_INTEL, 6, 0x2d}, - { X86_VENDOR_INTEL, 6, 0x2e}, - { X86_VENDOR_INTEL, 6, 0x2f}, - { X86_VENDOR_INTEL, 6, 0x37}, - { X86_VENDOR_INTEL, 6, 0x3a}, - { X86_VENDOR_INTEL, 6, 0x3c}, - { X86_VENDOR_INTEL, 6, 0x3d}, - { X86_VENDOR_INTEL, 6, 0x3e}, - { X86_VENDOR_INTEL, 6, 0x3f}, - { X86_VENDOR_INTEL, 6, 0x45}, - { X86_VENDOR_INTEL, 6, 0x46}, - { X86_VENDOR_INTEL, 6, 0x47}, - { X86_VENDOR_INTEL, 6, 0x4c}, - { X86_VENDOR_INTEL, 6, 0x4d}, - { X86_VENDOR_INTEL, 6, 0x4e}, - { X86_VENDOR_INTEL, 6, 0x4f}, - { X86_VENDOR_INTEL, 6, 0x56}, - { X86_VENDOR_INTEL, 6, 0x57}, - { X86_VENDOR_INTEL, 6, 0x5e}, + { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT }, + { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_ARAT }, + { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_NONSTOP_TSC }, + { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_CONSTANT_TSC}, {} }; MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids); @@ -712,11 +682,12 @@ static int __init powerclamp_probe(void) boot_cpu_data.x86, boot_cpu_data.x86_model); return -ENODEV; } - if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC) || - !boot_cpu_has(X86_FEATURE_CONSTANT_TSC) || - !boot_cpu_has(X86_FEATURE_MWAIT) || - !boot_cpu_has(X86_FEATURE_ARAT)) + + /* The goal for idle time alignment is to achieve package cstate. */ + if (!has_pkg_state_counter()) { + pr_info("No package C-state available"); return -ENODEV; + } /* find the deepest mwait value */ find_target_mwait(); diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c index 507632b9648e..262ab0a2266f 100644 --- a/drivers/thermal/mtk_thermal.c +++ b/drivers/thermal/mtk_thermal.c @@ -144,7 +144,6 @@ struct mtk_thermal { s32 o_slope; s32 vts[MT8173_NUM_SENSORS]; - struct thermal_zone_device *tzd; }; struct mtk_thermal_bank_cfg { @@ -572,16 +571,11 @@ static int mtk_thermal_probe(struct platform_device *pdev) platform_set_drvdata(pdev, mt); - mt->tzd = thermal_zone_of_sensor_register(&pdev->dev, 0, mt, - &mtk_thermal_ops); - if (IS_ERR(mt->tzd)) - goto err_register; + devm_thermal_zone_of_sensor_register(&pdev->dev, 0, mt, + &mtk_thermal_ops); return 0; -err_register: - clk_disable_unprepare(mt->clk_peri_therm); - err_disable_clk_auxadc: clk_disable_unprepare(mt->clk_auxadc); @@ -592,8 +586,6 @@ static int mtk_thermal_remove(struct platform_device *pdev) { struct mtk_thermal *mt = platform_get_drvdata(pdev); - thermal_zone_of_sensor_unregister(&pdev->dev, mt->tzd); - clk_disable_unprepare(mt->clk_peri_therm); clk_disable_unprepare(mt->clk_auxadc); diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c index d8ec44b194d6..b8e509c60848 100644 --- a/drivers/thermal/of-thermal.c +++ b/drivers/thermal/of-thermal.c @@ -331,6 +331,14 @@ static int of_thermal_set_trip_temp(struct thermal_zone_device *tz, int trip, if (trip >= data->ntrips || trip < 0) return -EDOM; + if (data->ops->set_trip_temp) { + int ret; + + ret = data->ops->set_trip_temp(data->sensor_data, trip, temp); + if (ret) + return ret; + } + /* thermal framework should take care of data->mask & (1 << trip) */ data->trips[trip].temperature = temp; @@ -906,7 +914,7 @@ finish: return tz; free_tbps: - for (i = 0; i < tz->num_tbps; i++) + for (i = i - 1; i >= 0; i--) of_node_put(tz->tbps[i].cooling_device); kfree(tz->tbps); free_trips: diff --git a/drivers/thermal/qcom-spmi-temp-alarm.c 
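/*
 * [Editor's sketch] The of-thermal free_tbps change above is the standard
 * partial-unwind pattern: on failure at index i, release only entries
 * 0..i-1 that were actually acquired (the old loop also put cooling
 * device nodes that had never been taken). Generic shape, with the
 * hypothetical acquire()/release() standing in for the
 * of_node_get()/of_node_put() pair:
 */
static int example_setup(void **objs, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		objs[i] = acquire(i);
		if (!objs[i])
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)	/* release only what was acquired */
		release(objs[i]);
	return -ENOMEM;
}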
b/drivers/thermal/qcom-spmi-temp-alarm.c index b677aada5b52..f8a3c60bef94 100644 --- a/drivers/thermal/qcom-spmi-temp-alarm.c +++ b/drivers/thermal/qcom-spmi-temp-alarm.c @@ -260,7 +260,7 @@ static int qpnp_tm_probe(struct platform_device *pdev) if (ret < 0) goto fail; - chip->tz_dev = thermal_zone_of_sensor_register(&pdev->dev, 0, chip, + chip->tz_dev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, chip, &qpnp_tm_sensor_ops); if (IS_ERR(chip->tz_dev)) { dev_err(&pdev->dev, "failed to register sensor\n"); @@ -281,7 +281,6 @@ static int qpnp_tm_remove(struct platform_device *pdev) { struct qpnp_tm_chip *chip = dev_get_drvdata(&pdev->dev); - thermal_zone_of_sensor_unregister(&pdev->dev, chip->tz_dev); if (!IS_ERR(chip->adc)) iio_channel_release(chip->adc); diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index 82daba09e150..71a339271fa5 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c @@ -492,7 +492,7 @@ static int rcar_thermal_probe(struct platform_device *pdev) goto error_unregister; if (of_data == USE_OF_THERMAL) - priv->zone = thermal_zone_of_sensor_register( + priv->zone = devm_thermal_zone_of_sensor_register( dev, i, priv, &rcar_thermal_zone_of_ops); else diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c index 233a564442a0..5d491f16a866 100644 --- a/drivers/thermal/rockchip_thermal.c +++ b/drivers/thermal/rockchip_thermal.c @@ -1,7 +1,5 @@ /* - * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd - * - * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd + * Copyright (c) 2014-2016, Fuzhou Rockchip Electronics Co., Ltd * Caesar Wang <wxt@rock-chips.com> * * This program is free software; you can redistribute it and/or modify it @@ -23,8 +21,10 @@ #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/platform_device.h> +#include <linux/regmap.h> #include <linux/reset.h> #include <linux/thermal.h> +#include <linux/mfd/syscon.h> #include <linux/pinctrl/consumer.h> /** @@ -73,7 +73,7 @@ enum adc_sort_mode { #define SOC_MAX_SENSORS 2 /** - * struct chip_tsadc_table: hold information about chip-specific differences + * struct chip_tsadc_table - hold information about chip-specific differences * @id: conversion table * @length: size of conversion table * @data_mask: mask to apply on data inputs @@ -86,6 +86,20 @@ struct chip_tsadc_table { enum adc_sort_mode mode; }; +/** + * struct rockchip_tsadc_chip - hold the private data of tsadc chip + * @chn_id[SOC_MAX_SENSORS]: the sensor ids of the chip corresponding to the channels + * @chn_num: the channel number of tsadc chip + * @tshut_temp: the hardware-controlled shutdown temperature value + * @tshut_mode: the hardware-controlled shutdown mode (0:CRU 1:GPIO) + * @tshut_polarity: the hardware-controlled active polarity (0:LOW 1:HIGH) + * @initialize: SoC-specific method to initialize the tsadc controller + * @irq_ack: clear the interrupt + * @get_temp: get the temperature + * @set_tshut_temp: set the hardware-controlled shutdown temperature + * @set_tshut_mode: set the hardware-controlled shutdown mode + * @table: the chip-specific conversion table + */ struct rockchip_tsadc_chip { /* The sensor id of chip correspond to the ADC channel */ int chn_id[SOC_MAX_SENSORS]; int chn_num; enum tshut_mode tshut_mode; enum tshut_polarity tshut_polarity; /* Chip-wide methods */ - void (*initialize)(void __iomem *reg, enum tshut_polarity p); + void (*initialize)(struct regmap *grf, + void __iomem *reg, enum tshut_polarity p); void 
(*irq_ack)(void __iomem *reg); void (*control)(void __iomem *reg, bool on); @@ -112,12 +127,32 @@ struct rockchip_tsadc_chip { struct chip_tsadc_table table; }; +/** + * struct rockchip_thermal_sensor - hold the information of thermal sensor + * @thermal: pointer to the platform/configuration data + * @tzd: pointer to a thermal zone + * @id: identifier of the thermal sensor + */ struct rockchip_thermal_sensor { struct rockchip_thermal_data *thermal; struct thermal_zone_device *tzd; int id; }; +/** + * struct rockchip_thermal_data - hold the private data of thermal driver + * @chip: pointer to the platform/configuration data + * @pdev: platform device of thermal + * @reset: the reset controller of tsadc + * @sensors[SOC_MAX_SENSORS]: the thermal sensors, one per channel + * @clk: the controller clock, divided down from the external 24MHz clock + * @pclk: the advanced peripherals bus clock + * @grf: the general register file, used for static configuration by software + * @regs: the base address of tsadc controller + * @tshut_temp: the hardware-controlled shutdown temperature value + * @tshut_mode: the hardware-controlled shutdown mode (0:CRU 1:GPIO) + * @tshut_polarity: the hardware-controlled active polarity (0:LOW 1:HIGH) + */ struct rockchip_thermal_data { const struct rockchip_tsadc_chip *chip; struct platform_device *pdev; @@ -128,6 +163,7 @@ struct rockchip_thermal_data { struct clk *clk; struct clk *pclk; + struct regmap *grf; void __iomem *regs; int tshut_temp; @@ -142,6 +178,7 @@ struct rockchip_thermal_data { * TSADCV3_* are used for newer SoCs than RK3288. (e.g: RK3228, RK3399) * */ +#define TSADCV2_USER_CON 0x00 #define TSADCV2_AUTO_CON 0x04 #define TSADCV2_INT_EN 0x08 #define TSADCV2_INT_PD 0x0c @@ -155,12 +192,7 @@ struct rockchip_thermal_data { #define TSADCV2_AUTO_EN BIT(0) #define TSADCV2_AUTO_SRC_EN(chn) BIT(4 + (chn)) #define TSADCV2_AUTO_TSHUT_POLARITY_HIGH BIT(8) -/** - * TSADCV1_AUTO_Q_SEL_EN: - * whether select (1024 - tsadc_q) as output - * 1'b0:use tsadc_q as output(temperature-code is rising sequence) - * 1'b1:use(1024 - tsadc_q) as output (temperature-code is falling sequence) - */ + #define TSADCV3_AUTO_Q_SEL_EN BIT(1) #define TSADCV2_INT_SRC_EN(chn) BIT(chn) @@ -177,19 +209,32 @@ struct rockchip_thermal_data { #define TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT 4 #define TSADCV2_AUTO_PERIOD_TIME 250 /* msec */ #define TSADCV2_AUTO_PERIOD_HT_TIME 50 /* msec */ +#define TSADCV2_USER_INTER_PD_SOC 0x340 /* 13 clocks */ -struct tsadc_table { - u32 code; - int temp; -}; +#define GRF_SARADC_TESTBIT 0x0e644 +#define GRF_TSADC_TESTBIT_L 0x0e648 +#define GRF_TSADC_TESTBIT_H 0x0e64c + +#define GRF_TSADC_TSEN_PD_ON (0x30003 << 0) +#define GRF_TSADC_TSEN_PD_OFF (0x30000 << 0) +#define GRF_SARADC_TESTBIT_ON (0x10001 << 2) +#define GRF_TSADC_TESTBIT_H_ON (0x10001 << 2) /** + * struct tsadc_table - code to temperature conversion table + * @code: the value of adc channel + * @temp: the temperature * Note: - * Code to Temperature mapping of the Temperature sensor is a piece wise linear + * code to temperature mapping of the temperature sensor is a piecewise linear * curve. Any code falling between two adjacent table entries can be * linearly interpolated. - * Code to Temperature mapping should be updated based on sillcon results. + * Code to Temperature mapping should be updated based on manufacturer results. */
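/*
 * [Editor's sketch] Illustration of the piece-wise linear conversion the
 * comment above describes, for an ADC_INCREMENT table (codes rise with
 * temperature). The driver's rk_tsadcv2_code_to_temp() does the same via
 * a binary search and also handles ADC_DECREMENT tables; this linear-scan
 * version is only illustrative:
 */
static int example_code_to_temp(const struct tsadc_table *t, int len,
				u32 code, int *temp)
{
	int i;

	for (i = 1; i < len; i++) {
		if (code >= t[i - 1].code && code <= t[i].code) {
			/* linear interpolation between rows i-1 and i */
			*temp = t[i - 1].temp +
				(t[i].temp - t[i - 1].temp) *
				(int)(code - t[i - 1].code) /
				(int)(t[i].code - t[i - 1].code);
			return 0;
		}
	}

	return -EAGAIN;	/* invalid reading: code outside the table */
}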
+struct tsadc_table { + u32 code; + int temp; +}; + static const struct tsadc_table rk3228_code_table[] = { {0, -40000}, {588, -40000}, @@ -308,40 +353,40 @@ static const struct tsadc_table rk3368_code_table[] = { static const struct tsadc_table rk3399_code_table[] = { {0, -40000}, - {593, -40000}, - {598, -35000}, - {603, -30000}, - {609, -25000}, - {614, -20000}, - {619, -15000}, - {625, -10000}, - {630, -5000}, - {635, 0}, - {641, 5000}, - {646, 10000}, - {651, 15000}, - {657, 20000}, - {662, 25000}, - {667, 30000}, - {673, 35000}, - {678, 40000}, - {684, 45000}, - {689, 50000}, - {694, 55000}, - {700, 60000}, - {705, 65000}, - {711, 70000}, - {716, 75000}, - {722, 80000}, - {727, 85000}, - {733, 90000}, - {738, 95000}, - {743, 100000}, - {749, 105000}, - {754, 110000}, - {760, 115000}, - {765, 120000}, - {771, 125000}, + {402, -40000}, + {410, -35000}, + {419, -30000}, + {427, -25000}, + {436, -20000}, + {444, -15000}, + {453, -10000}, + {461, -5000}, + {470, 0}, + {478, 5000}, + {487, 10000}, + {496, 15000}, + {504, 20000}, + {513, 25000}, + {521, 30000}, + {530, 35000}, + {538, 40000}, + {547, 45000}, + {555, 50000}, + {564, 55000}, + {573, 60000}, + {581, 65000}, + {590, 70000}, + {599, 75000}, + {607, 80000}, + {616, 85000}, + {624, 90000}, + {633, 95000}, + {642, 100000}, + {650, 105000}, + {659, 110000}, + {668, 115000}, + {677, 120000}, + {685, 125000}, {TSADCV3_DATA_MASK, 125000}, }; @@ -405,8 +450,8 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code, return -EAGAIN; /* Incorrect reading */ while (low <= high) { - if (code >= table.id[mid - 1].code && - code < table.id[mid].code) + if (code <= table.id[mid].code && + code > table.id[mid - 1].code) break; else if (code > table.id[mid].code) low = mid + 1; @@ -449,7 +494,7 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code, * If the temperature is higher than COMP_INT or COMP_SHUT for * "debounce" times, TSADC controller will generate interrupt or TSHUT. */ -static void rk_tsadcv2_initialize(void __iomem *regs, +static void rk_tsadcv2_initialize(struct regmap *grf, void __iomem *regs, enum tshut_polarity tshut_polarity) { if (tshut_polarity == TSHUT_HIGH_ACTIVE) @@ -466,6 +511,62 @@ static void rk_tsadcv2_initialize(void __iomem *regs, regs + TSADCV2_AUTO_PERIOD_HT); writel_relaxed(TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT, regs + TSADCV2_HIGHT_TSHUT_DEBOUNCE); + + if (IS_ERR(grf)) { + pr_warn("%s: Missing rockchip,grf property\n", __func__); + return; + } +} + +/** + * rk_tsadcv3_initialize - initialize TSADC Controller. + * + * (1) The tsadc control power sequence. + * + * (2) Set TSADC_V2_AUTO_PERIOD: + * Configure the interval between every two accesses of the + * TSADC in normal operation. + * + * (3) Set TSADCV2_AUTO_PERIOD_HT: + * Configure the interval between every two accesses of the + * TSADC after the temperature is higher than COM_SHUT or COM_INT. + * + * (4) Set TSADCV2_HIGH_INT_DEBOUNCE and TSADC_HIGHT_TSHUT_DEBOUNCE: + * If the temperature is higher than COMP_INT or COMP_SHUT for + * "debounce" times, TSADC controller will generate interrupt or TSHUT. 
+ */ +static void rk_tsadcv3_initialize(struct regmap *grf, void __iomem *regs, + enum tshut_polarity tshut_polarity) +{ + /* The tsadc control power sequence */ + if (IS_ERR(grf)) { + /* Set the interleave value to work around an IC time-sync issue */ + writel_relaxed(TSADCV2_USER_INTER_PD_SOC, regs + + TSADCV2_USER_CON); + } else { + regmap_write(grf, GRF_TSADC_TESTBIT_L, GRF_TSADC_TSEN_PD_ON); + mdelay(10); + regmap_write(grf, GRF_TSADC_TESTBIT_L, GRF_TSADC_TSEN_PD_OFF); + usleep_range(15, 100); /* The spec note says at least 15 us */ + regmap_write(grf, GRF_SARADC_TESTBIT, GRF_SARADC_TESTBIT_ON); + regmap_write(grf, GRF_TSADC_TESTBIT_H, GRF_TSADC_TESTBIT_H_ON); + usleep_range(90, 200); /* The spec note says at least 90 us */ + } + + if (tshut_polarity == TSHUT_HIGH_ACTIVE) + writel_relaxed(0U | TSADCV2_AUTO_TSHUT_POLARITY_HIGH, + regs + TSADCV2_AUTO_CON); + else + writel_relaxed(0U & ~TSADCV2_AUTO_TSHUT_POLARITY_HIGH, + regs + TSADCV2_AUTO_CON); + + writel_relaxed(TSADCV2_AUTO_PERIOD_TIME, regs + TSADCV2_AUTO_PERIOD); + writel_relaxed(TSADCV2_HIGHT_INT_DEBOUNCE_COUNT, + regs + TSADCV2_HIGHT_INT_DEBOUNCE); + writel_relaxed(TSADCV2_AUTO_PERIOD_HT_TIME, + regs + TSADCV2_AUTO_PERIOD_HT); + writel_relaxed(TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT, + regs + TSADCV2_HIGHT_TSHUT_DEBOUNCE); } static void rk_tsadcv2_irq_ack(void __iomem *regs) @@ -498,10 +599,11 @@ static void rk_tsadcv2_control(void __iomem *regs, bool enable) } /** - * @rk_tsadcv3_control: - * TSADC controller works at auto mode, and some SoCs need set the tsadc_q_sel - * bit on TSADCV2_AUTO_CON[1]. The (1024 - tsadc_q) as output adc value if - * setting this bit to enable. + * rk_tsadcv3_control - enable or disable the tsadc controller. + * + * NOTE: the TSADC controller works in auto mode, and some SoCs need the + * tsadc_q_sel bit set in TSADCV2_AUTO_CON[1]; when it is set, + * (1024 - tsadc_q) is used as the output adc value. 
 */
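/*
 * [Editor's sketch] What the tsadc_q_sel note above means in practice for
 * rk_tsadcv3_control(), whose body is not shown in this hunk: when
 * enabling, AUTO_CON[1] is also set so the hardware outputs
 * (1024 - tsadc_q) and the code table becomes increasing. Register names
 * are the ones defined earlier in this file; this is a sketch, not the
 * literal upstream body:
 */
static void example_tsadcv3_control(void __iomem *regs, bool enable)
{
	u32 val = readl_relaxed(regs + TSADCV2_AUTO_CON);

	if (enable)
		val |= TSADCV2_AUTO_EN | TSADCV3_AUTO_Q_SEL_EN;
	else
		val &= ~TSADCV2_AUTO_EN;

	writel_relaxed(val, regs + TSADCV2_AUTO_CON);
}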
static void rk_tsadcv3_control(void __iomem *regs, bool enable) { @@ -603,6 +705,30 @@ static const struct rockchip_tsadc_chip rk3288_tsadc_data = { }, }; +static const struct rockchip_tsadc_chip rk3366_tsadc_data = { + .chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */ + .chn_id[SENSOR_GPU] = 1, /* gpu sensor is channel 1 */ + .chn_num = 2, /* two channels for tsadc */ + + .tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO to the PMIC */ + .tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */ + .tshut_temp = 95000, + + .initialize = rk_tsadcv3_initialize, + .irq_ack = rk_tsadcv3_irq_ack, + .control = rk_tsadcv3_control, + .get_temp = rk_tsadcv2_get_temp, + .set_tshut_temp = rk_tsadcv2_tshut_temp, + .set_tshut_mode = rk_tsadcv2_tshut_mode, + + .table = { + .id = rk3228_code_table, + .length = ARRAY_SIZE(rk3228_code_table), + .data_mask = TSADCV3_DATA_MASK, + .mode = ADC_INCREMENT, + }, +}; + static const struct rockchip_tsadc_chip rk3368_tsadc_data = { .chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */ .chn_id[SENSOR_GPU] = 1, /* gpu sensor is channel 1 */ @@ -636,7 +762,7 @@ static const struct rockchip_tsadc_chip rk3399_tsadc_data = { .tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */ .tshut_temp = 95000, - .initialize = rk_tsadcv2_initialize, + .initialize = rk_tsadcv3_initialize, .irq_ack = rk_tsadcv3_irq_ack, .control = rk_tsadcv3_control, .get_temp = rk_tsadcv2_get_temp, @@ -661,6 +787,10 @@ static const struct of_device_id of_rockchip_thermal_match[] = { .data = (void *)&rk3288_tsadc_data, }, { + .compatible = "rockchip,rk3366-tsadc", + .data = (void *)&rk3366_tsadc_data, + }, + { .compatible = "rockchip,rk3368-tsadc", .data = (void *)&rk3368_tsadc_data, }, @@ -768,6 +898,11 @@ static int rockchip_configure_from_dt(struct device *dev, return -EINVAL; } + /* Don't treat a failed lookup as an error here, since some SoCs don't + * need this property. 
+ */ + thermal->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf"); + return 0; } @@ -786,8 +921,8 @@ rockchip_thermal_register_sensor(struct platform_device *pdev, sensor->thermal = thermal; sensor->id = id; - sensor->tzd = thermal_zone_of_sensor_register(&pdev->dev, id, sensor, - &rockchip_of_thermal_ops); + sensor->tzd = devm_thermal_zone_of_sensor_register(&pdev->dev, id, + sensor, &rockchip_of_thermal_ops); if (IS_ERR(sensor->tzd)) { error = PTR_ERR(sensor->tzd); dev_err(&pdev->dev, "failed to register sensor %d: %d\n", @@ -815,7 +950,7 @@ static int rockchip_thermal_probe(struct platform_device *pdev) const struct of_device_id *match; struct resource *res; int irq; - int i, j; + int i; int error; match = of_match_node(of_rockchip_thermal_match, np); @@ -888,7 +1023,8 @@ static int rockchip_thermal_probe(struct platform_device *pdev) goto err_disable_pclk; } - thermal->chip->initialize(thermal->regs, thermal->tshut_polarity); + thermal->chip->initialize(thermal->grf, thermal->regs, + thermal->tshut_polarity); for (i = 0; i < thermal->chip->chn_num; i++) { error = rockchip_thermal_register_sensor(pdev, thermal, @@ -898,9 +1034,6 @@ static int rockchip_thermal_probe(struct platform_device *pdev) dev_err(&pdev->dev, "failed to register sensor[%d] : error = %d\n", i, error); - for (j = 0; j < i; j++) - thermal_zone_of_sensor_unregister(&pdev->dev, - thermal->sensors[j].tzd); goto err_disable_pclk; } } @@ -912,7 +1045,7 @@ static int rockchip_thermal_probe(struct platform_device *pdev) if (error) { dev_err(&pdev->dev, "failed to request tsadc irq: %d\n", error); - goto err_unregister_sensor; + goto err_disable_pclk; } thermal->chip->control(thermal->regs, true); @@ -924,11 +1057,6 @@ static int rockchip_thermal_probe(struct platform_device *pdev) return 0; -err_unregister_sensor: - while (i--) - thermal_zone_of_sensor_unregister(&pdev->dev, - thermal->sensors[i].tzd); - err_disable_pclk: clk_disable_unprepare(thermal->pclk); err_disable_clk: @@ -946,7 +1074,6 @@ static int rockchip_thermal_remove(struct platform_device *pdev) struct rockchip_thermal_sensor *sensor = &thermal->sensors[i]; rockchip_thermal_toggle_sensor(sensor, false); - thermal_zone_of_sensor_unregister(&pdev->dev, sensor->tzd); } thermal->chip->control(thermal->regs, false); @@ -988,12 +1115,15 @@ static int __maybe_unused rockchip_thermal_resume(struct device *dev) return error; error = clk_enable(thermal->pclk); - if (error) + if (error) { + clk_disable(thermal->clk); return error; + } rockchip_thermal_reset_controller(thermal->reset); - thermal->chip->initialize(thermal->regs, thermal->tshut_polarity); + thermal->chip->initialize(thermal->grf, thermal->regs, + thermal->tshut_polarity); for (i = 0; i < thermal->chip->chn_num; i++) { int id = thermal->sensors[i].id; diff --git a/drivers/thermal/tango_thermal.c b/drivers/thermal/tango_thermal.c new file mode 100644 index 000000000000..70e0d9f406e9 --- /dev/null +++ b/drivers/thermal/tango_thermal.c @@ -0,0 +1,109 @@ +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/thermal.h> +#include <linux/platform_device.h> + +/* + * According to a data sheet draft, "this temperature sensor uses a bandgap + * type of circuit to compare a voltage which has a negative temperature + * coefficient with a voltage that is proportional to absolute temperature. 
+ * A resistor bank allows 41 different temperature thresholds to be selected + * and the logic output will then indicate whether the actual die temperature + * lies above or below the selected threshold." + */ + +#define TEMPSI_CMD 0 +#define TEMPSI_RES 4 +#define TEMPSI_CFG 8 + +#define CMD_OFF 0 +#define CMD_ON 1 +#define CMD_READ 2 + +#define IDX_MIN 15 +#define IDX_MAX 40 + +struct tango_thermal_priv { + void __iomem *base; + int thresh_idx; +}; + +static bool temp_above_thresh(void __iomem *base, int thresh_idx) +{ + writel(CMD_READ | thresh_idx << 8, base + TEMPSI_CMD); + usleep_range(10, 20); + writel(CMD_READ | thresh_idx << 8, base + TEMPSI_CMD); + + return readl(base + TEMPSI_RES); +} + +static int tango_get_temp(void *arg, int *res) +{ + struct tango_thermal_priv *priv = arg; + int idx = priv->thresh_idx; + + if (temp_above_thresh(priv->base, idx)) { + /* Search upward by incrementing thresh_idx */ + while (idx < IDX_MAX && temp_above_thresh(priv->base, ++idx)) + cpu_relax(); + idx = idx - 1; /* always return lower bound */ + } else { + /* Search downward by decrementing thresh_idx */ + while (idx > IDX_MIN && !temp_above_thresh(priv->base, --idx)) + cpu_relax(); + } + + *res = (idx * 9 / 2 - 38) * 1000; /* millidegrees Celsius */ + priv->thresh_idx = idx; + + return 0; +} + +static const struct thermal_zone_of_device_ops ops = { + .get_temp = tango_get_temp, +}; + +static int tango_thermal_probe(struct platform_device *pdev) +{ + struct resource *res; + struct tango_thermal_priv *priv; + struct thermal_zone_device *tzdev; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + priv->thresh_idx = IDX_MIN; + writel(0, priv->base + TEMPSI_CFG); + writel(CMD_ON, priv->base + TEMPSI_CMD); + + tzdev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, priv, &ops); + return PTR_ERR_OR_ZERO(tzdev); +} + +static const struct of_device_id tango_sensor_ids[] = { + { + .compatible = "sigma,smp8758-thermal", + }, + { /* sentinel */ } +}; + +static struct platform_driver tango_thermal_driver = { + .probe = tango_thermal_probe, + .driver = { + .name = "tango-thermal", + .of_match_table = tango_sensor_ids, + }, +}; + +module_platform_driver(tango_thermal_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Sigma Designs"); +MODULE_DESCRIPTION("Tango temperature sensor"); diff --git a/drivers/thermal/tegra/Kconfig b/drivers/thermal/tegra/Kconfig new file mode 100644 index 000000000000..cec586ec7e4b --- /dev/null +++ b/drivers/thermal/tegra/Kconfig @@ -0,0 +1,13 @@ +menu "NVIDIA Tegra thermal drivers" +depends on ARCH_TEGRA + +config TEGRA_SOCTHERM + tristate "Tegra SOCTHERM thermal management" + help + Enable this option for integrated thermal management support on NVIDIA + Tegra systems-on-chip. The driver supports four thermal zones + (CPU, GPU, MEM, PLLX). Cooling devices can be bound to the thermal + zones to manage temperatures. This option is also required for the + emergency thermal reset (thermtrip) feature to function. 
+ +endmenu diff --git a/drivers/thermal/tegra/Makefile b/drivers/thermal/tegra/Makefile new file mode 100644 index 000000000000..1ce1af2cf0f5 --- /dev/null +++ b/drivers/thermal/tegra/Makefile @@ -0,0 +1,6 @@ +obj-$(CONFIG_TEGRA_SOCTHERM) += tegra-soctherm.o + +tegra-soctherm-y := soctherm.o soctherm-fuse.o +tegra-soctherm-$(CONFIG_ARCH_TEGRA_124_SOC) += tegra124-soctherm.o +tegra-soctherm-$(CONFIG_ARCH_TEGRA_132_SOC) += tegra132-soctherm.o +tegra-soctherm-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-soctherm.o diff --git a/drivers/thermal/tegra/soctherm-fuse.c b/drivers/thermal/tegra/soctherm-fuse.c new file mode 100644 index 000000000000..29963180c453 --- /dev/null +++ b/drivers/thermal/tegra/soctherm-fuse.c @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <soc/tegra/fuse.h> + +#include "soctherm.h" + +#define NOMINAL_CALIB_FT 105 +#define NOMINAL_CALIB_CP 25 + +#define FUSE_TSENSOR_CALIB_CP_TS_BASE_MASK 0x1fff +#define FUSE_TSENSOR_CALIB_FT_TS_BASE_MASK (0x1fff << 13) +#define FUSE_TSENSOR_CALIB_FT_TS_BASE_SHIFT 13 + +#define FUSE_TSENSOR_COMMON 0x180 + +/* + * Tegra210: Layout of bits in FUSE_TSENSOR_COMMON: + * 3 2 1 0 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | BASE_FT | BASE_CP | SHFT_FT | SHIFT_CP | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * Tegra12x, etc: + * In chips prior to Tegra210, this fuse was incorrectly sized as 26 bits, + * and didn't hold SHIFT_CP in [31:26]. Therefore these missing six bits + * were obtained via the FUSE_SPARE_REALIGNMENT_REG register [5:0]. + * + * FUSE_TSENSOR_COMMON: + * 3 2 1 0 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * |-----------| SHFT_FT | BASE_FT | BASE_CP | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * FUSE_SPARE_REALIGNMENT_REG: + * 3 2 1 0 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * |---------------------------------------------------| SHIFT_CP | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + +#define CALIB_COEFFICIENT 1000000LL + +/** + * div64_s64_precise() - wrapper for div64_s64() + * @a: the dividend + * @b: the divisor + * + * Implements division with fairly accurate rounding instead of truncation by + * shifting the dividend to the left by 16 so that the quotient has a + * much higher precision. + * + * Return: the quotient of a / b. 
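+ *
+ * [Editor's note] This helper is used below by tegra_calc_tsensor_calib(),
+ * which fits a line through the cold (CP) and hot (FT) calibration points
+ * in the sensor's counts domain:
+ *
+ *   therma = (temp_ft - temp_cp) / (counts_ft - counts_cp)
+ *   thermb = (counts_ft * temp_cp - counts_cp * temp_ft)
+ *            / (counts_ft - counts_cp)
+ *
+ * therma is carried in 2^13 fixed point and rescaled by the ATE/runtime
+ * pdiv and tsample ratios; the 16 guard bits added here let those
+ * coefficients round instead of truncate.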
+ */ +static s64 div64_s64_precise(s64 a, s32 b) +{ + s64 r, al; + + /* Scale up for increased precision division */ + al = a << 16; + + r = div64_s64(al * 2 + 1, 2 * b); + return r >> 16; +} + +int tegra_calc_shared_calib(const struct tegra_soctherm_fuse *tfuse, + struct tsensor_shared_calib *shared) +{ + u32 val; + s32 shifted_cp, shifted_ft; + int err; + + err = tegra_fuse_readl(FUSE_TSENSOR_COMMON, &val); + if (err) + return err; + + shared->base_cp = (val & tfuse->fuse_base_cp_mask) >> + tfuse->fuse_base_cp_shift; + shared->base_ft = (val & tfuse->fuse_base_ft_mask) >> + tfuse->fuse_base_ft_shift; + + shifted_ft = (val & tfuse->fuse_shift_ft_mask) >> + tfuse->fuse_shift_ft_shift; + shifted_ft = sign_extend32(shifted_ft, 4); + + if (tfuse->fuse_spare_realignment) { + err = tegra_fuse_readl(tfuse->fuse_spare_realignment, &val); + if (err) + return err; + } + + shifted_cp = sign_extend32(val, 5); + + shared->actual_temp_cp = 2 * NOMINAL_CALIB_CP + shifted_cp; + shared->actual_temp_ft = 2 * NOMINAL_CALIB_FT + shifted_ft; + + return 0; +} + +int tegra_calc_tsensor_calib(const struct tegra_tsensor *sensor, + const struct tsensor_shared_calib *shared, + u32 *calibration) +{ + const struct tegra_tsensor_group *sensor_group; + u32 val, calib; + s32 actual_tsensor_ft, actual_tsensor_cp; + s32 delta_sens, delta_temp; + s32 mult, div; + s16 therma, thermb; + s64 temp; + int err; + + sensor_group = sensor->group; + + err = tegra_fuse_readl(sensor->calib_fuse_offset, &val); + if (err) + return err; + + actual_tsensor_cp = (shared->base_cp * 64) + sign_extend32(val, 12); + val = (val & FUSE_TSENSOR_CALIB_FT_TS_BASE_MASK) >> + FUSE_TSENSOR_CALIB_FT_TS_BASE_SHIFT; + actual_tsensor_ft = (shared->base_ft * 32) + sign_extend32(val, 12); + + delta_sens = actual_tsensor_ft - actual_tsensor_cp; + delta_temp = shared->actual_temp_ft - shared->actual_temp_cp; + + mult = sensor_group->pdiv * sensor->config->tsample_ate; + div = sensor->config->tsample * sensor_group->pdiv_ate; + + temp = (s64)delta_temp * (1LL << 13) * mult; + therma = div64_s64_precise(temp, (s64)delta_sens * div); + + temp = ((s64)actual_tsensor_ft * shared->actual_temp_cp) - + ((s64)actual_tsensor_cp * shared->actual_temp_ft); + thermb = div64_s64_precise(temp, delta_sens); + + temp = (s64)therma * sensor->fuse_corr_alpha; + therma = div64_s64_precise(temp, CALIB_COEFFICIENT); + + temp = (s64)thermb * sensor->fuse_corr_alpha + sensor->fuse_corr_beta; + thermb = div64_s64_precise(temp, CALIB_COEFFICIENT); + + calib = ((u16)therma << SENSOR_CONFIG2_THERMA_SHIFT) | + ((u16)thermb << SENSOR_CONFIG2_THERMB_SHIFT); + + *calibration = calib; + + return 0; +} + +MODULE_AUTHOR("Wei Ni <wni@nvidia.com>"); +MODULE_DESCRIPTION("Tegra SOCTHERM fuse management"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c new file mode 100644 index 000000000000..b8651726201e --- /dev/null +++ b/drivers/thermal/tegra/soctherm.c @@ -0,0 +1,685 @@ +/* + * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. + * + * Author: + * Mikko Perttunen <mperttunen@nvidia.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + */ + +#include <linux/debugfs.h> +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/reset.h> +#include <linux/thermal.h> + +#include <dt-bindings/thermal/tegra124-soctherm.h> + +#include "soctherm.h" + +#define SENSOR_CONFIG0 0 +#define SENSOR_CONFIG0_STOP BIT(0) +#define SENSOR_CONFIG0_CPTR_OVER BIT(2) +#define SENSOR_CONFIG0_OVER BIT(3) +#define SENSOR_CONFIG0_TCALC_OVER BIT(4) +#define SENSOR_CONFIG0_TALL_MASK (0xfffff << 8) +#define SENSOR_CONFIG0_TALL_SHIFT 8 + +#define SENSOR_CONFIG1 4 +#define SENSOR_CONFIG1_TSAMPLE_MASK 0x3ff +#define SENSOR_CONFIG1_TSAMPLE_SHIFT 0 +#define SENSOR_CONFIG1_TIDDQ_EN_MASK (0x3f << 15) +#define SENSOR_CONFIG1_TIDDQ_EN_SHIFT 15 +#define SENSOR_CONFIG1_TEN_COUNT_MASK (0x3f << 24) +#define SENSOR_CONFIG1_TEN_COUNT_SHIFT 24 +#define SENSOR_CONFIG1_TEMP_ENABLE BIT(31) + +/* + * SENSOR_CONFIG2 is defined in soctherm.h + * because it is also used by soctherm-fuse.c + */ + +#define SENSOR_STATUS0 0xc +#define SENSOR_STATUS0_VALID_MASK BIT(31) +#define SENSOR_STATUS0_CAPTURE_MASK 0xffff + +#define SENSOR_STATUS1 0x10 +#define SENSOR_STATUS1_TEMP_VALID_MASK BIT(31) +#define SENSOR_STATUS1_TEMP_MASK 0xffff + +#define READBACK_VALUE_MASK 0xff00 +#define READBACK_VALUE_SHIFT 8 +#define READBACK_ADD_HALF BIT(7) +#define READBACK_NEGATE BIT(0) + +/* get val from register(r) mask bits(m) */ +#define REG_GET_MASK(r, m) (((r) & (m)) >> (ffs(m) - 1)) +/* set val(v) to mask bits(m) of register(r) */ +#define REG_SET_MASK(r, m, v) (((r) & ~(m)) | \ + (((v) & (m >> (ffs(m) - 1))) << (ffs(m) - 1))) + +static const int min_low_temp = -127000; +static const int max_high_temp = 127000; + +struct tegra_thermctl_zone { + void __iomem *reg; + struct device *dev; + struct thermal_zone_device *tz; + const struct tegra_tsensor_group *sg; +}; + +struct tegra_soctherm { + struct reset_control *reset; + struct clk *clock_tsensor; + struct clk *clock_soctherm; + void __iomem *regs; + struct thermal_zone_device **thermctl_tzs; + + u32 *calib; + struct tegra_soctherm_soc *soc; + + struct dentry *debugfs_dir; +}; + +static void enable_tsensor(struct tegra_soctherm *tegra, unsigned int i) +{ + const struct tegra_tsensor *sensor = &tegra->soc->tsensors[i]; + void __iomem *base = tegra->regs + sensor->base; + unsigned int val; + + val = sensor->config->tall << SENSOR_CONFIG0_TALL_SHIFT; + writel(val, base + SENSOR_CONFIG0); + + val = (sensor->config->tsample - 1) << SENSOR_CONFIG1_TSAMPLE_SHIFT; + val |= sensor->config->tiddq_en << SENSOR_CONFIG1_TIDDQ_EN_SHIFT; + val |= sensor->config->ten_count << SENSOR_CONFIG1_TEN_COUNT_SHIFT; + val |= SENSOR_CONFIG1_TEMP_ENABLE; + writel(val, base + SENSOR_CONFIG1); + + writel(tegra->calib[i], base + SENSOR_CONFIG2); +} + +/* + * Translate from soctherm readback format to millicelsius. + * The soctherm readback format in bits is as follows: + * TTTTTTTT H______N + * where T's contain the temperature in Celsius, + * H denotes an addition of 0.5 Celsius and N denotes negation + * of the final value. 
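+ *
+ * [Editor's example] A raw value of 0x1981 has T = 0x19 (25), H = 1 and
+ * N = 1, so translate_temp() below returns -(25 * 1000 + 500) = -25500
+ * millicelsius.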
+ */ +static int translate_temp(u16 val) +{ + int t; + + t = ((val & READBACK_VALUE_MASK) >> READBACK_VALUE_SHIFT) * 1000; + if (val & READBACK_ADD_HALF) + t += 500; + if (val & READBACK_NEGATE) + t *= -1; + + return t; +} + +static int tegra_thermctl_get_temp(void *data, int *out_temp) +{ + struct tegra_thermctl_zone *zone = data; + u32 val; + + val = readl(zone->reg); + val = REG_GET_MASK(val, zone->sg->sensor_temp_mask); + *out_temp = translate_temp(val); + + return 0; +} + +static int +thermtrip_program(struct device *dev, const struct tegra_tsensor_group *sg, + int trip_temp); + +static int tegra_thermctl_set_trip_temp(void *data, int trip, int temp) +{ + struct tegra_thermctl_zone *zone = data; + struct thermal_zone_device *tz = zone->tz; + const struct tegra_tsensor_group *sg = zone->sg; + struct device *dev = zone->dev; + enum thermal_trip_type type; + int ret; + + if (!tz) + return -EINVAL; + + ret = tz->ops->get_trip_type(tz, trip, &type); + if (ret) + return ret; + + if (type != THERMAL_TRIP_CRITICAL) + return 0; + + return thermtrip_program(dev, sg, temp); +} + +static const struct thermal_zone_of_device_ops tegra_of_thermal_ops = { + .get_temp = tegra_thermctl_get_temp, + .set_trip_temp = tegra_thermctl_set_trip_temp, +}; + +/** + * enforce_temp_range() - check and enforce temperature range [min, max] + * @dev: struct device * of the SOC_THERM instance + * @trip_temp: the trip temperature to check + * + * Checks and enforces the permitted temperature range that SOC_THERM + * HW can support. This is + * done while taking care of precision. + * + * Return: The precision adjusted capped temperature in millicelsius. + */ +static int enforce_temp_range(struct device *dev, int trip_temp) +{ + int temp; + + temp = clamp_val(trip_temp, min_low_temp, max_high_temp); + if (temp != trip_temp) + dev_info(dev, "soctherm: trip temperature %d forced to %d\n", + trip_temp, temp); + return temp; +} + +/** + * thermtrip_program() - Configures the hardware to shut down the + * system if a given sensor group reaches a given temperature + * @dev: ptr to the struct device for the SOC_THERM IP block + * @sg: pointer to the sensor group to set the thermtrip temperature for + * @trip_temp: the temperature in millicelsius to trigger the thermal trip at + * + * Sets the thermal trip threshold of the given sensor group to be the + * @trip_temp. If this threshold is crossed, the hardware will shut + * down. + * + * Note that, although @trip_temp is specified in millicelsius, the + * hardware is programmed in degrees Celsius. + * + * Return: 0 upon success, or %-EINVAL upon failure. + */ +static int thermtrip_program(struct device *dev, + const struct tegra_tsensor_group *sg, + int trip_temp) +{ + struct tegra_soctherm *ts = dev_get_drvdata(dev); + int temp; + u32 r; + + if (!sg || !sg->thermtrip_threshold_mask) + return -EINVAL; + + temp = enforce_temp_range(dev, trip_temp) / ts->soc->thresh_grain; + + r = readl(ts->regs + THERMCTL_THERMTRIP_CTL); + r = REG_SET_MASK(r, sg->thermtrip_threshold_mask, temp); + r = REG_SET_MASK(r, sg->thermtrip_enable_mask, 1); + r = REG_SET_MASK(r, sg->thermtrip_any_en_mask, 0); + writel(r, ts->regs + THERMCTL_THERMTRIP_CTL); + + return 0; +} + +/** + * tegra_soctherm_set_hwtrips() - set HW trip point from DT data + * @dev: struct device * of the SOC_THERM instance + * + * Configure the SOC_THERM HW trip points, setting the "THERMTRIP" + * trip points using the "critical" trip temperatures from the + * thermal zones. 
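+ * For example, with a thresh_grain of 1000 a critical trip of 101500
+ * millicelsius programs a threshold value of 101 (whole degrees Celsius)
+ * into the sensor group's THERMTRIP threshold field.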
+ * After they have been configured, THERMTRIP will take action + * when the configured SoC thermal sensor group reaches a + * certain temperature. + * + * Return: 0 upon success, or a negative error code on failure. + * "Success" does not mean that the trips were enabled; it could also + * mean that no node was found in DT. + * THERMTRIP has been enabled successfully when a message similar to + * this one appears on the serial console: + * "thermtrip: will shut down when sensor group XXX reaches YYYYYY mC" + */ +static int tegra_soctherm_set_hwtrips(struct device *dev, + const struct tegra_tsensor_group *sg, + struct thermal_zone_device *tz) +{ + int temperature; + int ret; + + ret = tz->ops->get_crit_temp(tz, &temperature); + if (ret) { + dev_warn(dev, "thermtrip: %s: missing critical temperature\n", + sg->name); + return ret; + } + + ret = thermtrip_program(dev, sg, temperature); + if (ret) { + dev_err(dev, "thermtrip: %s: error during enable\n", + sg->name); + return ret; + } + + dev_info(dev, + "thermtrip: will shut down when %s reaches %d mC\n", + sg->name, temperature); + + return 0; +} + +#ifdef CONFIG_DEBUG_FS +static int regs_show(struct seq_file *s, void *data) +{ + struct platform_device *pdev = s->private; + struct tegra_soctherm *ts = platform_get_drvdata(pdev); + const struct tegra_tsensor *tsensors = ts->soc->tsensors; + const struct tegra_tsensor_group **ttgs = ts->soc->ttgs; + u32 r, state; + int i; + + seq_puts(s, "-----TSENSE (convert HW)-----\n"); + + for (i = 0; i < ts->soc->num_tsensors; i++) { + r = readl(ts->regs + tsensors[i].base + SENSOR_CONFIG1); + state = REG_GET_MASK(r, SENSOR_CONFIG1_TEMP_ENABLE); + + seq_printf(s, "%s: ", tsensors[i].name); + seq_printf(s, "En(%d) ", state); + + if (!state) { + seq_puts(s, "\n"); + continue; + } + + state = REG_GET_MASK(r, SENSOR_CONFIG1_TIDDQ_EN_MASK); + seq_printf(s, "tiddq(%d) ", state); + state = REG_GET_MASK(r, SENSOR_CONFIG1_TEN_COUNT_MASK); + seq_printf(s, "ten_count(%d) ", state); + state = REG_GET_MASK(r, SENSOR_CONFIG1_TSAMPLE_MASK); + seq_printf(s, "tsample(%d) ", state + 1); + + r = readl(ts->regs + tsensors[i].base + SENSOR_STATUS1); + state = REG_GET_MASK(r, SENSOR_STATUS1_TEMP_VALID_MASK); + seq_printf(s, "Temp(%d/", state); + state = REG_GET_MASK(r, SENSOR_STATUS1_TEMP_MASK); + seq_printf(s, "%d) ", translate_temp(state)); + + r = readl(ts->regs + tsensors[i].base + SENSOR_STATUS0); + state = REG_GET_MASK(r, SENSOR_STATUS0_VALID_MASK); + seq_printf(s, "Capture(%d/", state); + state = REG_GET_MASK(r, SENSOR_STATUS0_CAPTURE_MASK); + seq_printf(s, "%d) ", state); + + r = readl(ts->regs + tsensors[i].base + SENSOR_CONFIG0); + state = REG_GET_MASK(r, SENSOR_CONFIG0_STOP); + seq_printf(s, "Stop(%d) ", state); + state = REG_GET_MASK(r, SENSOR_CONFIG0_TALL_MASK); + seq_printf(s, "Tall(%d) ", state); + state = REG_GET_MASK(r, SENSOR_CONFIG0_TCALC_OVER); + seq_printf(s, "Over(%d/", state); + state = REG_GET_MASK(r, SENSOR_CONFIG0_OVER); + seq_printf(s, "%d/", state); + state = REG_GET_MASK(r, SENSOR_CONFIG0_CPTR_OVER); + seq_printf(s, "%d) ", state); + + r = readl(ts->regs + tsensors[i].base + SENSOR_CONFIG2); + state = REG_GET_MASK(r, SENSOR_CONFIG2_THERMA_MASK); + seq_printf(s, "Therm_A/B(%d/", state); + state = REG_GET_MASK(r, SENSOR_CONFIG2_THERMB_MASK); + seq_printf(s, "%d)\n", (s16)state); + } + + r = readl(ts->regs + SENSOR_PDIV); + seq_printf(s, "PDIV: 0x%x\n", r); + + r = readl(ts->regs + SENSOR_HOTSPOT_OFF); + seq_printf(s, "HOTSPOT: 0x%x\n", r); + + seq_puts(s, "\n"); + seq_puts(s, "-----SOC_THERM-----\n"); + 
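+	/*
+	 * [Editor's example] REG_GET_MASK(r, m) shifts the field down by
+	 * the position of the mask's lowest set bit (ffs(m) - 1): with
+	 * SENSOR_TEMP1 = 0x19810000, REG_GET_MASK(r,
+	 * SENSOR_TEMP1_CPU_TEMP_MASK) yields the raw readback 0x1981,
+	 * which translate_temp() converts to -25500 as in the example
+	 * above.
+	 */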
r = readl(ts->regs + SENSOR_TEMP1); + state = REG_GET_MASK(r, SENSOR_TEMP1_CPU_TEMP_MASK); + seq_printf(s, "Temperatures: CPU(%d) ", translate_temp(state)); + state = REG_GET_MASK(r, SENSOR_TEMP1_GPU_TEMP_MASK); + seq_printf(s, " GPU(%d) ", translate_temp(state)); + r = readl(ts->regs + SENSOR_TEMP2); + state = REG_GET_MASK(r, SENSOR_TEMP2_PLLX_TEMP_MASK); + seq_printf(s, " PLLX(%d) ", translate_temp(state)); + state = REG_GET_MASK(r, SENSOR_TEMP2_MEM_TEMP_MASK); + seq_printf(s, " MEM(%d)\n", translate_temp(state)); + + r = readl(ts->regs + THERMCTL_THERMTRIP_CTL); + state = REG_GET_MASK(r, ttgs[0]->thermtrip_any_en_mask); + seq_printf(s, "Thermtrip Any En(%d)\n", state); + for (i = 0; i < ts->soc->num_ttgs; i++) { + state = REG_GET_MASK(r, ttgs[i]->thermtrip_enable_mask); + seq_printf(s, " %s En(%d) ", ttgs[i]->name, state); + state = REG_GET_MASK(r, ttgs[i]->thermtrip_threshold_mask); + state *= ts->soc->thresh_grain; + seq_printf(s, "Thresh(%d)\n", state); + } + + return 0; +} + +static int regs_open(struct inode *inode, struct file *file) +{ + return single_open(file, regs_show, inode->i_private); +} + +static const struct file_operations regs_fops = { + .open = regs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static void soctherm_debug_init(struct platform_device *pdev) +{ + struct tegra_soctherm *tegra = platform_get_drvdata(pdev); + struct dentry *root, *file; + + root = debugfs_create_dir("soctherm", NULL); + if (!root) { + dev_err(&pdev->dev, "failed to create debugfs directory\n"); + return; + } + + tegra->debugfs_dir = root; + + file = debugfs_create_file("reg_contents", 0644, root, + pdev, ®s_fops); + if (!file) { + dev_err(&pdev->dev, "failed to create debugfs file\n"); + debugfs_remove_recursive(tegra->debugfs_dir); + tegra->debugfs_dir = NULL; + } +} +#else +static inline void soctherm_debug_init(struct platform_device *pdev) {} +#endif + +static int soctherm_clk_enable(struct platform_device *pdev, bool enable) +{ + struct tegra_soctherm *tegra = platform_get_drvdata(pdev); + int err; + + if (!tegra->clock_soctherm || !tegra->clock_tsensor) + return -EINVAL; + + reset_control_assert(tegra->reset); + + if (enable) { + err = clk_prepare_enable(tegra->clock_soctherm); + if (err) { + reset_control_deassert(tegra->reset); + return err; + } + + err = clk_prepare_enable(tegra->clock_tsensor); + if (err) { + clk_disable_unprepare(tegra->clock_soctherm); + reset_control_deassert(tegra->reset); + return err; + } + } else { + clk_disable_unprepare(tegra->clock_tsensor); + clk_disable_unprepare(tegra->clock_soctherm); + } + + reset_control_deassert(tegra->reset); + + return 0; +} + +static void soctherm_init(struct platform_device *pdev) +{ + struct tegra_soctherm *tegra = platform_get_drvdata(pdev); + const struct tegra_tsensor_group **ttgs = tegra->soc->ttgs; + int i; + u32 pdiv, hotspot; + + /* Initialize raw sensors */ + for (i = 0; i < tegra->soc->num_tsensors; ++i) + enable_tsensor(tegra, i); + + /* program pdiv and hotspot offsets per THERM */ + pdiv = readl(tegra->regs + SENSOR_PDIV); + hotspot = readl(tegra->regs + SENSOR_HOTSPOT_OFF); + for (i = 0; i < tegra->soc->num_ttgs; ++i) { + pdiv = REG_SET_MASK(pdiv, ttgs[i]->pdiv_mask, + ttgs[i]->pdiv); + /* hotspot offset from PLLX, doesn't need to configure PLLX */ + if (ttgs[i]->id == TEGRA124_SOCTHERM_SENSOR_PLLX) + continue; + hotspot = REG_SET_MASK(hotspot, + ttgs[i]->pllx_hotspot_mask, + ttgs[i]->pllx_hotspot_diff); + } + writel(pdiv, tegra->regs + SENSOR_PDIV); + writel(hotspot, 
tegra->regs + SENSOR_HOTSPOT_OFF); +} + +static const struct of_device_id tegra_soctherm_of_match[] = { +#ifdef CONFIG_ARCH_TEGRA_124_SOC + { + .compatible = "nvidia,tegra124-soctherm", + .data = &tegra124_soctherm, + }, +#endif +#ifdef CONFIG_ARCH_TEGRA_132_SOC + { + .compatible = "nvidia,tegra132-soctherm", + .data = &tegra132_soctherm, + }, +#endif +#ifdef CONFIG_ARCH_TEGRA_210_SOC + { + .compatible = "nvidia,tegra210-soctherm", + .data = &tegra210_soctherm, + }, +#endif + { }, +}; +MODULE_DEVICE_TABLE(of, tegra_soctherm_of_match); + +static int tegra_soctherm_probe(struct platform_device *pdev) +{ + const struct of_device_id *match; + struct tegra_soctherm *tegra; + struct thermal_zone_device *z; + struct tsensor_shared_calib shared_calib; + struct resource *res; + struct tegra_soctherm_soc *soc; + unsigned int i; + int err; + + match = of_match_node(tegra_soctherm_of_match, pdev->dev.of_node); + if (!match) + return -ENODEV; + + soc = (struct tegra_soctherm_soc *)match->data; + if (soc->num_ttgs > TEGRA124_SOCTHERM_SENSOR_NUM) + return -EINVAL; + + tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL); + if (!tegra) + return -ENOMEM; + + dev_set_drvdata(&pdev->dev, tegra); + + tegra->soc = soc; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + tegra->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(tegra->regs)) + return PTR_ERR(tegra->regs); + + tegra->reset = devm_reset_control_get(&pdev->dev, "soctherm"); + if (IS_ERR(tegra->reset)) { + dev_err(&pdev->dev, "can't get soctherm reset\n"); + return PTR_ERR(tegra->reset); + } + + tegra->clock_tsensor = devm_clk_get(&pdev->dev, "tsensor"); + if (IS_ERR(tegra->clock_tsensor)) { + dev_err(&pdev->dev, "can't get tsensor clock\n"); + return PTR_ERR(tegra->clock_tsensor); + } + + tegra->clock_soctherm = devm_clk_get(&pdev->dev, "soctherm"); + if (IS_ERR(tegra->clock_soctherm)) { + dev_err(&pdev->dev, "can't get soctherm clock\n"); + return PTR_ERR(tegra->clock_soctherm); + } + + tegra->calib = devm_kzalloc(&pdev->dev, + sizeof(u32) * soc->num_tsensors, + GFP_KERNEL); + if (!tegra->calib) + return -ENOMEM; + + /* calculate shared calibration data */ + err = tegra_calc_shared_calib(soc->tfuse, &shared_calib); + if (err) + return err; + + /* calculate per-tsensor calibration data */ + for (i = 0; i < soc->num_tsensors; ++i) { + err = tegra_calc_tsensor_calib(&soc->tsensors[i], + &shared_calib, + &tegra->calib[i]); + if (err) + return err; + } + + tegra->thermctl_tzs = devm_kzalloc(&pdev->dev, + sizeof(*z) * soc->num_ttgs, + GFP_KERNEL); + if (!tegra->thermctl_tzs) + return -ENOMEM; + + err = soctherm_clk_enable(pdev, true); + if (err) + return err; + + soctherm_init(pdev); + + for (i = 0; i < soc->num_ttgs; ++i) { + struct tegra_thermctl_zone *zone = + devm_kzalloc(&pdev->dev, sizeof(*zone), GFP_KERNEL); + if (!zone) { + err = -ENOMEM; + goto disable_clocks; + } + + zone->reg = tegra->regs + soc->ttgs[i]->sensor_temp_offset; + zone->dev = &pdev->dev; + zone->sg = soc->ttgs[i]; + + z = devm_thermal_zone_of_sensor_register(&pdev->dev, + soc->ttgs[i]->id, zone, + &tegra_of_thermal_ops); + if (IS_ERR(z)) { + err = PTR_ERR(z); + dev_err(&pdev->dev, "failed to register sensor: %d\n", + err); + goto disable_clocks; + } + + zone->tz = z; + tegra->thermctl_tzs[soc->ttgs[i]->id] = z; + + /* Configure hw trip points */ + tegra_soctherm_set_hwtrips(&pdev->dev, soc->ttgs[i], z); + } + + soctherm_debug_init(pdev); + + return 0; + +disable_clocks: + soctherm_clk_enable(pdev, false); + + return err; +} + +static int 
tegra_soctherm_remove(struct platform_device *pdev) +{ + struct tegra_soctherm *tegra = platform_get_drvdata(pdev); + + debugfs_remove_recursive(tegra->debugfs_dir); + + soctherm_clk_enable(pdev, false); + + return 0; +} + +static int __maybe_unused soctherm_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + + soctherm_clk_enable(pdev, false); + + return 0; +} + +static int __maybe_unused soctherm_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct tegra_soctherm *tegra = platform_get_drvdata(pdev); + struct tegra_soctherm_soc *soc = tegra->soc; + int err, i; + + err = soctherm_clk_enable(pdev, true); + if (err) { + dev_err(&pdev->dev, + "Resume failed: enable clocks failed\n"); + return err; + } + + soctherm_init(pdev); + + for (i = 0; i < soc->num_ttgs; ++i) { + struct thermal_zone_device *tz; + + tz = tegra->thermctl_tzs[soc->ttgs[i]->id]; + tegra_soctherm_set_hwtrips(dev, soc->ttgs[i], tz); + } + + return 0; +} + +static SIMPLE_DEV_PM_OPS(tegra_soctherm_pm, soctherm_suspend, soctherm_resume); + +static struct platform_driver tegra_soctherm_driver = { + .probe = tegra_soctherm_probe, + .remove = tegra_soctherm_remove, + .driver = { + .name = "tegra_soctherm", + .pm = &tegra_soctherm_pm, + .of_match_table = tegra_soctherm_of_match, + }, +}; +module_platform_driver(tegra_soctherm_driver); + +MODULE_AUTHOR("Mikko Perttunen <mperttunen@nvidia.com>"); +MODULE_DESCRIPTION("NVIDIA Tegra SOCTHERM thermal management driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/thermal/tegra/soctherm.h b/drivers/thermal/tegra/soctherm.h new file mode 100644 index 000000000000..28e18ec4b4c3 --- /dev/null +++ b/drivers/thermal/tegra/soctherm.h @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __DRIVERS_THERMAL_TEGRA_SOCTHERM_H +#define __DRIVERS_THERMAL_TEGRA_SOCTHERM_H + +#define SENSOR_CONFIG2 8 +#define SENSOR_CONFIG2_THERMA_MASK (0xffff << 16) +#define SENSOR_CONFIG2_THERMA_SHIFT 16 +#define SENSOR_CONFIG2_THERMB_MASK 0xffff +#define SENSOR_CONFIG2_THERMB_SHIFT 0 + +#define THERMCTL_THERMTRIP_CTL 0x80 +/* BITs are defined in device file */ + +#define SENSOR_PDIV 0x1c0 +#define SENSOR_PDIV_CPU_MASK (0xf << 12) +#define SENSOR_PDIV_GPU_MASK (0xf << 8) +#define SENSOR_PDIV_MEM_MASK (0xf << 4) +#define SENSOR_PDIV_PLLX_MASK (0xf << 0) + +#define SENSOR_HOTSPOT_OFF 0x1c4 +#define SENSOR_HOTSPOT_CPU_MASK (0xff << 16) +#define SENSOR_HOTSPOT_GPU_MASK (0xff << 8) +#define SENSOR_HOTSPOT_MEM_MASK (0xff << 0) + +#define SENSOR_TEMP1 0x1c8 +#define SENSOR_TEMP1_CPU_TEMP_MASK (0xffff << 16) +#define SENSOR_TEMP1_GPU_TEMP_MASK 0xffff +#define SENSOR_TEMP2 0x1cc +#define SENSOR_TEMP2_MEM_TEMP_MASK (0xffff << 16) +#define SENSOR_TEMP2_PLLX_TEMP_MASK 0xffff + +/** + * struct tegra_tsensor_group - SOC_THERM sensor group data + * @name: short name of the temperature sensor group + * @id: numeric ID of the temperature sensor group + * @sensor_temp_offset: offset of the SENSOR_TEMP* register + * @sensor_temp_mask: bit mask for this sensor group in SENSOR_TEMP* register + * @pdiv: the sensor count post-divider to use during runtime + * @pdiv_ate: the sensor count post-divider used during automated test + * @pdiv_mask: register bitfield mask for the PDIV field for this sensor + * @pllx_hotspot_diff: hotspot offset from the PLLX sensor, must be 0 for + PLLX sensor group + * @pllx_hotspot_mask: register bitfield mask for the HOTSPOT field + */ +struct tegra_tsensor_group { + const char *name; + u8 id; + u16 sensor_temp_offset; + u32 sensor_temp_mask; + u32 pdiv, pdiv_ate, pdiv_mask; + u32 pllx_hotspot_diff, pllx_hotspot_mask; + u32 thermtrip_enable_mask; + u32 thermtrip_any_en_mask; + u32 thermtrip_threshold_mask; +}; + +struct tegra_tsensor_configuration { + u32 tall, tiddq_en, ten_count, pdiv, pdiv_ate, tsample, tsample_ate; +}; + +struct tegra_tsensor { + const char *name; + const u32 base; + const struct tegra_tsensor_configuration *config; + const u32 calib_fuse_offset; + /* + * Correction values used to modify values read from + * calibration fuses + */ + const s32 fuse_corr_alpha, fuse_corr_beta; + const struct tegra_tsensor_group *group; +}; + +struct tegra_soctherm_fuse { + u32 fuse_base_cp_mask, fuse_base_cp_shift; + u32 fuse_base_ft_mask, fuse_base_ft_shift; + u32 fuse_shift_ft_mask, fuse_shift_ft_shift; + u32 fuse_spare_realignment; +}; + +struct tsensor_shared_calib { + u32 base_cp, base_ft; + u32 actual_temp_cp, actual_temp_ft; +}; + +struct tegra_soctherm_soc { + const struct tegra_tsensor *tsensors; + const unsigned int num_tsensors; + const struct tegra_tsensor_group **ttgs; + const unsigned int num_ttgs; + const struct tegra_soctherm_fuse *tfuse; + const int thresh_grain; +}; + +int tegra_calc_shared_calib(const struct tegra_soctherm_fuse *tfuse, + struct tsensor_shared_calib *shared); +int tegra_calc_tsensor_calib(const struct tegra_tsensor *sensor, + const struct tsensor_shared_calib *shared, + u32 *calib); + +#ifdef CONFIG_ARCH_TEGRA_124_SOC +extern const struct tegra_soctherm_soc tegra124_soctherm; +#endif + +#ifdef CONFIG_ARCH_TEGRA_132_SOC +extern const struct tegra_soctherm_soc tegra132_soctherm; +#endif + +#ifdef CONFIG_ARCH_TEGRA_210_SOC +extern const struct tegra_soctherm_soc tegra210_soctherm; +#endif + +#endif + diff --git 
a/drivers/thermal/tegra/tegra124-soctherm.c b/drivers/thermal/tegra/tegra124-soctherm.c new file mode 100644 index 000000000000..beb9d36b9c8a --- /dev/null +++ b/drivers/thermal/tegra/tegra124-soctherm.c @@ -0,0 +1,196 @@ +/* + * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/module.h> +#include <linux/platform_device.h> + +#include <dt-bindings/thermal/tegra124-soctherm.h> + +#include "soctherm.h" + +#define TEGRA124_THERMTRIP_ANY_EN_MASK (0x1 << 28) +#define TEGRA124_THERMTRIP_MEM_EN_MASK (0x1 << 27) +#define TEGRA124_THERMTRIP_GPU_EN_MASK (0x1 << 26) +#define TEGRA124_THERMTRIP_CPU_EN_MASK (0x1 << 25) +#define TEGRA124_THERMTRIP_TSENSE_EN_MASK (0x1 << 24) +#define TEGRA124_THERMTRIP_GPUMEM_THRESH_MASK (0xff << 16) +#define TEGRA124_THERMTRIP_CPU_THRESH_MASK (0xff << 8) +#define TEGRA124_THERMTRIP_TSENSE_THRESH_MASK 0xff + +#define TEGRA124_THRESH_GRAIN 1000 + +static const struct tegra_tsensor_configuration tegra124_tsensor_config = { + .tall = 16300, + .tiddq_en = 1, + .ten_count = 1, + .tsample = 120, + .tsample_ate = 480, +}; + +static const struct tegra_tsensor_group tegra124_tsensor_group_cpu = { + .id = TEGRA124_SOCTHERM_SENSOR_CPU, + .name = "cpu", + .sensor_temp_offset = SENSOR_TEMP1, + .sensor_temp_mask = SENSOR_TEMP1_CPU_TEMP_MASK, + .pdiv = 8, + .pdiv_ate = 8, + .pdiv_mask = SENSOR_PDIV_CPU_MASK, + .pllx_hotspot_diff = 10, + .pllx_hotspot_mask = SENSOR_HOTSPOT_CPU_MASK, + .thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK, + .thermtrip_enable_mask = TEGRA124_THERMTRIP_CPU_EN_MASK, + .thermtrip_threshold_mask = TEGRA124_THERMTRIP_CPU_THRESH_MASK, +}; + +static const struct tegra_tsensor_group tegra124_tsensor_group_gpu = { + .id = TEGRA124_SOCTHERM_SENSOR_GPU, + .name = "gpu", + .sensor_temp_offset = SENSOR_TEMP1, + .sensor_temp_mask = SENSOR_TEMP1_GPU_TEMP_MASK, + .pdiv = 8, + .pdiv_ate = 8, + .pdiv_mask = SENSOR_PDIV_GPU_MASK, + .pllx_hotspot_diff = 5, + .pllx_hotspot_mask = SENSOR_HOTSPOT_GPU_MASK, + .thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK, + .thermtrip_enable_mask = TEGRA124_THERMTRIP_GPU_EN_MASK, + .thermtrip_threshold_mask = TEGRA124_THERMTRIP_GPUMEM_THRESH_MASK, +}; + +static const struct tegra_tsensor_group tegra124_tsensor_group_pll = { + .id = TEGRA124_SOCTHERM_SENSOR_PLLX, + .name = "pll", + .sensor_temp_offset = SENSOR_TEMP2, + .sensor_temp_mask = SENSOR_TEMP2_PLLX_TEMP_MASK, + .pdiv = 8, + .pdiv_ate = 8, + .pdiv_mask = SENSOR_PDIV_PLLX_MASK, + .thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK, + .thermtrip_enable_mask = TEGRA124_THERMTRIP_TSENSE_EN_MASK, + .thermtrip_threshold_mask = TEGRA124_THERMTRIP_TSENSE_THRESH_MASK, +}; + +static const struct tegra_tsensor_group tegra124_tsensor_group_mem = { + .id = TEGRA124_SOCTHERM_SENSOR_MEM, + .name = "mem", + .sensor_temp_offset = SENSOR_TEMP2, + .sensor_temp_mask = SENSOR_TEMP2_MEM_TEMP_MASK, + .pdiv = 8, + .pdiv_ate = 8, + .pdiv_mask = SENSOR_PDIV_MEM_MASK, + .pllx_hotspot_diff = 0, + .pllx_hotspot_mask = SENSOR_HOTSPOT_MEM_MASK, + .thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK, 
+	.thermtrip_enable_mask = TEGRA124_THERMTRIP_MEM_EN_MASK,
+	.thermtrip_threshold_mask = TEGRA124_THERMTRIP_GPUMEM_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group *tegra124_tsensor_groups[] = {
+	&tegra124_tsensor_group_cpu,
+	&tegra124_tsensor_group_gpu,
+	&tegra124_tsensor_group_pll,
+	&tegra124_tsensor_group_mem,
+};
+
+static const struct tegra_tsensor tegra124_tsensors[] = {
+	{
+		.name = "cpu0",
+		.base = 0xc0,
+		.config = &tegra124_tsensor_config,
+		.calib_fuse_offset = 0x098,
+		.fuse_corr_alpha = 1135400,
+		.fuse_corr_beta = -6266900,
+		.group = &tegra124_tsensor_group_cpu,
+	}, {
+		.name = "cpu1",
+		.base = 0xe0,
+		.config = &tegra124_tsensor_config,
+		.calib_fuse_offset = 0x084,
+		.fuse_corr_alpha = 1122220,
+		.fuse_corr_beta = -5700700,
+		.group = &tegra124_tsensor_group_cpu,
+	}, {
+		.name = "cpu2",
+		.base = 0x100,
+		.config = &tegra124_tsensor_config,
+		.calib_fuse_offset = 0x088,
+		.fuse_corr_alpha = 1127000,
+		.fuse_corr_beta = -6768200,
+		.group = &tegra124_tsensor_group_cpu,
+	}, {
+		.name = "cpu3",
+		.base = 0x120,
+		.config = &tegra124_tsensor_config,
+		.calib_fuse_offset = 0x12c,
+		.fuse_corr_alpha = 1110900,
+		.fuse_corr_beta = -6232000,
+		.group = &tegra124_tsensor_group_cpu,
+	}, {
+		.name = "mem0",
+		.base = 0x140,
+		.config = &tegra124_tsensor_config,
+		.calib_fuse_offset = 0x158,
+		.fuse_corr_alpha = 1122300,
+		.fuse_corr_beta = -5936400,
+		.group = &tegra124_tsensor_group_mem,
+	}, {
+		.name = "mem1",
+		.base = 0x160,
+		.config = &tegra124_tsensor_config,
+		.calib_fuse_offset = 0x15c,
+		.fuse_corr_alpha = 1145700,
+		.fuse_corr_beta = -7124600,
+		.group = &tegra124_tsensor_group_mem,
+	}, {
+		.name = "gpu",
+		.base = 0x180,
+		.config = &tegra124_tsensor_config,
+		.calib_fuse_offset = 0x154,
+		.fuse_corr_alpha = 1120100,
+		.fuse_corr_beta = -6000500,
+		.group = &tegra124_tsensor_group_gpu,
+	}, {
+		.name = "pllx",
+		.base = 0x1a0,
+		.config = &tegra124_tsensor_config,
+		.calib_fuse_offset = 0x160,
+		.fuse_corr_alpha = 1106500,
+		.fuse_corr_beta = -6729300,
+		.group = &tegra124_tsensor_group_pll,
+	},
+};
+
+/*
+ * Mask/shift bits in FUSE_TSENSOR_COMMON and
+ * FUSE_SPARE_REALIGNMENT_REG_0, which are described in
+ * tegra_soctherm_fuse.c
+ */
+static const struct tegra_soctherm_fuse tegra124_soctherm_fuse = {
+	.fuse_base_cp_mask = 0x3ff,
+	.fuse_base_cp_shift = 0,
+	.fuse_base_ft_mask = 0x7ff << 10,
+	.fuse_base_ft_shift = 10,
+	.fuse_shift_ft_mask = 0x1f << 21,
+	.fuse_shift_ft_shift = 21,
+	.fuse_spare_realignment = 0x1fc,
+};
+
+const struct tegra_soctherm_soc tegra124_soctherm = {
+	.tsensors = tegra124_tsensors,
+	.num_tsensors = ARRAY_SIZE(tegra124_tsensors),
+	.ttgs = tegra124_tsensor_groups,
+	.num_ttgs = ARRAY_SIZE(tegra124_tsensor_groups),
+	.tfuse = &tegra124_soctherm_fuse,
+	.thresh_grain = TEGRA124_THRESH_GRAIN,
+};
diff --git a/drivers/thermal/tegra/tegra132-soctherm.c b/drivers/thermal/tegra/tegra132-soctherm.c
new file mode 100644
index 000000000000..e2aa84e1b307
--- /dev/null
+++ b/drivers/thermal/tegra/tegra132-soctherm.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the + * GNU General Public License for more details. + * + */ + +#include <linux/module.h> +#include <linux/platform_device.h> + +#include <dt-bindings/thermal/tegra124-soctherm.h> + +#include "soctherm.h" + +#define TEGRA132_THERMTRIP_ANY_EN_MASK (0x1 << 28) +#define TEGRA132_THERMTRIP_MEM_EN_MASK (0x1 << 27) +#define TEGRA132_THERMTRIP_GPU_EN_MASK (0x1 << 26) +#define TEGRA132_THERMTRIP_CPU_EN_MASK (0x1 << 25) +#define TEGRA132_THERMTRIP_TSENSE_EN_MASK (0x1 << 24) +#define TEGRA132_THERMTRIP_GPUMEM_THRESH_MASK (0xff << 16) +#define TEGRA132_THERMTRIP_CPU_THRESH_MASK (0xff << 8) +#define TEGRA132_THERMTRIP_TSENSE_THRESH_MASK 0xff + +#define TEGRA132_THRESH_GRAIN 1000 + +static const struct tegra_tsensor_configuration tegra132_tsensor_config = { + .tall = 16300, + .tiddq_en = 1, + .ten_count = 1, + .tsample = 120, + .tsample_ate = 480, +}; + +static const struct tegra_tsensor_group tegra132_tsensor_group_cpu = { + .id = TEGRA124_SOCTHERM_SENSOR_CPU, + .name = "cpu", + .sensor_temp_offset = SENSOR_TEMP1, + .sensor_temp_mask = SENSOR_TEMP1_CPU_TEMP_MASK, + .pdiv = 8, + .pdiv_ate = 8, + .pdiv_mask = SENSOR_PDIV_CPU_MASK, + .pllx_hotspot_diff = 10, + .pllx_hotspot_mask = SENSOR_HOTSPOT_CPU_MASK, + .thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK, + .thermtrip_enable_mask = TEGRA132_THERMTRIP_CPU_EN_MASK, + .thermtrip_threshold_mask = TEGRA132_THERMTRIP_CPU_THRESH_MASK, +}; + +static const struct tegra_tsensor_group tegra132_tsensor_group_gpu = { + .id = TEGRA124_SOCTHERM_SENSOR_GPU, + .name = "gpu", + .sensor_temp_offset = SENSOR_TEMP1, + .sensor_temp_mask = SENSOR_TEMP1_GPU_TEMP_MASK, + .pdiv = 8, + .pdiv_ate = 8, + .pdiv_mask = SENSOR_PDIV_GPU_MASK, + .pllx_hotspot_diff = 5, + .pllx_hotspot_mask = SENSOR_HOTSPOT_GPU_MASK, + .thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK, + .thermtrip_enable_mask = TEGRA132_THERMTRIP_GPU_EN_MASK, + .thermtrip_threshold_mask = TEGRA132_THERMTRIP_GPUMEM_THRESH_MASK, +}; + +static const struct tegra_tsensor_group tegra132_tsensor_group_pll = { + .id = TEGRA124_SOCTHERM_SENSOR_PLLX, + .name = "pll", + .sensor_temp_offset = SENSOR_TEMP2, + .sensor_temp_mask = SENSOR_TEMP2_PLLX_TEMP_MASK, + .pdiv = 8, + .pdiv_ate = 8, + .pdiv_mask = SENSOR_PDIV_PLLX_MASK, + .thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK, + .thermtrip_enable_mask = TEGRA132_THERMTRIP_TSENSE_EN_MASK, + .thermtrip_threshold_mask = TEGRA132_THERMTRIP_TSENSE_THRESH_MASK, +}; + +static const struct tegra_tsensor_group tegra132_tsensor_group_mem = { + .id = TEGRA124_SOCTHERM_SENSOR_MEM, + .name = "mem", + .sensor_temp_offset = SENSOR_TEMP2, + .sensor_temp_mask = SENSOR_TEMP2_MEM_TEMP_MASK, + .pdiv = 8, + .pdiv_ate = 8, + .pdiv_mask = SENSOR_PDIV_MEM_MASK, + .pllx_hotspot_diff = 0, + .pllx_hotspot_mask = SENSOR_HOTSPOT_MEM_MASK, + .thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK, + .thermtrip_enable_mask = TEGRA132_THERMTRIP_MEM_EN_MASK, + .thermtrip_threshold_mask = TEGRA132_THERMTRIP_GPUMEM_THRESH_MASK, +}; + +static const struct tegra_tsensor_group *tegra132_tsensor_groups[] = { + &tegra132_tsensor_group_cpu, + &tegra132_tsensor_group_gpu, + &tegra132_tsensor_group_pll, + &tegra132_tsensor_group_mem, +}; + +static struct tegra_tsensor tegra132_tsensors[] = { + { + .name = "cpu0", + .base = 0xc0, + .config = &tegra132_tsensor_config, + .calib_fuse_offset = 0x098, + .fuse_corr_alpha = 1126600, + .fuse_corr_beta = -9433500, + .group = &tegra132_tsensor_group_cpu, + }, { + .name = "cpu1", + .base = 0xe0, + .config = &tegra132_tsensor_config, + .calib_fuse_offset = 0x084, 
+		.fuse_corr_alpha = 1110800,
+		.fuse_corr_beta = -7383000,
+		.group = &tegra132_tsensor_group_cpu,
+	}, {
+		.name = "cpu2",
+		.base = 0x100,
+		.config = &tegra132_tsensor_config,
+		.calib_fuse_offset = 0x088,
+		.fuse_corr_alpha = 1113800,
+		.fuse_corr_beta = -6215200,
+		.group = &tegra132_tsensor_group_cpu,
+	}, {
+		.name = "cpu3",
+		.base = 0x120,
+		.config = &tegra132_tsensor_config,
+		.calib_fuse_offset = 0x12c,
+		.fuse_corr_alpha = 1129600,
+		.fuse_corr_beta = -8196100,
+		.group = &tegra132_tsensor_group_cpu,
+	}, {
+		.name = "mem0",
+		.base = 0x140,
+		.config = &tegra132_tsensor_config,
+		.calib_fuse_offset = 0x158,
+		.fuse_corr_alpha = 1132900,
+		.fuse_corr_beta = -6755300,
+		.group = &tegra132_tsensor_group_mem,
+	}, {
+		.name = "mem1",
+		.base = 0x160,
+		.config = &tegra132_tsensor_config,
+		.calib_fuse_offset = 0x15c,
+		.fuse_corr_alpha = 1142300,
+		.fuse_corr_beta = -7374200,
+		.group = &tegra132_tsensor_group_mem,
+	}, {
+		.name = "gpu",
+		.base = 0x180,
+		.config = &tegra132_tsensor_config,
+		.calib_fuse_offset = 0x154,
+		.fuse_corr_alpha = 1125100,
+		.fuse_corr_beta = -6350400,
+		.group = &tegra132_tsensor_group_gpu,
+	}, {
+		.name = "pllx",
+		.base = 0x1a0,
+		.config = &tegra132_tsensor_config,
+		.calib_fuse_offset = 0x160,
+		.fuse_corr_alpha = 1118100,
+		.fuse_corr_beta = -8208800,
+		.group = &tegra132_tsensor_group_pll,
+	},
+};
+
+/*
+ * Mask/shift bits in FUSE_TSENSOR_COMMON and
+ * FUSE_SPARE_REALIGNMENT_REG_0, which are described in
+ * tegra_soctherm_fuse.c
+ */
+static const struct tegra_soctherm_fuse tegra132_soctherm_fuse = {
+	.fuse_base_cp_mask = 0x3ff,
+	.fuse_base_cp_shift = 0,
+	.fuse_base_ft_mask = 0x7ff << 10,
+	.fuse_base_ft_shift = 10,
+	.fuse_shift_ft_mask = 0x1f << 21,
+	.fuse_shift_ft_shift = 21,
+	.fuse_spare_realignment = 0x1fc,
+};
+
+const struct tegra_soctherm_soc tegra132_soctherm = {
+	.tsensors = tegra132_tsensors,
+	.num_tsensors = ARRAY_SIZE(tegra132_tsensors),
+	.ttgs = tegra132_tsensor_groups,
+	.num_ttgs = ARRAY_SIZE(tegra132_tsensor_groups),
+	.tfuse = &tegra132_soctherm_fuse,
+	.thresh_grain = TEGRA132_THRESH_GRAIN,
+};
diff --git a/drivers/thermal/tegra/tegra210-soctherm.c b/drivers/thermal/tegra/tegra210-soctherm.c
new file mode 100644
index 000000000000..19cc0ab66f0e
--- /dev/null
+++ b/drivers/thermal/tegra/tegra210-soctherm.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
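Tegra124 and Tegra132 repeat the same fuse layout: base_cp in bits [9:0] and base_ft in bits [20:10] of FUSE_TSENSOR_COMMON, shift_ft in bits [25:21], plus the spare realignment word at 0x1fc. Keeping them as mask/shift pairs lets the calibration code stay table-driven across SoCs. A hedged sketch of the unpacking arithmetic (the real logic lives in tegra_calc_shared_calib(); this only illustrates the mask-then-shift step):

/* isolate one fuse field: mask first, then normalize with the shift */
static u32 fuse_field(u32 word, u32 mask, u32 shift)
{
	return (word & mask) >> shift;
}

static void unpack_shared_fuses(u32 val, const struct tegra_soctherm_fuse *f,
				struct tsensor_shared_calib *shared)
{
	/* e.g. Tegra132 base_ft: (val & (0x7ff << 10)) >> 10 */
	shared->base_cp = fuse_field(val, f->fuse_base_cp_mask,
				     f->fuse_base_cp_shift);
	shared->base_ft = fuse_field(val, f->fuse_base_ft_mask,
				     f->fuse_base_ft_shift);
}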
+ * + */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <soc/tegra/fuse.h> + +#include <dt-bindings/thermal/tegra124-soctherm.h> + +#include "soctherm.h" + +#define TEGRA210_THERMTRIP_ANY_EN_MASK (0x1 << 31) +#define TEGRA210_THERMTRIP_MEM_EN_MASK (0x1 << 30) +#define TEGRA210_THERMTRIP_GPU_EN_MASK (0x1 << 29) +#define TEGRA210_THERMTRIP_CPU_EN_MASK (0x1 << 28) +#define TEGRA210_THERMTRIP_TSENSE_EN_MASK (0x1 << 27) +#define TEGRA210_THERMTRIP_GPUMEM_THRESH_MASK (0x1ff << 18) +#define TEGRA210_THERMTRIP_CPU_THRESH_MASK (0x1ff << 9) +#define TEGRA210_THERMTRIP_TSENSE_THRESH_MASK 0x1ff + +#define TEGRA210_THRESH_GRAIN 500 + +static const struct tegra_tsensor_configuration tegra210_tsensor_config = { + .tall = 16300, + .tiddq_en = 1, + .ten_count = 1, + .tsample = 120, + .tsample_ate = 480, +}; + +static const struct tegra_tsensor_group tegra210_tsensor_group_cpu = { + .id = TEGRA124_SOCTHERM_SENSOR_CPU, + .name = "cpu", + .sensor_temp_offset = SENSOR_TEMP1, + .sensor_temp_mask = SENSOR_TEMP1_CPU_TEMP_MASK, + .pdiv = 8, + .pdiv_ate = 8, + .pdiv_mask = SENSOR_PDIV_CPU_MASK, + .pllx_hotspot_diff = 10, + .pllx_hotspot_mask = SENSOR_HOTSPOT_CPU_MASK, + .thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK, + .thermtrip_enable_mask = TEGRA210_THERMTRIP_CPU_EN_MASK, + .thermtrip_threshold_mask = TEGRA210_THERMTRIP_CPU_THRESH_MASK, +}; + +static const struct tegra_tsensor_group tegra210_tsensor_group_gpu = { + .id = TEGRA124_SOCTHERM_SENSOR_GPU, + .name = "gpu", + .sensor_temp_offset = SENSOR_TEMP1, + .sensor_temp_mask = SENSOR_TEMP1_GPU_TEMP_MASK, + .pdiv = 8, + .pdiv_ate = 8, + .pdiv_mask = SENSOR_PDIV_GPU_MASK, + .pllx_hotspot_diff = 5, + .pllx_hotspot_mask = SENSOR_HOTSPOT_GPU_MASK, + .thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK, + .thermtrip_enable_mask = TEGRA210_THERMTRIP_GPU_EN_MASK, + .thermtrip_threshold_mask = TEGRA210_THERMTRIP_GPUMEM_THRESH_MASK, +}; + +static const struct tegra_tsensor_group tegra210_tsensor_group_pll = { + .id = TEGRA124_SOCTHERM_SENSOR_PLLX, + .name = "pll", + .sensor_temp_offset = SENSOR_TEMP2, + .sensor_temp_mask = SENSOR_TEMP2_PLLX_TEMP_MASK, + .pdiv = 8, + .pdiv_ate = 8, + .pdiv_mask = SENSOR_PDIV_PLLX_MASK, + .thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK, + .thermtrip_enable_mask = TEGRA210_THERMTRIP_TSENSE_EN_MASK, + .thermtrip_threshold_mask = TEGRA210_THERMTRIP_TSENSE_THRESH_MASK, +}; + +static const struct tegra_tsensor_group tegra210_tsensor_group_mem = { + .id = TEGRA124_SOCTHERM_SENSOR_MEM, + .name = "mem", + .sensor_temp_offset = SENSOR_TEMP2, + .sensor_temp_mask = SENSOR_TEMP2_MEM_TEMP_MASK, + .pdiv = 8, + .pdiv_ate = 8, + .pdiv_mask = SENSOR_PDIV_MEM_MASK, + .pllx_hotspot_diff = 0, + .pllx_hotspot_mask = SENSOR_HOTSPOT_MEM_MASK, + .thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK, + .thermtrip_enable_mask = TEGRA210_THERMTRIP_MEM_EN_MASK, + .thermtrip_threshold_mask = TEGRA210_THERMTRIP_GPUMEM_THRESH_MASK, +}; + +static const struct tegra_tsensor_group *tegra210_tsensor_groups[] = { + &tegra210_tsensor_group_cpu, + &tegra210_tsensor_group_gpu, + &tegra210_tsensor_group_pll, + &tegra210_tsensor_group_mem, +}; + +static const struct tegra_tsensor tegra210_tsensors[] = { + { + .name = "cpu0", + .base = 0xc0, + .config = &tegra210_tsensor_config, + .calib_fuse_offset = 0x098, + .fuse_corr_alpha = 1085000, + .fuse_corr_beta = 3244200, + .group = &tegra210_tsensor_group_cpu, + }, { + .name = "cpu1", + .base = 0xe0, + .config = &tegra210_tsensor_config, + .calib_fuse_offset = 0x084, + .fuse_corr_alpha = 
1126200,
+		.fuse_corr_beta = -67500,
+		.group = &tegra210_tsensor_group_cpu,
+	}, {
+		.name = "cpu2",
+		.base = 0x100,
+		.config = &tegra210_tsensor_config,
+		.calib_fuse_offset = 0x088,
+		.fuse_corr_alpha = 1098400,
+		.fuse_corr_beta = 2251100,
+		.group = &tegra210_tsensor_group_cpu,
+	}, {
+		.name = "cpu3",
+		.base = 0x120,
+		.config = &tegra210_tsensor_config,
+		.calib_fuse_offset = 0x12c,
+		.fuse_corr_alpha = 1108000,
+		.fuse_corr_beta = 602700,
+		.group = &tegra210_tsensor_group_cpu,
+	}, {
+		.name = "mem0",
+		.base = 0x140,
+		.config = &tegra210_tsensor_config,
+		.calib_fuse_offset = 0x158,
+		.fuse_corr_alpha = 1069200,
+		.fuse_corr_beta = 3549900,
+		.group = &tegra210_tsensor_group_mem,
+	}, {
+		.name = "mem1",
+		.base = 0x160,
+		.config = &tegra210_tsensor_config,
+		.calib_fuse_offset = 0x15c,
+		.fuse_corr_alpha = 1173700,
+		.fuse_corr_beta = -6263600,
+		.group = &tegra210_tsensor_group_mem,
+	}, {
+		.name = "gpu",
+		.base = 0x180,
+		.config = &tegra210_tsensor_config,
+		.calib_fuse_offset = 0x154,
+		.fuse_corr_alpha = 1074300,
+		.fuse_corr_beta = 2734900,
+		.group = &tegra210_tsensor_group_gpu,
+	}, {
+		.name = "pllx",
+		.base = 0x1a0,
+		.config = &tegra210_tsensor_config,
+		.calib_fuse_offset = 0x160,
+		.fuse_corr_alpha = 1039700,
+		.fuse_corr_beta = 6829100,
+		.group = &tegra210_tsensor_group_pll,
+	},
+};
+
+/*
+ * Mask/shift bits in FUSE_TSENSOR_COMMON, which are described in
+ * tegra_soctherm_fuse.c
+ */
+static const struct tegra_soctherm_fuse tegra210_soctherm_fuse = {
+	.fuse_base_cp_mask = 0x3ff << 11,
+	.fuse_base_cp_shift = 11,
+	.fuse_base_ft_mask = 0x7ff << 21,
+	.fuse_base_ft_shift = 21,
+	.fuse_shift_ft_mask = 0x1f << 6,
+	.fuse_shift_ft_shift = 6,
+	.fuse_spare_realignment = 0,
+};
+
+const struct tegra_soctherm_soc tegra210_soctherm = {
+	.tsensors = tegra210_tsensors,
+	.num_tsensors = ARRAY_SIZE(tegra210_tsensors),
+	.ttgs = tegra210_tsensor_groups,
+	.num_ttgs = ARRAY_SIZE(tegra210_tsensor_groups),
+	.tfuse = &tegra210_soctherm_fuse,
+	.thresh_grain = TEGRA210_THRESH_GRAIN,
+};
diff --git a/drivers/thermal/tegra_soctherm.c b/drivers/thermal/tegra_soctherm.c
deleted file mode 100644
index 136975220c92..000000000000
--- a/drivers/thermal/tegra_soctherm.c
+++ /dev/null
@@ -1,476 +0,0 @@
-/*
- * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
- *
- * Author:
- *	Mikko Perttunen <mperttunen@nvidia.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
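Against Tegra124/132, Tegra210 halves the threshold grain to 500 and widens the THERMTRIP threshold fields from 8 bits (0xff) to 9 bits (0x1ff): twice the counts cover roughly the same temperature span at twice the resolution. Assuming, as the names suggest, that a threshold is programmed as the trip temperature divided by the grain (presumably inside tegra_soctherm_set_hwtrips() in soctherm.c), the arithmetic is:

/* millicelsius -> threshold-register counts at the SoC's granularity */
static u32 trip_mc_to_counts(int trip_mc, int thresh_grain)
{
	return trip_mc / thresh_grain;
}

/*
 * A 102 C thermtrip point:
 *   Tegra124/132: 102000 / 1000 = 102 -> fits the 8-bit 0xff fields
 *   Tegra210:     102000 / 500  = 204 -> needs the 9-bit 0x1ff fields
 */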
- * - */ - -#include <linux/bitops.h> -#include <linux/clk.h> -#include <linux/delay.h> -#include <linux/err.h> -#include <linux/interrupt.h> -#include <linux/io.h> -#include <linux/module.h> -#include <linux/of.h> -#include <linux/platform_device.h> -#include <linux/reset.h> -#include <linux/thermal.h> - -#include <soc/tegra/fuse.h> - -#define SENSOR_CONFIG0 0 -#define SENSOR_CONFIG0_STOP BIT(0) -#define SENSOR_CONFIG0_TALL_SHIFT 8 -#define SENSOR_CONFIG0_TCALC_OVER BIT(4) -#define SENSOR_CONFIG0_OVER BIT(3) -#define SENSOR_CONFIG0_CPTR_OVER BIT(2) - -#define SENSOR_CONFIG1 4 -#define SENSOR_CONFIG1_TSAMPLE_SHIFT 0 -#define SENSOR_CONFIG1_TIDDQ_EN_SHIFT 15 -#define SENSOR_CONFIG1_TEN_COUNT_SHIFT 24 -#define SENSOR_CONFIG1_TEMP_ENABLE BIT(31) - -#define SENSOR_CONFIG2 8 -#define SENSOR_CONFIG2_THERMA_SHIFT 16 -#define SENSOR_CONFIG2_THERMB_SHIFT 0 - -#define SENSOR_PDIV 0x1c0 -#define SENSOR_PDIV_T124 0x8888 -#define SENSOR_HOTSPOT_OFF 0x1c4 -#define SENSOR_HOTSPOT_OFF_T124 0x00060600 -#define SENSOR_TEMP1 0x1c8 -#define SENSOR_TEMP2 0x1cc - -#define SENSOR_TEMP_MASK 0xffff -#define READBACK_VALUE_MASK 0xff00 -#define READBACK_VALUE_SHIFT 8 -#define READBACK_ADD_HALF BIT(7) -#define READBACK_NEGATE BIT(0) - -#define FUSE_TSENSOR8_CALIB 0x180 -#define FUSE_SPARE_REALIGNMENT_REG_0 0x1fc - -#define FUSE_TSENSOR_CALIB_CP_TS_BASE_MASK 0x1fff -#define FUSE_TSENSOR_CALIB_FT_TS_BASE_MASK (0x1fff << 13) -#define FUSE_TSENSOR_CALIB_FT_TS_BASE_SHIFT 13 - -#define FUSE_TSENSOR8_CALIB_CP_TS_BASE_MASK 0x3ff -#define FUSE_TSENSOR8_CALIB_FT_TS_BASE_MASK (0x7ff << 10) -#define FUSE_TSENSOR8_CALIB_FT_TS_BASE_SHIFT 10 - -#define FUSE_SPARE_REALIGNMENT_REG_SHIFT_CP_MASK 0x3f -#define FUSE_SPARE_REALIGNMENT_REG_SHIFT_FT_MASK (0x1f << 21) -#define FUSE_SPARE_REALIGNMENT_REG_SHIFT_FT_SHIFT 21 - -#define NOMINAL_CALIB_FT_T124 105 -#define NOMINAL_CALIB_CP_T124 25 - -struct tegra_tsensor_configuration { - u32 tall, tsample, tiddq_en, ten_count, pdiv, tsample_ate, pdiv_ate; -}; - -struct tegra_tsensor { - const struct tegra_tsensor_configuration *config; - u32 base, calib_fuse_offset; - /* Correction values used to modify values read from calibration fuses */ - s32 fuse_corr_alpha, fuse_corr_beta; -}; - -struct tegra_thermctl_zone { - void __iomem *reg; - unsigned int shift; -}; - -static const struct tegra_tsensor_configuration t124_tsensor_config = { - .tall = 16300, - .tsample = 120, - .tiddq_en = 1, - .ten_count = 1, - .pdiv = 8, - .tsample_ate = 480, - .pdiv_ate = 8 -}; - -static const struct tegra_tsensor t124_tsensors[] = { - { - .config = &t124_tsensor_config, - .base = 0xc0, - .calib_fuse_offset = 0x098, - .fuse_corr_alpha = 1135400, - .fuse_corr_beta = -6266900, - }, - { - .config = &t124_tsensor_config, - .base = 0xe0, - .calib_fuse_offset = 0x084, - .fuse_corr_alpha = 1122220, - .fuse_corr_beta = -5700700, - }, - { - .config = &t124_tsensor_config, - .base = 0x100, - .calib_fuse_offset = 0x088, - .fuse_corr_alpha = 1127000, - .fuse_corr_beta = -6768200, - }, - { - .config = &t124_tsensor_config, - .base = 0x120, - .calib_fuse_offset = 0x12c, - .fuse_corr_alpha = 1110900, - .fuse_corr_beta = -6232000, - }, - { - .config = &t124_tsensor_config, - .base = 0x140, - .calib_fuse_offset = 0x158, - .fuse_corr_alpha = 1122300, - .fuse_corr_beta = -5936400, - }, - { - .config = &t124_tsensor_config, - .base = 0x160, - .calib_fuse_offset = 0x15c, - .fuse_corr_alpha = 1145700, - .fuse_corr_beta = -7124600, - }, - { - .config = &t124_tsensor_config, - .base = 0x180, - .calib_fuse_offset = 0x154, - .fuse_corr_alpha = 
1120100, - .fuse_corr_beta = -6000500, - }, - { - .config = &t124_tsensor_config, - .base = 0x1a0, - .calib_fuse_offset = 0x160, - .fuse_corr_alpha = 1106500, - .fuse_corr_beta = -6729300, - }, -}; - -struct tegra_soctherm { - struct reset_control *reset; - struct clk *clock_tsensor; - struct clk *clock_soctherm; - void __iomem *regs; - - struct thermal_zone_device *thermctl_tzs[4]; -}; - -struct tsensor_shared_calibration { - u32 base_cp, base_ft; - u32 actual_temp_cp, actual_temp_ft; -}; - -static int calculate_shared_calibration(struct tsensor_shared_calibration *r) -{ - u32 val, shifted_cp, shifted_ft; - int err; - - err = tegra_fuse_readl(FUSE_TSENSOR8_CALIB, &val); - if (err) - return err; - r->base_cp = val & FUSE_TSENSOR8_CALIB_CP_TS_BASE_MASK; - r->base_ft = (val & FUSE_TSENSOR8_CALIB_FT_TS_BASE_MASK) - >> FUSE_TSENSOR8_CALIB_FT_TS_BASE_SHIFT; - val = ((val & FUSE_SPARE_REALIGNMENT_REG_SHIFT_FT_MASK) - >> FUSE_SPARE_REALIGNMENT_REG_SHIFT_FT_SHIFT); - shifted_ft = sign_extend32(val, 4); - - err = tegra_fuse_readl(FUSE_SPARE_REALIGNMENT_REG_0, &val); - if (err) - return err; - shifted_cp = sign_extend32(val, 5); - - r->actual_temp_cp = 2 * NOMINAL_CALIB_CP_T124 + shifted_cp; - r->actual_temp_ft = 2 * NOMINAL_CALIB_FT_T124 + shifted_ft; - - return 0; -} - -static s64 div64_s64_precise(s64 a, s64 b) -{ - s64 r, al; - - /* Scale up for increased precision division */ - al = a << 16; - - r = div64_s64(al * 2 + 1, 2 * b); - return r >> 16; -} - -static int -calculate_tsensor_calibration(const struct tegra_tsensor *sensor, - const struct tsensor_shared_calibration *shared, - u32 *calib) -{ - u32 val; - s32 actual_tsensor_ft, actual_tsensor_cp, delta_sens, delta_temp, - mult, div; - s16 therma, thermb; - s64 tmp; - int err; - - err = tegra_fuse_readl(sensor->calib_fuse_offset, &val); - if (err) - return err; - - actual_tsensor_cp = (shared->base_cp * 64) + sign_extend32(val, 12); - val = (val & FUSE_TSENSOR_CALIB_FT_TS_BASE_MASK) - >> FUSE_TSENSOR_CALIB_FT_TS_BASE_SHIFT; - actual_tsensor_ft = (shared->base_ft * 32) + sign_extend32(val, 12); - - delta_sens = actual_tsensor_ft - actual_tsensor_cp; - delta_temp = shared->actual_temp_ft - shared->actual_temp_cp; - - mult = sensor->config->pdiv * sensor->config->tsample_ate; - div = sensor->config->tsample * sensor->config->pdiv_ate; - - therma = div64_s64_precise((s64) delta_temp * (1LL << 13) * mult, - (s64) delta_sens * div); - - tmp = (s64)actual_tsensor_ft * shared->actual_temp_cp - - (s64)actual_tsensor_cp * shared->actual_temp_ft; - thermb = div64_s64_precise(tmp, (s64)delta_sens); - - therma = div64_s64_precise((s64)therma * sensor->fuse_corr_alpha, - (s64)1000000LL); - thermb = div64_s64_precise((s64)thermb * sensor->fuse_corr_alpha + - sensor->fuse_corr_beta, (s64)1000000LL); - - *calib = ((u16)therma << SENSOR_CONFIG2_THERMA_SHIFT) | - ((u16)thermb << SENSOR_CONFIG2_THERMB_SHIFT); - - return 0; -} - -static int enable_tsensor(struct tegra_soctherm *tegra, - const struct tegra_tsensor *sensor, - const struct tsensor_shared_calibration *shared) -{ - void __iomem *base = tegra->regs + sensor->base; - unsigned int val; - u32 calib; - int err; - - err = calculate_tsensor_calibration(sensor, shared, &calib); - if (err) - return err; - - val = sensor->config->tall << SENSOR_CONFIG0_TALL_SHIFT; - writel(val, base + SENSOR_CONFIG0); - - val = (sensor->config->tsample - 1) << SENSOR_CONFIG1_TSAMPLE_SHIFT; - val |= sensor->config->tiddq_en << SENSOR_CONFIG1_TIDDQ_EN_SHIFT; - val |= sensor->config->ten_count << SENSOR_CONFIG1_TEN_COUNT_SHIFT; - 
val |= SENSOR_CONFIG1_TEMP_ENABLE; - writel(val, base + SENSOR_CONFIG1); - - writel(calib, base + SENSOR_CONFIG2); - - return 0; -} - -/* - * Translate from soctherm readback format to millicelsius. - * The soctherm readback format in bits is as follows: - * TTTTTTTT H______N - * where T's contain the temperature in Celsius, - * H denotes an addition of 0.5 Celsius and N denotes negation - * of the final value. - */ -static int translate_temp(u16 val) -{ - long t; - - t = ((val & READBACK_VALUE_MASK) >> READBACK_VALUE_SHIFT) * 1000; - if (val & READBACK_ADD_HALF) - t += 500; - if (val & READBACK_NEGATE) - t *= -1; - - return t; -} - -static int tegra_thermctl_get_temp(void *data, int *out_temp) -{ - struct tegra_thermctl_zone *zone = data; - u32 val; - - val = (readl(zone->reg) >> zone->shift) & SENSOR_TEMP_MASK; - *out_temp = translate_temp(val); - - return 0; -} - -static const struct thermal_zone_of_device_ops tegra_of_thermal_ops = { - .get_temp = tegra_thermctl_get_temp, -}; - -static const struct of_device_id tegra_soctherm_of_match[] = { - { .compatible = "nvidia,tegra124-soctherm" }, - { }, -}; -MODULE_DEVICE_TABLE(of, tegra_soctherm_of_match); - -struct thermctl_zone_desc { - unsigned int offset; - unsigned int shift; -}; - -static const struct thermctl_zone_desc t124_thermctl_temp_zones[] = { - { SENSOR_TEMP1, 16 }, - { SENSOR_TEMP2, 16 }, - { SENSOR_TEMP1, 0 }, - { SENSOR_TEMP2, 0 } -}; - -static int tegra_soctherm_probe(struct platform_device *pdev) -{ - struct tegra_soctherm *tegra; - struct thermal_zone_device *tz; - struct tsensor_shared_calibration shared_calib; - struct resource *res; - unsigned int i; - int err; - - const struct tegra_tsensor *tsensors = t124_tsensors; - - tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL); - if (!tegra) - return -ENOMEM; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - tegra->regs = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(tegra->regs)) - return PTR_ERR(tegra->regs); - - tegra->reset = devm_reset_control_get(&pdev->dev, "soctherm"); - if (IS_ERR(tegra->reset)) { - dev_err(&pdev->dev, "can't get soctherm reset\n"); - return PTR_ERR(tegra->reset); - } - - tegra->clock_tsensor = devm_clk_get(&pdev->dev, "tsensor"); - if (IS_ERR(tegra->clock_tsensor)) { - dev_err(&pdev->dev, "can't get tsensor clock\n"); - return PTR_ERR(tegra->clock_tsensor); - } - - tegra->clock_soctherm = devm_clk_get(&pdev->dev, "soctherm"); - if (IS_ERR(tegra->clock_soctherm)) { - dev_err(&pdev->dev, "can't get soctherm clock\n"); - return PTR_ERR(tegra->clock_soctherm); - } - - reset_control_assert(tegra->reset); - - err = clk_prepare_enable(tegra->clock_soctherm); - if (err) - return err; - - err = clk_prepare_enable(tegra->clock_tsensor); - if (err) { - clk_disable_unprepare(tegra->clock_soctherm); - return err; - } - - reset_control_deassert(tegra->reset); - - /* Initialize raw sensors */ - - err = calculate_shared_calibration(&shared_calib); - if (err) - goto disable_clocks; - - for (i = 0; i < ARRAY_SIZE(t124_tsensors); ++i) { - err = enable_tsensor(tegra, tsensors + i, &shared_calib); - if (err) - goto disable_clocks; - } - - writel(SENSOR_PDIV_T124, tegra->regs + SENSOR_PDIV); - writel(SENSOR_HOTSPOT_OFF_T124, tegra->regs + SENSOR_HOTSPOT_OFF); - - /* Initialize thermctl sensors */ - - for (i = 0; i < ARRAY_SIZE(tegra->thermctl_tzs); ++i) { - struct tegra_thermctl_zone *zone = - devm_kzalloc(&pdev->dev, sizeof(*zone), GFP_KERNEL); - if (!zone) { - err = -ENOMEM; - goto unregister_tzs; - } - - zone->reg = tegra->regs + 
t124_thermctl_temp_zones[i].offset; - zone->shift = t124_thermctl_temp_zones[i].shift; - - tz = thermal_zone_of_sensor_register(&pdev->dev, i, zone, - &tegra_of_thermal_ops); - if (IS_ERR(tz)) { - err = PTR_ERR(tz); - dev_err(&pdev->dev, "failed to register sensor: %d\n", - err); - goto unregister_tzs; - } - - tegra->thermctl_tzs[i] = tz; - } - - return 0; - -unregister_tzs: - while (i--) - thermal_zone_of_sensor_unregister(&pdev->dev, - tegra->thermctl_tzs[i]); - -disable_clocks: - clk_disable_unprepare(tegra->clock_tsensor); - clk_disable_unprepare(tegra->clock_soctherm); - - return err; -} - -static int tegra_soctherm_remove(struct platform_device *pdev) -{ - struct tegra_soctherm *tegra = platform_get_drvdata(pdev); - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(tegra->thermctl_tzs); ++i) { - thermal_zone_of_sensor_unregister(&pdev->dev, - tegra->thermctl_tzs[i]); - } - - clk_disable_unprepare(tegra->clock_tsensor); - clk_disable_unprepare(tegra->clock_soctherm); - - return 0; -} - -static struct platform_driver tegra_soctherm_driver = { - .probe = tegra_soctherm_probe, - .remove = tegra_soctherm_remove, - .driver = { - .name = "tegra-soctherm", - .of_match_table = tegra_soctherm_of_match, - }, -}; -module_platform_driver(tegra_soctherm_driver); - -MODULE_AUTHOR("Mikko Perttunen <mperttunen@nvidia.com>"); -MODULE_DESCRIPTION("NVIDIA Tegra SOCTHERM thermal management driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/thermal/thermal-generic-adc.c b/drivers/thermal/thermal-generic-adc.c new file mode 100644 index 000000000000..73f55d6a1721 --- /dev/null +++ b/drivers/thermal/thermal-generic-adc.c @@ -0,0 +1,182 @@ +/* + * Generic ADC thermal driver + * + * Copyright (C) 2016 NVIDIA CORPORATION. All rights reserved. + * + * Author: Laxman Dewangan <ldewangan@nvidia.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
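The deleted driver above and the new generic-ADC driver that follows both hang sensors off the OF thermal core the same way: a per-zone context is registered together with a thermal_zone_of_device_ops whose .get_temp callback reports millicelsius. The minimal shape of that contract, with hypothetical foo_* names standing in for a driver's own types and hardware access:

struct foo_sensor {
	/* whatever .get_temp needs to reach the hardware */
};

static int foo_read_millicelsius(struct foo_sensor *s);	/* hw access stub */

static int foo_get_temp(void *data, int *temp)
{
	struct foo_sensor *s = data;

	*temp = foo_read_millicelsius(s);	/* the core expects millicelsius */
	return 0;
}

static const struct thermal_zone_of_device_ops foo_thermal_ops = {
	.get_temp = foo_get_temp,
};

static int foo_register(struct device *dev, struct foo_sensor *s)
{
	struct thermal_zone_device *tz;

	/* the devm_ variant unregisters on detach, cf. the ti-thermal hunk */
	tz = devm_thermal_zone_of_sensor_register(dev, 0, s, &foo_thermal_ops);
	return PTR_ERR_OR_ZERO(tz);
}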
+ */ +#include <linux/iio/consumer.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/thermal.h> + +struct gadc_thermal_info { + struct device *dev; + struct thermal_zone_device *tz_dev; + struct iio_channel *channel; + s32 *lookup_table; + int nlookup_table; +}; + +static int gadc_thermal_adc_to_temp(struct gadc_thermal_info *gti, int val) +{ + int temp, adc_hi, adc_lo; + int i; + + for (i = 0; i < gti->nlookup_table; i++) { + if (val >= gti->lookup_table[2 * i + 1]) + break; + } + + if (i == 0) { + temp = gti->lookup_table[0]; + } else if (i >= (gti->nlookup_table - 1)) { + temp = gti->lookup_table[2 * (gti->nlookup_table - 1)]; + } else { + adc_hi = gti->lookup_table[2 * i - 1]; + adc_lo = gti->lookup_table[2 * i + 1]; + temp = gti->lookup_table[2 * i]; + temp -= ((val - adc_lo) * 1000) / (adc_hi - adc_lo); + } + + return temp; +} + +static int gadc_thermal_get_temp(void *data, int *temp) +{ + struct gadc_thermal_info *gti = data; + int val; + int ret; + + ret = iio_read_channel_processed(gti->channel, &val); + if (ret < 0) { + dev_err(gti->dev, "IIO channel read failed %d\n", ret); + return ret; + } + *temp = gadc_thermal_adc_to_temp(gti, val); + + return 0; +} + +static const struct thermal_zone_of_device_ops gadc_thermal_ops = { + .get_temp = gadc_thermal_get_temp, +}; + +static int gadc_thermal_read_linear_lookup_table(struct device *dev, + struct gadc_thermal_info *gti) +{ + struct device_node *np = dev->of_node; + int ntable; + int ret; + + ntable = of_property_count_elems_of_size(np, "temperature-lookup-table", + sizeof(u32)); + if (ntable < 0) { + dev_err(dev, "Lookup table is not provided\n"); + return ntable; + } + + if (ntable % 2) { + dev_err(dev, "Pair of temperature vs ADC read value missing\n"); + return -EINVAL; + } + + gti->lookup_table = devm_kzalloc(dev, sizeof(*gti->lookup_table) * + ntable, GFP_KERNEL); + if (!gti->lookup_table) + return -ENOMEM; + + ret = of_property_read_u32_array(np, "temperature-lookup-table", + (u32 *)gti->lookup_table, ntable); + if (ret < 0) { + dev_err(dev, "Failed to read temperature lookup table: %d\n", + ret); + return ret; + } + + gti->nlookup_table = ntable / 2; + + return 0; +} + +static int gadc_thermal_probe(struct platform_device *pdev) +{ + struct gadc_thermal_info *gti; + int ret; + + if (!pdev->dev.of_node) { + dev_err(&pdev->dev, "Only DT based supported\n"); + return -ENODEV; + } + + gti = devm_kzalloc(&pdev->dev, sizeof(*gti), GFP_KERNEL); + if (!gti) + return -ENOMEM; + + ret = gadc_thermal_read_linear_lookup_table(&pdev->dev, gti); + if (ret < 0) + return ret; + + gti->dev = &pdev->dev; + platform_set_drvdata(pdev, gti); + + gti->channel = iio_channel_get(&pdev->dev, "sensor-channel"); + if (IS_ERR(gti->channel)) { + ret = PTR_ERR(gti->channel); + dev_err(&pdev->dev, "IIO channel not found: %d\n", ret); + return ret; + } + + gti->tz_dev = thermal_zone_of_sensor_register(&pdev->dev, 0, + gti, &gadc_thermal_ops); + if (IS_ERR(gti->tz_dev)) { + ret = PTR_ERR(gti->tz_dev); + dev_err(&pdev->dev, "Thermal zone sensor register failed: %d\n", + ret); + goto sensor_fail; + } + + return 0; + +sensor_fail: + iio_channel_release(gti->channel); + + return ret; +} + +static int gadc_thermal_remove(struct platform_device *pdev) +{ + struct gadc_thermal_info *gti = platform_get_drvdata(pdev); + + thermal_zone_of_sensor_unregister(&pdev->dev, gti->tz_dev); + iio_channel_release(gti->channel); + + return 0; +} + +static const struct of_device_id 
of_adc_thermal_match[] = { + { .compatible = "generic-adc-thermal", }, + {}, +}; +MODULE_DEVICE_TABLE(of, of_adc_thermal_match); + +static struct platform_driver gadc_thermal_driver = { + .driver = { + .name = "generic-adc-thermal", + .of_match_table = of_adc_thermal_match, + }, + .probe = gadc_thermal_probe, + .remove = gadc_thermal_remove, +}; + +module_platform_driver(gadc_thermal_driver); + +MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>"); +MODULE_DESCRIPTION("Generic ADC thermal driver using IIO framework with DT"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c index b213a1222295..15c0a9ac2209 100644 --- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c +++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c @@ -337,7 +337,7 @@ int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id, return -EINVAL; /* in case this is specified by DT */ - data->ti_thermal = thermal_zone_of_sensor_register(bgp->dev, id, + data->ti_thermal = devm_thermal_zone_of_sensor_register(bgp->dev, id, data, &ti_of_thermal_ops); if (IS_ERR(data->ti_thermal)) { /* Create thermal zone */ @@ -368,9 +368,6 @@ int ti_thermal_remove_sensor(struct ti_bandgap *bgp, int id) if (data && data->ti_thermal) { if (data->our_zone) thermal_zone_device_unregister(data->ti_thermal); - else - thermal_zone_of_sensor_unregister(bgp->dev, - data->ti_thermal); } return 0; diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c index 7fc919f7da4d..97f0a2bd93ed 100644 --- a/drivers/thermal/x86_pkg_temp_thermal.c +++ b/drivers/thermal/x86_pkg_temp_thermal.c @@ -555,7 +555,7 @@ static int pkg_temp_thermal_cpu_callback(struct notifier_block *nfb, { unsigned int cpu = (unsigned long) hcpu; - switch (action) { + switch (action & ~CPU_TASKS_FROZEN) { case CPU_ONLINE: case CPU_DOWN_FAILED: get_core_online(cpu); diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig index 82c4d2e45319..95103054c0e4 100644 --- a/drivers/tty/Kconfig +++ b/drivers/tty/Kconfig @@ -120,17 +120,6 @@ config UNIX98_PTYS All modern Linux systems use the Unix98 ptys. Say Y unless you're on an embedded system and want to conserve memory. -config DEVPTS_MULTIPLE_INSTANCES - bool "Support multiple instances of devpts" - depends on UNIX98_PTYS - default n - ---help--- - Enable support for multiple instances of devpts filesystem. - If you want to have isolated PTY namespaces (eg: in containers), - say Y here. Otherwise, say N. If enabled, each mount of devpts - filesystem with the '-o newinstance' option will create an - independent PTY namespace. - config LEGACY_PTYS bool "Legacy (BSD) PTY support" default y diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index dd4b8417e7f4..f856c4544eea 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -668,7 +668,7 @@ static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty) else fsi = tty->link->driver_data; devpts_kill_index(fsi, tty->index); - devpts_put_ref(fsi); + devpts_release(fsi); } static const struct tty_operations ptm_unix98_ops = { @@ -733,10 +733,11 @@ static int ptmx_open(struct inode *inode, struct file *filp) if (retval) return retval; - fsi = devpts_get_ref(inode, filp); - retval = -ENODEV; - if (!fsi) + fsi = devpts_acquire(filp); + if (IS_ERR(fsi)) { + retval = PTR_ERR(fsi); goto out_free_file; + } /* find a device that is not in use. 
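gadc_thermal_adc_to_temp() above scans the flattened {temperature, ADC-value} pairs, which must be ordered by falling ADC value; readings past either end clamp to the first or last temperature, and anything in between is linearly interpolated. The hard-coded 1000 in the interpolation implies the table is expected to carry millicelsius temperatures spaced exactly 1 C apart. A worked example with invented numbers:

/* invented table: {temp_mC, adc} pairs, ADC falling down the table */
static const s32 example_table[] = {
	100000, 50,	/* 100 C at ADC 50 */
	101000, 40,	/* 101 C at ADC 40 */
	102000, 30,	/* 102 C at ADC 30 */
};

/*
 * val = 45: the scan stops at i = 1 (45 >= 40), so
 *   adc_hi = 50, adc_lo = 40, temp = 101000
 *   temp -= ((45 - 40) * 1000) / (50 - 40) = 500  =>  100500 mC,
 * i.e. halfway between the 100 C and 101 C entries.
 * val = 60 clamps to 100000 (i == 0); val = 20 clamps to 102000.
 */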
 */
 	mutex_lock(&devpts_mutex);
@@ -745,7 +746,7 @@ static int ptmx_open(struct inode *inode, struct file *filp)
 	retval = index;
 	if (index < 0)
-		goto out_put_ref;
+		goto out_put_fsi;
 
 	mutex_lock(&tty_mutex);
@@ -789,8 +790,8 @@ err_release:
 	return retval;
 out:
 	devpts_kill_index(fsi, index);
-out_put_ref:
-	devpts_put_ref(fsi);
+out_put_fsi:
+	devpts_release(fsi);
 out_free_file:
 	tty_free_file(filp);
 	return retval;
 }
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index a2aa655f56c4..1b7331e40d79 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2360,7 +2360,7 @@ static int pl011_probe_dt_alias(int index, struct device *dev)
 		return ret;
 
 	ret = of_alias_get_id(np, "serial");
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		seen_dev_without_alias = true;
 		ret = index;
 	} else {
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index 18971063f95f..699447aa8b43 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -654,7 +654,7 @@ static int sprd_probe_dt_alias(int index, struct device *dev)
 		return ret;
 
 	ret = of_alias_get_id(np, "serial");
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		ret = index;
 	else if (ret >= ARRAY_SIZE(sprd_port) || sprd_port[ret] != NULL) {
 		dev_warn(dev, "requested serial port %d not available.\n", ret);
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 2ace0295408e..35fe3c80cfc0 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -1290,15 +1290,6 @@ static void usbg_release_cmd(struct se_cmd *se_cmd)
 	percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
 }
 
-static int usbg_shutdown_session(struct se_session *se_sess)
-{
-	return 0;
-}
-
-static void usbg_close_session(struct se_session *se_sess)
-{
-}
-
 static u32 usbg_sess_get_index(struct se_session *se_sess)
 {
 	return 0;
@@ -1735,8 +1726,6 @@ static const struct target_core_fabric_ops usbg_ops = {
 	.tpg_check_prod_mode_write_protect = usbg_check_false,
 	.tpg_get_inst_index = usbg_tpg_get_inst_index,
 	.release_cmd = usbg_release_cmd,
-	.shutdown_session = usbg_shutdown_session,
-	.close_session = usbg_close_session,
 	.sess_get_index = usbg_sess_get_index,
 	.sess_get_initiator_sid = NULL,
 	.write_pending = usbg_send_write_request,
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 712a84978e97..188b1ff03f5f 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -113,6 +113,35 @@ static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
 static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
 static void vfio_pci_disable(struct vfio_pci_device *vdev);
 
+/*
+ * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
+ * _and_ the ability to detect when the device is asserting INTx via PCI_STATUS.
+ * If a device implements the former but not the latter, we would typically
+ * expect broken_intx_masking to be set and require an exclusive interrupt.
+ * However, since we do have control of the device's ability to assert INTx,
+ * we can instead pretend that the device does not implement INTx, virtualizing
+ * the pin register to report zero and maintaining DisINTx set on the host.
+ */
+static bool vfio_pci_nointx(struct pci_dev *pdev)
+{
+	switch (pdev->vendor) {
+	case PCI_VENDOR_ID_INTEL:
+		switch (pdev->device) {
+		/* All i40e (XL710/X710) 10/20/40GbE NICs */
+		case 0x1572:
+		case 0x1574:
+		case 0x1580 ... 0x1581:
+		case 0x1583 ... 0x1589:
+		case 0x37d0 ... 
0x37d2: + return true; + default: + return false; + } + } + + return false; +} + static int vfio_pci_enable(struct vfio_pci_device *vdev) { struct pci_dev *pdev = vdev->pdev; @@ -136,23 +165,29 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev) pr_debug("%s: Couldn't store %s saved state\n", __func__, dev_name(&pdev->dev)); - ret = vfio_config_init(vdev); - if (ret) { - kfree(vdev->pci_saved_state); - vdev->pci_saved_state = NULL; - pci_disable_device(pdev); - return ret; + if (likely(!nointxmask)) { + if (vfio_pci_nointx(pdev)) { + dev_info(&pdev->dev, "Masking broken INTx support\n"); + vdev->nointx = true; + pci_intx(pdev, 0); + } else + vdev->pci_2_3 = pci_intx_mask_supported(pdev); } - if (likely(!nointxmask)) - vdev->pci_2_3 = pci_intx_mask_supported(pdev); - pci_read_config_word(pdev, PCI_COMMAND, &cmd); if (vdev->pci_2_3 && (cmd & PCI_COMMAND_INTX_DISABLE)) { cmd &= ~PCI_COMMAND_INTX_DISABLE; pci_write_config_word(pdev, PCI_COMMAND, cmd); } + ret = vfio_config_init(vdev); + if (ret) { + kfree(vdev->pci_saved_state); + vdev->pci_saved_state = NULL; + pci_disable_device(pdev); + return ret; + } + msix_pos = pdev->msix_cap; if (msix_pos) { u16 flags; @@ -304,7 +339,7 @@ static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type) if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) { u8 pin; pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin); - if (IS_ENABLED(CONFIG_VFIO_PCI_INTX) && pin) + if (IS_ENABLED(CONFIG_VFIO_PCI_INTX) && !vdev->nointx && pin) return 1; } else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) { diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index 142c533efec7..688691d9058d 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -408,6 +408,7 @@ static void vfio_bar_restore(struct vfio_pci_device *vdev) { struct pci_dev *pdev = vdev->pdev; u32 *rbar = vdev->rbar; + u16 cmd; int i; if (pdev->is_virtfn) @@ -420,6 +421,12 @@ static void vfio_bar_restore(struct vfio_pci_device *vdev) pci_user_write_config_dword(pdev, i, *rbar); pci_user_write_config_dword(pdev, PCI_ROM_ADDRESS, *rbar); + + if (vdev->nointx) { + pci_user_read_config_word(pdev, PCI_COMMAND, &cmd); + cmd |= PCI_COMMAND_INTX_DISABLE; + pci_user_write_config_word(pdev, PCI_COMMAND, cmd); + } } static __le32 vfio_generate_bar_flags(struct pci_dev *pdev, int bar) @@ -515,6 +522,23 @@ static int vfio_basic_config_read(struct vfio_pci_device *vdev, int pos, return count; } +/* Test whether BARs match the value we think they should contain */ +static bool vfio_need_bar_restore(struct vfio_pci_device *vdev) +{ + int i = 0, pos = PCI_BASE_ADDRESS_0, ret; + u32 bar; + + for (; pos <= PCI_BASE_ADDRESS_5; i++, pos += 4) { + if (vdev->rbar[i]) { + ret = pci_user_read_config_dword(vdev->pdev, pos, &bar); + if (ret || vdev->rbar[i] != bar) + return true; + } + } + + return false; +} + static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos, int count, struct perm_bits *perm, int offset, __le32 val) @@ -553,7 +577,8 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos, * SR-IOV devices will trigger this, but we catch them later */ if ((new_mem && virt_mem && !phys_mem) || - (new_io && virt_io && !phys_io)) + (new_io && virt_io && !phys_io) || + vfio_need_bar_restore(vdev)) vfio_bar_restore(vdev); } @@ -724,7 +749,8 @@ static int vfio_vpd_config_write(struct vfio_pci_device *vdev, int pos, if (pci_write_vpd(pdev, addr & ~PCI_VPD_ADDR_F, 4, &data) != 4) return count; } else { - if 
(pci_read_vpd(pdev, addr, 4, &data) != 4) + data = 0; + if (pci_read_vpd(pdev, addr, 4, &data) < 0) return count; *pdata = cpu_to_le32(data); } @@ -1124,9 +1150,12 @@ static int vfio_cap_len(struct vfio_pci_device *vdev, u8 cap, u8 pos) return pcibios_err_to_errno(ret); if (PCI_X_CMD_VERSION(word)) { - /* Test for extended capabilities */ - pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &dword); - vdev->extended_caps = (dword != 0); + if (pdev->cfg_size > PCI_CFG_SPACE_SIZE) { + /* Test for extended capabilities */ + pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, + &dword); + vdev->extended_caps = (dword != 0); + } return PCI_CAP_PCIX_SIZEOF_V2; } else return PCI_CAP_PCIX_SIZEOF_V0; @@ -1138,9 +1167,11 @@ static int vfio_cap_len(struct vfio_pci_device *vdev, u8 cap, u8 pos) return byte; case PCI_CAP_ID_EXP: - /* Test for extended capabilities */ - pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &dword); - vdev->extended_caps = (dword != 0); + if (pdev->cfg_size > PCI_CFG_SPACE_SIZE) { + /* Test for extended capabilities */ + pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &dword); + vdev->extended_caps = (dword != 0); + } /* length based on version */ if ((pcie_caps_reg(pdev) & PCI_EXP_FLAGS_VERS) == 1) @@ -1545,7 +1576,7 @@ int vfio_config_init(struct vfio_pci_device *vdev) *(__le16 *)&vconfig[PCI_DEVICE_ID] = cpu_to_le16(pdev->device); } - if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX)) + if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx) vconfig[PCI_INTERRUPT_PIN] = 0; ret = vfio_cap_init(vdev); diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index e9ea3fef144a..15ecfc9c5f6c 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c @@ -228,9 +228,9 @@ static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd) static void vfio_intx_disable(struct vfio_pci_device *vdev) { - vfio_intx_set_signal(vdev, -1); vfio_virqfd_disable(&vdev->ctx[0].unmask); vfio_virqfd_disable(&vdev->ctx[0].mask); + vfio_intx_set_signal(vdev, -1); vdev->irq_type = VFIO_PCI_NUM_IRQS; vdev->num_ctx = 0; kfree(vdev->ctx); @@ -401,13 +401,13 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix) struct pci_dev *pdev = vdev->pdev; int i; - vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix); - for (i = 0; i < vdev->num_ctx; i++) { vfio_virqfd_disable(&vdev->ctx[i].unmask); vfio_virqfd_disable(&vdev->ctx[i].mask); } + vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix); + if (msix) { pci_disable_msix(vdev->pdev); kfree(vdev->msix); diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h index 8a7d546d18a0..016c14a1b454 100644 --- a/drivers/vfio/pci/vfio_pci_private.h +++ b/drivers/vfio/pci/vfio_pci_private.h @@ -83,6 +83,7 @@ struct vfio_pci_device { bool bardirty; bool has_vga; bool needs_reset; + bool nointx; struct pci_saved_state *pci_saved_state; int refcnt; struct eventfd_ctx *err_trigger; diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index 3054e3fa63ac..80378ddadc5c 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c @@ -331,14 +331,12 @@ static void tce_iommu_free_table(struct iommu_table *tbl); static void tce_iommu_release(void *iommu_data) { struct tce_container *container = iommu_data; - struct iommu_table_group *table_group; struct tce_iommu_group *tcegrp; long i; while (tce_groups_attached(container)) { tcegrp = list_first_entry(&container->group_list, struct tce_iommu_group, next); - table_group = 
iommu_group_get_iommudata(tcegrp->grp); tce_iommu_detach_group(iommu_data, tcegrp->grp); } diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 15a65823aad9..2ba19424e4a1 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -515,7 +515,7 @@ static int map_try_harder(struct vfio_domain *domain, dma_addr_t iova, unsigned long pfn, long npage, int prot) { long i; - int ret; + int ret = 0; for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) { ret = iommu_map(domain->domain, iova, diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 0e6fd556c982..9d6320e8ff3e 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -333,16 +333,6 @@ static void vhost_scsi_release_cmd(struct se_cmd *se_cmd) percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); } -static int vhost_scsi_shutdown_session(struct se_session *se_sess) -{ - return 0; -} - -static void vhost_scsi_close_session(struct se_session *se_sess) -{ - return; -} - static u32 vhost_scsi_sess_get_index(struct se_session *se_sess) { return 0; @@ -2114,8 +2104,6 @@ static struct target_core_fabric_ops vhost_scsi_ops = { .tpg_get_inst_index = vhost_scsi_tpg_get_inst_index, .release_cmd = vhost_scsi_release_cmd, .check_stop_free = vhost_scsi_check_stop_free, - .shutdown_session = vhost_scsi_shutdown_session, - .close_session = vhost_scsi_close_session, .sess_get_index = vhost_scsi_sess_get_index, .sess_get_initiator_sid = NULL, .write_pending = vhost_scsi_write_pending, diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c index 35fe4825a454..60d6c2ac87aa 100644 --- a/drivers/video/backlight/lm3630a_bl.c +++ b/drivers/video/backlight/lm3630a_bl.c @@ -162,7 +162,7 @@ static int lm3630a_intr_config(struct lm3630a_chip *pchip) static void lm3630a_pwm_ctrl(struct lm3630a_chip *pchip, int br, int br_max) { - unsigned int period = pwm_get_period(pchip->pwmd); + unsigned int period = pchip->pdata->pwm_period; unsigned int duty = br * period / br_max; pwm_config(pchip->pwmd, duty, period); @@ -424,8 +424,13 @@ static int lm3630a_probe(struct i2c_client *client, dev_err(&client->dev, "fail : get pwm device\n"); return PTR_ERR(pchip->pwmd); } + + /* + * FIXME: pwm_apply_args() should be removed when switching to + * the atomic PWM API. + */ + pwm_apply_args(pchip->pwmd); } - pchip->pwmd->period = pdata->pwm_period; /* interrupt enable : irq 0 is not allowed */ pchip->irq = client->irq; diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c index daca9e6a2bb3..e5b14f52628f 100644 --- a/drivers/video/backlight/lp855x_bl.c +++ b/drivers/video/backlight/lp855x_bl.c @@ -246,6 +246,12 @@ static void lp855x_pwm_ctrl(struct lp855x *lp, int br, int max_br) return; lp->pwm = pwm; + + /* + * FIXME: pwm_apply_args() should be removed when switching to + * the atomic PWM API. + */ + pwm_apply_args(pwm); } pwm_config(lp->pwm, duty, period); diff --git a/drivers/video/backlight/lp8788_bl.c b/drivers/video/backlight/lp8788_bl.c index 5d583d7a517b..cf869ec90cce 100644 --- a/drivers/video/backlight/lp8788_bl.c +++ b/drivers/video/backlight/lp8788_bl.c @@ -145,6 +145,12 @@ static void lp8788_pwm_ctrl(struct lp8788_bl *bl, int br, int max_br) } bl->pwm = pwm; + + /* + * FIXME: pwm_apply_args() should be removed when switching to + * the atomic PWM API. 
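The string of pwm_apply_args() insertions above (lm3630a, lp855x, lp8788, and pwm_bl/ssd1307fb below) all cover the same transition: with the PWM args/state split, a freshly requested PWM no longer has its DT or board-file period applied implicitly, so legacy (pre-atomic) consumers must push the reference args into the device state themselves and then read the period back via pwm_get_args(). A sketch of the consumer-side idiom under those assumptions, using a hypothetical backlight driver:

static int foo_bl_start_pwm(struct device *dev, int brightness, int max)
{
	struct pwm_device *pwm;
	struct pwm_args pargs;
	unsigned int duty;

	pwm = devm_pwm_get(dev, NULL);
	if (IS_ERR(pwm))
		return PTR_ERR(pwm);

	/*
	 * FIXME-era shim: seed the PWM state from the reference args.
	 * Goes away once the driver moves to the atomic PWM API.
	 */
	pwm_apply_args(pwm);

	pwm_get_args(pwm, &pargs);
	duty = brightness * pargs.period / max;	/* scale into the period */

	pwm_config(pwm, duty, pargs.period);
	return pwm_enable(pwm);
}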
+ */ + pwm_apply_args(pwm); } pwm_config(bl->pwm, duty, period); diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c index 64f9e1b8655f..b2b366bb0f97 100644 --- a/drivers/video/backlight/pwm_bl.c +++ b/drivers/video/backlight/pwm_bl.c @@ -201,6 +201,7 @@ static int pwm_backlight_probe(struct platform_device *pdev) struct device_node *node = pdev->dev.of_node; struct pwm_bl_data *pb; int initial_blank = FB_BLANK_UNBLANK; + struct pwm_args pargs; int ret; if (!data) { @@ -307,16 +308,21 @@ static int pwm_backlight_probe(struct platform_device *pdev) dev_dbg(&pdev->dev, "got pwm for backlight\n"); /* + * FIXME: pwm_apply_args() should be removed when switching to + * the atomic PWM API. + */ + pwm_apply_args(pb->pwm); + + /* * The DT case will set the pwm_period_ns field to 0 and store the * period, parsed from the DT, in the PWM device. For the non-DT case, * set the period from platform data if it has not already been set * via the PWM lookup table. */ - pb->period = pwm_get_period(pb->pwm); - if (!pb->period && (data->pwm_period_ns > 0)) { + pwm_get_args(pb->pwm, &pargs); + pb->period = pargs.period; + if (!pb->period && (data->pwm_period_ns > 0)) pb->period = data->pwm_period_ns; - pwm_set_period(pb->pwm, data->pwm_period_ns); - } pb->lth_brightness = data->lth_brightness * (pb->period / pb->scale); diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c index d8d583d32a37..c229b1a0d13b 100644 --- a/drivers/video/fbdev/da8xx-fb.c +++ b/drivers/video/fbdev/da8xx-fb.c @@ -713,7 +713,7 @@ static int da8xx_fb_config_clk_divider(struct da8xx_fb_par *par, if (par->lcdc_clk_rate != lcdc_clk_rate) { ret = clk_set_rate(par->lcdc_clk, lcdc_clk_rate); - if (IS_ERR_VALUE(ret)) { + if (ret) { dev_err(par->dev, "unable to set clock rate at %u\n", lcdc_clk_rate); @@ -784,7 +784,7 @@ static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg, int ret = 0; ret = da8xx_fb_calc_config_clk_divider(par, panel); - if (IS_ERR_VALUE(ret)) { + if (ret) { dev_err(par->dev, "unable to configure clock\n"); return ret; } diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c index 8ea531d2652c..bbfe7e2d4332 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c @@ -51,8 +51,8 @@ static void hdmi_core_ddc_init(struct hdmi_core_data *core) { void __iomem *base = core->base; const unsigned long long iclk = 266000000; /* DSS L3 ICLK */ - const unsigned ss_scl_high = 4000; /* ns */ - const unsigned ss_scl_low = 4700; /* ns */ + const unsigned ss_scl_high = 4600; /* ns */ + const unsigned ss_scl_low = 5400; /* ns */ const unsigned fs_scl_high = 600; /* ns */ const unsigned fs_scl_low = 1300; /* ns */ const unsigned sda_hold = 1000; /* ns */ @@ -442,7 +442,7 @@ static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core, c = (ptr[1] >> 6) & 0x3; m = (ptr[1] >> 4) & 0x3; - r = (ptr[1] >> 0) & 0x3; + r = (ptr[1] >> 0) & 0xf; itc = (ptr[2] >> 7) & 0x1; ec = (ptr[2] >> 4) & 0x7; diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c index 21dafe53ca49..a9c45c89b15e 100644 --- a/drivers/video/fbdev/ssd1307fb.c +++ b/drivers/video/fbdev/ssd1307fb.c @@ -286,6 +286,7 @@ static int ssd1307fb_init(struct ssd1307fb_par *par) { int ret; u32 precharge, dclk, com_invdir, compins; + struct pwm_args pargs; if (par->device_info->need_pwm) { par->pwm = pwm_get(&par->client->dev, NULL); @@ -294,7 +295,15 @@ static int 
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
index d8d583d32a37..c229b1a0d13b 100644
--- a/drivers/video/fbdev/da8xx-fb.c
+++ b/drivers/video/fbdev/da8xx-fb.c
@@ -713,7 +713,7 @@ static int da8xx_fb_config_clk_divider(struct da8xx_fb_par *par,
 
 	if (par->lcdc_clk_rate != lcdc_clk_rate) {
 		ret = clk_set_rate(par->lcdc_clk, lcdc_clk_rate);
-		if (IS_ERR_VALUE(ret)) {
+		if (ret) {
 			dev_err(par->dev,
 				"unable to set clock rate at %u\n",
 				lcdc_clk_rate);
@@ -784,7 +784,7 @@ static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg,
 	int ret = 0;
 
 	ret = da8xx_fb_calc_config_clk_divider(par, panel);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret) {
 		dev_err(par->dev, "unable to configure clock\n");
 		return ret;
 	}
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
index 8ea531d2652c..bbfe7e2d4332 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
@@ -51,8 +51,8 @@ static void hdmi_core_ddc_init(struct hdmi_core_data *core)
 {
 	void __iomem *base = core->base;
 	const unsigned long long iclk = 266000000;	/* DSS L3 ICLK */
-	const unsigned ss_scl_high = 4000;		/* ns */
-	const unsigned ss_scl_low = 4700;		/* ns */
+	const unsigned ss_scl_high = 4600;		/* ns */
+	const unsigned ss_scl_low = 5400;		/* ns */
 	const unsigned fs_scl_high = 600;		/* ns */
 	const unsigned fs_scl_low = 1300;		/* ns */
 	const unsigned sda_hold = 1000;			/* ns */
@@ -442,7 +442,7 @@ static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core,
 
 	c = (ptr[1] >> 6) & 0x3;
 	m = (ptr[1] >> 4) & 0x3;
-	r = (ptr[1] >> 0) & 0x3;
+	r = (ptr[1] >> 0) & 0xf;
 
 	itc = (ptr[2] >> 7) & 0x1;
 	ec = (ptr[2] >> 4) & 0x7;
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index 21dafe53ca49..a9c45c89b15e 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -286,6 +286,7 @@ static int ssd1307fb_init(struct ssd1307fb_par *par)
 {
 	int ret;
 	u32 precharge, dclk, com_invdir, compins;
+	struct pwm_args pargs;
 
 	if (par->device_info->need_pwm) {
 		par->pwm = pwm_get(&par->client->dev, NULL);
@@ -294,7 +295,15 @@ static int ssd1307fb_init(struct ssd1307fb_par *par)
 			return PTR_ERR(par->pwm);
 		}
 
-		par->pwm_period = pwm_get_period(par->pwm);
+		/*
+		 * FIXME: pwm_apply_args() should be removed when switching to
+		 * the atomic PWM API.
+		 */
+		pwm_apply_args(par->pwm);
+
+		pwm_get_args(par->pwm, &pargs);
+
+		par->pwm_period = pargs.period;
 		/* Enable the PWM */
 		pwm_config(par->pwm, par->pwm_period / 2, par->pwm_period);
 		pwm_enable(par->pwm);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 7b6d74f0c72f..476c0e3a7150 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -75,7 +75,7 @@ struct virtio_balloon {
 
 	/* The array of pfns we tell the Host about. */
 	unsigned int num_pfns;
-	u32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];
+	__virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];
 
 	/* Memory statistics */
 	struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];
@@ -127,14 +127,16 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
 
 }
 
-static void set_page_pfns(u32 pfns[], struct page *page)
+static void set_page_pfns(struct virtio_balloon *vb,
+			  __virtio32 pfns[], struct page *page)
 {
 	unsigned int i;
 
 	/* Set balloon pfns pointing at this page.
 	 * Note that the first pfn points at start of the page. */
 	for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++)
-		pfns[i] = page_to_balloon_pfn(page) + i;
+		pfns[i] = cpu_to_virtio32(vb->vdev,
+					  page_to_balloon_pfn(page) + i);
 }
 
 static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
@@ -158,7 +160,7 @@ static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
 			msleep(200);
 			break;
 		}
-		set_page_pfns(vb->pfns + vb->num_pfns, page);
+		set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
 		vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
 		if (!virtio_has_feature(vb->vdev,
 					VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
@@ -177,10 +179,12 @@ static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
 static void release_pages_balloon(struct virtio_balloon *vb)
 {
 	unsigned int i;
+	struct page *page;
 
 	/* Find pfns pointing at start of each page, get pages and free them. */
 	for (i = 0; i < vb->num_pfns; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
-		struct page *page = balloon_pfn_to_page(vb->pfns[i]);
+		page = balloon_pfn_to_page(virtio32_to_cpu(vb->vdev,
+							   vb->pfns[i]));
 		if (!virtio_has_feature(vb->vdev,
 					VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
 			adjust_managed_page_count(page, 1);
@@ -203,7 +207,7 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
 		page = balloon_page_dequeue(vb_dev_info);
 		if (!page)
 			break;
-		set_page_pfns(vb->pfns + vb->num_pfns, page);
+		set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
 		vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE;
 	}
 
@@ -471,13 +475,13 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
 	__count_vm_event(BALLOON_MIGRATE);
 	spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
 	vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
-	set_page_pfns(vb->pfns, newpage);
+	set_page_pfns(vb, vb->pfns, newpage);
 	tell_host(vb, vb->inflate_vq);
 
 	/* balloon's page migration 2nd step -- deflate "page" */
 	balloon_page_delete(page);
 	vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
-	set_page_pfns(vb->pfns, page);
+	set_page_pfns(vb, vb->pfns, page);
 	tell_host(vb, vb->deflate_vq);
 
 	mutex_unlock(&vb->balloon_lock);
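The virtio_balloon change is an endianness fix: pfns travel over the virtqueue in the device's byte order, which is guest-native for legacy devices and little-endian for VIRTIO 1.0 devices. Storing them as __virtio32 and converting through the helpers also lets sparse flag missed conversions. A hedged round-trip sketch; the demo function name is mine, not from the patch:

#include <linux/bug.h>
#include <linux/virtio_config.h>

/* Sketch: a pfn converted to wire format and back is unchanged,
 * whichever byte order the device negotiated. */
static void pfn_roundtrip_demo(struct virtio_device *vdev, u32 pfn)
{
	__virtio32 wire = cpu_to_virtio32(vdev, pfn);	/* CPU -> device order */
	u32 back = virtio32_to_cpu(vdev, wire);		/* device -> CPU order */

	WARN_ON(back != pfn);
}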
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 5b45e277697b..b54f26c55dfd 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -661,6 +661,14 @@ config ATLAS7_WATCHDOG
 	  To compile this driver as a module, choose M here: the
 	  module will be called atlas7_wdt.
 
+config RENESAS_WDT
+	tristate "Renesas WDT Watchdog"
+	depends on ARCH_RENESAS || COMPILE_TEST
+	select WATCHDOG_CORE
+	help
+	  This driver adds watchdog support for the integrated watchdogs in the
+	  Renesas R-Car and other SH-Mobile SoCs (usually named RWDT or SWDT).
+
 # AVR32 Architecture
 
 config AT32AP700X_WDT
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 9bde095ff691..a46e7c1380ac 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -73,6 +73,7 @@ obj-$(CONFIG_DIGICOLOR_WATCHDOG) += digicolor_wdt.o
 obj-$(CONFIG_LPC18XX_WATCHDOG) += lpc18xx_wdt.o
 obj-$(CONFIG_BCM7038_WDT) += bcm7038_wdt.o
 obj-$(CONFIG_ATLAS7_WATCHDOG) += atlas7_wdt.o
+obj-$(CONFIG_RENESAS_WDT) += renesas_wdt.o
 
 # AVR32 Architecture
 obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index 02007689e9ca..71ee07950e63 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -611,9 +611,7 @@ static int cpwd_probe(struct platform_device *op)
 	}
 
 	if (p->broken) {
-		init_timer(&cpwd_timer);
-		cpwd_timer.function = cpwd_brokentimer;
-		cpwd_timer.data = (unsigned long) p;
+		setup_timer(&cpwd_timer, cpwd_brokentimer, (unsigned long)p);
 		cpwd_timer.expires = WD_BTIMEOUT;
 
 		pr_info("PLD defect workaround enabled for model %s\n",
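The cpwd hunk (and the matching shwdt one further down) is a mechanical conversion: setup_timer() performs the same three assignments as the removed open-coded sequence. A sketch of the equivalence, with hypothetical names:

#include <linux/timer.h>

static void demo_timeout(unsigned long data);	/* hypothetical callback */
static struct timer_list demo_timer;

static void demo_setup(void *ctx)
{
	/* Open-coded form being removed ... */
	init_timer(&demo_timer);
	demo_timer.function = demo_timeout;
	demo_timer.data = (unsigned long)ctx;

	/* ... and its one-line equivalent. */
	setup_timer(&demo_timer, demo_timeout, (unsigned long)ctx);
}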
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index 016bd9355190..d4ba262da7ba 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -38,7 +38,7 @@
 
 #define SIO_F71808FG_LD_WDT	0x07	/* Watchdog timer logical device */
 #define SIO_UNLOCK_KEY		0x87	/* Key to enable Super-I/O */
-#define SIO_LOCK_KEY		0xAA	/* Key to diasble Super-I/O */
+#define SIO_LOCK_KEY		0xAA	/* Key to disable Super-I/O */
 
 #define SIO_REG_LDSEL		0x07	/* Logical device select */
 #define SIO_REG_DEVID		0x20	/* Device ID (2 bytes) */
@@ -59,6 +59,7 @@
 #define SIO_F71869A_ID		0x1007	/* Chipset ID */
 #define SIO_F71882_ID		0x0541	/* Chipset ID */
 #define SIO_F71889_ID		0x0723	/* Chipset ID */
+#define SIO_F81865_ID		0x0704	/* Chipset ID */
 
 #define F71808FG_REG_WDO_CONF		0xf0
 #define F71808FG_REG_WDT_CONF		0xf5
@@ -66,11 +67,14 @@
 
 #define F71808FG_FLAG_WDOUT_EN		7
 
-#define F71808FG_FLAG_WDTMOUT_STS	5
+#define F71808FG_FLAG_WDTMOUT_STS	6
 #define F71808FG_FLAG_WD_EN		5
 #define F71808FG_FLAG_WD_PULSE		4
 #define F71808FG_FLAG_WD_UNIT		3
 
+#define F81865_REG_WDO_CONF		0xfa
+#define F81865_FLAG_WDOUT_EN		0
+
 /* Default values */
 #define WATCHDOG_TIMEOUT	60	/* 1 minute default timeout */
 #define WATCHDOG_MAX_TIMEOUT	(60 * 255)
@@ -112,7 +116,7 @@ module_param(start_withtimeout, uint, 0);
 MODULE_PARM_DESC(start_withtimeout, "Start watchdog timer on module load with"
 	" given initial timeout. Zero (default) disables this feature.");
 
-enum chips { f71808fg, f71858fg, f71862fg, f71869, f71882fg, f71889fg };
+enum chips { f71808fg, f71858fg, f71862fg, f71869, f71882fg, f71889fg, f81865 };
 
 static const char *f71808e_names[] = {
 	"f71808fg",
@@ -121,6 +125,7 @@ static const char *f71808e_names[] = {
 	"f71869",
 	"f71882fg",
 	"f71889fg",
+	"f81865",
 };
 
 /* Super-I/O Function prototypes */
@@ -360,6 +365,11 @@ static int watchdog_start(void)
 			superio_inb(watchdog.sioaddr, SIO_REG_MFUNCT3) & 0xcf);
 		break;
 
+	case f81865:
+		/* Set pin 70 to WDTRST# */
+		superio_clear_bit(watchdog.sioaddr, SIO_REG_MFUNCT3, 5);
+		break;
+
 	default:
 		/*
 		 * 'default' label to shut up the compiler and catch
@@ -371,8 +381,13 @@ static int watchdog_start(void)
 
 	superio_select(watchdog.sioaddr, SIO_F71808FG_LD_WDT);
 	superio_set_bit(watchdog.sioaddr, SIO_REG_ENABLE, 0);
-	superio_set_bit(watchdog.sioaddr, F71808FG_REG_WDO_CONF,
-			F71808FG_FLAG_WDOUT_EN);
+
+	if (watchdog.type == f81865)
+		superio_set_bit(watchdog.sioaddr, F81865_REG_WDO_CONF,
+				F81865_FLAG_WDOUT_EN);
+	else
+		superio_set_bit(watchdog.sioaddr, F71808FG_REG_WDO_CONF,
+				F71808FG_FLAG_WDOUT_EN);
 
 	superio_set_bit(watchdog.sioaddr, F71808FG_REG_WDT_CONF,
 			F71808FG_FLAG_WD_EN);
@@ -655,7 +670,7 @@ static int __init watchdog_init(int sioaddr)
 	superio_select(watchdog.sioaddr, SIO_F71808FG_LD_WDT);
 
 	wdt_conf = superio_inb(sioaddr, F71808FG_REG_WDT_CONF);
-	watchdog.caused_reboot = wdt_conf & F71808FG_FLAG_WDTMOUT_STS;
+	watchdog.caused_reboot = wdt_conf & BIT(F71808FG_FLAG_WDTMOUT_STS);
 
 	superio_exit(sioaddr);
 
@@ -770,6 +785,9 @@ static int __init f71808e_find(int sioaddr)
 		/* Confirmed (by datasheet) not to have a watchdog. */
 		err = -ENODEV;
 		goto exit;
+	case SIO_F81865_ID:
+		watchdog.type = f81865;
+		break;
 	default:
 		pr_info("Unrecognized Fintek device: %04x\n",
 			(unsigned int)devid);
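Note the subtle fix in the f71808e hunks: F71808FG_FLAG_WDTMOUT_STS is a bit position, but the old code and-ed it into the register value as if it were a mask, so caused_reboot tested the wrong bits. With the position corrected to 6, the read must go through BIT(). A generic sketch of this bug class (names are mine):

#include <linux/bitops.h>

#define DEMO_FLAG_STS	6	/* a bit position, not a mask */

static bool demo_caused_reboot(u8 conf)
{
	/* Buggy: "conf & DEMO_FLAG_STS" tests mask 0x06 (bits 1 and 2),
	 * not bit 6 at all. */

	/* Fixed: convert the position into a mask first (0x40). */
	return conf & BIT(DEMO_FLAG_STS);
}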
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 331aed831dac..62f346bb4348 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -37,6 +37,8 @@
 
 #define IMX2_WDT_WCR		0x00		/* Control Register */
 #define IMX2_WDT_WCR_WT		(0xFF << 8)	/* -> Watchdog Timeout Field */
+#define IMX2_WDT_WCR_WDA	(1 << 5)	/* -> External Reset WDOG_B */
+#define IMX2_WDT_WCR_SRS	(1 << 4)	/* -> Software Reset Signal */
 #define IMX2_WDT_WCR_WRE	(1 << 3)	/* -> WDOG Reset Enable */
 #define IMX2_WDT_WCR_WDE	(1 << 2)	/* -> Watchdog Enable */
 #define IMX2_WDT_WCR_WDZST	(1 << 0)	/* -> Watchdog timer Suspend */
@@ -59,6 +61,7 @@ struct imx2_wdt_device {
 	struct clk *clk;
 	struct regmap *regmap;
 	struct watchdog_device wdog;
+	bool ext_reset;
 };
 
 static bool nowayout = WATCHDOG_NOWAYOUT;
@@ -83,6 +86,12 @@ static int imx2_wdt_restart(struct watchdog_device *wdog, unsigned long action,
 	struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
 	unsigned int wcr_enable = IMX2_WDT_WCR_WDE;
 
+	/* Use internal reset or external - not both */
+	if (wdev->ext_reset)
+		wcr_enable |= IMX2_WDT_WCR_SRS; /* do not assert int reset */
+	else
+		wcr_enable |= IMX2_WDT_WCR_WDA; /* do not assert ext-reset */
+
 	/* Assert SRS signal */
 	regmap_write(wdev->regmap, IMX2_WDT_WCR, wcr_enable);
 	/*
@@ -112,8 +121,12 @@ static inline void imx2_wdt_setup(struct watchdog_device *wdog)
 	val |= IMX2_WDT_WCR_WDZST;
 	/* Strip the old watchdog Time-Out value */
 	val &= ~IMX2_WDT_WCR_WT;
-	/* Generate reset if WDOG times out */
-	val &= ~IMX2_WDT_WCR_WRE;
+	/* Generate internal chip-level reset if WDOG times out */
+	if (!wdev->ext_reset)
+		val &= ~IMX2_WDT_WCR_WRE;
+	/* Or if external-reset assert WDOG_B reset only on time-out */
+	else
+		val |= IMX2_WDT_WCR_WRE;
 	/* Keep Watchdog Disabled */
 	val &= ~IMX2_WDT_WCR_WDE;
 	/* Set the watchdog's Time-Out value */
@@ -230,6 +243,8 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
 	regmap_read(wdev->regmap, IMX2_WDT_WRSR, &val);
 	wdog->bootstatus = val & IMX2_WDT_WRSR_TOUT ? WDIOF_CARDRESET : 0;
 
+	wdev->ext_reset = of_property_read_bool(pdev->dev.of_node,
+						"fsl,ext-reset-output");
 	wdog->timeout = clamp_t(unsigned, timeout, 1, IMX2_WDT_MAX_TIME);
 	if (wdog->timeout != timeout)
 		dev_warn(&pdev->dev, "Initial timeout out of range! Clamped from %u to %u\n",
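As the patch's own comments hint, SRS and WDA behave as active-low triggers in WCR: writing 0 asserts the internal software reset or the external WDOG_B line respectively. The restart path therefore sets to 1 the bit for the path it does *not* want to fire. A restatement of that selection as a sketch; the helper name is mine, and the bit semantics are taken from the i.MX reference manual:

/* Sketch built on the IMX2_WDT_WCR_* definitions added above. */
static unsigned int demo_compose_wcr(bool ext_reset)
{
	unsigned int wcr = IMX2_WDT_WCR_WDE;	/* enable the watchdog */

	if (ext_reset)
		wcr |= IMX2_WDT_WCR_SRS;	/* hold internal reset off;
						 * the 0 in WDA asserts WDOG_B */
	else
		wcr |= IMX2_WDT_WCR_WDA;	/* hold WDOG_B off; the 0 in
						 * SRS asserts internal reset */
	return wcr;
}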
diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c
index 6a7d5c365438..c8d51ddb26d5 100644
--- a/drivers/watchdog/jz4740_wdt.c
+++ b/drivers/watchdog/jz4740_wdt.c
@@ -160,10 +160,8 @@ static int jz4740_wdt_probe(struct platform_device *pdev)
 
 	drvdata = devm_kzalloc(&pdev->dev, sizeof(struct jz4740_wdt_drvdata),
 			       GFP_KERNEL);
-	if (!drvdata) {
-		dev_err(&pdev->dev, "Unable to alloacate watchdog device\n");
+	if (!drvdata)
 		return -ENOMEM;
-	}
 
 	if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT)
 		heartbeat = DEFAULT_HEARTBEAT;
diff --git a/drivers/watchdog/octeon-wdt-main.c b/drivers/watchdog/octeon-wdt-main.c
index 14521c8b3d5a..b55981f88a08 100644
--- a/drivers/watchdog/octeon-wdt-main.c
+++ b/drivers/watchdog/octeon-wdt-main.c
@@ -431,7 +431,7 @@ static int octeon_wdt_cpu_callback(struct notifier_block *nfb,
 {
 	unsigned int cpu = (unsigned long)hcpu;
 
-	switch (action) {
+	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_DOWN_PREPARE:
 		octeon_wdt_disable_interrupt(cpu);
 		break;
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
index 20563ccb7be0..a043fa4f60e5 100644
--- a/drivers/watchdog/qcom-wdt.c
+++ b/drivers/watchdog/qcom-wdt.c
@@ -21,6 +21,7 @@
 
 #define WDT_RST		0x38
 #define WDT_EN		0x40
+#define WDT_STS		0x44
 #define WDT_BITE_TIME	0x5C
 
 struct qcom_wdt {
@@ -108,7 +109,8 @@ static const struct watchdog_ops qcom_wdt_ops = {
 static const struct watchdog_info qcom_wdt_info = {
 	.options	= WDIOF_KEEPALIVEPING
 			| WDIOF_MAGICCLOSE
-			| WDIOF_SETTIMEOUT,
+			| WDIOF_SETTIMEOUT
+			| WDIOF_CARDRESET,
 	.identity	= KBUILD_MODNAME,
 };
 
@@ -171,6 +173,9 @@ static int qcom_wdt_probe(struct platform_device *pdev)
 	wdt->wdd.max_timeout = 0x10000000U / wdt->rate;
 	wdt->wdd.parent = &pdev->dev;
 
+	if (readl(wdt->base + WDT_STS) & 1)
+		wdt->wdd.bootstatus = WDIOF_CARDRESET;
+
 	/*
 	 * If 'timeout-sec' unspecified in devicetree, assume a 30 second
 	 * default, unless the max timeout is less than 30 seconds, then use
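With WDIOF_CARDRESET now advertised and bootstatus populated from WDT_STS, userspace can ask whether the last reboot was a watchdog bite through the standard watchdog chardev API. A small consumer sketch, not part of the patch (the device path may differ on a given system):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int flags = 0;
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, WDIOC_GETBOOTSTATUS, &flags) == 0 &&
	    (flags & WDIOF_CARDRESET))
		puts("last reset was caused by the watchdog");

	write(fd, "V", 1);	/* magic close: do not leave the dog armed */
	close(fd);
	return 0;
}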
diff --git a/drivers/watchdog/renesas_wdt.c b/drivers/watchdog/renesas_wdt.c
new file mode 100644
index 000000000000..cf61c92f7ecd
--- /dev/null
+++ b/drivers/watchdog/renesas_wdt.c
@@ -0,0 +1,213 @@
+/*
+ * Watchdog driver for Renesas WDT watchdog
+ *
+ * Copyright (C) 2015-16 Wolfram Sang, Sang Engineering <wsa@sang-engineering.com>
+ * Copyright (C) 2015-16 Renesas Electronics Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/watchdog.h>
+
+#define RWTCNT		0
+#define RWTCSRA		4
+#define RWTCSRA_WOVF	BIT(4)
+#define RWTCSRA_WRFLG	BIT(5)
+#define RWTCSRA_TME	BIT(7)
+
+#define RWDT_DEFAULT_TIMEOUT 60U
+
+static const unsigned int clk_divs[] = { 1, 4, 16, 32, 64, 128, 1024 };
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+				__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+struct rwdt_priv {
+	void __iomem *base;
+	struct watchdog_device wdev;
+	struct clk *clk;
+	unsigned int clks_per_sec;
+	u8 cks;
+};
+
+static void rwdt_write(struct rwdt_priv *priv, u32 val, unsigned int reg)
+{
+	if (reg == RWTCNT)
+		val |= 0x5a5a0000;
+	else
+		val |= 0xa5a5a500;
+
+	writel_relaxed(val, priv->base + reg);
+}
+
+static int rwdt_init_timeout(struct watchdog_device *wdev)
+{
+	struct rwdt_priv *priv = watchdog_get_drvdata(wdev);
+
+	rwdt_write(priv, 65536 - wdev->timeout * priv->clks_per_sec, RWTCNT);
+
+	return 0;
+}
+
+static int rwdt_start(struct watchdog_device *wdev)
+{
+	struct rwdt_priv *priv = watchdog_get_drvdata(wdev);
+
+	clk_prepare_enable(priv->clk);
+
+	rwdt_write(priv, priv->cks, RWTCSRA);
+	rwdt_init_timeout(wdev);
+
+	while (readb_relaxed(priv->base + RWTCSRA) & RWTCSRA_WRFLG)
+		cpu_relax();
+
+	rwdt_write(priv, priv->cks | RWTCSRA_TME, RWTCSRA);
+
+	return 0;
+}
+
+static int rwdt_stop(struct watchdog_device *wdev)
+{
+	struct rwdt_priv *priv = watchdog_get_drvdata(wdev);
+
+	rwdt_write(priv, priv->cks, RWTCSRA);
+	clk_disable_unprepare(priv->clk);
+
+	return 0;
+}
+
+static unsigned int rwdt_get_timeleft(struct watchdog_device *wdev)
+{
+	struct rwdt_priv *priv = watchdog_get_drvdata(wdev);
+	u16 val = readw_relaxed(priv->base + RWTCNT);
+
+	return DIV_ROUND_CLOSEST(65536 - val, priv->clks_per_sec);
+}
+
+static const struct watchdog_info rwdt_ident = {
+	.options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT,
+	.identity = "Renesas WDT Watchdog",
+};
+
+static const struct watchdog_ops rwdt_ops = {
+	.owner = THIS_MODULE,
+	.start = rwdt_start,
+	.stop = rwdt_stop,
+	.ping = rwdt_init_timeout,
+	.get_timeleft = rwdt_get_timeleft,
+};
+
+static int rwdt_probe(struct platform_device *pdev)
+{
+	struct rwdt_priv *priv;
+	struct resource *res;
+	unsigned long rate;
+	unsigned int clks_per_sec;
+	int ret, i;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv->base))
+		return PTR_ERR(priv->base);
+
+	priv->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(priv->clk))
+		return PTR_ERR(priv->clk);
+
+	rate = clk_get_rate(priv->clk);
+	if (!rate)
+		return -ENOENT;
+
+	for (i = ARRAY_SIZE(clk_divs) - 1; i >= 0; i--) {
+		clks_per_sec = DIV_ROUND_UP(rate, clk_divs[i]);
+		if (clks_per_sec) {
+			priv->clks_per_sec = clks_per_sec;
+			priv->cks = i;
+			break;
+		}
+	}
+
+	if (!clks_per_sec) {
+		dev_err(&pdev->dev, "Can't find suitable clock divider\n");
+		return -ERANGE;
+	}
+
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_get_sync(&pdev->dev);
+
+	priv->wdev.info = &rwdt_ident,
+	priv->wdev.ops = &rwdt_ops,
+	priv->wdev.parent = &pdev->dev;
+	priv->wdev.min_timeout = 1;
+	priv->wdev.max_timeout = 65536 / clks_per_sec;
+	priv->wdev.timeout = min(priv->wdev.max_timeout, RWDT_DEFAULT_TIMEOUT);
+
+	platform_set_drvdata(pdev, priv);
+	watchdog_set_drvdata(&priv->wdev, priv);
+	watchdog_set_nowayout(&priv->wdev, nowayout);
+
+	/* This overrides the default timeout only if DT configuration was found */
+	ret = watchdog_init_timeout(&priv->wdev, 0, &pdev->dev);
+	if (ret)
+		dev_warn(&pdev->dev, "Specified timeout value invalid, using default\n");
+
+	ret = watchdog_register_device(&priv->wdev);
+	if (ret < 0) {
+		pm_runtime_put(&pdev->dev);
+		pm_runtime_disable(&pdev->dev);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int rwdt_remove(struct platform_device *pdev)
+{
+	struct rwdt_priv *priv = platform_get_drvdata(pdev);
+
+	watchdog_unregister_device(&priv->wdev);
+	pm_runtime_put(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
+	return 0;
+}
+
+/*
+ * This driver does also fit for R-Car Gen2 (r8a779[0-4]) WDT. However, for SMP
+ * to work there, one also needs a RESET (RST) driver which does not exist yet
+ * due to HW issues. This needs to be solved before adding compatibles here.
+ */
+static const struct of_device_id rwdt_ids[] = {
+	{ .compatible = "renesas,rcar-gen3-wdt", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rwdt_ids);
+
+static struct platform_driver rwdt_driver = {
+	.driver = {
+		.name = "renesas_wdt",
+		.of_match_table = rwdt_ids,
+	},
+	.probe = rwdt_probe,
+	.remove = rwdt_remove,
+};
+module_platform_driver(rwdt_driver);
+
+MODULE_DESCRIPTION("Renesas WDT Watchdog Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Wolfram Sang <wsa@sang-engineering.com>");
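rwdt_probe() walks clk_divs[] from the largest divider down and keeps the first one that still yields a non-zero count rate. Because the counter is 16 bits wide, max_timeout is 65536 / clks_per_sec, so the largest usable divider maximizes the reachable timeout. A standalone restatement of that arithmetic; the 32768 Hz RCLK input rate is an assumption for illustration only:

#include <stdio.h>

int main(void)
{
	const unsigned int clk_divs[] = { 1, 4, 16, 32, 64, 128, 1024 };
	unsigned long rate = 32768;	/* assumed RCLK rate, Hz */
	int i;

	for (i = sizeof(clk_divs) / sizeof(clk_divs[0]) - 1; i >= 0; i--) {
		/* DIV_ROUND_UP(rate, div), spelled out */
		unsigned int clks_per_sec =
			(rate + clk_divs[i] - 1) / clk_divs[i];

		if (clks_per_sec) {
			/* 16-bit up-counter overflows at 65536 */
			printf("cks=%d: %u counts/s, max timeout %u s\n",
			       i, clks_per_sec, 65536 / clks_per_sec);
			break;
		}
	}
	return 0;	/* prints: cks=6: 32 counts/s, max timeout 2048 s */
}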
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index f90812170657..517a733175ef 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -275,9 +275,7 @@ static int sh_wdt_probe(struct platform_device *pdev)
 		return rc;
 	}
 
-	init_timer(&wdt->timer);
-	wdt->timer.function = sh_wdt_ping;
-	wdt->timer.data = (unsigned long)wdt;
+	setup_timer(&wdt->timer, sh_wdt_ping, (unsigned long)wdt);
 	wdt->timer.expires = next_ping_period(clock_division_ratio);
 
 	dev_info(&pdev->dev, "initialized.\n");
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
index 6467b91f2245..028618c5eeba 100644
--- a/drivers/watchdog/sp5100_tco.c
+++ b/drivers/watchdog/sp5100_tco.c
@@ -73,6 +73,13 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started."
 /*
  * Some TCO specific functions
  */
+
+static bool tco_has_sp5100_reg_layout(struct pci_dev *dev)
+{
+	return dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
+	       dev->revision < 0x40;
+}
+
 static void tco_timer_start(void)
 {
 	u32 val;
@@ -129,7 +136,7 @@ static void tco_timer_enable(void)
 {
 	int val;
 
-	if (sp5100_tco_pci->revision >= 0x40) {
+	if (!tco_has_sp5100_reg_layout(sp5100_tco_pci)) {
 		/* For SB800 or later */
 		/* Set the Watchdog timer resolution to 1 sec */
 		outb(SB800_PM_WATCHDOG_CONFIG, SB800_IO_PM_INDEX_REG);
@@ -342,8 +349,7 @@ static unsigned char sp5100_tco_setupdevice(void)
 	/*
 	 * Determine type of southbridge chipset.
 	 */
-	if (sp5100_tco_pci->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
-	    sp5100_tco_pci->revision < 0x40) {
+	if (tco_has_sp5100_reg_layout(sp5100_tco_pci)) {
 		dev_name = SP5100_DEVNAME;
 		index_reg = SP5100_IO_PM_INDEX_REG;
 		data_reg = SP5100_IO_PM_DATA_REG;
@@ -388,8 +394,7 @@ static unsigned char sp5100_tco_setupdevice(void)
 	 * Secondly, Find the watchdog timer MMIO address
 	 * from SBResource_MMIO register.
 	 */
-	if (sp5100_tco_pci->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
-	    sp5100_tco_pci->revision < 0x40) {
+	if (tco_has_sp5100_reg_layout(sp5100_tco_pci)) {
 		/* Read SBResource_MMIO from PCI config(PCI_Reg: 9Ch) */
 		pci_read_config_dword(sp5100_tco_pci,
 				      SP5100_SB_RESOURCE_MMIO_BASE, &val);
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index 981a668b17e3..7c3ba58ae1be 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -104,7 +104,7 @@ static void watchdog_check_min_max_timeout(struct watchdog_device *wdd)
  * timeout module parameter (if it is valid value) or the timeout-sec property
  * (only if it is a valid value and the timeout_parm is out of bounds).
  * If none of them are valid then we keep the old value (which should normally
- * be the default timeout value.
+ * be the default timeout value).
  *
  * A zero is returned on success and -EINVAL for failure.
  */
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index e2c5abbb45ff..3595cffa24ea 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -736,7 +736,6 @@ static int watchdog_release(struct inode *inode, struct file *file)
 		watchdog_ping(wdd);
 	}
 
-	cancel_delayed_work_sync(&wd_data->work);
 	watchdog_update_worker(wdd);
 
 	/* make sure that /dev/watchdog can be re-opened */
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 9b7a35c9e51d..030e91b38e32 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -8,6 +8,7 @@ nostackp := $(call cc-option, -fno-stack-protector)
 CFLAGS_features.o			:= $(nostackp)
 
 CFLAGS_efi.o				+= -fshort-wchar
+LDFLAGS					+= $(call ld-option, --no-wchar-size-warning)
 
 dom0-$(CONFIG_PCI) += pci.o
 dom0-$(CONFIG_USB_SUPPORT) += dbgp.o
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index cb7138c97c69..71d49a95f8c0 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -487,7 +487,8 @@ static void eoi_pirq(struct irq_data *data)
 	if (!VALID_EVTCHN(evtchn))
 		return;
 
-	if (unlikely(irqd_is_setaffinity_pending(data))) {
+	if (unlikely(irqd_is_setaffinity_pending(data)) &&
+	    likely(!irqd_irq_disabled(data))) {
 		int masked = test_and_set_mask(evtchn);
 
 		clear_evtchn(evtchn);
@@ -1370,7 +1371,8 @@ static void ack_dynirq(struct irq_data *data)
 	if (!VALID_EVTCHN(evtchn))
 		return;
 
-	if (unlikely(irqd_is_setaffinity_pending(data))) {
+	if (unlikely(irqd_is_setaffinity_pending(data)) &&
+	    likely(!irqd_irq_disabled(data))) {
 		int masked = test_and_set_mask(evtchn);
 
 		clear_evtchn(evtchn);
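The events_base change adds an irqd_irq_disabled() test so a pending affinity move is not carried out at EOI/ack time for a disabled IRQ, matching the check irq_move_irq() itself performs. For context, the guarded sequence both call sites share looks roughly like this; it is a reconstructed sketch of the surrounding code, not part of the hunk:

/* Sketch of the shared pattern in eoi_pirq()/ack_dynirq() after this
 * change (names as in events_base.c; consult the file for the real code). */
static void demo_ack(struct irq_data *data, int evtchn)
{
	/* Only fold in a pending affinity change while the IRQ is enabled;
	 * for a disabled IRQ, the move is left for a later irq_enable(). */
	if (unlikely(irqd_is_setaffinity_pending(data)) &&
	    likely(!irqd_irq_disabled(data))) {
		int masked = test_and_set_mask(evtchn);

		clear_evtchn(evtchn);
		irq_move_masked_irq(data);	/* perform the deferred move */

		if (!masked)
			unmask_evtchn(evtchn);
	} else {
		clear_evtchn(evtchn);
	}
}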
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index dc495383ad73..67939578cd6d 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -748,7 +748,7 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
 	return rc;
 }
 
-#define GNTDEV_COPY_BATCH 24
+#define GNTDEV_COPY_BATCH 16
 
 struct gntdev_copy_batch {
 	struct gnttab_copy ops[GNTDEV_COPY_BATCH];
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index ff932624eaad..d6950e0802b7 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -1399,15 +1399,6 @@ static void scsiback_release_cmd(struct se_cmd *se_cmd)
 	percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
 }
 
-static int scsiback_shutdown_session(struct se_session *se_sess)
-{
-	return 0;
-}
-
-static void scsiback_close_session(struct se_session *se_sess)
-{
-}
-
 static u32 scsiback_sess_get_index(struct se_session *se_sess)
 {
 	return 0;
@@ -1841,8 +1832,6 @@ static const struct target_core_fabric_ops scsiback_ops = {
 	.tpg_get_inst_index	= scsiback_tpg_get_inst_index,
 	.check_stop_free	= scsiback_check_stop_free,
 	.release_cmd		= scsiback_release_cmd,
-	.shutdown_session	= scsiback_shutdown_session,
-	.close_session		= scsiback_close_session,
 	.sess_get_index		= scsiback_sess_get_index,
 	.sess_get_initiator_sid	= NULL,
 	.write_pending		= scsiback_write_pending,