Diffstat (limited to 'drivers/dma/idxd')

 -rw-r--r--  drivers/dma/idxd/cdev.c      |  28
 -rw-r--r--  drivers/dma/idxd/idxd.h      |  17
 -rw-r--r--  drivers/dma/idxd/init.c      | 680
 -rw-r--r--  drivers/dma/idxd/irq.c       |  85
 -rw-r--r--  drivers/dma/idxd/registers.h |   1
 -rw-r--r--  drivers/dma/idxd/sysfs.c     |  16

6 files changed, 650 insertions(+), 177 deletions(-)
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 57f1bf2ab20b..7e4715f92773 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -28,7 +28,6 @@ struct idxd_cdev_context {
  * global to avoid conflict file names.
  */
 static DEFINE_IDA(file_ida);
-static DEFINE_MUTEX(ida_lock);
 
 /*
  * ictx is an array based off of accelerator types. enum idxd_type
@@ -123,9 +122,7 @@ static void idxd_file_dev_release(struct device *dev)
 	struct idxd_device *idxd = wq->idxd;
 	int rc;
 
-	mutex_lock(&ida_lock);
 	ida_free(&file_ida, ctx->id);
-	mutex_unlock(&ida_lock);
 
 	/* Wait for in-flight operations to complete. */
 	if (wq_shared(wq)) {
@@ -225,7 +222,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
 	struct idxd_wq *wq;
 	struct device *dev, *fdev;
 	int rc = 0;
-	struct iommu_sva *sva;
+	struct iommu_sva *sva = NULL;
 	unsigned int pasid;
 	struct idxd_cdev *idxd_cdev;
 
@@ -284,9 +281,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
 	}
 
 	idxd_cdev = wq->idxd_cdev;
-	mutex_lock(&ida_lock);
 	ctx->id = ida_alloc(&file_ida, GFP_KERNEL);
-	mutex_unlock(&ida_lock);
 	if (ctx->id < 0) {
 		dev_warn(dev, "ida alloc failure\n");
 		goto failed_ida;
@@ -322,7 +317,7 @@ failed_set_pasid:
 	if (device_user_pasid_enabled(idxd))
 		idxd_xa_pasid_remove(ctx);
 failed_get_pasid:
-	if (device_user_pasid_enabled(idxd))
+	if (device_user_pasid_enabled(idxd) && !IS_ERR_OR_NULL(sva))
 		iommu_sva_unbind_device(sva);
 failed:
 	mutex_unlock(&wq->wq_lock);
@@ -354,7 +349,9 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
 		set_bit(h, evl->bmap);
 		h = (h + 1) % size;
 	}
-	drain_workqueue(wq->wq);
+	if (wq->wq)
+		drain_workqueue(wq->wq);
+
 	mutex_unlock(&evl->lock);
 }
 
@@ -412,6 +409,9 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
 	if (!idxd->user_submission_safe && !capable(CAP_SYS_RAWIO))
 		return -EPERM;
 
+	if (current->mm != ctx->mm)
+		return -EPERM;
+
 	rc = check_vma(wq, vma, __func__);
 	if (rc < 0)
 		return rc;
@@ -444,10 +444,12 @@ static int idxd_submit_user_descriptor(struct idxd_user_context *ctx,
 	 * DSA devices are capable of indirect ("batch") command submission.
 	 * On devices where direct user submissions are not safe, we cannot
 	 * allow this since there is no good way for us to verify these
-	 * indirect commands.
+	 * indirect commands. Narrow the restriction of operations with the
+	 * BATCH opcode to only DSA version 1 devices.
 	 */
 	if (is_dsa_dev(idxd_dev) && descriptor.opcode == DSA_OPCODE_BATCH &&
-	    !wq->idxd->user_submission_safe)
+	    wq->idxd->hw.version == DEVICE_VERSION_1 &&
+	    !wq->idxd->user_submission_safe)
 		return -EINVAL;
 
 	/*
 	 * As per the programming specification, the completion address must be
@@ -478,6 +480,9 @@ static ssize_t idxd_cdev_write(struct file *filp, const char __user *buf, size_t
 	ssize_t written = 0;
 	int i;
 
+	if (current->mm != ctx->mm)
+		return -EPERM;
+
 	for (i = 0; i < len/sizeof(struct dsa_hw_desc); i++) {
 		int rc = idxd_submit_user_descriptor(ctx, udesc + i);
 
@@ -498,6 +503,9 @@ static __poll_t idxd_cdev_poll(struct file *filp,
 	struct idxd_device *idxd = wq->idxd;
 	__poll_t out = 0;
 
+	if (current->mm != ctx->mm)
+		return POLLNVAL;
+
 	poll_wait(filp, &wq->err_queue, wait);
 	spin_lock(&idxd->dev_lock);
 	if (idxd->sw_err.valid)
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index d84e21daa991..74e6695881e6 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -19,7 +19,6 @@
 
 #define IDXD_DRIVER_VERSION	"1.00"
 
-extern struct kmem_cache *idxd_desc_pool;
 extern bool tc_override;
 
 struct idxd_wq;
@@ -171,7 +170,6 @@ struct idxd_cdev {
 
 #define DRIVER_NAME_SIZE		128
 
-#define IDXD_ALLOCATED_BATCH_SIZE	128U
 #define WQ_NAME_SIZE   1024
 #define WQ_TYPE_SIZE   10
 
@@ -374,6 +372,17 @@ struct idxd_device {
 	struct dentry *dbgfs_evl_file;
 
 	bool user_submission_safe;
+
+	struct idxd_saved_states *idxd_saved;
+};
+
+struct idxd_saved_states {
+	struct idxd_device saved_idxd;
+	struct idxd_evl saved_evl;
+	struct idxd_engine **saved_engines;
+	struct idxd_wq **saved_wqs;
+	struct idxd_group **saved_groups;
+	unsigned long *saved_wq_enable_map;
 };
 
 static inline unsigned int evl_ent_size(struct idxd_device *idxd)
@@ -725,8 +734,6 @@ static inline void idxd_desc_complete(struct idxd_desc *desc,
 			      &desc->txd, &status);
 }
 
-int idxd_register_bus_type(void);
-void idxd_unregister_bus_type(void);
 int idxd_register_devices(struct idxd_device *idxd);
 void idxd_unregister_devices(struct idxd_device *idxd);
 void idxd_wqs_quiesce(struct idxd_device *idxd);
@@ -742,6 +749,8 @@ void idxd_unmask_error_interrupts(struct idxd_device *idxd);
 
 /* device control */
 int idxd_device_drv_probe(struct idxd_dev *idxd_dev);
+int idxd_pci_probe_alloc(struct idxd_device *idxd, struct pci_dev *pdev,
+			 const struct pci_device_id *id);
 void idxd_device_drv_remove(struct idxd_dev *idxd_dev);
 int idxd_drv_enable_wq(struct idxd_wq *wq);
 void idxd_drv_disable_wq(struct idxd_wq *wq);
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 140f8d772bee..80355d03004d 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -78,6 +78,8 @@ static struct pci_device_id idxd_pci_tbl[] = {
 	{ PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
 	/* IAA on DMR platforms */
 	{ PCI_DEVICE_DATA(INTEL, IAA_DMR, &idxd_driver_data[IDXD_TYPE_IAX]) },
+	/* IAA PTL platforms */
+	{ PCI_DEVICE_DATA(INTEL, IAA_PTL, &idxd_driver_data[IDXD_TYPE_IAX]) },
 	{ 0, }
 };
 MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
@@ -153,6 +155,25 @@ static void idxd_cleanup_interrupts(struct idxd_device *idxd)
 	pci_free_irq_vectors(pdev);
 }
 
+static void idxd_clean_wqs(struct idxd_device *idxd)
+{
+	struct idxd_wq *wq;
+	struct device *conf_dev;
+	int i;
+
+	for (i = 0; i < idxd->max_wqs; i++) {
+		wq = idxd->wqs[i];
+		if (idxd->hw.wq_cap.op_config)
+			bitmap_free(wq->opcap_bmap);
+		kfree(wq->wqcfg);
+		conf_dev = wq_confdev(wq);
+		put_device(conf_dev);
+		kfree(wq);
+	}
+	bitmap_free(idxd->wq_enable_map);
+	kfree(idxd->wqs);
+}
+
 static int idxd_setup_wqs(struct idxd_device *idxd)
 {
 	struct device *dev = &idxd->pdev->dev;
@@ -167,8 +188,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
 
 	idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
 	if (!idxd->wq_enable_map) {
-		kfree(idxd->wqs);
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto err_bitmap;
 	}
 
 	for (i = 0; i < idxd->max_wqs; i++) {
@@ -187,10 +208,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
 		conf_dev->bus = &dsa_bus_type;
 		conf_dev->type = &idxd_wq_device_type;
 		rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
-		if (rc < 0) {
-			put_device(conf_dev);
+		if (rc < 0)
 			goto err;
-		}
 
 		mutex_init(&wq->wq_lock);
 		init_waitqueue_head(&wq->err_queue);
@@ -201,7 +220,6 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
 		wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
 		wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
 		if (!wq->wqcfg) {
-			put_device(conf_dev);
 			rc = -ENOMEM;
 			goto err;
 		}
@@ -209,9 +227,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
 		if (idxd->hw.wq_cap.op_config) {
 			wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
 			if (!wq->opcap_bmap) {
-				put_device(conf_dev);
 				rc = -ENOMEM;
-				goto err;
+				goto err_opcap_bmap;
 			}
 			bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
 		}
@@ -222,15 +239,46 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
 
 	return 0;
 
- err:
+err_opcap_bmap:
+	kfree(wq->wqcfg);
+
+err:
+	put_device(conf_dev);
+	kfree(wq);
+
 	while (--i >= 0) {
 		wq = idxd->wqs[i];
+		if (idxd->hw.wq_cap.op_config)
+			bitmap_free(wq->opcap_bmap);
+		kfree(wq->wqcfg);
 		conf_dev = wq_confdev(wq);
 		put_device(conf_dev);
+		kfree(wq);
 	}
+	bitmap_free(idxd->wq_enable_map);
+
+err_bitmap:
 	kfree(idxd->wqs);
+
 	return rc;
 }
 
+static void idxd_clean_engines(struct idxd_device *idxd)
+{
+	struct idxd_engine *engine;
+	struct device *conf_dev;
+	int i;
+
+	for (i = 0; i < idxd->max_engines; i++) {
+		engine = idxd->engines[i];
+		conf_dev = engine_confdev(engine);
+		put_device(conf_dev);
+		kfree(engine);
+	}
+	kfree(idxd->engines);
+}
+
 static int idxd_setup_engines(struct idxd_device *idxd)
 {
 	struct idxd_engine *engine;
@@ -261,6 +309,7 @@ static int idxd_setup_engines(struct idxd_device *idxd)
 		rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id);
 		if (rc < 0) {
 			put_device(conf_dev);
+			kfree(engine);
 			goto err;
 		}
 
@@ -274,10 +323,26 @@ static int idxd_setup_engines(struct idxd_device *idxd)
 		engine = idxd->engines[i];
 		conf_dev = engine_confdev(engine);
 		put_device(conf_dev);
+		kfree(engine);
 	}
+	kfree(idxd->engines);
+
 	return rc;
 }
 
+static void idxd_clean_groups(struct idxd_device *idxd)
+{
+	struct idxd_group *group;
+	int i;
+
+	for (i = 0; i < idxd->max_groups; i++) {
+		group = idxd->groups[i];
+		put_device(group_confdev(group));
+		kfree(group);
+	}
+	kfree(idxd->groups);
+}
+
 static int idxd_setup_groups(struct idxd_device *idxd)
 {
 	struct device *dev = &idxd->pdev->dev;
@@ -308,6 +373,7 @@ static int idxd_setup_groups(struct idxd_device *idxd)
 		rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id);
 		if (rc < 0) {
 			put_device(conf_dev);
+			kfree(group);
 			goto err;
 		}
 
@@ -332,20 +398,18 @@ static int idxd_setup_groups(struct idxd_device *idxd)
 	while (--i >= 0) {
 		group = idxd->groups[i];
 		put_device(group_confdev(group));
+		kfree(group);
 	}
+	kfree(idxd->groups);
+
 	return rc;
 }
 
 static void idxd_cleanup_internals(struct idxd_device *idxd)
 {
-	int i;
-
-	for (i = 0; i < idxd->max_groups; i++)
-		put_device(group_confdev(idxd->groups[i]));
-	for (i = 0; i < idxd->max_engines; i++)
-		put_device(engine_confdev(idxd->engines[i]));
-	for (i = 0; i < idxd->max_wqs; i++)
-		put_device(wq_confdev(idxd->wqs[i]));
+	idxd_clean_groups(idxd);
+	idxd_clean_engines(idxd);
+	idxd_clean_wqs(idxd);
 	destroy_workqueue(idxd->wq);
 }
 
@@ -388,7 +452,7 @@ static int idxd_init_evl(struct idxd_device *idxd)
 static int idxd_setup_internals(struct idxd_device *idxd)
 {
 	struct device *dev = &idxd->pdev->dev;
-	int rc, i;
+	int rc;
 
 	init_waitqueue_head(&idxd->cmd_waitq);
 
@@ -419,14 +483,11 @@ static int idxd_setup_internals(struct idxd_device *idxd)
 err_evl:
 	destroy_workqueue(idxd->wq);
 err_wkq_create:
-	for (i = 0; i < idxd->max_groups; i++)
-		put_device(group_confdev(idxd->groups[i]));
+	idxd_clean_groups(idxd);
 err_group:
-	for (i = 0; i < idxd->max_engines; i++)
-		put_device(engine_confdev(idxd->engines[i]));
+	idxd_clean_engines(idxd);
 err_engine:
-	for (i = 0; i < idxd->max_wqs; i++)
-		put_device(wq_confdev(idxd->wqs[i]));
+	idxd_clean_wqs(idxd);
 err_wqs:
 	return rc;
 }
@@ -526,6 +587,17 @@ static void idxd_read_caps(struct idxd_device *idxd)
 		idxd->hw.iaa_cap.bits = ioread64(idxd->reg_base + IDXD_IAACAP_OFFSET);
 }
 
+static void idxd_free(struct idxd_device *idxd)
+{
+	if (!idxd)
+		return;
+
+	put_device(idxd_confdev(idxd));
+	bitmap_free(idxd->opcap_bmap);
+	ida_free(&idxd_ida, idxd->id);
+	kfree(idxd);
+}
+
 static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
 {
 	struct device *dev = &pdev->dev;
@@ -543,28 +615,34 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_d
 	idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type);
 	idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
 	if (idxd->id < 0)
-		return NULL;
+		goto err_ida;
 
 	idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev));
-	if (!idxd->opcap_bmap) {
-		ida_free(&idxd_ida, idxd->id);
-		return NULL;
-	}
+	if (!idxd->opcap_bmap)
+		goto err_opcap;
 
 	device_initialize(conf_dev);
 	conf_dev->parent = dev;
 	conf_dev->bus = &dsa_bus_type;
 	conf_dev->type = idxd->data->dev_type;
 	rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
-	if (rc < 0) {
-		put_device(conf_dev);
-		return NULL;
-	}
+	if (rc < 0)
+		goto err_name;
 
 	spin_lock_init(&idxd->dev_lock);
 	spin_lock_init(&idxd->cmd_lock);
 
 	return idxd;
+
+err_name:
+	put_device(conf_dev);
+	bitmap_free(idxd->opcap_bmap);
+err_opcap:
+	ida_free(&idxd_ida, idxd->id);
+err_ida:
+	kfree(idxd);
+
+	return NULL;
 }
 
 static int idxd_enable_system_pasid(struct idxd_device *idxd)
@@ -624,27 +702,6 @@ static void idxd_disable_system_pasid(struct idxd_device *idxd)
 	idxd->pasid = IOMMU_PASID_INVALID;
 }
 
-static int idxd_enable_sva(struct pci_dev *pdev)
-{
-	int ret;
-
-	ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
-	if (ret)
-		return ret;
-
-	ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
-	if (ret)
-		iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
-
-	return ret;
-}
-
-static void idxd_disable_sva(struct pci_dev *pdev)
-{
-	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
-	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
-}
-
 static int idxd_probe(struct idxd_device *idxd)
 {
 	struct pci_dev *pdev = idxd->pdev;
@@ -659,17 +716,13 @@ static int idxd_probe(struct idxd_device *idxd)
 	dev_dbg(dev, "IDXD reset complete\n");
 
 	if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
-		if (idxd_enable_sva(pdev)) {
-			dev_warn(dev, "Unable to turn on user SVA feature.\n");
-		} else {
-			set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
+		set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
 
-			rc = idxd_enable_system_pasid(idxd);
-			if (rc)
-				dev_warn(dev, "No in-kernel DMA with PASID. %d\n", rc);
-			else
-				set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
-		}
+		rc = idxd_enable_system_pasid(idxd);
+		if (rc)
+			dev_warn(dev, "No in-kernel DMA with PASID. %d\n", rc);
+		else
+			set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
 	} else if (!sva) {
 		dev_warn(dev, "User forced SVA off via module param.\n");
 	}
@@ -707,8 +760,6 @@ static int idxd_probe(struct idxd_device *idxd)
 err:
 	if (device_pasid_enabled(idxd))
 		idxd_disable_system_pasid(idxd);
-	if (device_user_pasid_enabled(idxd))
-		idxd_disable_sva(pdev);
 	return rc;
 }
 
@@ -719,71 +770,465 @@ static void idxd_cleanup(struct idxd_device *idxd)
 	idxd_cleanup_internals(idxd);
 	if (device_pasid_enabled(idxd))
 		idxd_disable_system_pasid(idxd);
-	if (device_user_pasid_enabled(idxd))
-		idxd_disable_sva(idxd->pdev);
 }
 
-static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+/*
+ * Attach IDXD device to IDXD driver.
+ */
+static int idxd_bind(struct device_driver *drv, const char *buf)
 {
-	struct device *dev = &pdev->dev;
-	struct idxd_device *idxd;
-	struct idxd_driver_data *data = (struct idxd_driver_data *)id->driver_data;
+	const struct bus_type *bus = drv->bus;
+	struct device *dev;
+	int err = -ENODEV;
+
+	dev = bus_find_device_by_name(bus, NULL, buf);
+	if (dev)
+		err = device_driver_attach(drv, dev);
+
+	put_device(dev);
+
+	return err;
+}
+
+/*
+ * Detach IDXD device from driver.
+ */
+static void idxd_unbind(struct device_driver *drv, const char *buf)
+{
+	const struct bus_type *bus = drv->bus;
+	struct device *dev;
+
+	dev = bus_find_device_by_name(bus, NULL, buf);
+	if (dev && dev->driver == drv)
+		device_release_driver(dev);
+
+	put_device(dev);
+}
+
+#define idxd_free_saved_configs(saved_configs, count)	\
+	do {						\
+		int i;					\
+							\
+		for (i = 0; i < (count); i++)		\
+			kfree(saved_configs[i]);	\
+	} while (0)
+
+static void idxd_free_saved(struct idxd_group **saved_groups,
+			    struct idxd_engine **saved_engines,
+			    struct idxd_wq **saved_wqs,
+			    struct idxd_device *idxd)
+{
+	if (saved_groups)
+		idxd_free_saved_configs(saved_groups, idxd->max_groups);
+	if (saved_engines)
+		idxd_free_saved_configs(saved_engines, idxd->max_engines);
+	if (saved_wqs)
+		idxd_free_saved_configs(saved_wqs, idxd->max_wqs);
+}
+
+/*
+ * Save IDXD device configurations including engines, groups, wqs etc.
+ * The saved configurations can be restored when needed.
+ */
+static int idxd_device_config_save(struct idxd_device *idxd,
+				   struct idxd_saved_states *idxd_saved)
+{
+	struct device *dev = &idxd->pdev->dev;
+	int i;
+
+	memcpy(&idxd_saved->saved_idxd, idxd, sizeof(*idxd));
+
+	if (idxd->evl) {
+		memcpy(&idxd_saved->saved_evl, idxd->evl,
+		       sizeof(struct idxd_evl));
+	}
+
+	struct idxd_group **saved_groups __free(kfree) =
+			kcalloc_node(idxd->max_groups,
+				     sizeof(struct idxd_group *),
+				     GFP_KERNEL, dev_to_node(dev));
+	if (!saved_groups)
+		return -ENOMEM;
+
+	for (i = 0; i < idxd->max_groups; i++) {
+		struct idxd_group *saved_group __free(kfree) =
+			kzalloc_node(sizeof(*saved_group), GFP_KERNEL,
+				     dev_to_node(dev));
+
+		if (!saved_group) {
+			/* Free saved groups */
+			idxd_free_saved(saved_groups, NULL, NULL, idxd);
+
+			return -ENOMEM;
+		}
+
+		memcpy(saved_group, idxd->groups[i], sizeof(*saved_group));
+		saved_groups[i] = no_free_ptr(saved_group);
+	}
+
+	struct idxd_engine **saved_engines =
+			kcalloc_node(idxd->max_engines,
+				     sizeof(struct idxd_engine *),
+				     GFP_KERNEL, dev_to_node(dev));
+	if (!saved_engines) {
+		/* Free saved groups */
+		idxd_free_saved(saved_groups, NULL, NULL, idxd);
+
+		return -ENOMEM;
+	}
+	for (i = 0; i < idxd->max_engines; i++) {
+		struct idxd_engine *saved_engine __free(kfree) =
+			kzalloc_node(sizeof(*saved_engine), GFP_KERNEL,
+				     dev_to_node(dev));
+		if (!saved_engine) {
+			/* Free saved groups and engines */
+			idxd_free_saved(saved_groups, saved_engines, NULL,
+					idxd);
+
+			return -ENOMEM;
+		}
+
+		memcpy(saved_engine, idxd->engines[i], sizeof(*saved_engine));
+		saved_engines[i] = no_free_ptr(saved_engine);
+	}
+
+	unsigned long *saved_wq_enable_map __free(bitmap) =
+			bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL,
+					   dev_to_node(dev));
+	if (!saved_wq_enable_map) {
+		/* Free saved groups and engines */
+		idxd_free_saved(saved_groups, saved_engines, NULL, idxd);
+
+		return -ENOMEM;
+	}
+
+	bitmap_copy(saved_wq_enable_map, idxd->wq_enable_map, idxd->max_wqs);
+
+	struct idxd_wq **saved_wqs __free(kfree) =
+			kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
+				     GFP_KERNEL, dev_to_node(dev));
+	if (!saved_wqs) {
+		/* Free saved groups and engines */
+		idxd_free_saved(saved_groups, saved_engines, NULL, idxd);
+
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < idxd->max_wqs; i++) {
+		struct idxd_wq *saved_wq __free(kfree) =
+			kzalloc_node(sizeof(*saved_wq), GFP_KERNEL,
+				     dev_to_node(dev));
+		struct idxd_wq *wq;
+
+		if (!saved_wq) {
+			/* Free saved groups, engines, and wqs */
+			idxd_free_saved(saved_groups, saved_engines, saved_wqs,
+					idxd);
+
+			return -ENOMEM;
+		}
+
+		if (!test_bit(i, saved_wq_enable_map))
+			continue;
+
+		wq = idxd->wqs[i];
+		mutex_lock(&wq->wq_lock);
+		memcpy(saved_wq, wq, sizeof(*saved_wq));
+		saved_wqs[i] = no_free_ptr(saved_wq);
+		mutex_unlock(&wq->wq_lock);
+	}
+
+	/* Save configurations */
+	idxd_saved->saved_groups = no_free_ptr(saved_groups);
+	idxd_saved->saved_engines = no_free_ptr(saved_engines);
+	idxd_saved->saved_wq_enable_map = no_free_ptr(saved_wq_enable_map);
+	idxd_saved->saved_wqs = no_free_ptr(saved_wqs);
+
+	return 0;
+}
+
+/*
+ * Restore IDXD device configurations including engines, groups, wqs etc
+ * that were saved before.
+ */
+static void idxd_device_config_restore(struct idxd_device *idxd,
+				       struct idxd_saved_states *idxd_saved)
+{
+	struct idxd_evl *saved_evl = &idxd_saved->saved_evl;
+	int i;
+
+	idxd->rdbuf_limit = idxd_saved->saved_idxd.rdbuf_limit;
+
+	idxd->evl->size = saved_evl->size;
+
+	for (i = 0; i < idxd->max_groups; i++) {
+		struct idxd_group *saved_group, *group;
+
+		saved_group = idxd_saved->saved_groups[i];
+		group = idxd->groups[i];
+
+		group->rdbufs_allowed = saved_group->rdbufs_allowed;
+		group->rdbufs_reserved = saved_group->rdbufs_reserved;
+		group->tc_a = saved_group->tc_a;
+		group->tc_b = saved_group->tc_b;
+		group->use_rdbuf_limit = saved_group->use_rdbuf_limit;
+
+		kfree(saved_group);
+	}
+	kfree(idxd_saved->saved_groups);
+
+	for (i = 0; i < idxd->max_engines; i++) {
+		struct idxd_engine *saved_engine, *engine;
+
+		saved_engine = idxd_saved->saved_engines[i];
+		engine = idxd->engines[i];
+
+		engine->group = saved_engine->group;
+
+		kfree(saved_engine);
+	}
+	kfree(idxd_saved->saved_engines);
+
+	bitmap_copy(idxd->wq_enable_map, idxd_saved->saved_wq_enable_map,
+		    idxd->max_wqs);
+	bitmap_free(idxd_saved->saved_wq_enable_map);
+
+	for (i = 0; i < idxd->max_wqs; i++) {
+		struct idxd_wq *saved_wq, *wq;
+		size_t len;
+
+		if (!test_bit(i, idxd->wq_enable_map))
+			continue;
+
+		saved_wq = idxd_saved->saved_wqs[i];
+		wq = idxd->wqs[i];
+
+		mutex_lock(&wq->wq_lock);
+
+		wq->group = saved_wq->group;
+		wq->flags = saved_wq->flags;
+		wq->threshold = saved_wq->threshold;
+		wq->size = saved_wq->size;
+		wq->priority = saved_wq->priority;
+		wq->type = saved_wq->type;
+		len = strlen(saved_wq->name) + 1;
+		strscpy(wq->name, saved_wq->name, len);
+		wq->max_xfer_bytes = saved_wq->max_xfer_bytes;
+		wq->max_batch_size = saved_wq->max_batch_size;
+		wq->enqcmds_retries = saved_wq->enqcmds_retries;
+		wq->descs = saved_wq->descs;
+		wq->idxd_chan = saved_wq->idxd_chan;
+		len = strlen(saved_wq->driver_name) + 1;
+		strscpy(wq->driver_name, saved_wq->driver_name, len);
+
+		mutex_unlock(&wq->wq_lock);
+
+		kfree(saved_wq);
+	}
+
+	kfree(idxd_saved->saved_wqs);
+}
+
+static void idxd_reset_prepare(struct pci_dev *pdev)
+{
+	struct idxd_device *idxd = pci_get_drvdata(pdev);
+	struct device *dev = &idxd->pdev->dev;
+	const char *idxd_name;
 	int rc;
 
-	rc = pci_enable_device(pdev);
-	if (rc)
-		return rc;
+	dev = &idxd->pdev->dev;
+	idxd_name = dev_name(idxd_confdev(idxd));
 
-	dev_dbg(dev, "Alloc IDXD context\n");
-	idxd = idxd_alloc(pdev, data);
-	if (!idxd) {
-		rc = -ENOMEM;
-		goto err_idxd_alloc;
+	struct idxd_saved_states *idxd_saved __free(kfree) =
+			kzalloc_node(sizeof(*idxd_saved), GFP_KERNEL,
+				     dev_to_node(&pdev->dev));
+	if (!idxd_saved) {
+		dev_err(dev, "HALT: no memory\n");
+
+		return;
 	}
 
-	dev_dbg(dev, "Mapping BARs\n");
-	idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
-	if (!idxd->reg_base) {
-		rc = -ENOMEM;
-		goto err_iomap;
+	/* Save IDXD configurations. */
+	rc = idxd_device_config_save(idxd, idxd_saved);
+	if (rc < 0) {
+		dev_err(dev, "HALT: cannot save %s configs\n", idxd_name);
+
+		return;
 	}
 
-	dev_dbg(dev, "Set DMA masks\n");
-	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	idxd->idxd_saved = no_free_ptr(idxd_saved);
+
+	/* Save PCI device state. */
+	pci_save_state(idxd->pdev);
+}
+
+static void idxd_reset_done(struct pci_dev *pdev)
+{
+	struct idxd_device *idxd = pci_get_drvdata(pdev);
+	const char *idxd_name;
+	struct device *dev;
+	int rc, i;
+
+	if (!idxd->idxd_saved)
+		return;
+
+	dev = &idxd->pdev->dev;
+	idxd_name = dev_name(idxd_confdev(idxd));
+
+	/* Restore PCI device state. */
+	pci_restore_state(idxd->pdev);
+
+	/* Unbind idxd device from driver. */
+	idxd_unbind(&idxd_drv.drv, idxd_name);
+
+	/*
+	 * Probe PCI device without allocating or changing
+	 * idxd software data which keeps the same as before FLR.
+	 */
+	idxd_pci_probe_alloc(idxd, NULL, NULL);
+
+	/* Restore IDXD configurations. */
+	idxd_device_config_restore(idxd, idxd->idxd_saved);
+
+	/* Re-configure IDXD device if allowed. */
+	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
+		rc = idxd_device_config(idxd);
+		if (rc < 0) {
+			dev_err(dev, "HALT: %s config fails\n", idxd_name);
+			goto out;
+		}
+	}
+
+	/* Bind IDXD device to driver. */
+	rc = idxd_bind(&idxd_drv.drv, idxd_name);
+	if (rc < 0) {
+		dev_err(dev, "HALT: binding %s to driver fails\n", idxd_name);
+		goto out;
+	}
+
+	/* Bind enabled wq in the IDXD device to driver. */
+	for (i = 0; i < idxd->max_wqs; i++) {
+		if (test_bit(i, idxd->wq_enable_map)) {
+			struct idxd_wq *wq = idxd->wqs[i];
+			char wq_name[32];
+
+			wq->state = IDXD_WQ_DISABLED;
+			sprintf(wq_name, "wq%d.%d", idxd->id, wq->id);
+			/*
+			 * Bind to user driver depending on wq type.
+			 *
+			 * Currently only support user type WQ. Will support
+			 * kernel type WQ in the future.
+			 */
+			if (wq->type == IDXD_WQT_USER)
+				rc = idxd_bind(&idxd_user_drv.drv, wq_name);
+			else
+				rc = -EINVAL;
+			if (rc < 0) {
+				clear_bit(i, idxd->wq_enable_map);
+				dev_err(dev,
+					"HALT: unable to re-enable wq %s\n",
+					dev_name(wq_confdev(wq)));
+			}
+		}
+	}
+out:
+	kfree(idxd->idxd_saved);
+}
+
+static const struct pci_error_handlers idxd_error_handler = {
+	.reset_prepare = idxd_reset_prepare,
+	.reset_done = idxd_reset_done,
+};
+
+/*
+ * Probe idxd PCI device.
+ * If idxd is not given, need to allocate idxd and set up its data.
+ *
+ * If idxd is given, idxd was allocated and setup already. Just need to
+ * configure device without re-allocating and re-configuring idxd data.
+ * This is useful for recovering from FLR.
+ */
+int idxd_pci_probe_alloc(struct idxd_device *idxd, struct pci_dev *pdev,
+			 const struct pci_device_id *id)
+{
+	bool alloc_idxd = idxd ? false : true;
+	struct idxd_driver_data *data;
+	struct device *dev;
+	int rc;
+
+	pdev = idxd ? idxd->pdev : pdev;
+	dev = &pdev->dev;
+	data = id ? (struct idxd_driver_data *)id->driver_data : NULL;
+
 	rc = pci_enable_device(pdev);
 	if (rc)
-		goto err;
+		return rc;
+
+	if (alloc_idxd) {
+		dev_dbg(dev, "Alloc IDXD context\n");
+		idxd = idxd_alloc(pdev, data);
+		if (!idxd) {
+			rc = -ENOMEM;
+			goto err_idxd_alloc;
+		}
+
+		dev_dbg(dev, "Mapping BARs\n");
+		idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
+		if (!idxd->reg_base) {
+			rc = -ENOMEM;
+			goto err_iomap;
+		}
+
+		dev_dbg(dev, "Set DMA masks\n");
+		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+		if (rc)
+			goto err;
+	}
 
 	dev_dbg(dev, "Set PCI master\n");
 	pci_set_master(pdev);
 	pci_set_drvdata(pdev, idxd);
 
-	idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
-	rc = idxd_probe(idxd);
-	if (rc) {
-		dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
-		goto err;
-	}
+	if (alloc_idxd) {
+		idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
+		rc = idxd_probe(idxd);
+		if (rc) {
+			dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
+			goto err;
+		}
+
+		if (data->load_device_defaults) {
+			rc = data->load_device_defaults(idxd);
+			if (rc)
+				dev_warn(dev, "IDXD loading device defaults failed\n");
+		}
 
-	if (data->load_device_defaults) {
-		rc = data->load_device_defaults(idxd);
+		rc = idxd_register_devices(idxd);
+		if (rc) {
+			dev_err(dev, "IDXD sysfs setup failed\n");
+			goto err_dev_register;
+		}
+
+		rc = idxd_device_init_debugfs(idxd);
 		if (rc)
-			dev_warn(dev, "IDXD loading device defaults failed\n");
+			dev_warn(dev, "IDXD debugfs failed to setup\n");
 	}
 
-	rc = idxd_register_devices(idxd);
-	if (rc) {
-		dev_err(dev, "IDXD sysfs setup failed\n");
-		goto err_dev_register;
-	}
+	if (!alloc_idxd) {
+		/* Release interrupts in the IDXD device. */
+		idxd_cleanup_interrupts(idxd);
 
-	rc = idxd_device_init_debugfs(idxd);
-	if (rc)
-		dev_warn(dev, "IDXD debugfs failed to setup\n");
+		/* Re-enable interrupts in the IDXD device. */
+		rc = idxd_setup_interrupts(idxd);
+		if (rc)
+			dev_warn(dev, "IDXD interrupts failed to setup\n");
+	}
 
 	dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
 		 idxd->hw.version);
 
-	idxd->user_submission_safe = data->user_submission_safe;
+	if (data)
+		idxd->user_submission_safe = data->user_submission_safe;
 
 	return 0;
 
@@ -792,12 +1237,17 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 err:
 	pci_iounmap(pdev, idxd->reg_base);
 err_iomap:
-	put_device(idxd_confdev(idxd));
+	idxd_free(idxd);
 err_idxd_alloc:
 	pci_disable_device(pdev);
 	return rc;
 }
 
+static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	return idxd_pci_probe_alloc(NULL, pdev, id);
+}
+
 void idxd_wqs_quiesce(struct idxd_device *idxd)
 {
 	struct idxd_wq *wq;
@@ -829,7 +1279,6 @@ static void idxd_shutdown(struct pci_dev *pdev)
 static void idxd_remove(struct pci_dev *pdev)
 {
 	struct idxd_device *idxd = pci_get_drvdata(pdev);
-	struct idxd_irq_entry *irq_entry;
 
 	idxd_unregister_devices(idxd);
 	/*
@@ -842,20 +1291,12 @@ static void idxd_remove(struct pci_dev *pdev)
 	get_device(idxd_confdev(idxd));
 	device_unregister(idxd_confdev(idxd));
 	idxd_shutdown(pdev);
-	if (device_pasid_enabled(idxd))
-		idxd_disable_system_pasid(idxd);
 	idxd_device_remove_debugfs(idxd);
-
-	irq_entry = idxd_get_ie(idxd, 0);
-	free_irq(irq_entry->vector, irq_entry);
-	pci_free_irq_vectors(pdev);
+	idxd_cleanup(idxd);
 	pci_iounmap(pdev, idxd->reg_base);
-	if (device_user_pasid_enabled(idxd))
-		idxd_disable_sva(pdev);
-	pci_disable_device(pdev);
-	destroy_workqueue(idxd->wq);
-	perfmon_pmu_remove(idxd);
 	put_device(idxd_confdev(idxd));
+	idxd_free(idxd);
+	pci_disable_device(pdev);
 }
 
 static struct pci_driver idxd_pci_driver = {
@@ -864,6 +1305,7 @@ static struct pci_driver idxd_pci_driver = {
 	.probe		= idxd_pci_probe,
 	.remove		= idxd_remove,
 	.shutdown	= idxd_shutdown,
+	.err_handler	= &idxd_error_handler,
 };
 
 static int __init idxd_init_module(void)
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index fc049c9c9892..1107db3ce0a3 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -383,15 +383,65 @@ static void process_evl_entries(struct idxd_device *idxd)
 	mutex_unlock(&evl->lock);
 }
 
+static void idxd_device_flr(struct work_struct *work)
+{
+	struct idxd_device *idxd = container_of(work, struct idxd_device, work);
+	int rc;
+
+	/*
+	 * IDXD device requires a Function Level Reset (FLR).
+	 * pci_reset_function() will reset the device with FLR.
+	 */
+	rc = pci_reset_function(idxd->pdev);
+	if (rc)
+		dev_err(&idxd->pdev->dev, "FLR failed\n");
+}
+
+static irqreturn_t idxd_halt(struct idxd_device *idxd)
+{
+	union gensts_reg gensts;
+
+	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
+	if (gensts.state == IDXD_DEVICE_STATE_HALT) {
+		idxd->state = IDXD_DEV_HALTED;
+		if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
+			/*
+			 * If we need a software reset, we will throw the work
+			 * on a system workqueue in order to allow interrupts
+			 * for the device command completions.
+			 */
+			INIT_WORK(&idxd->work, idxd_device_reinit);
+			queue_work(idxd->wq, &idxd->work);
+		} else if (gensts.reset_type == IDXD_DEVICE_RESET_FLR) {
+			idxd->state = IDXD_DEV_HALTED;
+			idxd_mask_error_interrupts(idxd);
+			dev_dbg(&idxd->pdev->dev,
+				"idxd halted, doing FLR. After FLR, configs are restored\n");
+			INIT_WORK(&idxd->work, idxd_device_flr);
+			queue_work(idxd->wq, &idxd->work);
+
+		} else {
+			idxd->state = IDXD_DEV_HALTED;
+			idxd_wqs_quiesce(idxd);
+			idxd_wqs_unmap_portal(idxd);
+			idxd_device_clear_state(idxd);
+			dev_err(&idxd->pdev->dev,
+				"idxd halted, need system reset");
+
+			return -ENXIO;
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
 irqreturn_t idxd_misc_thread(int vec, void *data)
 {
 	struct idxd_irq_entry *irq_entry = data;
 	struct idxd_device *idxd = ie_to_idxd(irq_entry);
 	struct device *dev = &idxd->pdev->dev;
-	union gensts_reg gensts;
 	u32 val = 0;
 	int i;
-	bool err = false;
 	u32 cause;
 
 	cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
@@ -401,7 +451,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
 	iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
 
 	if (cause & IDXD_INTC_HALT_STATE)
-		goto halt;
+		return idxd_halt(idxd);
 
 	if (cause & IDXD_INTC_ERR) {
 		spin_lock(&idxd->dev_lock);
@@ -435,7 +485,6 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
 		for (i = 0; i < 4; i++)
 			dev_warn_ratelimited(dev, "err[%d]: %#16.16llx\n",
 					     i, idxd->sw_err.bits[i]);
-		err = true;
 	}
 
 	if (cause & IDXD_INTC_INT_HANDLE_REVOKED) {
@@ -480,34 +529,6 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
 		dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
 			      val);
 
-	if (!err)
-		goto out;
-
-halt:
-	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
-	if (gensts.state == IDXD_DEVICE_STATE_HALT) {
-		idxd->state = IDXD_DEV_HALTED;
-		if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
-			/*
-			 * If we need a software reset, we will throw the work
-			 * on a system workqueue in order to allow interrupts
-			 * for the device command completions.
-			 */
-			INIT_WORK(&idxd->work, idxd_device_reinit);
-			queue_work(idxd->wq, &idxd->work);
-		} else {
-			idxd->state = IDXD_DEV_HALTED;
-			idxd_wqs_quiesce(idxd);
-			idxd_wqs_unmap_portal(idxd);
-			idxd_device_clear_state(idxd);
-			dev_err(&idxd->pdev->dev,
-				"idxd halted, need %s.\n",
-				gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
-				"FLR" : "system reset");
-		}
-	}
-
-out:
 	return IRQ_HANDLED;
 }
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index c426511f2104..006ba206ab1b 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -9,6 +9,7 @@
 #define PCI_DEVICE_ID_INTEL_DSA_GNRD	0x11fb
 #define PCI_DEVICE_ID_INTEL_DSA_DMR	0x1212
 #define PCI_DEVICE_ID_INTEL_IAA_DMR	0x1216
+#define PCI_DEVICE_ID_INTEL_IAA_PTL	0xb02d
 
 #define DEVICE_VERSION_1		0x100
 #define DEVICE_VERSION_2		0x200
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index f706eae0e76b..9f0701021af0 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -1208,9 +1208,11 @@ static ssize_t op_cap_show_common(struct device *dev, char *buf, unsigned long *
 		/* On systems where direct user submissions are not safe, we need to clear out
 		 * the BATCH capability from the capability mask in sysfs since we cannot support
-		 * that command on such systems.
+		 * that command on such systems. Narrow the restriction of operations with the
+		 * BATCH opcode to only DSA version 1 devices.
 		 */
-		if (i == DSA_OPCODE_BATCH/64 && !confdev_to_idxd(dev)->user_submission_safe)
+		if (i == DSA_OPCODE_BATCH/64 && !confdev_to_idxd(dev)->user_submission_safe &&
+		    confdev_to_idxd(dev)->hw.version == DEVICE_VERSION_1)
 			clear_bit(DSA_OPCODE_BATCH % 64, &val);
 
 		pos += sysfs_emit_at(buf, pos, "%*pb", 64, &val);
@@ -1979,13 +1981,3 @@ void idxd_unregister_devices(struct idxd_device *idxd)
 		device_unregister(group_confdev(group));
 	}
 }
-
-int idxd_register_bus_type(void)
-{
-	return bus_register(&dsa_bus_type);
-}
-
-void idxd_unregister_bus_type(void)
-{
-	bus_unregister(&dsa_bus_type);
-}
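The save/restore helpers added in init.c above lean on the kernel's scope-based cleanup machinery from linux/cleanup.h: a pointer declared with __free(kfree) is freed automatically when it goes out of scope, and no_free_ptr() detaches a value from that automatic cleanup once ownership has been handed off (here, into idxd_saved). Below is a minimal userspace sketch of the same ownership pattern, built directly on the compiler cleanup attribute that those kernel macros wrap; the names __auto_free, take_ptr and save_config are illustrative stand-ins rather than kernel APIs, and GCC or Clang is assumed.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Called when an __auto_free variable leaves scope; frees what it points to. */
static void free_ptr(void *p)
{
	free(*(void **)p);
}

#define __auto_free __attribute__((cleanup(free_ptr)))

/* Detach a value from automatic cleanup: the handler then sees NULL
 * and frees nothing. This is the role no_free_ptr() plays above. */
static void *take_ptr(void **p)
{
	void *val = *p;

	*p = NULL;
	return val;
}

/* Every early return frees 'saved' automatically, like the error legs
 * of idxd_device_config_save(); only the success path keeps it alive. */
static char *save_config(const char *cfg)
{
	__auto_free char *saved = strdup(cfg);

	if (!saved)
		return NULL;
	if (saved[0] == '\0')
		return NULL;	/* error path: 'saved' is freed on return */

	return take_ptr((void **)&saved);	/* ownership moves to the caller */
}

int main(void)
{
	char *cfg = save_config("wq0.0 type=user size=16");

	printf("saved: %s\n", cfg ? cfg : "(failed)");
	free(cfg);
	return 0;
}

This is also why each -ENOMEM leg in idxd_device_config_save() only kfrees the element copies already detached into the arrays with no_free_ptr(): anything still attached to a __free() variable unwinds on its own when the function returns.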