Diffstat (limited to 'drivers/crypto/qat/qat_common/adf_isr.c')
-rw-r--r-- | drivers/crypto/qat/qat_common/adf_isr.c | 190 |
1 file changed, 83 insertions, 107 deletions
diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
index c678d5c531aa..40593c9449a2 100644
--- a/drivers/crypto/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_isr.c
@@ -16,46 +16,31 @@
 #include "adf_transport_internal.h"
 
 #define ADF_MAX_NUM_VFS	32
-#define ADF_ERRSOU3	(0x3A000 + 0x0C)
-#define ADF_ERRSOU5	(0x3A000 + 0xD8)
-#define ADF_ERRMSK3	(0x3A000 + 0x1C)
-#define ADF_ERRMSK5	(0x3A000 + 0xDC)
-#define ADF_ERR_REG_VF2PF_L(vf_src)	(((vf_src) & 0x01FFFE00) >> 9)
-#define ADF_ERR_REG_VF2PF_U(vf_src)	(((vf_src) & 0x0000FFFF) << 16)
 
 static int adf_enable_msix(struct adf_accel_dev *accel_dev)
 {
 	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-	u32 msix_num_entries = 1;
+	u32 msix_num_entries = hw_data->num_banks + 1;
+	int ret;
 
 	if (hw_data->set_msix_rttable)
 		hw_data->set_msix_rttable(accel_dev);
 
-	/* If SR-IOV is disabled, add entries for each bank */
-	if (!accel_dev->pf.vf_info) {
-		int i;
-
-		msix_num_entries += hw_data->num_banks;
-		for (i = 0; i < msix_num_entries; i++)
-			pci_dev_info->msix_entries.entries[i].entry = i;
-	} else {
-		pci_dev_info->msix_entries.entries[0].entry =
-			hw_data->num_banks;
-	}
-
-	if (pci_enable_msix_exact(pci_dev_info->pci_dev,
-				  pci_dev_info->msix_entries.entries,
-				  msix_num_entries)) {
-		dev_err(&GET_DEV(accel_dev), "Failed to enable MSI-X IRQ(s)\n");
-		return -EFAULT;
+	ret = pci_alloc_irq_vectors(pci_dev_info->pci_dev, msix_num_entries,
+				    msix_num_entries, PCI_IRQ_MSIX);
+	if (unlikely(ret < 0)) {
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to allocate %d MSI-X vectors\n",
+			msix_num_entries);
+		return ret;
 	}
 	return 0;
 }
 
 static void adf_disable_msix(struct adf_accel_pci *pci_dev_info)
 {
-	pci_disable_msix(pci_dev_info->pci_dev);
+	pci_free_irq_vectors(pci_dev_info->pci_dev);
 }
 
 static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
@@ -80,22 +65,10 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
 		struct adf_bar *pmisc =
 			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
 		void __iomem *pmisc_addr = pmisc->virt_addr;
-		u32 errsou3, errsou5, errmsk3, errmsk5;
 		unsigned long vf_mask;
 
 		/* Get the interrupt sources triggered by VFs */
-		errsou3 = ADF_CSR_RD(pmisc_addr, ADF_ERRSOU3);
-		errsou5 = ADF_CSR_RD(pmisc_addr, ADF_ERRSOU5);
-		vf_mask = ADF_ERR_REG_VF2PF_L(errsou3);
-		vf_mask |= ADF_ERR_REG_VF2PF_U(errsou5);
-
-		/* To avoid adding duplicate entries to work queue, clear
-		 * vf_int_mask_sets bits that are already masked in ERRMSK register.
-		 */
-		errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_ERRMSK3);
-		errmsk5 = ADF_CSR_RD(pmisc_addr, ADF_ERRMSK5);
-		vf_mask &= ~ADF_ERR_REG_VF2PF_L(errmsk3);
-		vf_mask &= ~ADF_ERR_REG_VF2PF_U(errmsk5);
+		vf_mask = hw_data->get_vf2pf_sources(pmisc_addr);
 
 		if (vf_mask) {
 			struct adf_accel_vf_info *vf_info;
@@ -135,13 +108,39 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
 	return IRQ_NONE;
 }
 
+static void adf_free_irqs(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_irq *irqs = pci_dev_info->msix_entries.irqs;
+	struct adf_etr_data *etr_data = accel_dev->transport;
+	int clust_irq = hw_data->num_banks;
+	int irq, i = 0;
+
+	if (pci_dev_info->msix_entries.num_entries > 1) {
+		for (i = 0; i < hw_data->num_banks; i++) {
+			if (irqs[i].enabled) {
+				irq = pci_irq_vector(pci_dev_info->pci_dev, i);
+				irq_set_affinity_hint(irq, NULL);
+				free_irq(irq, &etr_data->banks[i]);
+			}
+		}
+	}
+
+	if (irqs[i].enabled) {
+		irq = pci_irq_vector(pci_dev_info->pci_dev, clust_irq);
+		free_irq(irq, accel_dev);
+	}
+}
+
 static int adf_request_irqs(struct adf_accel_dev *accel_dev)
 {
 	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-	struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
+	struct adf_irq *irqs = pci_dev_info->msix_entries.irqs;
 	struct adf_etr_data *etr_data = accel_dev->transport;
-	int ret, i = 0;
+	int clust_irq = hw_data->num_banks;
+	int ret, irq, i = 0;
 	char *name;
 
 	/* Request msix irq for all banks unless SR-IOV enabled */
@@ -150,105 +149,82 @@ static int adf_request_irqs(struct adf_accel_dev *accel_dev)
 			struct adf_etr_bank_data *bank = &etr_data->banks[i];
 			unsigned int cpu, cpus = num_online_cpus();
 
-			name = *(pci_dev_info->msix_entries.names + i);
+			name = irqs[i].name;
 			snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
 				 "qat%d-bundle%d", accel_dev->accel_id, i);
-			ret = request_irq(msixe[i].vector,
-					  adf_msix_isr_bundle, 0, name, bank);
+			irq = pci_irq_vector(pci_dev_info->pci_dev, i);
+			if (unlikely(irq < 0)) {
+				dev_err(&GET_DEV(accel_dev),
					"Failed to get IRQ number of device vector %d - %s\n",
+					i, name);
+				ret = irq;
+				goto err;
+			}
+			ret = request_irq(irq, adf_msix_isr_bundle, 0,
+					  &name[0], bank);
 			if (ret) {
 				dev_err(&GET_DEV(accel_dev),
-					"failed to enable irq %d for %s\n",
-					msixe[i].vector, name);
-				return ret;
+					"Failed to allocate IRQ %d for %s\n",
+					irq, name);
+				goto err;
 			}
 
 			cpu = ((accel_dev->accel_id * hw_data->num_banks) +
 			       i) % cpus;
-			irq_set_affinity_hint(msixe[i].vector,
-					      get_cpu_mask(cpu));
+			irq_set_affinity_hint(irq, get_cpu_mask(cpu));
+			irqs[i].enabled = true;
 		}
 	}
 
 	/* Request msix irq for AE */
-	name = *(pci_dev_info->msix_entries.names + i);
+	name = irqs[i].name;
 	snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
 		 "qat%d-ae-cluster", accel_dev->accel_id);
-	ret = request_irq(msixe[i].vector, adf_msix_isr_ae, 0, name, accel_dev);
+	irq = pci_irq_vector(pci_dev_info->pci_dev, clust_irq);
+	if (unlikely(irq < 0)) {
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to get IRQ number of device vector %d - %s\n",
+			i, name);
+		ret = irq;
+		goto err;
+	}
+	ret = request_irq(irq, adf_msix_isr_ae, 0, &name[0], accel_dev);
 	if (ret) {
 		dev_err(&GET_DEV(accel_dev),
-			"failed to enable irq %d, for %s\n",
-			msixe[i].vector, name);
-		return ret;
+			"Failed to allocate IRQ %d for %s\n", irq, name);
+		goto err;
 	}
+	irqs[i].enabled = true;
+	return ret;
+err:
+	adf_free_irqs(accel_dev);
 	return ret;
 }
 
-static void adf_free_irqs(struct adf_accel_dev *accel_dev)
-{
-	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
-	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-	struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
-	struct adf_etr_data *etr_data = accel_dev->transport;
-	int i = 0;
-
-	if (pci_dev_info->msix_entries.num_entries > 1) {
-		for (i = 0; i < hw_data->num_banks; i++) {
-			irq_set_affinity_hint(msixe[i].vector, NULL);
-			free_irq(msixe[i].vector, &etr_data->banks[i]);
-		}
-	}
-	irq_set_affinity_hint(msixe[i].vector, NULL);
-	free_irq(msixe[i].vector, accel_dev);
-}
-
-static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
+static int adf_isr_alloc_msix_vectors_data(struct adf_accel_dev *accel_dev)
 {
-	int i;
-	char **names;
-	struct msix_entry *entries;
 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
 	u32 msix_num_entries = 1;
+	struct adf_irq *irqs;
 
 	/* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */
 	if (!accel_dev->pf.vf_info)
 		msix_num_entries += hw_data->num_banks;
 
-	entries = kcalloc_node(msix_num_entries, sizeof(*entries),
-			       GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
-	if (!entries)
+	irqs = kzalloc_node(msix_num_entries * sizeof(*irqs),
+			    GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
+	if (!irqs)
 		return -ENOMEM;
 
-	names = kcalloc(msix_num_entries, sizeof(char *), GFP_KERNEL);
-	if (!names) {
-		kfree(entries);
-		return -ENOMEM;
-	}
-	for (i = 0; i < msix_num_entries; i++) {
-		*(names + i) = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
-		if (!(*(names + i)))
-			goto err;
-	}
 	accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries;
-	accel_dev->accel_pci_dev.msix_entries.entries = entries;
-	accel_dev->accel_pci_dev.msix_entries.names = names;
+	accel_dev->accel_pci_dev.msix_entries.irqs = irqs;
 	return 0;
-err:
-	for (i = 0; i < msix_num_entries; i++)
-		kfree(*(names + i));
-	kfree(entries);
-	kfree(names);
-	return -ENOMEM;
 }
 
-static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev)
+static void adf_isr_free_msix_vectors_data(struct adf_accel_dev *accel_dev)
 {
-	char **names = accel_dev->accel_pci_dev.msix_entries.names;
-	int i;
-
-	kfree(accel_dev->accel_pci_dev.msix_entries.entries);
-	for (i = 0; i < accel_dev->accel_pci_dev.msix_entries.num_entries; i++)
-		kfree(*(names + i));
-	kfree(names);
+	kfree(accel_dev->accel_pci_dev.msix_entries.irqs);
+	accel_dev->accel_pci_dev.msix_entries.irqs = NULL;
 }
 
 static int adf_setup_bh(struct adf_accel_dev *accel_dev)
@@ -287,7 +263,7 @@ void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
 	adf_free_irqs(accel_dev);
 	adf_cleanup_bh(accel_dev);
 	adf_disable_msix(&accel_dev->accel_pci_dev);
-	adf_isr_free_msix_entry_table(accel_dev);
+	adf_isr_free_msix_vectors_data(accel_dev);
 }
 EXPORT_SYMBOL_GPL(adf_isr_resource_free);
 
@@ -303,7 +279,7 @@ int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
 {
 	int ret;
 
-	ret = adf_isr_alloc_msix_entry_table(accel_dev);
+	ret = adf_isr_alloc_msix_vectors_data(accel_dev);
 	if (ret)
 		goto err_out;
 
@@ -328,7 +304,7 @@ err_disable_msix:
 	adf_disable_msix(&accel_dev->accel_pci_dev);
 
 err_free_msix_table:
-	adf_isr_free_msix_entry_table(accel_dev);
+	adf_isr_free_msix_vectors_data(accel_dev);
 
 err_out:
 	return ret;