Diffstat (limited to 'drivers/pci')
-rw-r--r--	drivers/pci/Kconfig                 |   1
-rw-r--r--	drivers/pci/controller/pci-hyperv.c | 110
-rw-r--r--	drivers/pci/msi/irqdomain.c         |  25
-rw-r--r--	drivers/pci/tph.c                   |  11
4 files changed, 113 insertions(+), 34 deletions(-)
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 9c0e4aaf4e8c..9a249c65aedc 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -223,6 +223,7 @@ config PCI_HYPERV
 	tristate "Hyper-V PCI Frontend"
 	depends on ((X86 && X86_64) || ARM64) && HYPERV && PCI_MSI && SYSFS
 	select PCI_HYPERV_INTERFACE
+	select IRQ_MSI_LIB
 	help
 	  The PCI device frontend driver allows the kernel to import arbitrary
 	  PCI devices from a PCI backend to support PCI driver domains.
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 13680363ff19..d2b7e8ea710b 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -44,6 +44,7 @@
 #include <linux/delay.h>
 #include <linux/semaphore.h>
 #include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
 #include <linux/msi.h>
 #include <linux/hyperv.h>
 #include <linux/refcount.h>
@@ -508,7 +509,6 @@ struct hv_pcibus_device {
 	struct list_head children;
 	struct list_head dr_list;
 
-	struct msi_domain_info msi_info;
 	struct irq_domain *irq_domain;
 
 	struct workqueue_struct *wq;
@@ -576,9 +576,8 @@ struct hv_pci_compl {
 static void hv_pci_onchannelcallback(void *context);
 
 #ifdef CONFIG_X86
-#define DELIVERY_MODE	APIC_DELIVERY_MODE_FIXED
-#define FLOW_HANDLER	handle_edge_irq
-#define FLOW_NAME	"edge"
+#define DELIVERY_MODE		APIC_DELIVERY_MODE_FIXED
+#define HV_MSI_CHIP_FLAGS	MSI_CHIP_FLAG_SET_ACK
 
 static int hv_pci_irqchip_init(void)
 {
@@ -737,8 +736,7 @@ static void hv_arch_irq_unmask(struct irq_data *data)
 #define HV_PCI_MSI_SPI_START	64
 #define HV_PCI_MSI_SPI_NR	(1020 - HV_PCI_MSI_SPI_START)
 #define DELIVERY_MODE		0
-#define FLOW_HANDLER		NULL
-#define FLOW_NAME		NULL
+#define HV_MSI_CHIP_FLAGS	MSI_CHIP_FLAG_SET_EOI
 #define hv_msi_prepare		NULL
 
 struct hv_pci_chip_data {
@@ -1701,7 +1699,7 @@ static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
 	struct msi_desc *msi = irq_data_get_msi_desc(irq_data);
 
 	pdev = msi_desc_to_pci_dev(msi);
-	hbus = info->data;
+	hbus = domain->host_data;
 	int_desc = irq_data_get_irq_chip_data(irq_data);
 	if (!int_desc)
 		return;
@@ -1719,7 +1717,6 @@ static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
 
 static void hv_irq_mask(struct irq_data *data)
 {
-	pci_msi_mask_irq(data);
 	if (data->parent_data->chip->irq_mask)
 		irq_chip_mask_parent(data);
 }
@@ -1730,7 +1727,6 @@ static void hv_irq_unmask(struct irq_data *data)
 
 	if (data->parent_data->chip->irq_unmask)
 		irq_chip_unmask_parent(data);
-	pci_msi_unmask_irq(data);
 }
 
 struct compose_comp_ctxt {
@@ -2115,24 +2111,87 @@ return_null_message:
 	msg->data = 0;
 }
 
+static bool hv_pcie_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+				      struct irq_domain *real_parent, struct msi_domain_info *info)
+{
+	struct irq_chip *chip = info->chip;
+
+	if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
+		return false;
+
+	info->ops->msi_prepare = hv_msi_prepare;
+
+	chip->irq_set_affinity = irq_chip_set_affinity_parent;
+
+	if (IS_ENABLED(CONFIG_X86))
+		chip->flags |= IRQCHIP_MOVE_DEFERRED;
+
+	return true;
+}
+
+#define HV_PCIE_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS |		\
+				    MSI_FLAG_USE_DEF_CHIP_OPS |		\
+				    MSI_FLAG_PCI_MSI_MASK_PARENT)
+#define HV_PCIE_MSI_FLAGS_SUPPORTED (MSI_FLAG_MULTI_PCI_MSI |		\
+				     MSI_FLAG_PCI_MSIX |		\
+				     MSI_FLAG_PCI_MSIX_ALLOC_DYN |	\
+				     MSI_GENERIC_FLAGS_MASK)
+
+static const struct msi_parent_ops hv_pcie_msi_parent_ops = {
+	.required_flags		= HV_PCIE_MSI_FLAGS_REQUIRED,
+	.supported_flags	= HV_PCIE_MSI_FLAGS_SUPPORTED,
+	.bus_select_token	= DOMAIN_BUS_PCI_MSI,
+	.chip_flags		= HV_MSI_CHIP_FLAGS,
+	.prefix			= "HV-",
+	.init_dev_msi_info	= hv_pcie_init_dev_msi_info,
+};
+
 /* HW Interrupt Chip Descriptor */
 static struct irq_chip hv_msi_irq_chip = {
 	.name			= "Hyper-V PCIe MSI",
 	.irq_compose_msi_msg	= hv_compose_msi_msg,
 	.irq_set_affinity	= irq_chip_set_affinity_parent,
-#ifdef CONFIG_X86
 	.irq_ack		= irq_chip_ack_parent,
-	.flags			= IRQCHIP_MOVE_DEFERRED,
-#elif defined(CONFIG_ARM64)
 	.irq_eoi		= irq_chip_eoi_parent,
-#endif
 	.irq_mask		= hv_irq_mask,
 	.irq_unmask		= hv_irq_unmask,
 };
 
-static struct msi_domain_ops hv_msi_ops = {
-	.msi_prepare	= hv_msi_prepare,
-	.msi_free	= hv_msi_free,
+static int hv_pcie_domain_alloc(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs,
+				void *arg)
+{
+	/*
+	 * TODO: Allocating and populating struct tran_int_desc in hv_compose_msi_msg()
+	 * should be moved here.
+	 */
+	int ret;
+
+	ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, arg);
+	if (ret < 0)
+		return ret;
+
+	for (int i = 0; i < nr_irqs; i++) {
+		irq_domain_set_hwirq_and_chip(d, virq + i, 0, &hv_msi_irq_chip, NULL);
+		if (IS_ENABLED(CONFIG_X86))
+			__irq_set_handler(virq + i, handle_edge_irq, 0, "edge");
+	}
+
+	return 0;
+}
+
+static void hv_pcie_domain_free(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs)
+{
+	struct msi_domain_info *info = d->host_data;
+
+	for (int i = 0; i < nr_irqs; i++)
+		hv_msi_free(d, info, virq + i);
+
+	irq_domain_free_irqs_top(d, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops hv_pcie_domain_ops = {
+	.alloc	= hv_pcie_domain_alloc,
+	.free	= hv_pcie_domain_free,
 };
 
 /**
@@ -2150,17 +2209,14 @@ static struct msi_domain_ops hv_msi_ops = {
  */
 static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
 {
-	hbus->msi_info.chip = &hv_msi_irq_chip;
-	hbus->msi_info.ops = &hv_msi_ops;
-	hbus->msi_info.flags = (MSI_FLAG_USE_DEF_DOM_OPS |
-		MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI |
-		MSI_FLAG_PCI_MSIX);
-	hbus->msi_info.handler = FLOW_HANDLER;
-	hbus->msi_info.handler_name = FLOW_NAME;
-	hbus->msi_info.data = hbus;
-	hbus->irq_domain = pci_msi_create_irq_domain(hbus->fwnode,
-						     &hbus->msi_info,
-						     hv_pci_get_root_domain());
+	struct irq_domain_info info = {
+		.fwnode		= hbus->fwnode,
+		.ops		= &hv_pcie_domain_ops,
+		.host_data	= hbus,
+		.parent		= hv_pci_get_root_domain(),
+	};
+
+	hbus->irq_domain = msi_create_parent_irq_domain(&info, &hv_pcie_msi_parent_ops);
 	if (!hbus->irq_domain) {
 		dev_err(&hbus->hdev->device,
 			"Failed to build an MSI IRQ domain\n");
diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c
index c05152733993..0938ef7ebabf 100644
--- a/drivers/pci/msi/irqdomain.c
+++ b/drivers/pci/msi/irqdomain.c
@@ -222,13 +222,14 @@ static void pci_irq_unmask_msix(struct irq_data *data)
 	pci_msix_unmask(irq_data_get_msi_desc(data));
 }
 
-static void pci_msix_prepare_desc(struct irq_domain *domain, msi_alloc_info_t *arg,
-				  struct msi_desc *desc)
+void pci_msix_prepare_desc(struct irq_domain *domain, msi_alloc_info_t *arg,
+			   struct msi_desc *desc)
 {
 	/* Don't fiddle with preallocated MSI descriptors */
 	if (!desc->pci.mask_base)
 		msix_prepare_msi_desc(to_pci_dev(desc->dev), desc);
 }
+EXPORT_SYMBOL_GPL(pci_msix_prepare_desc);
 
 static const struct msi_domain_template pci_msix_template = {
 	.chip = {
@@ -428,6 +429,26 @@ u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
 }
 
 /**
+ * pci_msi_map_rid_ctlr_node - Get the MSI controller node and MSI requester id (RID)
+ * @pdev:	The PCI device
+ * @node:	Pointer to store the MSI controller device node
+ *
+ * Use the firmware data to find the MSI controller node for @pdev.
+ * If found map the RID and initialize @node with it. @node value must
+ * be set to NULL on entry.
+ *
+ * Returns: The RID.
+ */
+u32 pci_msi_map_rid_ctlr_node(struct pci_dev *pdev, struct device_node **node)
+{
+	u32 rid = pci_dev_id(pdev);
+
+	pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
+
+	return of_msi_xlate(&pdev->dev, node, rid);
+}
+
+/**
  * pci_msi_get_device_domain - Get the MSI domain for a given PCI device
  * @pdev:	The PCI device
  *
diff --git a/drivers/pci/tph.c b/drivers/pci/tph.c
index 77fce5e1b830..cc64f93709a4 100644
--- a/drivers/pci/tph.c
+++ b/drivers/pci/tph.c
@@ -168,7 +168,7 @@ static u32 get_st_table_loc(struct pci_dev *pdev)
  * Return the size of ST table. If ST table is not in TPH Requester Extended
  * Capability space, return 0. Otherwise return the ST Table Size + 1.
  */
-static u16 get_st_table_size(struct pci_dev *pdev)
+u16 pcie_tph_get_st_table_size(struct pci_dev *pdev)
 {
 	u32 reg;
 	u32 loc;
@@ -185,6 +185,7 @@ static u16 get_st_table_size(struct pci_dev *pdev)
 
 	return FIELD_GET(PCI_TPH_CAP_ST_MASK, reg) + 1;
 }
+EXPORT_SYMBOL(pcie_tph_get_st_table_size);
 
 /* Return device's Root Port completer capability */
 static u8 get_rp_completer_type(struct pci_dev *pdev)
@@ -211,7 +212,7 @@ static int write_tag_to_st_table(struct pci_dev *pdev, int index, u16 tag)
 	int offset;
 
 	/* Check if index is out of bound */
-	st_table_size = get_st_table_size(pdev);
+	st_table_size = pcie_tph_get_st_table_size(pdev);
 	if (index >= st_table_size)
 		return -ENXIO;
 
@@ -443,7 +444,7 @@ void pci_restore_tph_state(struct pci_dev *pdev)
 	pci_write_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, *cap++);
 	st_entry = (u16 *)cap;
 	offset = PCI_TPH_BASE_SIZEOF;
-	num_entries = get_st_table_size(pdev);
+	num_entries = pcie_tph_get_st_table_size(pdev);
 	for (i = 0; i < num_entries; i++) {
 		pci_write_config_word(pdev, pdev->tph_cap + offset,
 				      *st_entry++);
@@ -475,7 +476,7 @@ void pci_save_tph_state(struct pci_dev *pdev)
 	/* Save all ST entries in extended capability structure */
 	st_entry = (u16 *)cap;
 	offset = PCI_TPH_BASE_SIZEOF;
-	num_entries = get_st_table_size(pdev);
+	num_entries = pcie_tph_get_st_table_size(pdev);
 	for (i = 0; i < num_entries; i++) {
 		pci_read_config_word(pdev, pdev->tph_cap + offset,
 				     st_entry++);
@@ -499,7 +500,7 @@ void pci_tph_init(struct pci_dev *pdev)
 	if (!pdev->tph_cap)
 		return;
 
-	num_entries = get_st_table_size(pdev);
+	num_entries = pcie_tph_get_st_table_size(pdev);
 	save_size = sizeof(u32) + num_entries * sizeof(u16);
 	pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_TPH, save_size);
 }
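Note on the pci-hyperv.c hunks: the bridge now registers an MSI parent domain through msi_create_parent_irq_domain() and msi_parent_ops instead of building a pci_msi_create_irq_domain() from a per-bus msi_domain_info. Nothing changes for drivers of devices behind the bridge; the usual vector allocation path is unaffected. A minimal sketch, assuming a guest driver on such a bus (the function and interrupt-handler names below are illustrative, not part of the patch):

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t example_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int nvec, ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	/* Allocation is routed through the per-device MSI domain that the
	 * Hyper-V bridge now provides as an MSI parent domain. */
	nvec = pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_MSIX | PCI_IRQ_MSI);
	if (nvec < 0)
		return nvec;

	ret = request_irq(pci_irq_vector(pdev, 0), example_isr, 0, "example", pdev);
	if (ret)
		pci_free_irq_vectors(pdev);

	return ret;
}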
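The new pci_msi_map_rid_ctlr_node() helper in irqdomain.c walks the DMA aliases of @pdev and hands the result to of_msi_xlate(), returning the mapped RID and, through @node, the MSI controller's device node. Per its kernel-doc, @node must be NULL on entry; the declaration that makes it visible to callers is not part of this diff. A hedged sketch of a caller, with an illustrative wrapper name:

#include <linux/of.h>
#include <linux/pci.h>

static u32 example_get_msi_rid(struct pci_dev *pdev)
{
	struct device_node *msi_node = NULL;	/* must be NULL on entry */
	u32 rid;

	rid = pci_msi_map_rid_ctlr_node(pdev, &msi_node);
	if (msi_node)
		pr_debug("MSI controller %pOF maps %s to RID %#x\n",
			 msi_node, pci_name(pdev), rid);

	return rid;
}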
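The tph.c hunks rename get_st_table_size() to pcie_tph_get_st_table_size() and export it, so endpoint drivers that program steering tags can learn how many ST table entries the device exposes (0 means the table is not located in the TPH Requester capability). The matching declaration, presumably in include/linux/pci-tph.h, is not shown in this diff. A rough usage sketch; the wrapper function is hypothetical, while pcie_tph_set_st_entry() is the existing TPH API:

#include <linux/pci.h>
#include <linux/pci-tph.h>

static int example_program_st_entry(struct pci_dev *pdev, unsigned int index, u16 tag)
{
	u16 table_size = pcie_tph_get_st_table_size(pdev);

	/* Bound the index by the device's ST table before writing a tag */
	if (!table_size || index >= table_size)
		return -ENXIO;

	return pcie_tph_set_st_entry(pdev, index, tag);
}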