Diffstat (limited to 'drivers/pci')
-rw-r--r--   drivers/pci/host/pci-xgene-msi.c          |  69
-rw-r--r--   drivers/pci/host/pcie-designware-plat.c   |   2
-rw-r--r--   drivers/pci/host/pcie-designware.c        |   7
-rw-r--r--   drivers/pci/host/pcie-qcom.c              |   2
-rw-r--r--   drivers/pci/host/pcie-rockchip.c          |  62
-rw-r--r--   drivers/pci/msi.c                         |  78
-rw-r--r--   drivers/pci/pci-mid.c                     |   6
-rw-r--r--   drivers/pci/pcie/aer/aer_inject.c         |  14
-rw-r--r--   drivers/pci/probe.c                       |  28
-rw-r--r--   drivers/pci/setup-res.c                   |   8
-rw-r--r--   drivers/pci/xen-pcifront.c                |   6
11 files changed, 175 insertions, 107 deletions
diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c
index a6456b578269..1f38d0836751 100644
--- a/drivers/pci/host/pci-xgene-msi.c
+++ b/drivers/pci/host/pci-xgene-msi.c
@@ -360,16 +360,16 @@ static void xgene_msi_isr(struct irq_desc *desc)
 	chained_irq_exit(chip, desc);
 }
 
+static enum cpuhp_state pci_xgene_online;
+
 static int xgene_msi_remove(struct platform_device *pdev)
 {
-	int virq, i;
 	struct xgene_msi *msi = platform_get_drvdata(pdev);
 
-	for (i = 0; i < NR_HW_IRQS; i++) {
-		virq = msi->msi_groups[i].gic_irq;
-		if (virq != 0)
-			irq_set_chained_handler_and_data(virq, NULL, NULL);
-	}
+	if (pci_xgene_online)
+		cpuhp_remove_state(pci_xgene_online);
+	cpuhp_remove_state(CPUHP_PCI_XGENE_DEAD);
+
 	kfree(msi->msi_groups);
 
 	kfree(msi->bitmap);
@@ -427,7 +427,7 @@ static int xgene_msi_hwirq_alloc(unsigned int cpu)
 	return 0;
 }
 
-static void xgene_msi_hwirq_free(unsigned int cpu)
+static int xgene_msi_hwirq_free(unsigned int cpu)
 {
 	struct xgene_msi *msi = &xgene_msi_ctrl;
 	struct xgene_msi_group *msi_group;
@@ -441,33 +441,9 @@ static void xgene_msi_hwirq_free(unsigned int cpu)
 		irq_set_chained_handler_and_data(msi_group->gic_irq, NULL,
 						 NULL);
 	}
+	return 0;
 }
 
-static int xgene_msi_cpu_callback(struct notifier_block *nfb,
-				  unsigned long action, void *hcpu)
-{
-	unsigned cpu = (unsigned long)hcpu;
-
-	switch (action) {
-	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-		xgene_msi_hwirq_alloc(cpu);
-		break;
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		xgene_msi_hwirq_free(cpu);
-		break;
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block xgene_msi_cpu_notifier = {
-	.notifier_call = xgene_msi_cpu_callback,
-};
-
 static const struct of_device_id xgene_msi_match_table[] = {
 	{.compatible = "apm,xgene1-msi"},
 	{},
@@ -478,7 +454,6 @@ static int xgene_msi_probe(struct platform_device *pdev)
 	struct resource *res;
 	int rc, irq_index;
 	struct xgene_msi *xgene_msi;
-	unsigned int cpu;
 	int virt_msir;
 	u32 msi_val, msi_idx;
 
@@ -540,28 +515,22 @@ static int xgene_msi_probe(struct platform_device *pdev)
 		}
 	}
 
-	cpu_notifier_register_begin();
-
-	for_each_online_cpu(cpu)
-		if (xgene_msi_hwirq_alloc(cpu)) {
-			dev_err(&pdev->dev, "failed to register MSI handlers\n");
-			cpu_notifier_register_done();
-			goto error;
-		}
-
-	rc = __register_hotcpu_notifier(&xgene_msi_cpu_notifier);
-	if (rc) {
-		dev_err(&pdev->dev, "failed to add CPU MSI notifier\n");
-		cpu_notifier_register_done();
-		goto error;
-	}
-
-	cpu_notifier_register_done();
+	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online",
+			       xgene_msi_hwirq_alloc, NULL);
+	if (rc)
+		goto err_cpuhp;
+	pci_xgene_online = rc;
+	rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL,
+			       xgene_msi_hwirq_free);
+	if (rc)
+		goto err_cpuhp;
 
 	dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n");
 
 	return 0;
 
+err_cpuhp:
+	dev_err(&pdev->dev, "failed to add CPU MSI notifier\n");
 error:
 	xgene_msi_remove(pdev);
 	return rc;
diff --git a/drivers/pci/host/pcie-designware-plat.c b/drivers/pci/host/pcie-designware-plat.c
index 8df6312ed300..1a02038c4640 100644
--- a/drivers/pci/host/pcie-designware-plat.c
+++ b/drivers/pci/host/pcie-designware-plat.c
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
  *
- * Authors: Joao Pinto <jpmpinto@gmail.com>
+ * Authors: Joao Pinto <Joao.Pinto@synopsys.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 035f50c03281..bed19994c1e9 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -637,8 +637,6 @@ int dw_pcie_host_init(struct pcie_port *pp)
 		}
 	}
 
-	pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
-
 	if (pp->ops->host_init)
 		pp->ops->host_init(pp);
 
@@ -809,6 +807,11 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
 {
 	u32 val;
 
+	/* get iATU unroll support */
+	pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
+	dev_dbg(pp->dev, "iATU unroll: %s\n",
+		pp->iatu_unroll_enabled ? "enabled" : "disabled");
+
 	/* set the number of lanes */
 	val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL);
 	val &= ~PORT_LINK_MODE_MASK;
diff --git a/drivers/pci/host/pcie-qcom.c b/drivers/pci/host/pcie-qcom.c
index ef0a84c7a588..35936409b2d4 100644
--- a/drivers/pci/host/pcie-qcom.c
+++ b/drivers/pci/host/pcie-qcom.c
@@ -533,11 +533,11 @@ static int qcom_pcie_probe(struct platform_device *pdev)
 	if (IS_ERR(pcie->phy))
 		return PTR_ERR(pcie->phy);
 
+	pp->dev = dev;
 	ret = pcie->ops->get_resources(pcie);
 	if (ret)
 		return ret;
 
-	pp->dev = dev;
 	pp->root_bus_nr = -1;
 	pp->ops = &qcom_pcie_dw_ops;
 
diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c
index e0b22dab9b7a..e04f69beb42d 100644
--- a/drivers/pci/host/pcie-rockchip.c
+++ b/drivers/pci/host/pcie-rockchip.c
@@ -190,6 +190,9 @@ struct rockchip_pcie {
 	struct reset_control *mgmt_rst;
 	struct reset_control *mgmt_sticky_rst;
 	struct reset_control *pipe_rst;
+	struct reset_control *pm_rst;
+	struct reset_control *aclk_rst;
+	struct reset_control *pclk_rst;
 	struct clk *aclk_pcie;
 	struct clk *aclk_perf_pcie;
 	struct clk *hclk_pcie;
@@ -408,6 +411,44 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
 
 	gpiod_set_value(rockchip->ep_gpio, 0);
 
+	err = reset_control_assert(rockchip->aclk_rst);
+	if (err) {
+		dev_err(dev, "assert aclk_rst err %d\n", err);
+		return err;
+	}
+
+	err = reset_control_assert(rockchip->pclk_rst);
+	if (err) {
+		dev_err(dev, "assert pclk_rst err %d\n", err);
+		return err;
+	}
+
+	err = reset_control_assert(rockchip->pm_rst);
+	if (err) {
+		dev_err(dev, "assert pm_rst err %d\n", err);
+		return err;
+	}
+
+	udelay(10);
+
+	err = reset_control_deassert(rockchip->pm_rst);
+	if (err) {
+		dev_err(dev, "deassert pm_rst err %d\n", err);
+		return err;
+	}
+
+	err = reset_control_deassert(rockchip->aclk_rst);
+	if (err) {
+		dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err);
+		return err;
+	}
+
+	err = reset_control_deassert(rockchip->pclk_rst);
+	if (err) {
+		dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err);
+		return err;
+	}
+
 	err = phy_init(rockchip->phy);
 	if (err < 0) {
 		dev_err(dev, "fail to init phy, err %d\n", err);
@@ -781,6 +822,27 @@ static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
 		return PTR_ERR(rockchip->pipe_rst);
 	}
 
+	rockchip->pm_rst = devm_reset_control_get(dev, "pm");
+	if (IS_ERR(rockchip->pm_rst)) {
+		if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER)
+			dev_err(dev, "missing pm reset property in node\n");
+		return PTR_ERR(rockchip->pm_rst);
+	}
+
+	rockchip->pclk_rst = devm_reset_control_get(dev, "pclk");
+	if (IS_ERR(rockchip->pclk_rst)) {
+		if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER)
+			dev_err(dev, "missing pclk reset property in node\n");
+		return PTR_ERR(rockchip->pclk_rst);
+	}
+
+	rockchip->aclk_rst = devm_reset_control_get(dev, "aclk");
+	if (IS_ERR(rockchip->aclk_rst)) {
+		if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER)
+			dev_err(dev, "missing aclk reset property in node\n");
+		return PTR_ERR(rockchip->aclk_rst);
+	}
+
 	rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH);
 	if (IS_ERR(rockchip->ep_gpio)) {
 		dev_err(dev, "missing ep-gpios property in node\n");
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index bfdd0744b686..dd27f73a45fc 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -551,14 +551,14 @@ error_attrs:
 }
 
 static struct msi_desc *
-msi_setup_entry(struct pci_dev *dev, int nvec, bool affinity)
+msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd)
 {
 	struct cpumask *masks = NULL;
 	struct msi_desc *entry;
 	u16 control;
 
-	if (affinity) {
-		masks = irq_create_affinity_masks(dev->irq_affinity, nvec);
+	if (affd) {
+		masks = irq_create_affinity_masks(nvec, affd);
 		if (!masks)
 			pr_err("Unable to allocate affinity masks, ignoring\n");
 	}
@@ -610,6 +610,7 @@ static int msi_verify_entries(struct pci_dev *dev)
  * msi_capability_init - configure device's MSI capability structure
  * @dev: pointer to the pci_dev data structure of MSI device function
  * @nvec: number of interrupts to allocate
+ * @affinity: flag to indicate cpu irq affinity mask should be set
  *
  * Setup the MSI capability structure of the device with the requested
  * number of interrupts.  A return value of zero indicates the successful
@@ -617,7 +618,8 @@ static int msi_verify_entries(struct pci_dev *dev)
  * an error, and a positive return value indicates the number of interrupts
  * which could have been allocated.
  */
-static int msi_capability_init(struct pci_dev *dev, int nvec, bool affinity)
+static int msi_capability_init(struct pci_dev *dev, int nvec,
+			       const struct irq_affinity *affd)
 {
 	struct msi_desc *entry;
 	int ret;
@@ -625,7 +627,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec, bool affinity)
 
 	pci_msi_set_enable(dev, 0);	/* Disable MSI during set up */
 
-	entry = msi_setup_entry(dev, nvec, affinity);
+	entry = msi_setup_entry(dev, nvec, affd);
 	if (!entry)
 		return -ENOMEM;
 
@@ -689,14 +691,14 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
 
 static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
 			      struct msix_entry *entries, int nvec,
-			      bool affinity)
+			      const struct irq_affinity *affd)
 {
 	struct cpumask *curmsk, *masks = NULL;
 	struct msi_desc *entry;
 	int ret, i;
 
-	if (affinity) {
-		masks = irq_create_affinity_masks(dev->irq_affinity, nvec);
+	if (affd) {
+		masks = irq_create_affinity_masks(nvec, affd);
 		if (!masks)
 			pr_err("Unable to allocate affinity masks, ignoring\n");
 	}
@@ -752,13 +754,14 @@ static void msix_program_entries(struct pci_dev *dev,
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
+ * @affd: Optional pointer to enable automatic affinity assignement
 *
 * Setup the MSI-X capability structure of device function with a
 * single MSI-X irq. A return of zero indicates the successful setup of
 * requested MSI-X entries with allocated irqs or non-zero for otherwise.
 **/
 static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
-				int nvec, bool affinity)
+				int nvec, const struct irq_affinity *affd)
 {
 	int ret;
 	u16 control;
@@ -773,7 +776,7 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
 	if (!base)
 		return -ENOMEM;
 
-	ret = msix_setup_entries(dev, base, entries, nvec, affinity);
+	ret = msix_setup_entries(dev, base, entries, nvec, affd);
 	if (ret)
 		return ret;
 
@@ -954,7 +957,7 @@ int pci_msix_vec_count(struct pci_dev *dev)
 EXPORT_SYMBOL(pci_msix_vec_count);
 
 static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
-			     int nvec, bool affinity)
+			     int nvec, const struct irq_affinity *affd)
 {
 	int nr_entries;
 	int i, j;
@@ -986,7 +989,7 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
 		dev_info(&dev->dev, "can't enable MSI-X (MSI IRQ already assigned)\n");
 		return -EINVAL;
 	}
-	return msix_capability_init(dev, entries, nvec, affinity);
+	return msix_capability_init(dev, entries, nvec, affd);
 }
 
 /**
@@ -1006,7 +1009,7 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
 **/
 int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
 {
-	return __pci_enable_msix(dev, entries, nvec, false);
+	return __pci_enable_msix(dev, entries, nvec, NULL);
 }
 EXPORT_SYMBOL(pci_enable_msix);
 
@@ -1057,9 +1060,8 @@ int pci_msi_enabled(void)
 EXPORT_SYMBOL(pci_msi_enabled);
 
 static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
-				  unsigned int flags)
+				  const struct irq_affinity *affd)
 {
-	bool affinity = flags & PCI_IRQ_AFFINITY;
 	int nvec;
 	int rc;
 
@@ -1088,14 +1090,13 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
 		nvec = maxvec;
 
 	for (;;) {
-		if (affinity) {
-			nvec = irq_calc_affinity_vectors(dev->irq_affinity,
-							 nvec);
+		if (affd) {
+			nvec = irq_calc_affinity_vectors(nvec, affd);
 			if (nvec < minvec)
 				return -ENOSPC;
 		}
 
-		rc = msi_capability_init(dev, nvec, affinity);
+		rc = msi_capability_init(dev, nvec, affd);
 		if (rc == 0)
 			return nvec;
 
@@ -1122,29 +1123,27 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
 **/
 int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
 {
-	return __pci_enable_msi_range(dev, minvec, maxvec, 0);
+	return __pci_enable_msi_range(dev, minvec, maxvec, NULL);
 }
 EXPORT_SYMBOL(pci_enable_msi_range);
 
 static int __pci_enable_msix_range(struct pci_dev *dev,
-		struct msix_entry *entries, int minvec, int maxvec,
-		unsigned int flags)
+				   struct msix_entry *entries, int minvec,
+				   int maxvec, const struct irq_affinity *affd)
 {
-	bool affinity = flags & PCI_IRQ_AFFINITY;
 	int rc, nvec = maxvec;
 
 	if (maxvec < minvec)
 		return -ERANGE;
 
 	for (;;) {
-		if (affinity) {
-			nvec = irq_calc_affinity_vectors(dev->irq_affinity,
-							 nvec);
+		if (affd) {
+			nvec = irq_calc_affinity_vectors(nvec, affd);
 			if (nvec < minvec)
 				return -ENOSPC;
 		}
 
-		rc = __pci_enable_msix(dev, entries, nvec, affinity);
+		rc = __pci_enable_msix(dev, entries, nvec, affd);
 		if (rc == 0)
 			return nvec;
 
@@ -1175,16 +1174,17 @@ static int __pci_enable_msix_range(struct pci_dev *dev,
 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
 		int minvec, int maxvec)
 {
-	return __pci_enable_msix_range(dev, entries, minvec, maxvec, 0);
+	return __pci_enable_msix_range(dev, entries, minvec, maxvec, NULL);
 }
 EXPORT_SYMBOL(pci_enable_msix_range);
 
 /**
- * pci_alloc_irq_vectors - allocate multiple IRQs for a device
+ * pci_alloc_irq_vectors_affinity - allocate multiple IRQs for a device
 * @dev: PCI device to operate on
 * @min_vecs: minimum number of vectors required (must be >= 1)
 * @max_vecs: maximum (desired) number of vectors
 * @flags: flags or quirks for the allocation
+ * @affd: optional description of the affinity requirements
 *
 * Allocate up to @max_vecs interrupt vectors for @dev, using MSI-X or MSI
 * vectors if available, and fall back to a single legacy vector
@@ -1196,20 +1196,30 @@ EXPORT_SYMBOL(pci_enable_msix_range);
 * To get the Linux IRQ number used for a vector that can be passed to
 * request_irq() use the pci_irq_vector() helper.
 */
-int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
-		unsigned int max_vecs, unsigned int flags)
+int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
+				   unsigned int max_vecs, unsigned int flags,
+				   const struct irq_affinity *affd)
 {
+	static const struct irq_affinity msi_default_affd;
 	int vecs = -ENOSPC;
 
+	if (flags & PCI_IRQ_AFFINITY) {
+		if (!affd)
+			affd = &msi_default_affd;
+	} else {
+		if (WARN_ON(affd))
+			affd = NULL;
+	}
+
 	if (flags & PCI_IRQ_MSIX) {
 		vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs,
-				flags);
+				affd);
 		if (vecs > 0)
 			return vecs;
 	}
 
 	if (flags & PCI_IRQ_MSI) {
-		vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, flags);
+		vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd);
 		if (vecs > 0)
 			return vecs;
 	}
@@ -1222,7 +1232,7 @@ int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
 
 	return vecs;
 }
-EXPORT_SYMBOL(pci_alloc_irq_vectors);
+EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity);
 
 /**
 * pci_free_irq_vectors - free previously allocated IRQs for a device
diff --git a/drivers/pci/pci-mid.c b/drivers/pci/pci-mid.c
index 55f453de562e..c7f3408e3148 100644
--- a/drivers/pci/pci-mid.c
+++ b/drivers/pci/pci-mid.c
@@ -29,6 +29,11 @@ static int mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
 	return intel_mid_pci_set_power_state(pdev, state);
 }
 
+static pci_power_t mid_pci_get_power_state(struct pci_dev *pdev)
+{
+	return intel_mid_pci_get_power_state(pdev);
+}
+
 static pci_power_t mid_pci_choose_state(struct pci_dev *pdev)
 {
 	return PCI_D3hot;
@@ -52,6 +57,7 @@ static bool mid_pci_need_resume(struct pci_dev *dev)
 static struct pci_platform_pm_ops mid_pci_platform_pm = {
 	.is_manageable	= mid_pci_power_manageable,
 	.set_state	= mid_pci_set_power_state,
+	.get_state	= mid_pci_get_power_state,
 	.choose_state	= mid_pci_choose_state,
 	.sleep_wake	= mid_pci_sleep_wake,
 	.run_wake	= mid_pci_run_wake,
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index db553dc22c8e..2b6a59266689 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -307,20 +307,6 @@ out:
 	return 0;
 }
 
-static struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
-{
-	while (1) {
-		if (!pci_is_pcie(dev))
-			break;
-		if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
-			return dev;
-		if (!dev->bus->self)
-			break;
-		dev = dev->bus->self;
-	}
-	return NULL;
-}
-
 static int find_aer_device_iter(struct device *device, void *data)
 {
 	struct pcie_device **result = data;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index ab002671fa60..104c46d53121 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1439,6 +1439,21 @@ static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
 		dev_warn(&dev->dev, "PCI-X settings not supported\n");
 }
 
+static bool pcie_root_rcb_set(struct pci_dev *dev)
+{
+	struct pci_dev *rp = pcie_find_root_port(dev);
+	u16 lnkctl;
+
+	if (!rp)
+		return false;
+
+	pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
+	if (lnkctl & PCI_EXP_LNKCTL_RCB)
+		return true;
+
+	return false;
+}
+
 static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
 {
 	int pos;
@@ -1468,9 +1483,20 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
 			~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);
 
 	/* Initialize Link Control Register */
-	if (pcie_cap_has_lnkctl(dev))
+	if (pcie_cap_has_lnkctl(dev)) {
+
+		/*
+		 * If the Root Port supports Read Completion Boundary of
+		 * 128, set RCB to 128. Otherwise, clear it.
+		 */
+		hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
+		hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
+		if (pcie_root_rcb_set(dev))
+			hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;
+
 		pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
 			~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
+	}
 
 	/* Find Advanced Error Reporting Enhanced Capability */
 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 66c4d8f42233..9526e341988b 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -121,6 +121,14 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
 		return -EINVAL;
 	}
 
+	/*
+	 * If we have a shadow copy in RAM, the PCI device doesn't respond
+	 * to the shadow range, so we don't need to claim it, and upstream
+	 * bridges don't need to route the range to the device.
+	 */
+	if (res->flags & IORESOURCE_ROM_SHADOW)
+		return 0;
+
 	root = pci_find_parent_resource(dev, res);
 	if (!root) {
 		dev_info(&dev->dev, "can't claim BAR %d %pR: no compatible bridge window\n",
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index d6ff5e82377d..8fc2e9532575 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -1038,10 +1038,8 @@ static int pcifront_detach_devices(struct pcifront_device *pdev)
 			err = -ENOMEM;
 			goto out;
 		}
-		err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str, "%d",
-				   &state);
-		if (err != 1)
-			state = XenbusStateUnknown;
+		state = xenbus_read_unsigned(pdev->xdev->otherend, str,
+					     XenbusStateUnknown);
 
 		if (state != XenbusStateClosing)
 			continue;
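
Note: the drivers/pci/msi.c hunks above replace the bool/flags affinity plumbing with a const struct irq_affinity pointer and export pci_alloc_irq_vectors_affinity(). The following is a minimal caller-side sketch of that interface; the device, the vector counts, and the single pre-vector reserved for a non-queue interrupt are illustrative assumptions, not taken from the diff.

#include <linux/interrupt.h>
#include <linux/pci.h>

static int example_setup_irqs(struct pci_dev *pdev)
{
	/* Hypothetical layout: keep vector 0 out of the automatic spreading. */
	struct irq_affinity affd = { .pre_vectors = 1 };
	int nvecs, i;

	/* Prefer MSI-X, fall back to MSI; spread the remaining vectors across CPUs. */
	nvecs = pci_alloc_irq_vectors_affinity(pdev, 2, 32,
					       PCI_IRQ_MSIX | PCI_IRQ_MSI |
					       PCI_IRQ_AFFINITY, &affd);
	if (nvecs < 0)
		return nvecs;

	for (i = 0; i < nvecs; i++) {
		/* pci_irq_vector() maps a vector index to its Linux IRQ number. */
		pr_info("vector %d -> irq %d\n", i, pci_irq_vector(pdev, i));
	}

	pci_free_irq_vectors(pdev);
	return 0;
}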
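
Note: the drivers/pci/host/pci-xgene-msi.c hunks convert the driver from the old CPU notifier to the hotplug state machine. Below is a minimal sketch of that registration pattern; the "example:online" state name and both callbacks are hypothetical, and only the cpuhp_setup_state()/cpuhp_remove_state() shape is taken from the diff.

#include <linux/cpuhotplug.h>
#include <linux/printk.h>

static enum cpuhp_state example_online;

static int example_cpu_online(unsigned int cpu)
{
	pr_info("cpu %u online: set up per-CPU resources here\n", cpu);
	return 0;
}

static int example_cpu_dead(unsigned int cpu)
{
	pr_info("cpu %u offline: tear down per-CPU resources here\n", cpu);
	return 0;
}

static int example_register(void)
{
	int ret;

	/* Dynamic AP state: the online callback also runs on CPUs that are already up. */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				example_cpu_online, example_cpu_dead);
	if (ret < 0)
		return ret;

	example_online = ret;
	return 0;
}

static void example_unregister(void)
{
	cpuhp_remove_state(example_online);
}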