Diffstat (limited to 'drivers/thunderbolt/nhi.c')
-rw-r--r--  drivers/thunderbolt/nhi.c | 306
1 file changed, 273 insertions(+), 33 deletions(-)
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index a8c20413dbda..05af126a2435 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -13,7 +13,7 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
-#include <linux/dmi.h>
+#include <linux/delay.h>
#include "nhi.h"
#include "nhi_regs.h"
@@ -21,6 +21,14 @@
#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
+/*
+ * Minimum number of vectors when we use MSI-X: two for the control
+ * channel Rx/Tx and the remaining four for cross-domain DMA paths.
+ */
+#define MSIX_MIN_VECS 6
+#define MSIX_MAX_VECS 16
+
+#define NHI_MAILBOX_TIMEOUT 500 /* ms */
static int ring_interrupt_index(struct tb_ring *ring)
{
@@ -42,6 +50,37 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
int bit = ring_interrupt_index(ring) & 31;
int mask = 1 << bit;
u32 old, new;
+
+ if (ring->irq > 0) {
+ u32 step, shift, ivr, misc;
+ void __iomem *ivr_base;
+ int index;
+
+ if (ring->is_tx)
+ index = ring->hop;
+ else
+ index = ring->hop + ring->nhi->hop_count;
+
+ /*
+ * Ask the hardware to clear interrupt status bits automatically
+ * since we already know which interrupt was triggered.
+ */
+ misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
+ if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
+ misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
+ iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
+ }
+
+ ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
+ step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
+ shift = index % REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
+ ivr = ioread32(ivr_base + step);
+ ivr &= ~(REG_INT_VEC_ALLOC_MASK << shift);
+ if (active)
+ ivr |= ring->vector << shift;
+ iowrite32(ivr, ivr_base + step);
+ }
+
old = ioread32(ring->nhi->iobase + reg);
if (active)
new = old | mask;
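A minimal sketch of the IVR indexing math above, assuming the layout in nhi_regs.h where each vector entry is REG_INT_VEC_ALLOC_BITS == 4 bits wide, so one 32-bit register holds eight entries (REG_INT_VEC_ALLOC_REGS == 32 / 4):

static void ivr_offsets(unsigned int index, u32 *step, u32 *shift)
{
	/* byte offset of the 32-bit IVR register holding this entry */
	*step = index / 8 * 4;
	/* bit position of the 4-bit vector field inside that register */
	*shift = index % 8 * 4;
}

For example, ring index 10 lands in the second register (step = 4) at bit shift 8. Note that step is a byte offset: the driver's formula multiplies by REG_INT_VEC_ALLOC_BITS, which only works out because a register is 4 bytes and an entry is 4 bits.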
@@ -239,8 +278,50 @@ int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
return ret;
}
+static irqreturn_t ring_msix(int irq, void *data)
+{
+ struct tb_ring *ring = data;
+
+ schedule_work(&ring->work);
+ return IRQ_HANDLED;
+}
+
+static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
+{
+ struct tb_nhi *nhi = ring->nhi;
+ unsigned long irqflags;
+ int ret;
+
+ if (!nhi->pdev->msix_enabled)
+ return 0;
+
+ ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+
+ ring->vector = ret;
+
+ ring->irq = pci_irq_vector(ring->nhi->pdev, ring->vector);
+ if (ring->irq < 0) {
+ /* do not leak the reserved MSI-X vector id on failure */
+ ida_simple_remove(&nhi->msix_ida, ring->vector);
+ return ring->irq;
+ }
+
+ irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
+ return request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
+}
+
+static void ring_release_msix(struct tb_ring *ring)
+{
+ if (ring->irq <= 0)
+ return;
+
+ free_irq(ring->irq, ring);
+ ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
+ ring->vector = 0;
+ ring->irq = 0;
+}
+
static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
- bool transmit)
+ bool transmit, unsigned int flags)
{
struct tb_ring *ring = NULL;
dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
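The hard-IRQ handler ring_msix() above only schedules ring->work; frame completion happens later in process context. As a hedged illustration (the driver's existing ring_work() in this file plays this role, the body here is a stand-in):

static void example_ring_work(struct work_struct *work)
{
	struct tb_ring *ring = container_of(work, typeof(*ring), work);

	/* walk completed descriptors and invoke per-frame callbacks */
}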
@@ -271,9 +352,14 @@ static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
ring->hop = hop;
ring->is_tx = transmit;
ring->size = size;
+ ring->flags = flags;
ring->head = 0;
ring->tail = 0;
ring->running = false;
+
+ if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
+ goto err;
+
ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
size * sizeof(*ring->descriptors),
&ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
@@ -295,14 +381,16 @@ err:
return NULL;
}
-struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size)
+struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
+ unsigned int flags)
{
- return ring_alloc(nhi, hop, size, true);
+ return ring_alloc(nhi, hop, size, true, flags);
}
-struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size)
+struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
+ unsigned int flags)
{
- return ring_alloc(nhi, hop, size, false);
+ return ring_alloc(nhi, hop, size, false, flags);
}
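A hedged caller sketch of the extended allocators (the hop number, ring size and error handling here are illustrative): a user whose rings must stay serviced through system suspend, such as the control channel, passes RING_FLAG_NO_SUSPEND so that the vector is requested with IRQF_NO_SUSPEND:

	struct tb_ring *tx, *rx;

	tx = ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
	rx = ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
	if (!tx || !rx) {
		if (tx)
			ring_free(tx);
		return -ENOMEM;
	}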
/**
@@ -314,6 +402,8 @@ void ring_start(struct tb_ring *ring)
{
mutex_lock(&ring->nhi->lock);
mutex_lock(&ring->lock);
+ if (ring->nhi->going_away)
+ goto err;
if (ring->running) {
dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
goto err;
@@ -360,6 +450,8 @@ void ring_stop(struct tb_ring *ring)
mutex_lock(&ring->lock);
dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n",
RING_TYPE(ring), ring->hop);
+ if (ring->nhi->going_away)
+ goto err;
if (!ring->running) {
dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
RING_TYPE(ring), ring->hop);
@@ -413,6 +505,8 @@ void ring_free(struct tb_ring *ring)
RING_TYPE(ring), ring->hop);
}
+ ring_release_msix(ring);
+
dma_free_coherent(&ring->nhi->pdev->dev,
ring->size * sizeof(*ring->descriptors),
ring->descriptors, ring->descriptors_dma);
@@ -428,15 +522,70 @@ void ring_free(struct tb_ring *ring)
mutex_unlock(&ring->nhi->lock);
/**
- * ring->work can no longer be scheduled (it is scheduled only by
- * nhi_interrupt_work and ring_stop). Wait for it to finish before
- * freeing the ring.
+ * ring->work can no longer be scheduled (it is scheduled only
+ * by nhi_interrupt_work, ring_stop and ring_msix). Wait for it
+ * to finish before freeing the ring.
*/
flush_work(&ring->work);
mutex_destroy(&ring->lock);
kfree(ring);
}
+/**
+ * nhi_mailbox_cmd() - Send a command through NHI mailbox
+ * @nhi: Pointer to the NHI structure
+ * @cmd: Command to send
+ * @data: Data to be sent with the command
+ *
+ * Sends a mailbox command to the firmware running on the NHI. Returns
+ * %0 in case of success and negative errno in case of failure.
+ */
+int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
+{
+ ktime_t timeout;
+ u32 val;
+
+ iowrite32(data, nhi->iobase + REG_INMAIL_DATA);
+
+ val = ioread32(nhi->iobase + REG_INMAIL_CMD);
+ val &= ~(REG_INMAIL_CMD_MASK | REG_INMAIL_ERROR);
+ val |= REG_INMAIL_OP_REQUEST | cmd;
+ iowrite32(val, nhi->iobase + REG_INMAIL_CMD);
+
+ timeout = ktime_add_ms(ktime_get(), NHI_MAILBOX_TIMEOUT);
+ do {
+ val = ioread32(nhi->iobase + REG_INMAIL_CMD);
+ if (!(val & REG_INMAIL_OP_REQUEST))
+ break;
+ usleep_range(10, 20);
+ } while (ktime_before(ktime_get(), timeout));
+
+ if (val & REG_INMAIL_OP_REQUEST)
+ return -ETIMEDOUT;
+ if (val & REG_INMAIL_ERROR)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * nhi_mailbox_mode() - Return current firmware operation mode
+ * @nhi: Pointer to the NHI structure
+ *
+ * The function reads the current firmware operation mode using the NHI
+ * mailbox registers and returns it to the caller.
+ */
+enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi)
+{
+ u32 val;
+
+ val = ioread32(nhi->iobase + REG_OUTMAIL_CMD);
+ val &= REG_OUTMAIL_CMD_OPMODE_MASK;
+ val >>= REG_OUTMAIL_CMD_OPMODE_SHIFT;
+
+ return (enum nhi_fw_mode)val;
+}
+
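A short usage sketch of the two mailbox helpers. The command and mode names are assumptions taken from the enums declared in nhi.h; a connection manager could check which firmware is running and, for instance, tell it that the driver is unloading:

	/* names assumed from nhi.h */
	if (nhi_mailbox_mode(nhi) == NHI_FW_CM_MODE) {
		int ret = nhi_mailbox_cmd(nhi, NHI_MAILBOX_DRV_UNLOADS, 0);

		if (ret)
			dev_warn(&nhi->pdev->dev,
				 "mailbox command failed: %d\n", ret);
	}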
static void nhi_interrupt_work(struct work_struct *work)
{
struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
@@ -498,16 +647,40 @@ static int nhi_suspend_noirq(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct tb *tb = pci_get_drvdata(pdev);
- thunderbolt_suspend(tb);
- return 0;
+
+ return tb_domain_suspend_noirq(tb);
}
static int nhi_resume_noirq(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct tb *tb = pci_get_drvdata(pdev);
- thunderbolt_resume(tb);
- return 0;
+
+ /*
+ * Check that the device is still there. It may be that the user
+ * unplugged the last device, which causes the host controller to
+ * go away on PCs.
+ */
+ if (!pci_device_is_present(pdev))
+ tb->nhi->going_away = true;
+
+ return tb_domain_resume_noirq(tb);
+}
+
+static int nhi_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct tb *tb = pci_get_drvdata(pdev);
+
+ return tb_domain_suspend(tb);
+}
+
+static void nhi_complete(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct tb *tb = pci_get_drvdata(pdev);
+
+ tb_domain_complete(tb);
}
static void nhi_shutdown(struct tb_nhi *nhi)
@@ -528,9 +701,52 @@ static void nhi_shutdown(struct tb_nhi *nhi)
* We have to release the irq before calling flush_work. Otherwise an
* already executing IRQ handler could call schedule_work again.
*/
- devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
- flush_work(&nhi->interrupt_work);
+ if (!nhi->pdev->msix_enabled) {
+ devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
+ flush_work(&nhi->interrupt_work);
+ }
mutex_destroy(&nhi->lock);
+ ida_destroy(&nhi->msix_ida);
+}
+
+static int nhi_init_msi(struct tb_nhi *nhi)
+{
+ struct pci_dev *pdev = nhi->pdev;
+ int res, irq, nvec;
+
+ /* In case someone left them on. */
+ nhi_disable_interrupts(nhi);
+
+ ida_init(&nhi->msix_ida);
+
+ /*
+ * The NHI has 16 MSI-X vectors or a single MSI. We first try to
+ * get all MSI-X vectors and, if we succeed, each ring will have
+ * its own MSI-X vector. If for some reason that does not work out,
+ * we fall back to a single MSI.
+ */
+ nvec = pci_alloc_irq_vectors(pdev, MSIX_MIN_VECS, MSIX_MAX_VECS,
+ PCI_IRQ_MSIX);
+ if (nvec < 0) {
+ nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (nvec < 0)
+ return nvec;
+
+ INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);
+
+ irq = pci_irq_vector(nhi->pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ res = devm_request_irq(&pdev->dev, irq, nhi_msi,
+ IRQF_NO_SUSPEND, "thunderbolt", nhi);
+ if (res) {
+ dev_err(&pdev->dev, "request_irq failed, aborting\n");
+ return res;
+ }
+ }
+
+ return 0;
}
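To make the fallback visible at a glance, a sketch (an illustrative helper, not part of the driver) of how a Linux IRQ number is obtained once nhi_init_msi() has run: with MSI-X each ring owns its vector, while with plain MSI everything shares vector 0 and nhi_interrupt_work() demultiplexes by reading the interrupt status registers:

static int example_ring_irq(struct tb_nhi *nhi, unsigned int vector)
{
	if (nhi->pdev->msix_enabled)
		return pci_irq_vector(nhi->pdev, vector);
	/* single shared MSI */
	return pci_irq_vector(nhi->pdev, 0);
}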
static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -545,12 +761,6 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return res;
}
- res = pci_enable_msi(pdev);
- if (res) {
- dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
- return res;
- }
-
res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
if (res) {
dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
@@ -568,7 +778,6 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (nhi->hop_count != 12 && nhi->hop_count != 32)
dev_warn(&pdev->dev, "unexpected hop count: %d\n",
nhi->hop_count);
- INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);
nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
sizeof(*nhi->tx_rings), GFP_KERNEL);
@@ -577,12 +786,9 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!nhi->tx_rings || !nhi->rx_rings)
return -ENOMEM;
- nhi_disable_interrupts(nhi); /* In case someone left them on. */
- res = devm_request_irq(&pdev->dev, pdev->irq, nhi_msi,
- IRQF_NO_SUSPEND, /* must work during _noirq */
- "thunderbolt", nhi);
+ res = nhi_init_msi(nhi);
if (res) {
- dev_err(&pdev->dev, "request_irq failed, aborting\n");
+ dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
return res;
}
@@ -593,13 +799,24 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* magic value - clock related? */
iowrite32(3906250 / 10000, nhi->iobase + 0x38c00);
- dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
- tb = thunderbolt_alloc_and_start(nhi);
+ tb = icm_probe(nhi);
+ if (!tb)
+ tb = tb_probe(nhi);
if (!tb) {
+ dev_err(&nhi->pdev->dev,
+ "failed to determine connection manager, aborting\n");
+ return -ENODEV;
+ }
+
+ dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
+
+ res = tb_domain_add(tb);
+ if (res) {
/*
* At this point the RX/TX rings might already have been
* activated. Do a proper shutdown.
*/
+ tb_domain_put(tb);
nhi_shutdown(nhi);
return -EIO;
}
@@ -612,7 +829,8 @@ static void nhi_remove(struct pci_dev *pdev)
{
struct tb *tb = pci_get_drvdata(pdev);
struct tb_nhi *nhi = tb->nhi;
- thunderbolt_shutdown_and_free(tb);
+
+ tb_domain_remove(tb);
nhi_shutdown(nhi);
}
@@ -629,6 +847,10 @@ static const struct dev_pm_ops nhi_pm_ops = {
* pci-tunnels stay alive.
*/
.restore_noirq = nhi_resume_noirq,
+ .suspend = nhi_suspend,
+ .freeze = nhi_suspend,
+ .poweroff = nhi_suspend,
+ .complete = nhi_complete,
};
static struct pci_device_id nhi_ids[] = {
@@ -660,6 +882,17 @@ static struct pci_device_id nhi_ids[] = {
.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI,
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
},
+
+ /* Thunderbolt 3 */
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) },
+
{ 0,}
};
@@ -676,14 +909,21 @@ static struct pci_driver nhi_driver = {
static int __init nhi_init(void)
{
- if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
- return -ENOSYS;
- return pci_register_driver(&nhi_driver);
+ int ret;
+
+ ret = tb_domain_init();
+ if (ret)
+ return ret;
+ ret = pci_register_driver(&nhi_driver);
+ if (ret)
+ tb_domain_exit();
+ return ret;
}
static void __exit nhi_unload(void)
{
pci_unregister_driver(&nhi_driver);
+ tb_domain_exit();
}
module_init(nhi_init);