author     Joerg Roedel <joerg.roedel@amd.com>	2012-06-01 17:20:23 +0400
committer  Joerg Roedel <joerg.roedel@amd.com>	2012-06-04 14:47:44 +0400
commit     eee53537c476c947bf7faa1c916d2f5a0ae8ec93 (patch)
tree       0ca87099455da1cd0bf48affee27516a4bd1fa38 /drivers/iommu
parent     c1bf94ec1e12d76838ad485158aecf208ebd8fb9 (diff)
download   linux-eee53537c476c947bf7faa1c916d2f5a0ae8ec93.tar.xz
iommu/amd: Fix deadlock in ppr-handling error path
In the error path of the ppr_notifier it can happen that the
iommu->lock is taken recursively. This patch fixes the
problem by releasing the iommu->lock before any notifier is
invoked. This also requires moving the erratum workaround
for the ppr-log (the interrupt may arrive before the data is
written to the log) up one function.
Cc: stable@vger.kernel.org # v3.3, v3.4
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
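
The deadlock the patch removes can be illustrated outside the kernel. The sketch below is a minimal userspace model, not kernel code: a pthread mutex stands in for iommu->lock, and ppr_error_path() for a notifier whose error path needs that same lock; all names are illustrative. It only shows why the notifier must be invoked after the lock is dropped.

```c
/* Userspace model of the recursive-lock deadlock and its fix. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t iommu_lock = PTHREAD_MUTEX_INITIALIZER;

/* Error path of the notifier: needs iommu_lock itself. */
static void ppr_error_path(void)
{
	pthread_mutex_lock(&iommu_lock);
	fprintf(stderr, "error path ran with the lock free\n");
	pthread_mutex_unlock(&iommu_lock);
}

/* Buggy shape: caller still holds the lock when the notifier runs. */
static void poll_log_buggy(void)
{
	pthread_mutex_lock(&iommu_lock);
	ppr_error_path();		/* relocks a non-recursive mutex -> deadlock */
	pthread_mutex_unlock(&iommu_lock);
}

/* Fixed shape: consume the entry under the lock, drop it, then notify. */
static void poll_log_fixed(void)
{
	pthread_mutex_lock(&iommu_lock);
	/* ... read the log entry while protected ... */
	pthread_mutex_unlock(&iommu_lock);
	ppr_error_path();		/* safe: lock is free for the error path */
}

int main(void)
{
	poll_log_fixed();
	/* poll_log_buggy();  would hang here */
	return 0;
}
```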
Diffstat (limited to 'drivers/iommu')
-rw-r--r--   drivers/iommu/amd_iommu.c | 71
1 file changed, 44 insertions(+), 27 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index d90a421e9cac..a2e418cba0ff 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -547,26 +547,12 @@ static void iommu_poll_events(struct amd_iommu *iommu)
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head)
+static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
 {
 	struct amd_iommu_fault fault;
-	volatile u64 *raw;
-	int i;
 
 	INC_STATS_COUNTER(pri_requests);
 
-	raw = (u64 *)(iommu->ppr_log + head);
-
-	/*
-	 * Hardware bug: Interrupt may arrive before the entry is written to
-	 * memory. If this happens we need to wait for the entry to arrive.
-	 */
-	for (i = 0; i < LOOP_TIMEOUT; ++i) {
-		if (PPR_REQ_TYPE(raw[0]) != 0)
-			break;
-		udelay(1);
-	}
-
 	if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
 		pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
 		return;
@@ -578,12 +564,6 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head)
 	fault.tag       = PPR_TAG(raw[0]);
 	fault.flags     = PPR_FLAGS(raw[0]);
 
-	/*
-	 * To detect the hardware bug we need to clear the entry
-	 * to back to zero.
-	 */
-	raw[0] = raw[1] = 0;
-
 	atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
 }
 
@@ -595,25 +575,62 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
 	if (iommu->ppr_log == NULL)
 		return;
 
+	/* enable ppr interrupts again */
+	writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
+
 	spin_lock_irqsave(&iommu->lock, flags);
 
 	head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 	tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 
 	while (head != tail) {
+		volatile u64 *raw;
+		u64 entry[2];
+		int i;
 
-		/* Handle PPR entry */
-		iommu_handle_ppr_entry(iommu, head);
+		raw = (u64 *)(iommu->ppr_log + head);
+
+		/*
+		 * Hardware bug: Interrupt may arrive before the entry is
+		 * written to memory. If this happens we need to wait for the
+		 * entry to arrive.
+		 */
+		for (i = 0; i < LOOP_TIMEOUT; ++i) {
+			if (PPR_REQ_TYPE(raw[0]) != 0)
+				break;
+			udelay(1);
+		}
+
+		/* Avoid memcpy function-call overhead */
+		entry[0] = raw[0];
+		entry[1] = raw[1];
 
-		/* Update and refresh ring-buffer state*/
+		/*
+		 * To detect the hardware bug we need to clear the entry
+		 * back to zero.
+		 */
+		raw[0] = raw[1] = 0UL;
+
+		/* Update head pointer of hardware ring-buffer */
 		head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
 		writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+
+		/*
+		 * Release iommu->lock because ppr-handling might need to
+		 * re-aquire it
+		 */
+		spin_unlock_irqrestore(&iommu->lock, flags);
+
+		/* Handle PPR entry */
+		iommu_handle_ppr_entry(iommu, entry);
+
+		spin_lock_irqsave(&iommu->lock, flags);
+
+		/* Refresh ring-buffer information */
+		head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 		tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 	}
 
-	/* enable ppr interrupts again */
-	writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
-
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
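
The reworked loop follows a common pattern: snapshot the entry, clear the slot, advance the head pointer, drop the lock around the callback, then re-take the lock and re-read head and tail because they may have moved while unlocked. Below is a minimal userspace sketch of that pattern only; the names (log_buf, handle_entry, poll_log) are illustrative and a pthread mutex stands in for iommu->lock.

```c
/* Userspace model of the drop-lock-around-callback polling loop. */
#include <pthread.h>
#include <stdio.h>

#define LOG_ENTRIES 16

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long log_buf[LOG_ENTRIES];	/* stands in for the PPR log */
static unsigned int log_head, log_tail;		/* consumer / producer indices */

/* Handler that may need the lock itself, like the ppr_notifier chain. */
static void handle_entry(unsigned long entry)
{
	pthread_mutex_lock(&lock);
	printf("handled entry %lu\n", entry);
	pthread_mutex_unlock(&lock);
}

static void poll_log(void)
{
	pthread_mutex_lock(&lock);

	while (log_head != log_tail) {
		/* Snapshot the entry so the slot can be reused once unlocked */
		unsigned long entry = log_buf[log_head];

		log_buf[log_head] = 0;
		log_head = (log_head + 1) % LOG_ENTRIES;

		/* Drop the lock so the handler may take it again */
		pthread_mutex_unlock(&lock);
		handle_entry(entry);
		pthread_mutex_lock(&lock);
		/* log_head/log_tail may have moved while unlocked; the loop
		 * condition re-checks them on the next iteration. */
	}

	pthread_mutex_unlock(&lock);
}

int main(void)
{
	for (log_tail = 0; log_tail < 3; ++log_tail)	/* queue three fake entries */
		log_buf[log_tail] = 100 + log_tail;

	poll_log();
	return 0;
}
```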