-rw-r--r--  arch/x86/kernel/pci-dma.c |   8
-rw-r--r--  arch/x86/pci/i386.c       |   4
-rw-r--r--  drivers/pci/intel-iommu.c |   6
-rw-r--r--  drivers/pci/pci-acpi.c    | 109
-rw-r--r--  drivers/pci/quirks.c      |   1
5 files changed, 89 insertions(+), 39 deletions(-)
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 0c37f16b6950..c5ef1af8e79d 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -385,11 +385,13 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory))
 		return memory;
 
-	if (!dev)
+	if (!dev) {
 		dev = &fallback_dev;
+		gfp |= GFP_DMA;
+	}
 	dma_mask = dev->coherent_dma_mask;
 	if (dma_mask == 0)
-		dma_mask = DMA_32BIT_MASK;
+		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;
 
 	/* Device not DMA able */
 	if (dev->dma_mask == NULL)
@@ -403,7 +405,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	   larger than 16MB and in this case we have a chance of
 	   finding fitting memory in the next higher zone first. If
 	   not retry with true GFP_DMA. -AK */
-	if (dma_mask <= DMA_32BIT_MASK)
+	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
 		gfp |= GFP_DMA32;
 #endif
 
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 8af0f0bae2af..10fb308fded8 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -301,15 +301,13 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 	prot = pgprot_val(vma->vm_page_prot);
 	if (pat_wc_enabled && write_combine)
 		prot |= _PAGE_CACHE_WC;
-	else if (pat_wc_enabled)
+	else if (pat_wc_enabled || boot_cpu_data.x86 > 3)
 		/*
 		 * ioremap() and ioremap_nocache() defaults to UC MINUS for now.
 		 * To avoid attribute conflicts, request UC MINUS here
 		 * aswell.
 		 */
 		prot |= _PAGE_CACHE_UC_MINUS;
-	else if (boot_cpu_data.x86 > 3)
-		prot |= _PAGE_CACHE_UC;
 
 	vma->vm_page_prot = __pgprot(prot);
 
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 1fd8bb765702..66c0fd21894b 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -49,7 +49,7 @@
 
 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
 
-#define DMAR_OPERATION_TIMEOUT (HZ*60) /* 1m */
+#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) /* 10sec */
 
 #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
 
@@ -490,12 +490,12 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
 
 #define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
 {\
-	unsigned long start_time = jiffies;\
+	cycles_t start_time = get_cycles();\
 	while (1) {\
 		sts = op (iommu->reg + offset);\
 		if (cond)\
 			break;\
-		if (time_after(jiffies, start_time + DMAR_OPERATION_TIMEOUT))\
+		if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
 			panic("DMAR hardware is malfunctioning\n");\
 		cpu_relax();\
 	}\
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 72f7476930c8..9d6fc8e6285d 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -19,8 +19,31 @@
 #include <linux/pci-acpi.h>
 #include "pci.h"
 
-static u32 ctrlset_buf[3] = {0, 0, 0};
-static u32 global_ctrlsets = 0;
+struct acpi_osc_data {
+	acpi_handle handle;
+	u32 ctrlset_buf[3];
+	u32 global_ctrlsets;
+	struct list_head sibiling;
+};
+static LIST_HEAD(acpi_osc_data_list);
+
+static struct acpi_osc_data *acpi_get_osc_data(acpi_handle handle)
+{
+	struct acpi_osc_data *data;
+
+	list_for_each_entry(data, &acpi_osc_data_list, sibiling) {
+		if (data->handle == handle)
+			return data;
+	}
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return NULL;
+	INIT_LIST_HEAD(&data->sibiling);
+	data->handle = handle;
+	list_add_tail(&data->sibiling, &acpi_osc_data_list);
+	return data;
+}
+
 static u8 OSC_UUID[16] = {0x5B, 0x4D, 0xDB, 0x33, 0xF7, 0x1F, 0x1C, 0x40, 0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66};
 
 static acpi_status
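The hunk above replaces the single shared ctrlset_buf/global_ctrlsets pair with one acpi_osc_data record per ACPI handle, found or created by acpi_get_osc_data(). Below is a minimal userspace sketch of that lookup-or-allocate pattern; it is an illustration only, not kernel code, and the osc_demo_* names and the plain next pointer (standing in for struct list_head) are invented for the example.

    /* Illustrative sketch: find-or-create a per-handle record, as
     * acpi_get_osc_data() does for each root bridge handle. */
    #include <stdio.h>
    #include <stdlib.h>

    typedef void *demo_handle;            /* stands in for acpi_handle   */

    struct osc_demo_data {
        demo_handle handle;               /* key: one record per handle  */
        unsigned int ctrlset_buf[3];      /* per-root-bridge _OSC state  */
        unsigned int global_ctrlsets;
        struct osc_demo_data *next;       /* stands in for list_head     */
    };

    static struct osc_demo_data *osc_demo_list;

    static struct osc_demo_data *osc_demo_get_data(demo_handle handle)
    {
        struct osc_demo_data *data;

        for (data = osc_demo_list; data; data = data->next)
            if (data->handle == handle)
                return data;              /* handle already known: reuse */

        data = calloc(1, sizeof(*data));  /* first sight: allocate       */
        if (!data)
            return NULL;
        data->handle = handle;
        data->next = osc_demo_list;       /* link into the list          */
        osc_demo_list = data;
        return data;
    }

    int main(void)
    {
        int a, b;                         /* fake "handles"              */
        struct osc_demo_data *d1 = osc_demo_get_data(&a);
        struct osc_demo_data *d2 = osc_demo_get_data(&b);
        struct osc_demo_data *d3 = osc_demo_get_data(&a);

        printf("distinct handles get distinct data: %d\n", d1 != d2);
        printf("same handle gets the same data:     %d\n", d1 == d3);
        return 0;
    }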
@@ -37,8 +60,27 @@ acpi_query_osc (
 	union acpi_object *out_obj;
 	u32 osc_dw0;
 	acpi_status *ret_status = (acpi_status *)retval;
+	struct acpi_osc_data *osc_data;
+	u32 flags = (unsigned long)context, temp;
+	acpi_handle tmp;
+
+	status = acpi_get_handle(handle, "_OSC", &tmp);
+	if (ACPI_FAILURE(status))
+		return status;
+
+	osc_data = acpi_get_osc_data(handle);
+	if (!osc_data) {
+		printk(KERN_ERR "acpi osc data array is full\n");
+		return AE_ERROR;
+	}
+
+	osc_data->ctrlset_buf[OSC_SUPPORT_TYPE] |= (flags & OSC_SUPPORT_MASKS);
+
+	/* do _OSC query for all possible controls */
+	temp = osc_data->ctrlset_buf[OSC_CONTROL_TYPE];
+	osc_data->ctrlset_buf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
+	osc_data->ctrlset_buf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS;
 
-	/* Setting up input parameters */
 	input.count = 4;
 	input.pointer = in_params;
@@ -51,13 +93,11 @@ acpi_query_osc (
 	in_params[2].integer.value	= 3;
 	in_params[3].type		= ACPI_TYPE_BUFFER;
 	in_params[3].buffer.length	= 12;
-	in_params[3].buffer.pointer = (u8 *)context;
+	in_params[3].buffer.pointer = (u8 *)osc_data->ctrlset_buf;
 
 	status = acpi_evaluate_object(handle, "_OSC", &input, &output);
-	if (ACPI_FAILURE (status)) {
-		*ret_status = status;
-		return status;
-	}
+	if (ACPI_FAILURE(status))
+		goto out_nofree;
 	out_obj = output.pointer;
 	if (out_obj->type != ACPI_TYPE_BUFFER) {
@@ -76,7 +116,8 @@ acpi_query_osc (
 			printk(KERN_DEBUG "_OSC invalid revision\n");
 		if (osc_dw0 & OSC_CAPABILITIES_MASK_ERROR) {
 			/* Update Global Control Set */
-			global_ctrlsets = *((u32 *)(out_obj->buffer.pointer+8));
+			osc_data->global_ctrlsets =
+				*((u32 *)(out_obj->buffer.pointer + 8));
 			status = AE_OK;
 			goto query_osc_out;
 		}
@@ -85,12 +126,21 @@ acpi_query_osc (
 	}
 
 	/* Update Global Control Set */
-	global_ctrlsets = *((u32 *)(out_obj->buffer.pointer + 8));
+	osc_data->global_ctrlsets = *((u32 *)(out_obj->buffer.pointer + 8));
 	status = AE_OK;
 
 query_osc_out:
 	kfree(output.pointer);
+out_nofree:
 	*ret_status = status;
+
+	osc_data->ctrlset_buf[OSC_QUERY_TYPE] = !OSC_QUERY_ENABLE;
+	osc_data->ctrlset_buf[OSC_CONTROL_TYPE] = temp;
+	if (ACPI_FAILURE(status)) {
+		/* no osc support at all */
+		osc_data->ctrlset_buf[OSC_SUPPORT_TYPE] = 0;
+	}
+
 	return status;
 }
 
@@ -165,28 +215,15 @@ run_osc_out:
  **/
 acpi_status __pci_osc_support_set(u32 flags, const char *hid)
 {
-	u32 temp;
-	acpi_status retval;
+	acpi_status retval = AE_NOT_FOUND;
 
 	if (!(flags & OSC_SUPPORT_MASKS)) {
 		return AE_TYPE;
 	}
-	ctrlset_buf[OSC_SUPPORT_TYPE] |= (flags & OSC_SUPPORT_MASKS);
-
-	/* do _OSC query for all possible controls */
-	temp = ctrlset_buf[OSC_CONTROL_TYPE];
-	ctrlset_buf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
-	ctrlset_buf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS;
 	acpi_get_devices(hid,
 			acpi_query_osc,
-			ctrlset_buf,
+			(void *)(unsigned long)flags,
 			(void **) &retval );
-	ctrlset_buf[OSC_QUERY_TYPE] = !OSC_QUERY_ENABLE;
-	ctrlset_buf[OSC_CONTROL_TYPE] = temp;
-	if (ACPI_FAILURE(retval)) {
-		/* no osc support at all */
-		ctrlset_buf[OSC_SUPPORT_TYPE] = 0;
-	}
 	return AE_OK;
 }
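With the per-handle data in place, __pci_osc_support_set() above no longer hands acpi_get_devices() a pointer to shared state; it passes the support flags themselves through the void *context argument, and acpi_query_osc() recovers them with a cast back through unsigned long. A small hedged sketch of that round trip follows; the demo_query() callback name is hypothetical and merely stands in for acpi_query_osc().

    /* Sketch of carrying an integer through a void *context argument. */
    #include <stdio.h>

    typedef unsigned int u32;

    static void demo_query(void *context)
    {
        /* cast back through unsigned long, as acpi_query_osc() does */
        u32 flags = (u32)(unsigned long)context;

        printf("callback sees flags = 0x%x\n", flags);
    }

    int main(void)
    {
        u32 flags = 0x1f;   /* some OSC_SUPPORT_MASKS-style bits */

        /* widen to unsigned long first so the pointer cast is well defined */
        demo_query((void *)(unsigned long)flags);
        return 0;
    }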
@@ -201,19 +238,31 @@
 acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
 {
 	acpi_status status;
 	u32 ctrlset;
+	acpi_handle tmp;
+	struct acpi_osc_data *osc_data;
+
+	status = acpi_get_handle(handle, "_OSC", &tmp);
+	if (ACPI_FAILURE(status))
+		return status;
+
+	osc_data = acpi_get_osc_data(handle);
+	if (!osc_data) {
+		printk(KERN_ERR "acpi osc data array is full\n");
+		return AE_ERROR;
+	}
 
 	ctrlset = (flags & OSC_CONTROL_MASKS);
 	if (!ctrlset) {
 		return AE_TYPE;
 	}
-	if (ctrlset_buf[OSC_SUPPORT_TYPE] &&
-		((global_ctrlsets & ctrlset) != ctrlset)) {
+	if (osc_data->ctrlset_buf[OSC_SUPPORT_TYPE] &&
+	    ((osc_data->global_ctrlsets & ctrlset) != ctrlset)) {
 		return AE_SUPPORT;
 	}
-	ctrlset_buf[OSC_CONTROL_TYPE] |= ctrlset;
-	status = acpi_run_osc(handle, ctrlset_buf);
+	osc_data->ctrlset_buf[OSC_CONTROL_TYPE] |= ctrlset;
+	status = acpi_run_osc(handle, osc_data->ctrlset_buf);
 	if (ACPI_FAILURE (status)) {
-		ctrlset_buf[OSC_CONTROL_TYPE] &= ~ctrlset;
+		osc_data->ctrlset_buf[OSC_CONTROL_TYPE] &= ~ctrlset;
 	}
 	return status;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index afd914ebe215..f2d9c770f51a 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1826,6 +1826,7 @@ static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
 	}
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk);
 
 static void __devinit quirk_msi_intx_disable_bug(struct pci_dev *dev)
 {
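For the intel-iommu.c change earlier in this diff, the wait budget moves from jiffies to TSC cycles: DMAR_OPERATION_TIMEOUT becomes tsc_khz * 10 * 1000 cycles (roughly ten seconds), and IOMMU_WAIT_OP compares the elapsed cycle count against it. The sketch below mimics that loop in userspace on x86 with __rdtsc(); the demo_tsc_khz value and the always-true cond are stand-ins for the real TSC calibration and the register poll.

    /* Sketch of a cycle-counter timeout loop in the spirit of IOMMU_WAIT_OP. */
    #include <stdio.h>
    #include <stdint.h>
    #include <x86intrin.h>                  /* __rdtsc(), x86 GCC/Clang */

    int main(void)
    {
        const uint64_t demo_tsc_khz = 2000000;              /* pretend 2 GHz TSC */
        const uint64_t timeout = demo_tsc_khz * 10 * 1000;  /* ~10 s in cycles   */
        uint64_t start = __rdtsc();
        uint64_t spins = 0;

        for (;;) {
            int cond = 1;   /* the real macro re-reads an IOMMU register here */

            spins++;
            if (cond)
                break;
            /* unsigned subtraction keeps working across counter wrap */
            if (timeout < (__rdtsc() - start))
                return 1;   /* the kernel panics at this point instead */
        }
        printf("condition met after %llu spin(s)\n", (unsigned long long)spins);
        return 0;
    }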