From 27e8b237944af967e0a808580278d432cb028455 Mon Sep 17 00:00:00 2001
From: Konrad Rzeszutek Wilk
Date: Thu, 2 Dec 2010 10:24:13 -0500
Subject: ttm: Expand (*populate) to support an array of DMA addresses.

We pass in the array of ttm pages to be populated in the GART/MM of the
card (or AGP).

The patch titled "ttm: Utilize the DMA API for pages that have
TTM_PAGE_FLAG_DMA32 set." uses the DMA API to give those pages proper
DMA addresses (for the situations where page_to_phys or virt_to_phys do
not give us the DMA (bus) address).

Since we are using the DMA API on those pages, we should pass the DMA
address in to this function so it can save it in its proper fields
(later patches use it).

[v2: Added reviewed-by tag]

Reviewed-by: Thomas Hellstrom
Signed-off-by: Konrad Rzeszutek Wilk
Tested-by: Ian Campbell
---
 drivers/gpu/drm/nouveau/nouveau_sgdma.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'drivers/gpu/drm/nouveau/nouveau_sgdma.c')

diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 288bacac7e5a..edc140ab4df1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -20,7 +20,8 @@ struct nouveau_sgdma_be {
 
 static int
 nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
-		       struct page **pages, struct page *dummy_read_page)
+		       struct page **pages, struct page *dummy_read_page,
+		       dma_addr_t *dma_addrs)
 {
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
--
cgit v1.2.3
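As a rough illustration of the expanded contract, a caller-side sketch
follows. The wrapper name and call site are hypothetical (the actual TTM
call site is not part of this patch); the point is only that the backend
now receives one dma_addr_t per page, in the same order as the struct
page array, with DMA_ERROR_CODE marking entries the caller did not map
itself.

/*
 * Hypothetical caller-side sketch, not from the patch: hand the backend
 * one DMA address per page, in page order.  Entries the caller did not
 * map through the DMA API carry DMA_ERROR_CODE, telling the backend to
 * fall back to mapping those pages itself (see the next patch).
 */
static int example_call_populate(struct ttm_backend *be,
				 unsigned long num_pages,
				 struct page **pages,
				 struct page *dummy_read_page,
				 dma_addr_t *dma_addrs)
{
	return be->func->populate(be, num_pages, pages,
				  dummy_read_page, dma_addrs);
}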
From e0138c26cdeee8c033256ccd9e07d66db3c998be Mon Sep 17 00:00:00 2001
From: Konrad Rzeszutek Wilk
Date: Thu, 2 Dec 2010 11:36:24 -0500
Subject: nouveau/ttm/PCIe: Use dma_addr if TTM has set it.

If the TTM layer has used the DMA API to set up pages that are
TTM_PAGE_FLAG_DMA32 (see the patch titled "ttm: Utilize the DMA API for
pages that have TTM_PAGE_FLAG_DMA32 set"), let us use those addresses
when programming the GART in the PCIe-type cards.

This patch skips doing pci_map_page (and pci_unmap_page) if a DMA
address was passed in for that page. If the dma_address is zero (or
DMA_ERROR_CODE), then we continue with our old behaviour.

[v2: Added reviewed-by tag]

Reviewed-by: Thomas Hellstrom
Signed-off-by: Konrad Rzeszutek Wilk
Tested-by: Ian Campbell
---
 drivers/gpu/drm/nouveau/nouveau_sgdma.c | 28 +++++++++++++++++++++-------
 1 file changed, 21 insertions(+), 7 deletions(-)

(limited to 'drivers/gpu/drm/nouveau/nouveau_sgdma.c')

diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index edc140ab4df1..bbdd982cbb3e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -12,6 +12,7 @@ struct nouveau_sgdma_be {
	struct drm_device *dev;
 
	dma_addr_t *pages;
+	bool *ttm_alloced;
	unsigned nr_pages;
 
	unsigned pte_start;
@@ -35,15 +36,25 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
	if (!nvbe->pages)
		return -ENOMEM;
 
+	nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
+	if (!nvbe->ttm_alloced)
+		return -ENOMEM;
+
	nvbe->nr_pages = 0;
	while (num_pages--) {
-		nvbe->pages[nvbe->nr_pages] =
-			pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
+		if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) {
+			nvbe->pages[nvbe->nr_pages] =
+				dma_addrs[nvbe->nr_pages];
+			nvbe->ttm_alloced[nvbe->nr_pages] = true;
+		} else {
+			nvbe->pages[nvbe->nr_pages] =
+				pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
				PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-		if (pci_dma_mapping_error(dev->pdev,
-					  nvbe->pages[nvbe->nr_pages])) {
-			be->func->clear(be);
-			return -EFAULT;
+			if (pci_dma_mapping_error(dev->pdev,
+					  nvbe->pages[nvbe->nr_pages])) {
+				be->func->clear(be);
+				return -EFAULT;
+			}
		}
 
		nvbe->nr_pages++;
@@ -66,11 +77,14 @@ nouveau_sgdma_clear(struct ttm_backend *be)
			be->func->unbind(be);
 
		while (nvbe->nr_pages--) {
-			pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
+			if (!nvbe->ttm_alloced[nvbe->nr_pages])
+				pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
		kfree(nvbe->pages);
+		kfree(nvbe->ttm_alloced);
		nvbe->pages = NULL;
+		nvbe->ttm_alloced = NULL;
		nvbe->nr_pages = 0;
	}
 }
--
cgit v1.2.3
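Taken together, the two patches leave the populate/clear pair with
roughly the shape sketched below. This is a condensed reading of the
hunks above for clarity (allocation checks, error paths and unrelated
fields are trimmed), not the verbatim resulting file.

/* Condensed sketch of the resulting logic; details trimmed. */
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
		       struct page **pages, struct page *dummy_read_page,
		       dma_addr_t *dma_addrs)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;

	/* ... allocate nvbe->pages[] and nvbe->ttm_alloced[] ... */

	nvbe->nr_pages = 0;
	while (num_pages--) {
		if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) {
			/* TTM already produced a bus address; reuse it. */
			nvbe->pages[nvbe->nr_pages] = dma_addrs[nvbe->nr_pages];
			nvbe->ttm_alloced[nvbe->nr_pages] = true;
		} else {
			/* Old behaviour: map the page ourselves. */
			nvbe->pages[nvbe->nr_pages] =
				pci_map_page(dev->pdev, pages[nvbe->nr_pages],
					     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
			if (pci_dma_mapping_error(dev->pdev,
						  nvbe->pages[nvbe->nr_pages])) {
				be->func->clear(be);
				return -EFAULT;
			}
		}
		nvbe->nr_pages++;
	}
	return 0;
}

static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;

	while (nvbe->nr_pages--) {
		/* Only unmap what we mapped; TTM owns its own mappings. */
		if (!nvbe->ttm_alloced[nvbe->nr_pages])
			pci_unmap_page(nvbe->dev->pdev,
				       nvbe->pages[nvbe->nr_pages],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	}
	kfree(nvbe->pages);
	kfree(nvbe->ttm_alloced);
	nvbe->pages = NULL;
	nvbe->ttm_alloced = NULL;
	nvbe->nr_pages = 0;
}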