Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_tt.c')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 105
1 file changed, 17 insertions(+), 88 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index b8b6c4a5f983..7bcb89f39ce8 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -34,76 +34,13 @@
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/swap.h>
+#include "drm_cache.h"
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
static int ttm_tt_swapin(struct ttm_tt *ttm);
-#if defined(CONFIG_X86)
-static void ttm_tt_clflush_page(struct page *page)
-{
- uint8_t *page_virtual;
- unsigned int i;
-
- if (unlikely(page == NULL))
- return;
-
- page_virtual = kmap_atomic(page, KM_USER0);
-
- for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
- clflush(page_virtual + i);
-
- kunmap_atomic(page_virtual, KM_USER0);
-}
-
-static void ttm_tt_cache_flush_clflush(struct page *pages[],
- unsigned long num_pages)
-{
- unsigned long i;
-
- mb();
- for (i = 0; i < num_pages; ++i)
- ttm_tt_clflush_page(*pages++);
- mb();
-}
-#elif !defined(__powerpc__)
-static void ttm_tt_ipi_handler(void *null)
-{
- ;
-}
-#endif
-
-void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
-{
-
-#if defined(CONFIG_X86)
- if (cpu_has_clflush) {
- ttm_tt_cache_flush_clflush(pages, num_pages);
- return;
- }
-#elif defined(__powerpc__)
- unsigned long i;
-
- for (i = 0; i < num_pages; ++i) {
- struct page *page = pages[i];
- void *page_virtual;
-
- if (unlikely(page == NULL))
- continue;
-
- page_virtual = kmap_atomic(page, KM_USER0);
- flush_dcache_range((unsigned long) page_virtual,
- (unsigned long) page_virtual + PAGE_SIZE);
- kunmap_atomic(page_virtual, KM_USER0);
- }
-#else
- if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0)
- printk(KERN_ERR TTM_PFX
- "Timed out waiting for drm cache flush.\n");
-#endif
-}
-
/**
* Allocates storage for pointers to the pages that back the ttm.
*
@@ -179,7 +116,7 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
set_page_dirty_lock(page);
ttm->pages[i] = NULL;
- ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false);
+ ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
put_page(page);
}
ttm->state = tt_unpopulated;
@@ -190,8 +127,7 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
struct page *p;
- struct ttm_bo_device *bdev = ttm->bdev;
- struct ttm_mem_global *mem_glob = bdev->mem_glob;
+ struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
int ret;
while (NULL == (p = ttm->pages[index])) {
@@ -200,21 +136,14 @@ static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
if (!p)
return NULL;
- if (PageHighMem(p)) {
- ret =
- ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
- false, false, true);
- if (unlikely(ret != 0))
- goto out_err;
+ ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
+ if (unlikely(ret != 0))
+ goto out_err;
+
+ if (PageHighMem(p))
ttm->pages[--ttm->first_himem_page] = p;
- } else {
- ret =
- ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
- false, false, false);
- if (unlikely(ret != 0))
- goto out_err;
+ else
ttm->pages[++ttm->last_lomem_page] = p;
- }
}
return p;
out_err:
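
The interleaved hunk above is easier to follow as a consolidated listing. Reassembled from its added and context lines, the allocation loop in __ttm_tt_get_page() now reads roughly as below; the line that actually obtains the page and the body of the out_err label sit outside the hunk and are only assumed unchanged here.

    while (NULL == (p = ttm->pages[index])) {
        /* (the page itself is allocated here; that line is outside
         *  the hunk and not touched by this patch) */
        if (!p)
            return NULL;

        /* One accounting call now covers both zones; the helper can
         * look at the page itself instead of taking a highmem flag. */
        ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
        if (unlikely(ret != 0))
            goto out_err;

        if (PageHighMem(p))
            ttm->pages[--ttm->first_himem_page] = p;
        else
            ttm->pages[++ttm->last_lomem_page] = p;
    }
    return p;

Note how the shared pages[] array is filled from both ends: highmem pages go in from the top (first_himem_page counts down from num_pages), lowmem pages from the bottom (last_lomem_page counts up from -1), and the loop keeps allocating until pages[index] is populated.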
@@ -310,7 +239,7 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
}
if (ttm->caching_state == tt_cached)
- ttm_tt_cache_flush(ttm->pages, ttm->num_pages);
+ drm_clflush_pages(ttm->pages, ttm->num_pages);
for (i = 0; i < ttm->num_pages; ++i) {
cur_page = ttm->pages[i];
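
With the new "drm_cache.h" include, the per-architecture flush helpers deleted at the top of this patch are replaced by the shared drm_clflush_pages() call above. A minimal sketch of the dispatch that helper is expected to provide, assuming it mirrors the structure of the removed TTM code (CLFLUSH loop between memory barriers on x86, flush_dcache_range() on PowerPC, an IPI-based fallback elsewhere):

    void drm_clflush_pages(struct page *pages[], unsigned long num_pages)
    {
    #if defined(CONFIG_X86)
        if (cpu_has_clflush) {
            unsigned long i;
            unsigned int off;
            uint8_t *virt;

            mb();       /* order earlier CPU writes before flushing */
            for (i = 0; i < num_pages; ++i) {
                if (unlikely(pages[i] == NULL))
                    continue;
                virt = kmap_atomic(pages[i], KM_USER0);
                for (off = 0; off < PAGE_SIZE;
                     off += boot_cpu_data.x86_clflush_size)
                    clflush(virt + off);
                kunmap_atomic(virt, KM_USER0);
            }
            mb();
            return;
        }
        /* CPUs without CLFLUSH need a cross-CPU flush instead. */
    #elif defined(__powerpc__)
        /* kmap_atomic() each page and flush_dcache_range() over it,
         * as the deleted PowerPC branch above did. */
    #else
        /* No portable flush; the deleted code fired a no-op IPI here. */
    #endif
    }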
@@ -350,6 +279,7 @@ int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
return ttm_tt_set_caching(ttm, state);
}
+EXPORT_SYMBOL(ttm_tt_set_placement_caching);
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
@@ -368,8 +298,8 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
printk(KERN_ERR TTM_PFX
"Erroneous page count. "
"Leaking pages.\n");
- ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE,
- PageHighMem(cur_page));
+ ttm_mem_global_free_page(ttm->glob->mem_glob,
+ cur_page);
__free_page(cur_page);
}
}
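
The free side mirrors the allocation change: instead of telling ttm_mem_global_free() how big the page was and whether it was highmem, the page itself is handed to ttm_mem_global_free_page() and the accounting layer works that out. Purely as an illustration of the pairing this patch introduces (only the two _page helpers come from the patch; the surrounding glue is hypothetical, with "glob" a struct ttm_mem_global * already in hand):

    struct page *p = alloc_page(GFP_HIGHUSER);

    if (p && ttm_mem_global_alloc_page(glob, p, false, false) == 0) {
        /* ... the page is now accounted against the right zone ... */
        ttm_mem_global_free_page(glob, p);  /* undo the accounting */
    }
    if (p)
        __free_page(p);                     /* then release the page itself */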
@@ -414,7 +344,7 @@ int ttm_tt_set_user(struct ttm_tt *ttm,
struct mm_struct *mm = tsk->mm;
int ret;
int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
- struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob;
+ struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
BUG_ON(num_pages != ttm->num_pages);
BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);
@@ -424,7 +354,7 @@ int ttm_tt_set_user(struct ttm_tt *ttm,
*/
ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
- false, false, false);
+ false, false);
if (unlikely(ret != 0))
return ret;
@@ -435,7 +365,7 @@ int ttm_tt_set_user(struct ttm_tt *ttm,
if (ret != num_pages && write) {
ttm_tt_free_user_pages(ttm);
- ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false);
+ ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE);
return -ENOMEM;
}
@@ -459,8 +389,7 @@ struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
if (!ttm)
return NULL;
- ttm->bdev = bdev;
-
+ ttm->glob = bdev->glob;
ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
ttm->first_himem_page = ttm->num_pages;
ttm->last_lomem_page = -1;
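
Finally, ttm_tt_create() stores bdev->glob rather than the ttm_bo_device itself, so every accounting call in this file now goes through ttm->glob->mem_glob, and the byte-based ttm_mem_global_alloc()/ttm_mem_global_free() pair loses its trailing highmem flag. A hypothetical driver-side helper pair, sketched only to show the post-patch calling convention (my_tt_reserve/my_tt_unreserve are not part of the patch):

    static int my_tt_reserve(struct ttm_tt *ttm, unsigned long num_pages)
    {
        /* The accounting object is reached through the shared bo-global
         * struct now, not through the ttm_bo_device. */
        struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;

        /* Two flags instead of three: only the trailing highmem flag
         * was dropped from the old signature. */
        return ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
                                    false, false);
    }

    static void my_tt_unreserve(struct ttm_tt *ttm, unsigned long num_pages)
    {
        ttm_mem_global_free(ttm->glob->mem_glob, num_pages * PAGE_SIZE);
    }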