author    | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-20 21:15:30 +0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-20 21:15:30 +0400
commit    | 43813f399c72aa22e01a680559c1cb5274bf2140 (patch)
tree      | 933c0e7c445b9c3478b5a0db06a162d0d39f00f2 /drivers
parent    | a552f0af753eb4b5bbbe9eff205fe874b04c4583 (diff)
parent    | 0b7af262aba912f52bc6ef76f1bc0960b01b8502 (diff)
download  | linux-43813f399c72aa22e01a680559c1cb5274bf2140.tar.xz
Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (24 commits)
agp/intel: Make intel_i965_mask_memory use dma_addr_t for physical addresses
agp: add user mapping support to ATI AGP bridge.
drm/i915: enable GEM on PAE.
drm/radeon: fix unused variables warning
agp: switch AGP to use page array instead of unsigned long array
agpgart: detected ALi M???? chipset with M1621
drm/radeon: command stream checker for r3xx-r5xx hardware
drm/radeon: Fully initialize LVDS info also when we can't get it from the ROM.
radeon: Fix CP byte order on big endian architectures with KMS.
agp/uninorth: Handle user memory types.
drm/ttm: Add some powerpc cache flush code.
radeon: Enable modesetting on non-x86.
drm/radeon: Respect AGP cant_use_aperture flag.
drm: EDID endianness fixes.
drm/radeon: this VRAM vs aperture test is wrong, just remove it.
drm/ttm: fix an error path to exit function correctly
drm: Apply "Memory fragmentation from lost alignment blocks"
ttm: Return -ERESTART when a signal interrupts bo eviction.
drm: Remove memory debugging infrastructure.
drm/i915: Clear fence register on tiling stride change.
...
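
Most of the AGP churn in this series comes from converting struct agp_memory to hold an array of struct page pointers, so every bridge driver's mask_memory/alloc_page/destroy_page hook now deals in struct page * instead of pre-translated unsigned long addresses. Below is a minimal sketch of what a converted mask_memory hook looks like after the series; the function name and the 0x1 "valid" bit are illustrative placeholders, not taken from any one driver — real drivers OR in their own chipset-specific mask, as the hunks further down show.

```c
#include <linux/mm.h>
#include <linux/agp_backend.h>
#include <asm/agp.h>	/* phys_to_gart() in this tree */

/* Illustrative post-conversion mask_memory hook: the AGP core now hands the
 * driver a struct page * and lets it derive the GART/bus address itself. */
static unsigned long example_mask_memory(struct agp_bridge_data *bridge,
					 struct page *page, int type)
{
	unsigned long addr = phys_to_gart(page_to_phys(page));

	return addr | 0x1;	/* chipset-specific "entry valid" bit */
}
```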
Diffstat (limited to 'drivers')
78 files changed, 1332 insertions, 883 deletions
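
On the DRM side, the recurring change in the hunks below comes from "drm: Remove memory debugging infrastructure.": the drm_alloc()/drm_free()/drm_realloc() bookkeeping wrappers and their DRM_MEM_* pools are replaced with plain slab calls. A hedged sketch of the before/after pattern (the helper name and its argument are generic placeholders, not code from the patch):

```c
#include <linux/slab.h>
#include <linux/string.h>

/* Old style: DRM's own wrappers tracked the size and a DRM_MEM_* pool, e.g.
 *	entry = drm_alloc(sizeof(*entry), DRM_MEM_AGPLISTS);
 *	...
 *	drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
 * New style after this merge: ordinary kernel allocations, as below. */
static void *example_alloc_entry(size_t size)
{
	void *entry = kmalloc(size, GFP_KERNEL);	/* was drm_alloc(size, DRM_MEM_...) */

	if (entry)
		memset(entry, 0, size);			/* callers keep their existing memset() */
	return entry;					/* freed later with plain kfree() */
}
```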
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h index 46f507531177..178e2e9e9f09 100644 --- a/drivers/char/agp/agp.h +++ b/drivers/char/agp/agp.h @@ -107,7 +107,7 @@ struct agp_bridge_driver { void (*agp_enable)(struct agp_bridge_data *, u32); void (*cleanup)(void); void (*tlb_flush)(struct agp_memory *); - unsigned long (*mask_memory)(struct agp_bridge_data *, unsigned long, int); + unsigned long (*mask_memory)(struct agp_bridge_data *, struct page *, int); void (*cache_flush)(void); int (*create_gatt_table)(struct agp_bridge_data *); int (*free_gatt_table)(struct agp_bridge_data *); @@ -115,9 +115,9 @@ struct agp_bridge_driver { int (*remove_memory)(struct agp_memory *, off_t, int); struct agp_memory *(*alloc_by_type) (size_t, int); void (*free_by_type)(struct agp_memory *); - void *(*agp_alloc_page)(struct agp_bridge_data *); + struct page *(*agp_alloc_page)(struct agp_bridge_data *); int (*agp_alloc_pages)(struct agp_bridge_data *, struct agp_memory *, size_t); - void (*agp_destroy_page)(void *, int flags); + void (*agp_destroy_page)(struct page *, int flags); void (*agp_destroy_pages)(struct agp_memory *); int (*agp_type_to_mask_type) (struct agp_bridge_data *, int); void (*chipset_flush)(struct agp_bridge_data *); @@ -278,10 +278,10 @@ int agp_generic_insert_memory(struct agp_memory *mem, off_t pg_start, int type); int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type); struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type); void agp_generic_free_by_type(struct agp_memory *curr); -void *agp_generic_alloc_page(struct agp_bridge_data *bridge); +struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge); int agp_generic_alloc_pages(struct agp_bridge_data *agp_bridge, struct agp_memory *memory, size_t page_count); -void agp_generic_destroy_page(void *addr, int flags); +void agp_generic_destroy_page(struct page *page, int flags); void agp_generic_destroy_pages(struct agp_memory *memory); void agp_free_key(int key); int agp_num_entries(void); @@ -291,7 +291,7 @@ int agp_3_5_enable(struct agp_bridge_data *bridge); void global_cache_flush(void); void get_agp_version(struct agp_bridge_data *bridge); unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge, - unsigned long addr, int type); + struct page *page, int type); int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge, int type); struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev); diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c index dc8d1a90971f..201ef3ffd484 100644 --- a/drivers/char/agp/ali-agp.c +++ b/drivers/char/agp/ali-agp.c @@ -141,37 +141,37 @@ static void m1541_cache_flush(void) } } -static void *m1541_alloc_page(struct agp_bridge_data *bridge) +static struct page *m1541_alloc_page(struct agp_bridge_data *bridge) { - void *addr = agp_generic_alloc_page(agp_bridge); + struct page *page = agp_generic_alloc_page(agp_bridge); u32 temp; - if (!addr) + if (!page) return NULL; pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp); pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, (((temp & ALI_CACHE_FLUSH_ADDR_MASK) | - virt_to_gart(addr)) | ALI_CACHE_FLUSH_EN )); - return addr; + phys_to_gart(page_to_phys(page))) | ALI_CACHE_FLUSH_EN )); + return page; } -static void ali_destroy_page(void * addr, int flags) +static void ali_destroy_page(struct page *page, int flags) { - if (addr) { + if (page) { if (flags & AGP_PAGE_DESTROY_UNMAP) { global_cache_flush(); /* is this really needed? 
--hch */ - agp_generic_destroy_page(addr, flags); + agp_generic_destroy_page(page, flags); } else - agp_generic_destroy_page(addr, flags); + agp_generic_destroy_page(page, flags); } } -static void m1541_destroy_page(void * addr, int flags) +static void m1541_destroy_page(struct page *page, int flags) { u32 temp; - if (addr == NULL) + if (page == NULL) return; if (flags & AGP_PAGE_DESTROY_UNMAP) { @@ -180,9 +180,9 @@ static void m1541_destroy_page(void * addr, int flags) pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp); pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, (((temp & ALI_CACHE_FLUSH_ADDR_MASK) | - virt_to_gart(addr)) | ALI_CACHE_FLUSH_EN)); + phys_to_gart(page_to_phys(page))) | ALI_CACHE_FLUSH_EN)); } - agp_generic_destroy_page(addr, flags); + agp_generic_destroy_page(page, flags); } @@ -346,7 +346,7 @@ found: devs[j].chipset_name = "M1641"; break; case 0x43: - devs[j].chipset_name = "M????"; + devs[j].chipset_name = "M1621"; break; case 0x47: devs[j].chipset_name = "M1647"; diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c index 3f98254b911f..ba9bde71eaaf 100644 --- a/drivers/char/agp/amd-k7-agp.c +++ b/drivers/char/agp/amd-k7-agp.c @@ -325,7 +325,7 @@ static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type) addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr; cur_gatt = GET_GATT(addr); writel(agp_generic_mask_memory(agp_bridge, - mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr)); + mem->pages[i], mem->type), cur_gatt+GET_GATT_OFF(addr)); readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */ } amd_irongate_tlbflush(mem); diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index d765afda9c2a..3bf5dda90f4a 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c @@ -79,7 +79,7 @@ static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type) for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { tmp = agp_bridge->driver->mask_memory(agp_bridge, - mem->memory[i], mask_type); + mem->pages[i], mask_type); BUG_ON(tmp & 0xffffff0000000ffcULL); pte = (tmp & 0x000000ff00000000ULL) >> 28; diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c index f1537eece07f..33656e144cc5 100644 --- a/drivers/char/agp/ati-agp.c +++ b/drivers/char/agp/ati-agp.c @@ -269,12 +269,17 @@ static int ati_insert_memory(struct agp_memory * mem, int i, j, num_entries; unsigned long __iomem *cur_gatt; unsigned long addr; + int mask_type; num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries; - if (type != 0 || mem->type != 0) + mask_type = agp_generic_type_to_mask_type(mem->bridge, type); + if (mask_type != 0 || type != mem->type) return -EINVAL; + if (mem->page_count == 0) + return 0; + if ((pg_start + mem->page_count) > num_entries) return -EINVAL; @@ -296,10 +301,11 @@ static int ati_insert_memory(struct agp_memory * mem, for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr; cur_gatt = GET_GATT(addr); - writel(agp_bridge->driver->mask_memory(agp_bridge, - mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr)); - readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. 
*/ + writel(agp_bridge->driver->mask_memory(agp_bridge, + mem->pages[i], mem->type), + cur_gatt+GET_GATT_OFF(addr)); } + readl(GET_GATT(agp_bridge->gart_bus_addr)); /* PCI posting */ agp_bridge->driver->tlb_flush(mem); return 0; } @@ -310,17 +316,22 @@ static int ati_remove_memory(struct agp_memory * mem, off_t pg_start, int i; unsigned long __iomem *cur_gatt; unsigned long addr; + int mask_type; - if (type != 0 || mem->type != 0) + mask_type = agp_generic_type_to_mask_type(mem->bridge, type); + if (mask_type != 0 || type != mem->type) return -EINVAL; + if (mem->page_count == 0) + return 0; + for (i = pg_start; i < (mem->page_count + pg_start); i++) { addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr; cur_gatt = GET_GATT(addr); writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr)); - readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */ } + readl(GET_GATT(agp_bridge->gart_bus_addr)); /* PCI posting */ agp_bridge->driver->tlb_flush(mem); return 0; } diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c index 8c617ad7497f..cfa5a649dfe7 100644 --- a/drivers/char/agp/backend.c +++ b/drivers/char/agp/backend.c @@ -141,17 +141,17 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge) bridge->version = &agp_current_version; if (bridge->driver->needs_scratch_page) { - void *addr = bridge->driver->agp_alloc_page(bridge); + struct page *page = bridge->driver->agp_alloc_page(bridge); - if (!addr) { + if (!page) { dev_err(&bridge->dev->dev, "can't get memory for scratch page\n"); return -ENOMEM; } - bridge->scratch_page_real = virt_to_gart(addr); + bridge->scratch_page_real = phys_to_gart(page_to_phys(page)); bridge->scratch_page = - bridge->driver->mask_memory(bridge, bridge->scratch_page_real, 0); + bridge->driver->mask_memory(bridge, page, 0); } size_value = bridge->driver->fetch_size(); diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c index 453543a1f293..35d50f2861b6 100644 --- a/drivers/char/agp/efficeon-agp.c +++ b/drivers/char/agp/efficeon-agp.c @@ -65,8 +65,9 @@ static const struct gatt_mask efficeon_generic_masks[] = }; /* This function does the same thing as mask_memory() for this chipset... 
*/ -static inline unsigned long efficeon_mask_memory(unsigned long addr) +static inline unsigned long efficeon_mask_memory(struct page *page) { + unsigned long addr = phys_to_gart(page_to_phys(page)); return addr | 0x00000001; } @@ -257,7 +258,7 @@ static int efficeon_insert_memory(struct agp_memory * mem, off_t pg_start, int t last_page = NULL; for (i = 0; i < count; i++) { int index = pg_start + i; - unsigned long insert = efficeon_mask_memory(mem->memory[i]); + unsigned long insert = efficeon_mask_memory(mem->pages[i]); page = (unsigned int *) efficeon_private.l1_table[index >> 10]; diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c index 2224b762b7fb..1e8b461b91f1 100644 --- a/drivers/char/agp/generic.c +++ b/drivers/char/agp/generic.c @@ -95,13 +95,13 @@ EXPORT_SYMBOL(agp_flush_chipset); void agp_alloc_page_array(size_t size, struct agp_memory *mem) { - mem->memory = NULL; + mem->pages = NULL; mem->vmalloc_flag = false; if (size <= 2*PAGE_SIZE) - mem->memory = kmalloc(size, GFP_KERNEL | __GFP_NORETRY); - if (mem->memory == NULL) { - mem->memory = vmalloc(size); + mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NORETRY); + if (mem->pages == NULL) { + mem->pages = vmalloc(size); mem->vmalloc_flag = true; } } @@ -110,9 +110,9 @@ EXPORT_SYMBOL(agp_alloc_page_array); void agp_free_page_array(struct agp_memory *mem) { if (mem->vmalloc_flag) { - vfree(mem->memory); + vfree(mem->pages); } else { - kfree(mem->memory); + kfree(mem->pages); } } EXPORT_SYMBOL(agp_free_page_array); @@ -136,7 +136,7 @@ static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages) agp_alloc_page_array(alloc_size, new); - if (new->memory == NULL) { + if (new->pages == NULL) { agp_free_key(new->key); kfree(new); return NULL; @@ -162,7 +162,7 @@ struct agp_memory *agp_create_memory(int scratch_pages) agp_alloc_page_array(PAGE_SIZE * scratch_pages, new); - if (new->memory == NULL) { + if (new->pages == NULL) { agp_free_key(new->key); kfree(new); return NULL; @@ -206,15 +206,13 @@ void agp_free_memory(struct agp_memory *curr) } else { for (i = 0; i < curr->page_count; i++) { - curr->memory[i] = (unsigned long)gart_to_virt( - curr->memory[i]); curr->bridge->driver->agp_destroy_page( - (void *)curr->memory[i], + curr->pages[i], AGP_PAGE_DESTROY_UNMAP); } for (i = 0; i < curr->page_count; i++) { curr->bridge->driver->agp_destroy_page( - (void *)curr->memory[i], + curr->pages[i], AGP_PAGE_DESTROY_FREE); } } @@ -282,13 +280,13 @@ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge, } for (i = 0; i < page_count; i++) { - void *addr = bridge->driver->agp_alloc_page(bridge); + struct page *page = bridge->driver->agp_alloc_page(bridge); - if (addr == NULL) { + if (page == NULL) { agp_free_memory(new); return NULL; } - new->memory[i] = virt_to_gart(addr); + new->pages[i] = page; new->page_count++; } new->bridge = bridge; @@ -1134,7 +1132,7 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type) } for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { - writel(bridge->driver->mask_memory(bridge, mem->memory[i], mask_type), + writel(bridge->driver->mask_memory(bridge, mem->pages[i], mask_type), bridge->gatt_table+j); } readl(bridge->gatt_table+j-1); /* PCI Posting. 
*/ @@ -1204,7 +1202,7 @@ struct agp_memory *agp_generic_alloc_user(size_t page_count, int type) return NULL; for (i = 0; i < page_count; i++) - new->memory[i] = 0; + new->pages[i] = 0; new->page_count = 0; new->type = type; new->num_scratch_pages = pages; @@ -1237,23 +1235,20 @@ int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *m get_page(page); atomic_inc(&agp_bridge->current_memory_agp); - /* set_memory_array_uc() needs virtual address */ - mem->memory[i] = (unsigned long)page_address(page); + mem->pages[i] = page; mem->page_count++; } #ifdef CONFIG_X86 - set_memory_array_uc(mem->memory, num_pages); + set_pages_array_uc(mem->pages, num_pages); #endif ret = 0; out: - for (i = 0; i < mem->page_count; i++) - mem->memory[i] = virt_to_gart((void *)mem->memory[i]); return ret; } EXPORT_SYMBOL(agp_generic_alloc_pages); -void *agp_generic_alloc_page(struct agp_bridge_data *bridge) +struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge) { struct page * page; @@ -1265,56 +1260,47 @@ void *agp_generic_alloc_page(struct agp_bridge_data *bridge) get_page(page); atomic_inc(&agp_bridge->current_memory_agp); - return page_address(page); + return page; } EXPORT_SYMBOL(agp_generic_alloc_page); void agp_generic_destroy_pages(struct agp_memory *mem) { int i; - void *addr; struct page *page; if (!mem) return; - for (i = 0; i < mem->page_count; i++) - mem->memory[i] = (unsigned long)gart_to_virt(mem->memory[i]); - #ifdef CONFIG_X86 - set_memory_array_wb(mem->memory, mem->page_count); + set_pages_array_wb(mem->pages, mem->page_count); #endif for (i = 0; i < mem->page_count; i++) { - addr = (void *)mem->memory[i]; - page = virt_to_page(addr); + page = mem->pages[i]; #ifndef CONFIG_X86 unmap_page_from_agp(page); #endif - put_page(page); - free_page((unsigned long)addr); + __free_page(page); atomic_dec(&agp_bridge->current_memory_agp); - mem->memory[i] = 0; + mem->pages[i] = NULL; } } EXPORT_SYMBOL(agp_generic_destroy_pages); -void agp_generic_destroy_page(void *addr, int flags) +void agp_generic_destroy_page(struct page *page, int flags) { - struct page *page; - - if (addr == NULL) + if (page == NULL) return; - page = virt_to_page(addr); if (flags & AGP_PAGE_DESTROY_UNMAP) unmap_page_from_agp(page); if (flags & AGP_PAGE_DESTROY_FREE) { put_page(page); - free_page((unsigned long)addr); + __free_page(page); atomic_dec(&agp_bridge->current_memory_agp); } } @@ -1361,8 +1347,9 @@ void global_cache_flush(void) EXPORT_SYMBOL(global_cache_flush); unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge, - unsigned long addr, int type) + struct page *page, int type) { + unsigned long addr = phys_to_gart(page_to_phys(page)); /* memory type is ignored in the generic routine */ if (bridge->driver->masks) return addr | bridge->driver->masks[0].mask; diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c index 9c7e2343c399..8f3d4c184914 100644 --- a/drivers/char/agp/hp-agp.c +++ b/drivers/char/agp/hp-agp.c @@ -361,13 +361,11 @@ hp_zx1_insert_memory (struct agp_memory *mem, off_t pg_start, int type) for (i = 0, j = io_pg_start; i < mem->page_count; i++) { unsigned long paddr; - paddr = mem->memory[i]; + paddr = page_to_phys(mem->pages[i]); for (k = 0; k < hp->io_pages_per_kpage; k++, j++, paddr += hp->io_page_size) { - hp->gatt[j] = - agp_bridge->driver->mask_memory(agp_bridge, - paddr, type); + hp->gatt[j] = HP_ZX1_PDIR_VALID_BIT | paddr; } } @@ -397,8 +395,9 @@ hp_zx1_remove_memory (struct agp_memory *mem, off_t pg_start, int type) static unsigned long 
hp_zx1_mask_memory (struct agp_bridge_data *bridge, - unsigned long addr, int type) + struct page *page, int type) { + unsigned long addr = phys_to_gart(page_to_phys(page)); return HP_ZX1_PDIR_VALID_BIT | addr; } diff --git a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c index 10da687d131a..60cc35bb5db7 100644 --- a/drivers/char/agp/i460-agp.c +++ b/drivers/char/agp/i460-agp.c @@ -60,6 +60,9 @@ */ #define WR_FLUSH_GATT(index) RD_GATT(index) +static unsigned long i460_mask_memory (struct agp_bridge_data *bridge, + unsigned long addr, int type); + static struct { void *gatt; /* ioremap'd GATT area */ @@ -74,6 +77,7 @@ static struct { unsigned long *alloced_map; /* bitmap of kernel-pages in use */ int refcount; /* number of kernel pages using the large page */ u64 paddr; /* physical address of large page */ + struct page *page; /* page pointer */ } *lp_desc; } i460; @@ -294,7 +298,7 @@ static int i460_insert_memory_small_io_page (struct agp_memory *mem, void *temp; pr_debug("i460_insert_memory_small_io_page(mem=%p, pg_start=%ld, type=%d, paddr0=0x%lx)\n", - mem, pg_start, type, mem->memory[0]); + mem, pg_start, type, page_to_phys(mem->pages[0])); if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES) return -EINVAL; @@ -321,10 +325,9 @@ static int i460_insert_memory_small_io_page (struct agp_memory *mem, io_page_size = 1UL << I460_IO_PAGE_SHIFT; for (i = 0, j = io_pg_start; i < mem->page_count; i++) { - paddr = mem->memory[i]; + paddr = phys_to_gart(page_to_phys(mem->pages[i])); for (k = 0; k < I460_IOPAGES_PER_KPAGE; k++, j++, paddr += io_page_size) - WR_GATT(j, agp_bridge->driver->mask_memory(agp_bridge, - paddr, mem->type)); + WR_GATT(j, i460_mask_memory(agp_bridge, paddr, mem->type)); } WR_FLUSH_GATT(j - 1); return 0; @@ -364,10 +367,9 @@ static int i460_alloc_large_page (struct lp_desc *lp) { unsigned long order = I460_IO_PAGE_SHIFT - PAGE_SHIFT; size_t map_size; - void *lpage; - lpage = (void *) __get_free_pages(GFP_KERNEL, order); - if (!lpage) { + lp->page = alloc_pages(GFP_KERNEL, order); + if (!lp->page) { printk(KERN_ERR PFX "Couldn't alloc 4M GART page...\n"); return -ENOMEM; } @@ -375,12 +377,12 @@ static int i460_alloc_large_page (struct lp_desc *lp) map_size = ((I460_KPAGES_PER_IOPAGE + BITS_PER_LONG - 1) & -BITS_PER_LONG)/8; lp->alloced_map = kzalloc(map_size, GFP_KERNEL); if (!lp->alloced_map) { - free_pages((unsigned long) lpage, order); + __free_pages(lp->page, order); printk(KERN_ERR PFX "Out of memory, we're in trouble...\n"); return -ENOMEM; } - lp->paddr = virt_to_gart(lpage); + lp->paddr = phys_to_gart(page_to_phys(lp->page)); lp->refcount = 0; atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp); return 0; @@ -391,7 +393,7 @@ static void i460_free_large_page (struct lp_desc *lp) kfree(lp->alloced_map); lp->alloced_map = NULL; - free_pages((unsigned long) gart_to_virt(lp->paddr), I460_IO_PAGE_SHIFT - PAGE_SHIFT); + __free_pages(lp->page, I460_IO_PAGE_SHIFT - PAGE_SHIFT); atomic_sub(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp); } @@ -439,8 +441,8 @@ static int i460_insert_memory_large_io_page (struct agp_memory *mem, if (i460_alloc_large_page(lp) < 0) return -ENOMEM; pg = lp - i460.lp_desc; - WR_GATT(pg, agp_bridge->driver->mask_memory(agp_bridge, - lp->paddr, 0)); + WR_GATT(pg, i460_mask_memory(agp_bridge, + lp->paddr, 0)); WR_FLUSH_GATT(pg); } @@ -448,7 +450,7 @@ static int i460_insert_memory_large_io_page (struct agp_memory *mem, idx < ((lp == end) ? 
(end_offset + 1) : I460_KPAGES_PER_IOPAGE); idx++, i++) { - mem->memory[i] = lp->paddr + idx*PAGE_SIZE; + mem->pages[i] = lp->page; __set_bit(idx, lp->alloced_map); ++lp->refcount; } @@ -463,7 +465,7 @@ static int i460_remove_memory_large_io_page (struct agp_memory *mem, struct lp_desc *start, *end, *lp; void *temp; - temp = agp_bridge->driver->current_size; + temp = agp_bridge->current_size; num_entries = A_SIZE_8(temp)->num_entries; /* Figure out what pg_start means in terms of our large GART pages */ @@ -477,7 +479,7 @@ static int i460_remove_memory_large_io_page (struct agp_memory *mem, idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE); idx++, i++) { - mem->memory[i] = 0; + mem->pages[i] = NULL; __clear_bit(idx, lp->alloced_map); --lp->refcount; } @@ -521,7 +523,7 @@ static int i460_remove_memory (struct agp_memory *mem, * Let's just hope nobody counts on the allocated AGP memory being there before bind time * (I don't think current drivers do)... */ -static void *i460_alloc_page (struct agp_bridge_data *bridge) +static struct page *i460_alloc_page (struct agp_bridge_data *bridge) { void *page; @@ -534,7 +536,7 @@ static void *i460_alloc_page (struct agp_bridge_data *bridge) return page; } -static void i460_destroy_page (void *page, int flags) +static void i460_destroy_page (struct page *page, int flags) { if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) { agp_generic_destroy_page(page, flags); @@ -544,13 +546,20 @@ static void i460_destroy_page (void *page, int flags) #endif /* I460_LARGE_IO_PAGES */ static unsigned long i460_mask_memory (struct agp_bridge_data *bridge, - unsigned long addr, int type) + unsigned long addr, int type) { /* Make sure the returned address is a valid GATT entry */ return bridge->driver->masks[0].mask | (((addr & ~((1 << I460_IO_PAGE_SHIFT) - 1)) & 0xfffff000) >> 12); } +static unsigned long i460_page_mask_memory(struct agp_bridge_data *bridge, + struct page *page, int type) +{ + unsigned long addr = phys_to_gart(page_to_phys(page)); + return i460_mask_memory(bridge, addr, type); +} + const struct agp_bridge_driver intel_i460_driver = { .owner = THIS_MODULE, .aperture_sizes = i460_sizes, @@ -560,7 +569,7 @@ const struct agp_bridge_driver intel_i460_driver = { .fetch_size = i460_fetch_size, .cleanup = i460_cleanup, .tlb_flush = i460_tlb_flush, - .mask_memory = i460_mask_memory, + .mask_memory = i460_page_mask_memory, .masks = i460_masks, .agp_enable = agp_generic_enable, .cache_flush = global_cache_flush, diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index 7a748fa0dfce..8c9d50db5c3a 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c @@ -257,7 +257,7 @@ static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode) } /* Exists to support ARGB cursors */ -static void *i8xx_alloc_pages(void) +static struct page *i8xx_alloc_pages(void) { struct page *page; @@ -272,17 +272,14 @@ static void *i8xx_alloc_pages(void) } get_page(page); atomic_inc(&agp_bridge->current_memory_agp); - return page_address(page); + return page; } -static void i8xx_destroy_pages(void *addr) +static void i8xx_destroy_pages(struct page *page) { - struct page *page; - - if (addr == NULL) + if (page == NULL) return; - page = virt_to_page(addr); set_pages_wb(page, 4); put_page(page); __free_pages(page, 2); @@ -346,7 +343,7 @@ static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start, global_cache_flush(); for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { 
writel(agp_bridge->driver->mask_memory(agp_bridge, - mem->memory[i], + mem->pages[i], mask_type), intel_private.registers+I810_PTE_BASE+(j*4)); } @@ -389,37 +386,37 @@ static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start, static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type) { struct agp_memory *new; - void *addr; + struct page *page; switch (pg_count) { - case 1: addr = agp_bridge->driver->agp_alloc_page(agp_bridge); + case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge); break; case 4: /* kludge to get 4 physical pages for ARGB cursor */ - addr = i8xx_alloc_pages(); + page = i8xx_alloc_pages(); break; default: return NULL; } - if (addr == NULL) + if (page == NULL) return NULL; new = agp_create_memory(pg_count); if (new == NULL) return NULL; - new->memory[0] = virt_to_gart(addr); + new->pages[0] = page; if (pg_count == 4) { /* kludge to get 4 physical pages for ARGB cursor */ - new->memory[1] = new->memory[0] + PAGE_SIZE; - new->memory[2] = new->memory[1] + PAGE_SIZE; - new->memory[3] = new->memory[2] + PAGE_SIZE; + new->pages[1] = new->pages[0] + 1; + new->pages[2] = new->pages[1] + 1; + new->pages[3] = new->pages[2] + 1; } new->page_count = pg_count; new->num_scratch_pages = pg_count; new->type = AGP_PHYS_MEMORY; - new->physical = new->memory[0]; + new->physical = page_to_phys(new->pages[0]); return new; } @@ -451,13 +448,11 @@ static void intel_i810_free_by_type(struct agp_memory *curr) agp_free_key(curr->key); if (curr->type == AGP_PHYS_MEMORY) { if (curr->page_count == 4) - i8xx_destroy_pages(gart_to_virt(curr->memory[0])); + i8xx_destroy_pages(curr->pages[0]); else { - void *va = gart_to_virt(curr->memory[0]); - - agp_bridge->driver->agp_destroy_page(va, + agp_bridge->driver->agp_destroy_page(curr->pages[0], AGP_PAGE_DESTROY_UNMAP); - agp_bridge->driver->agp_destroy_page(va, + agp_bridge->driver->agp_destroy_page(curr->pages[0], AGP_PAGE_DESTROY_FREE); } agp_free_page_array(curr); @@ -466,8 +461,9 @@ static void intel_i810_free_by_type(struct agp_memory *curr) } static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge, - unsigned long addr, int type) + struct page *page, int type) { + unsigned long addr = phys_to_gart(page_to_phys(page)); /* Type checking must be done elsewhere */ return addr | bridge->driver->masks[type].mask; } @@ -855,7 +851,7 @@ static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start, for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { writel(agp_bridge->driver->mask_memory(agp_bridge, - mem->memory[i], mask_type), + mem->pages[i], mask_type), intel_private.registers+I810_PTE_BASE+(j*4)); } readl(intel_private.registers+I810_PTE_BASE+((j-1)*4)); @@ -1085,7 +1081,7 @@ static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start, for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { writel(agp_bridge->driver->mask_memory(agp_bridge, - mem->memory[i], mask_type), intel_private.gtt+j); + mem->pages[i], mask_type), intel_private.gtt+j); } readl(intel_private.gtt+j-1); @@ -1200,8 +1196,9 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge) * this conditional. 
*/ static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge, - unsigned long addr, int type) + struct page *page, int type) { + dma_addr_t addr = phys_to_gart(page_to_phys(page)); /* Shift high bits down */ addr |= (addr >> 28) & 0xf0; diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c index 16acee2de117..263d71dd441c 100644 --- a/drivers/char/agp/nvidia-agp.c +++ b/drivers/char/agp/nvidia-agp.c @@ -225,7 +225,7 @@ static int nvidia_insert_memory(struct agp_memory *mem, off_t pg_start, int type } for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { writel(agp_bridge->driver->mask_memory(agp_bridge, - mem->memory[i], mask_type), + mem->pages[i], mask_type), agp_bridge->gatt_table+nvidia_private.pg_offset+j); } diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c index 699e3422ad93..f4bb43fb8016 100644 --- a/drivers/char/agp/parisc-agp.c +++ b/drivers/char/agp/parisc-agp.c @@ -31,6 +31,10 @@ #define AGP8X_MODE_BIT 3 #define AGP8X_MODE (1 << AGP8X_MODE_BIT) +static unsigned long +parisc_agp_mask_memory(struct agp_bridge_data *bridge, unsigned long addr, + int type); + static struct _parisc_agp_info { void __iomem *ioc_regs; void __iomem *lba_regs; @@ -149,12 +153,12 @@ parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type) for (i = 0, j = io_pg_start; i < mem->page_count; i++) { unsigned long paddr; - paddr = mem->memory[i]; + paddr = page_to_phys(mem->pages[i]); for (k = 0; k < info->io_pages_per_kpage; k++, j++, paddr += info->io_page_size) { info->gatt[j] = - agp_bridge->driver->mask_memory(agp_bridge, + parisc_agp_mask_memory(agp_bridge, paddr, type); } } @@ -185,9 +189,17 @@ parisc_agp_remove_memory(struct agp_memory *mem, off_t pg_start, int type) } static unsigned long -parisc_agp_mask_memory(struct agp_bridge_data *bridge, - unsigned long addr, int type) +parisc_agp_mask_memory(struct agp_bridge_data *bridge, unsigned long addr, + int type) +{ + return SBA_PDIR_VALID_BIT | addr; +} + +static unsigned long +parisc_agp_page_mask_memory(struct agp_bridge_data *bridge, struct page *page, + int type) { + unsigned long addr = phys_to_gart(page_to_phys(page)); return SBA_PDIR_VALID_BIT | addr; } diff --git a/drivers/char/agp/sgi-agp.c b/drivers/char/agp/sgi-agp.c index b972d83bb1b2..d3ea2e4226b5 100644 --- a/drivers/char/agp/sgi-agp.c +++ b/drivers/char/agp/sgi-agp.c @@ -38,7 +38,7 @@ static struct aper_size_info_fixed sgi_tioca_sizes[] = { {0, 0, 0}, }; -static void *sgi_tioca_alloc_page(struct agp_bridge_data *bridge) +static struct page *sgi_tioca_alloc_page(struct agp_bridge_data *bridge) { struct page *page; int nid; @@ -52,7 +52,7 @@ static void *sgi_tioca_alloc_page(struct agp_bridge_data *bridge) get_page(page); atomic_inc(&agp_bridge->current_memory_agp); - return page_address(page); + return page; } /* @@ -71,8 +71,9 @@ static void sgi_tioca_tlbflush(struct agp_memory *mem) */ static unsigned long sgi_tioca_mask_memory(struct agp_bridge_data *bridge, - unsigned long addr, int type) + struct page *page, int type) { + unsigned long addr = phys_to_gart(page_to_phys(page)); return tioca_physpage_to_gart(addr); } @@ -189,7 +190,7 @@ static int sgi_tioca_insert_memory(struct agp_memory *mem, off_t pg_start, for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { table[j] = - bridge->driver->mask_memory(bridge, mem->memory[i], + bridge->driver->mask_memory(bridge, mem->pages[i], mem->type); } diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c index 
6224df8b7f0a..b964a2199329 100644 --- a/drivers/char/agp/sworks-agp.c +++ b/drivers/char/agp/sworks-agp.c @@ -349,7 +349,7 @@ static int serverworks_insert_memory(struct agp_memory *mem, for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr; cur_gatt = SVRWRKS_GET_GATT(addr); - writel(agp_bridge->driver->mask_memory(agp_bridge, mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr)); + writel(agp_bridge->driver->mask_memory(agp_bridge, mem->pages[i], mem->type), cur_gatt+GET_GATT_OFF(addr)); } serverworks_tlbflush(mem); return 0; diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c index 03f95ec08f59..f192c3b9ad41 100644 --- a/drivers/char/agp/uninorth-agp.c +++ b/drivers/char/agp/uninorth-agp.c @@ -146,13 +146,20 @@ static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, { int i, j, num_entries; void *temp; + int mask_type; temp = agp_bridge->current_size; num_entries = A_SIZE_32(temp)->num_entries; - if (type != 0 || mem->type != 0) + if (type != mem->type) + return -EINVAL; + + mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); + if (mask_type != 0) { /* We know nothing of memory types */ return -EINVAL; + } + if ((pg_start + mem->page_count) > num_entries) return -EINVAL; @@ -166,9 +173,9 @@ static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { agp_bridge->gatt_table[j] = - cpu_to_le32((mem->memory[i] & 0xFFFFF000UL) | 0x1UL); - flush_dcache_range((unsigned long)__va(mem->memory[i]), - (unsigned long)__va(mem->memory[i])+0x1000); + cpu_to_le32((page_to_phys(mem->pages[i]) & 0xFFFFF000UL) | 0x1UL); + flush_dcache_range((unsigned long)__va(page_to_phys(mem->pages[i])), + (unsigned long)__va(page_to_phys(mem->pages[i]))+0x1000); } (void)in_le32((volatile u32*)&agp_bridge->gatt_table[pg_start]); mb(); @@ -184,13 +191,20 @@ static int u3_insert_memory(struct agp_memory *mem, off_t pg_start, int type) int i, num_entries; void *temp; u32 *gp; + int mask_type; temp = agp_bridge->current_size; num_entries = A_SIZE_32(temp)->num_entries; - if (type != 0 || mem->type != 0) + if (type != mem->type) + return -EINVAL; + + mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); + if (mask_type != 0) { /* We know nothing of memory types */ return -EINVAL; + } + if ((pg_start + mem->page_count) > num_entries) return -EINVAL; @@ -205,9 +219,9 @@ static int u3_insert_memory(struct agp_memory *mem, off_t pg_start, int type) } for (i = 0; i < mem->page_count; i++) { - gp[i] = (mem->memory[i] >> PAGE_SHIFT) | 0x80000000UL; - flush_dcache_range((unsigned long)__va(mem->memory[i]), - (unsigned long)__va(mem->memory[i])+0x1000); + gp[i] = (page_to_phys(mem->pages[i]) >> PAGE_SHIFT) | 0x80000000UL; + flush_dcache_range((unsigned long)__va(page_to_phys(mem->pages[i])), + (unsigned long)__va(page_to_phys(mem->pages[i]))+0x1000); } mb(); flush_dcache_range((unsigned long)gp, (unsigned long) &gp[i]); diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c index 14796594e5d9..d68888fe3df9 100644 --- a/drivers/gpu/drm/drm_agpsupport.c +++ b/drivers/gpu/drm/drm_agpsupport.c @@ -203,7 +203,7 @@ int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request) if (!dev->agp || !dev->agp->acquired) return -EINVAL; - if (!(entry = drm_alloc(sizeof(*entry), DRM_MEM_AGPLISTS))) + if (!(entry = kmalloc(sizeof(*entry), GFP_KERNEL))) return -ENOMEM; memset(entry, 0, 
sizeof(*entry)); @@ -211,7 +211,7 @@ int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request) pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE; type = (u32) request->type; if (!(memory = drm_alloc_agp(dev, pages, type))) { - drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); + kfree(entry); return -ENOMEM; } @@ -369,7 +369,7 @@ int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request) list_del(&entry->head); drm_free_agp(entry->memory, entry->pages); - drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); + kfree(entry); return 0; } EXPORT_SYMBOL(drm_agp_free); @@ -397,13 +397,13 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev) { struct drm_agp_head *head = NULL; - if (!(head = drm_alloc(sizeof(*head), DRM_MEM_AGPLISTS))) + if (!(head = kmalloc(sizeof(*head), GFP_KERNEL))) return NULL; memset((void *)head, 0, sizeof(*head)); head->bridge = agp_find_bridge(dev->pdev); if (!head->bridge) { if (!(head->bridge = agp_backend_acquire(dev->pdev))) { - drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS); + kfree(head); return NULL; } agp_copy_info(head->bridge, &head->agp_info); @@ -412,7 +412,7 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev) agp_copy_info(head->bridge, &head->agp_info); } if (head->agp_info.chipset == NOT_SUPPORTED) { - drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS); + kfree(head); return NULL; } INIT_LIST_HEAD(&head->memory); @@ -482,7 +482,7 @@ drm_agp_bind_pages(struct drm_device *dev, } for (i = 0; i < num_pages; i++) - mem->memory[i] = phys_to_gart(page_to_phys(pages[i])); + mem->pages[i] = pages[i]; mem->page_count = num_pages; mem->is_flushed = true; diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c index ca7a9ef5007b..932b5aa96a67 100644 --- a/drivers/gpu/drm/drm_auth.c +++ b/drivers/gpu/drm/drm_auth.c @@ -79,7 +79,7 @@ static int drm_add_magic(struct drm_master *master, struct drm_file *priv, struct drm_device *dev = master->minor->dev; DRM_DEBUG("%d\n", magic); - entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC); + entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return -ENOMEM; memset(entry, 0, sizeof(*entry)); @@ -120,7 +120,7 @@ static int drm_remove_magic(struct drm_master *master, drm_magic_t magic) list_del(&pt->head); mutex_unlock(&dev->struct_mutex); - drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC); + kfree(pt); return 0; } diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index 80a257554b30..6246e3f3dad7 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c @@ -151,7 +151,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, unsigned long user_token; int ret; - map = drm_alloc(sizeof(*map), DRM_MEM_MAPS); + map = kmalloc(sizeof(*map), GFP_KERNEL); if (!map) return -ENOMEM; @@ -165,7 +165,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, * when processes fork. 
*/ if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) { - drm_free(map, sizeof(*map), DRM_MEM_MAPS); + kfree(map); return -EINVAL; } DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n", @@ -179,7 +179,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, map->size = PAGE_ALIGN(map->size); if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) { - drm_free(map, sizeof(*map), DRM_MEM_MAPS); + kfree(map); return -EINVAL; } map->mtrr = -1; @@ -191,7 +191,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, #if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) if (map->offset + (map->size-1) < map->offset || map->offset < virt_to_phys(high_memory)) { - drm_free(map, sizeof(*map), DRM_MEM_MAPS); + kfree(map); return -EINVAL; } #endif @@ -212,7 +212,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, list->map->size = map->size; } - drm_free(map, sizeof(*map), DRM_MEM_MAPS); + kfree(map); *maplist = list; return 0; } @@ -227,7 +227,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, if (map->type == _DRM_REGISTERS) { map->handle = ioremap(map->offset, map->size); if (!map->handle) { - drm_free(map, sizeof(*map), DRM_MEM_MAPS); + kfree(map); return -ENOMEM; } } @@ -243,7 +243,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, list->map->size = map->size; } - drm_free(map, sizeof(*map), DRM_MEM_MAPS); + kfree(map); *maplist = list; return 0; } @@ -251,7 +251,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, DRM_DEBUG("%lu %d %p\n", map->size, drm_order(map->size), map->handle); if (!map->handle) { - drm_free(map, sizeof(*map), DRM_MEM_MAPS); + kfree(map); return -ENOMEM; } map->offset = (unsigned long)map->handle; @@ -259,7 +259,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, /* Prevent a 2nd X Server from creating a 2nd lock */ if (dev->primary->master->lock.hw_lock != NULL) { vfree(map->handle); - drm_free(map, sizeof(*map), DRM_MEM_MAPS); + kfree(map); return -EBUSY; } dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */ @@ -270,7 +270,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, int valid = 0; if (!drm_core_has_AGP(dev)) { - drm_free(map, sizeof(*map), DRM_MEM_MAPS); + kfree(map); return -EINVAL; } #ifdef __alpha__ @@ -303,7 +303,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, } } if (!list_empty(&dev->agp->memory) && !valid) { - drm_free(map, sizeof(*map), DRM_MEM_MAPS); + kfree(map); return -EPERM; } DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n", @@ -316,7 +316,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, } case _DRM_SCATTER_GATHER: if (!dev->sg) { - drm_free(map, sizeof(*map), DRM_MEM_MAPS); + kfree(map); return -EINVAL; } map->offset += (unsigned long)dev->sg->virtual; @@ -328,7 +328,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, * need to point to a 64bit variable first. 
*/ dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL); if (!dmah) { - drm_free(map, sizeof(*map), DRM_MEM_MAPS); + kfree(map); return -ENOMEM; } map->handle = dmah->vaddr; @@ -336,15 +336,15 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, kfree(dmah); break; default: - drm_free(map, sizeof(*map), DRM_MEM_MAPS); + kfree(map); return -EINVAL; } - list = drm_alloc(sizeof(*list), DRM_MEM_MAPS); + list = kmalloc(sizeof(*list), GFP_KERNEL); if (!list) { if (map->type == _DRM_REGISTERS) iounmap(map->handle); - drm_free(map, sizeof(*map), DRM_MEM_MAPS); + kfree(map); return -EINVAL; } memset(list, 0, sizeof(*list)); @@ -362,8 +362,8 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, if (ret) { if (map->type == _DRM_REGISTERS) iounmap(map->handle); - drm_free(map, sizeof(*map), DRM_MEM_MAPS); - drm_free(list, sizeof(*list), DRM_MEM_MAPS); + kfree(map); + kfree(list); mutex_unlock(&dev->struct_mutex); return ret; } @@ -448,7 +448,7 @@ int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map) list_del(&r_list->head); drm_ht_remove_key(&dev->map_hash, r_list->user_token >> PAGE_SHIFT); - drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS); + kfree(r_list); found = 1; break; } @@ -491,7 +491,7 @@ int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map) DRM_ERROR("tried to rmmap GEM object\n"); break; } - drm_free(map, sizeof(*map), DRM_MEM_MAPS); + kfree(map); return 0; } @@ -582,24 +582,16 @@ static void drm_cleanup_buf_error(struct drm_device * dev, drm_pci_free(dev, entry->seglist[i]); } } - drm_free(entry->seglist, - entry->seg_count * - sizeof(*entry->seglist), DRM_MEM_SEGS); + kfree(entry->seglist); entry->seg_count = 0; } if (entry->buf_count) { for (i = 0; i < entry->buf_count; i++) { - if (entry->buflist[i].dev_private) { - drm_free(entry->buflist[i].dev_private, - entry->buflist[i].dev_priv_size, - DRM_MEM_BUFS); - } + kfree(entry->buflist[i].dev_private); } - drm_free(entry->buflist, - entry->buf_count * - sizeof(*entry->buflist), DRM_MEM_BUFS); + kfree(entry->buflist); entry->buf_count = 0; } @@ -698,8 +690,7 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request) return -EINVAL; } - entry->buflist = drm_alloc(count * sizeof(*entry->buflist), - DRM_MEM_BUFS); + entry->buflist = kmalloc(count * sizeof(*entry->buflist), GFP_KERNEL); if (!entry->buflist) { mutex_unlock(&dev->struct_mutex); atomic_dec(&dev->buf_alloc); @@ -729,7 +720,7 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request) buf->file_priv = NULL; buf->dev_priv_size = dev->driver->dev_priv_size; - buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); + buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL); if (!buf->dev_private) { /* Set count correctly so we free the proper amount. 
*/ entry->buf_count = count; @@ -749,10 +740,9 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request) DRM_DEBUG("byte_count: %d\n", byte_count); - temp_buflist = drm_realloc(dma->buflist, - dma->buf_count * sizeof(*dma->buflist), - (dma->buf_count + entry->buf_count) - * sizeof(*dma->buflist), DRM_MEM_BUFS); + temp_buflist = krealloc(dma->buflist, + (dma->buf_count + entry->buf_count) * + sizeof(*dma->buflist), GFP_KERNEL); if (!temp_buflist) { /* Free the entry because it isn't valid */ drm_cleanup_buf_error(dev, entry); @@ -854,8 +844,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request) return -EINVAL; } - entry->buflist = drm_alloc(count * sizeof(*entry->buflist), - DRM_MEM_BUFS); + entry->buflist = kmalloc(count * sizeof(*entry->buflist), GFP_KERNEL); if (!entry->buflist) { mutex_unlock(&dev->struct_mutex); atomic_dec(&dev->buf_alloc); @@ -863,11 +852,9 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request) } memset(entry->buflist, 0, count * sizeof(*entry->buflist)); - entry->seglist = drm_alloc(count * sizeof(*entry->seglist), - DRM_MEM_SEGS); + entry->seglist = kmalloc(count * sizeof(*entry->seglist), GFP_KERNEL); if (!entry->seglist) { - drm_free(entry->buflist, - count * sizeof(*entry->buflist), DRM_MEM_BUFS); + kfree(entry->buflist); mutex_unlock(&dev->struct_mutex); atomic_dec(&dev->buf_alloc); return -ENOMEM; @@ -877,13 +864,11 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request) /* Keep the original pagelist until we know all the allocations * have succeeded */ - temp_pagelist = drm_alloc((dma->page_count + (count << page_order)) - * sizeof(*dma->pagelist), DRM_MEM_PAGES); + temp_pagelist = kmalloc((dma->page_count + (count << page_order)) * + sizeof(*dma->pagelist), GFP_KERNEL); if (!temp_pagelist) { - drm_free(entry->buflist, - count * sizeof(*entry->buflist), DRM_MEM_BUFS); - drm_free(entry->seglist, - count * sizeof(*entry->seglist), DRM_MEM_SEGS); + kfree(entry->buflist); + kfree(entry->seglist); mutex_unlock(&dev->struct_mutex); atomic_dec(&dev->buf_alloc); return -ENOMEM; @@ -907,9 +892,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request) entry->buf_count = count; entry->seg_count = count; drm_cleanup_buf_error(dev, entry); - drm_free(temp_pagelist, - (dma->page_count + (count << page_order)) - * sizeof(*dma->pagelist), DRM_MEM_PAGES); + kfree(temp_pagelist); mutex_unlock(&dev->struct_mutex); atomic_dec(&dev->buf_alloc); return -ENOMEM; @@ -940,18 +923,14 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request) buf->file_priv = NULL; buf->dev_priv_size = dev->driver->dev_priv_size; - buf->dev_private = drm_alloc(buf->dev_priv_size, - DRM_MEM_BUFS); + buf->dev_private = kmalloc(buf->dev_priv_size, + GFP_KERNEL); if (!buf->dev_private) { /* Set count correctly so we free the proper amount. 
*/ entry->buf_count = count; entry->seg_count = count; drm_cleanup_buf_error(dev, entry); - drm_free(temp_pagelist, - (dma->page_count + - (count << page_order)) - * sizeof(*dma->pagelist), - DRM_MEM_PAGES); + kfree(temp_pagelist); mutex_unlock(&dev->struct_mutex); atomic_dec(&dev->buf_alloc); return -ENOMEM; @@ -964,16 +943,13 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request) byte_count += PAGE_SIZE << page_order; } - temp_buflist = drm_realloc(dma->buflist, - dma->buf_count * sizeof(*dma->buflist), - (dma->buf_count + entry->buf_count) - * sizeof(*dma->buflist), DRM_MEM_BUFS); + temp_buflist = krealloc(dma->buflist, + (dma->buf_count + entry->buf_count) * + sizeof(*dma->buflist), GFP_KERNEL); if (!temp_buflist) { /* Free the entry because it isn't valid */ drm_cleanup_buf_error(dev, entry); - drm_free(temp_pagelist, - (dma->page_count + (count << page_order)) - * sizeof(*dma->pagelist), DRM_MEM_PAGES); + kfree(temp_pagelist); mutex_unlock(&dev->struct_mutex); atomic_dec(&dev->buf_alloc); return -ENOMEM; @@ -988,9 +964,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request) * with the new one. */ if (dma->page_count) { - drm_free(dma->pagelist, - dma->page_count * sizeof(*dma->pagelist), - DRM_MEM_PAGES); + kfree(dma->pagelist); } dma->pagelist = temp_pagelist; @@ -1086,8 +1060,8 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request return -EINVAL; } - entry->buflist = drm_alloc(count * sizeof(*entry->buflist), - DRM_MEM_BUFS); + entry->buflist = kmalloc(count * sizeof(*entry->buflist), + GFP_KERNEL); if (!entry->buflist) { mutex_unlock(&dev->struct_mutex); atomic_dec(&dev->buf_alloc); @@ -1118,7 +1092,7 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request buf->file_priv = NULL; buf->dev_priv_size = dev->driver->dev_priv_size; - buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); + buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL); if (!buf->dev_private) { /* Set count correctly so we free the proper amount. */ entry->buf_count = count; @@ -1139,10 +1113,9 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request DRM_DEBUG("byte_count: %d\n", byte_count); - temp_buflist = drm_realloc(dma->buflist, - dma->buf_count * sizeof(*dma->buflist), - (dma->buf_count + entry->buf_count) - * sizeof(*dma->buflist), DRM_MEM_BUFS); + temp_buflist = krealloc(dma->buflist, + (dma->buf_count + entry->buf_count) * + sizeof(*dma->buflist), GFP_KERNEL); if (!temp_buflist) { /* Free the entry because it isn't valid */ drm_cleanup_buf_error(dev, entry); @@ -1248,8 +1221,8 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request return -EINVAL; } - entry->buflist = drm_alloc(count * sizeof(*entry->buflist), - DRM_MEM_BUFS); + entry->buflist = kmalloc(count * sizeof(*entry->buflist), + GFP_KERNEL); if (!entry->buflist) { mutex_unlock(&dev->struct_mutex); atomic_dec(&dev->buf_alloc); @@ -1279,7 +1252,7 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request buf->file_priv = NULL; buf->dev_priv_size = dev->driver->dev_priv_size; - buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); + buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL); if (!buf->dev_private) { /* Set count correctly so we free the proper amount. 
*/ entry->buf_count = count; @@ -1299,10 +1272,9 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request DRM_DEBUG("byte_count: %d\n", byte_count); - temp_buflist = drm_realloc(dma->buflist, - dma->buf_count * sizeof(*dma->buflist), - (dma->buf_count + entry->buf_count) - * sizeof(*dma->buflist), DRM_MEM_BUFS); + temp_buflist = krealloc(dma->buflist, + (dma->buf_count + entry->buf_count) * + sizeof(*dma->buflist), GFP_KERNEL); if (!temp_buflist) { /* Free the entry because it isn't valid */ drm_cleanup_buf_error(dev, entry); diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c index 7d1e53c10d4b..2607753a320b 100644 --- a/drivers/gpu/drm/drm_context.c +++ b/drivers/gpu/drm/drm_context.c @@ -341,7 +341,7 @@ int drm_addctx(struct drm_device *dev, void *data, } } - ctx_entry = drm_alloc(sizeof(*ctx_entry), DRM_MEM_CTXLIST); + ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL); if (!ctx_entry) { DRM_DEBUG("out of memory\n"); return -ENOMEM; @@ -456,7 +456,7 @@ int drm_rmctx(struct drm_device *dev, void *data, list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { if (pos->handle == ctx->handle) { list_del(&pos->head); - drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST); + kfree(pos); --dev->ctx_count; } } diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index 6ce0e2667a85..2960b6d73456 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -100,15 +100,13 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count, (dev->driver->driver_features & features) != features) continue; - tmp = drm_alloc(sizeof(struct drm_info_node), - _DRM_DRIVER); + tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL); ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO, root, tmp, &drm_debugfs_fops); if (!ent) { DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/%s\n", name, files[i].name); - drm_free(tmp, sizeof(struct drm_info_node), - _DRM_DRIVER); + kfree(tmp); ret = -1; goto fail; } @@ -196,8 +194,7 @@ int drm_debugfs_remove_files(struct drm_info_list *files, int count, if (tmp->info_ent == &files[i]) { debugfs_remove(tmp->dent); list_del(pos); - drm_free(tmp, sizeof(struct drm_info_node), - _DRM_DRIVER); + kfree(tmp); } } } diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c index 7a8e2fba4678..13f1537413fb 100644 --- a/drivers/gpu/drm/drm_dma.c +++ b/drivers/gpu/drm/drm_dma.c @@ -47,7 +47,7 @@ int drm_dma_setup(struct drm_device *dev) { int i; - dev->dma = drm_alloc(sizeof(*dev->dma), DRM_MEM_DRIVER); + dev->dma = kmalloc(sizeof(*dev->dma), GFP_KERNEL); if (!dev->dma) return -ENOMEM; @@ -88,36 +88,19 @@ void drm_dma_takedown(struct drm_device *dev) drm_pci_free(dev, dma->bufs[i].seglist[j]); } } - drm_free(dma->bufs[i].seglist, - dma->bufs[i].seg_count - * sizeof(*dma->bufs[0].seglist), DRM_MEM_SEGS); + kfree(dma->bufs[i].seglist); } if (dma->bufs[i].buf_count) { for (j = 0; j < dma->bufs[i].buf_count; j++) { - if (dma->bufs[i].buflist[j].dev_private) { - drm_free(dma->bufs[i].buflist[j]. - dev_private, - dma->bufs[i].buflist[j]. 
- dev_priv_size, DRM_MEM_BUFS); - } + kfree(dma->bufs[i].buflist[j].dev_private); } - drm_free(dma->bufs[i].buflist, - dma->bufs[i].buf_count * - sizeof(*dma->bufs[0].buflist), DRM_MEM_BUFS); + kfree(dma->bufs[i].buflist); } } - if (dma->buflist) { - drm_free(dma->buflist, - dma->buf_count * sizeof(*dma->buflist), DRM_MEM_BUFS); - } - - if (dma->pagelist) { - drm_free(dma->pagelist, - dma->page_count * sizeof(*dma->pagelist), - DRM_MEM_PAGES); - } - drm_free(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER); + kfree(dma->buflist); + kfree(dma->pagelist); + kfree(dev->dma); dev->dma = NULL; } diff --git a/drivers/gpu/drm/drm_drawable.c b/drivers/gpu/drm/drm_drawable.c index 80be1cab62af..c53c9768cc11 100644 --- a/drivers/gpu/drm/drm_drawable.c +++ b/drivers/gpu/drm/drm_drawable.c @@ -85,9 +85,8 @@ int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv) spin_unlock_irqrestore(&dev->drw_lock, irqflags); return -EINVAL; } - drm_free(info->rects, info->num_rects * sizeof(struct drm_clip_rect), - DRM_MEM_BUFS); - drm_free(info, sizeof(struct drm_drawable_info), DRM_MEM_BUFS); + kfree(info->rects); + kfree(info); idr_remove(&dev->drw_idr, draw->handle); @@ -106,12 +105,12 @@ int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file info = idr_find(&dev->drw_idr, update->handle); if (!info) { - info = drm_calloc(1, sizeof(*info), DRM_MEM_BUFS); + info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) { DRM_ERROR("No such drawable %d\n", update->handle); - drm_free(info, sizeof(*info), DRM_MEM_BUFS); + kfree(info); return -EINVAL; } } @@ -121,8 +120,9 @@ int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file if (update->num == 0) rects = NULL; else if (update->num != info->num_rects) { - rects = drm_alloc(update->num * sizeof(struct drm_clip_rect), - DRM_MEM_BUFS); + rects = kmalloc(update->num * + sizeof(struct drm_clip_rect), + GFP_KERNEL); } else rects = info->rects; @@ -145,8 +145,7 @@ int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file spin_lock_irqsave(&dev->drw_lock, irqflags); if (rects != info->rects) { - drm_free(info->rects, info->num_rects * - sizeof(struct drm_clip_rect), DRM_MEM_BUFS); + kfree(info->rects); } info->rects = rects; @@ -166,8 +165,7 @@ int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file error: if (rects != info->rects) - drm_free(rects, update->num * sizeof(struct drm_clip_rect), - DRM_MEM_BUFS); + kfree(rects); return err; } @@ -186,9 +184,8 @@ static int drm_drawable_free(int idr, void *p, void *data) struct drm_drawable_info *info = p; if (info) { - drm_free(info->rects, info->num_rects * - sizeof(struct drm_clip_rect), DRM_MEM_BUFS); - drm_free(info, sizeof(*info), DRM_MEM_BUFS); + kfree(info->rects); + kfree(info); } return 0; diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 1bf7efd8d334..b39d7bfc0c9c 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -189,7 +189,7 @@ int drm_lastclose(struct drm_device * dev) if (entry->bound) drm_unbind_agp(entry->memory); drm_free_agp(entry->memory, entry->pages); - drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); + kfree(entry); } INIT_LIST_HEAD(&dev->agp->memory); @@ -208,21 +208,15 @@ int drm_lastclose(struct drm_device * dev) /* Clear vma list (only built for debugging) */ list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) { list_del(&vma->head); - drm_free(vma, 
sizeof(*vma), DRM_MEM_VMAS); + kfree(vma); } if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) { for (i = 0; i < dev->queue_count; i++) { - if (dev->queuelist[i]) { - drm_free(dev->queuelist[i], - sizeof(*dev->queuelist[0]), - DRM_MEM_QUEUES); - dev->queuelist[i] = NULL; - } + kfree(dev->queuelist[i]); + dev->queuelist[i] = NULL; } - drm_free(dev->queuelist, - dev->queue_slots * sizeof(*dev->queuelist), - DRM_MEM_QUEUES); + kfree(dev->queuelist); dev->queuelist = NULL; } dev->queue_count = 0; @@ -344,8 +338,6 @@ static int __init drm_core_init(void) goto err_p3; } - drm_mem_init(); - DRM_INFO("Initialized %s %d.%d.%d %s\n", CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE); return 0; diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 801a0d0e0810..7d0835226f6e 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -252,16 +252,18 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev, { struct drm_display_mode *mode; int hsize = t->hsize * 8 + 248, vsize; + unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK) + >> EDID_TIMING_ASPECT_SHIFT; mode = drm_mode_create(dev); if (!mode) return NULL; - if (t->aspect_ratio == 0) + if (aspect_ratio == 0) vsize = (hsize * 10) / 16; - else if (t->aspect_ratio == 1) + else if (aspect_ratio == 1) vsize = (hsize * 3) / 4; - else if (t->aspect_ratio == 2) + else if (aspect_ratio == 2) vsize = (hsize * 4) / 5; else vsize = (hsize * 9) / 16; @@ -288,17 +290,24 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, { struct drm_display_mode *mode; struct detailed_pixel_timing *pt = &timing->data.pixel_data; + unsigned hactive = (pt->hactive_hblank_hi & 0xf0) << 4 | pt->hactive_lo; + unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo; + unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo; + unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo; + unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 8 | pt->hsync_offset_lo; + unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 6 | pt->hsync_pulse_width_lo; + unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) | (pt->vsync_offset_pulse_width_lo & 0xf); + unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) >> 2 | pt->vsync_offset_pulse_width_lo >> 4; /* ignore tiny modes */ - if (((pt->hactive_hi << 8) | pt->hactive_lo) < 64 || - ((pt->vactive_hi << 8) | pt->hactive_lo) < 64) + if (hactive < 64 || vactive < 64) return NULL; - if (pt->stereo) { + if (pt->misc & DRM_EDID_PT_STEREO) { printk(KERN_WARNING "stereo mode not supported\n"); return NULL; } - if (!pt->separate_sync) { + if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) { printk(KERN_WARNING "integrated sync not supported\n"); return NULL; } @@ -310,41 +319,36 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, mode->type = DRM_MODE_TYPE_DRIVER; if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH) - timing->pixel_clock = 1088; - - mode->clock = timing->pixel_clock * 10; - - mode->hdisplay = (pt->hactive_hi << 8) | pt->hactive_lo; - mode->hsync_start = mode->hdisplay + ((pt->hsync_offset_hi << 8) | - pt->hsync_offset_lo); - mode->hsync_end = mode->hsync_start + - ((pt->hsync_pulse_width_hi << 8) | - pt->hsync_pulse_width_lo); - mode->htotal = mode->hdisplay + ((pt->hblank_hi << 8) | pt->hblank_lo); - - mode->vdisplay = (pt->vactive_hi << 8) | pt->vactive_lo; - mode->vsync_start = 
mode->vdisplay + ((pt->vsync_offset_hi << 4) | - pt->vsync_offset_lo); - mode->vsync_end = mode->vsync_start + - ((pt->vsync_pulse_width_hi << 4) | - pt->vsync_pulse_width_lo); - mode->vtotal = mode->vdisplay + ((pt->vblank_hi << 8) | pt->vblank_lo); + timing->pixel_clock = cpu_to_le16(1088); + + mode->clock = le16_to_cpu(timing->pixel_clock) * 10; + + mode->hdisplay = hactive; + mode->hsync_start = mode->hdisplay + hsync_offset; + mode->hsync_end = mode->hsync_start + hsync_pulse_width; + mode->htotal = mode->hdisplay + hblank; + + mode->vdisplay = vactive; + mode->vsync_start = mode->vdisplay + vsync_offset; + mode->vsync_end = mode->vsync_start + vsync_pulse_width; + mode->vtotal = mode->vdisplay + vblank; drm_mode_set_name(mode); - if (pt->interlaced) + if (pt->misc & DRM_EDID_PT_INTERLACED) mode->flags |= DRM_MODE_FLAG_INTERLACE; if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) { - pt->hsync_positive = 1; - pt->vsync_positive = 1; + pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE; } - mode->flags |= pt->hsync_positive ? DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC; - mode->flags |= pt->vsync_positive ? DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC; + mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ? + DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC; + mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ? + DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC; - mode->width_mm = pt->width_mm_lo | (pt->width_mm_hi << 8); - mode->height_mm = pt->height_mm_lo | (pt->height_mm_hi << 8); + mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf) << 8; + mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4; if (quirks & EDID_QUIRK_DETAILED_IN_CM) { mode->width_mm *= 10; @@ -465,7 +469,7 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid struct drm_display_mode *newmode; /* If std timings bytes are 1, 1 it's empty */ - if (t->hsize == 1 && (t->aspect_ratio | t->vfreq) == 1) + if (t->hsize == 1 && t->vfreq_aspect == 1) continue; newmode = drm_mode_std(dev, &edid->standard_timings[i]); @@ -509,7 +513,7 @@ static int add_detailed_info(struct drm_connector *connector, continue; /* First detailed mode is preferred */ - if (i == 0 && edid->preferred_timing) + if (i == 0 && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING)) newmode->type |= DRM_MODE_TYPE_PREFERRED; drm_mode_probed_add(connector, newmode); @@ -767,22 +771,22 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75)) edid_fixup_preferred(connector, quirks); - connector->display_info.serration_vsync = edid->serration_vsync; - connector->display_info.sync_on_green = edid->sync_on_green; - connector->display_info.composite_sync = edid->composite_sync; - connector->display_info.separate_syncs = edid->separate_syncs; - connector->display_info.blank_to_black = edid->blank_to_black; - connector->display_info.video_level = edid->video_level; - connector->display_info.digital = edid->digital; + connector->display_info.serration_vsync = (edid->input & DRM_EDID_INPUT_SERRATION_VSYNC) ? 1 : 0; + connector->display_info.sync_on_green = (edid->input & DRM_EDID_INPUT_SYNC_ON_GREEN) ? 1 : 0; + connector->display_info.composite_sync = (edid->input & DRM_EDID_INPUT_COMPOSITE_SYNC) ? 1 : 0; + connector->display_info.separate_syncs = (edid->input & DRM_EDID_INPUT_SEPARATE_SYNCS) ? 1 : 0; + connector->display_info.blank_to_black = (edid->input & DRM_EDID_INPUT_BLANK_TO_BLACK) ? 
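The drm_mode_detailed() rewrite above assembles each 12-bit timing value from its low byte plus a nibble of the shared high byte, replacing the old bitfield access that depended on compiler layout (the EDID endianness fix in this merge). The same arithmetic, runnable on its own with made-up descriptor bytes:

#include <stdio.h>

int main(void)
{
	/* Sample bytes only; a real descriptor comes from the EDID block. */
	unsigned char hactive_lo = 0x00, hblank_lo = 0xa0, hactive_hblank_hi = 0x50;

	unsigned hactive = (hactive_hblank_hi & 0xf0) << 4 | hactive_lo;
	unsigned hblank  = (hactive_hblank_hi & 0x0f) << 8 | hblank_lo;

	printf("hactive=%u hblank=%u htotal=%u\n",
	       hactive, hblank, hactive + hblank);	/* 1280 160 1440 */
	return 0;
}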
1 : 0; + connector->display_info.video_level = (edid->input & DRM_EDID_INPUT_VIDEO_LEVEL) >> 5; + connector->display_info.digital = (edid->input & DRM_EDID_INPUT_DIGITAL) ? 1 : 0; connector->display_info.width_mm = edid->width_cm * 10; connector->display_info.height_mm = edid->height_cm * 10; connector->display_info.gamma = edid->gamma; - connector->display_info.gtf_supported = edid->default_gtf; - connector->display_info.standard_color = edid->standard_color; - connector->display_info.display_type = edid->display_type; - connector->display_info.active_off_supported = edid->pm_active_off; - connector->display_info.suspend_supported = edid->pm_suspend; - connector->display_info.standby_supported = edid->pm_standby; + connector->display_info.gtf_supported = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF) ? 1 : 0; + connector->display_info.standard_color = (edid->features & DRM_EDID_FEATURE_STANDARD_COLOR) ? 1 : 0; + connector->display_info.display_type = (edid->features & DRM_EDID_FEATURE_DISPLAY_TYPE) >> 3; + connector->display_info.active_off_supported = (edid->features & DRM_EDID_FEATURE_PM_ACTIVE_OFF) ? 1 : 0; + connector->display_info.suspend_supported = (edid->features & DRM_EDID_FEATURE_PM_SUSPEND) ? 1 : 0; + connector->display_info.standby_supported = (edid->features & DRM_EDID_FEATURE_PM_STANDBY) ? 1 : 0; connector->display_info.gamma = edid->gamma; return num_modes; diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 09a3571c9908..251bc0e3b5ec 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -240,7 +240,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp, DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id); - priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES); + priv = kmalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; @@ -328,7 +328,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp, return 0; out_free: - drm_free(priv, sizeof(*priv), DRM_MEM_FILES); + kfree(priv); filp->private_data = NULL; return ret; } @@ -471,7 +471,7 @@ int drm_release(struct inode *inode, struct file *filp) drm_ctxbitmap_free(dev, pos->handle); list_del(&pos->head); - drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST); + kfree(pos); --dev->ctx_count; } } @@ -516,7 +516,7 @@ int drm_release(struct inode *inode, struct file *filp) if (dev->driver->postclose) dev->driver->postclose(dev, file_priv); - drm_free(file_priv, sizeof(*file_priv), DRM_MEM_FILES); + kfree(file_priv); /* ======================================================== * End inline drm_release diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index ec43005100d9..8104ecaea26f 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -89,7 +89,7 @@ drm_gem_init(struct drm_device *dev) atomic_set(&dev->gtt_count, 0); atomic_set(&dev->gtt_memory, 0); - mm = drm_calloc(1, sizeof(struct drm_gem_mm), DRM_MEM_MM); + mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL); if (!mm) { DRM_ERROR("out of memory\n"); return -ENOMEM; @@ -98,14 +98,14 @@ drm_gem_init(struct drm_device *dev) dev->mm_private = mm; if (drm_ht_create(&mm->offset_hash, 19)) { - drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM); + kfree(mm); return -ENOMEM; } if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START, DRM_FILE_PAGE_OFFSET_SIZE)) { drm_ht_remove(&mm->offset_hash); - drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM); + kfree(mm); return -ENOMEM; } @@ -119,7 +119,7 @@ drm_gem_destroy(struct drm_device *dev) 
drm_mm_takedown(&mm->offset_manager); drm_ht_remove(&mm->offset_hash); - drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM); + kfree(mm); dev->mm_private = NULL; } diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c index ac35145c3e20..f36b21c5b2e1 100644 --- a/drivers/gpu/drm/drm_hashtab.c +++ b/drivers/gpu/drm/drm_hashtab.c @@ -46,8 +46,7 @@ int drm_ht_create(struct drm_open_hash *ht, unsigned int order) ht->table = NULL; ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE); if (!ht->use_vmalloc) { - ht->table = drm_calloc(ht->size, sizeof(*ht->table), - DRM_MEM_HASHTAB); + ht->table = kcalloc(ht->size, sizeof(*ht->table), GFP_KERNEL); } if (!ht->table) { ht->use_vmalloc = 1; @@ -200,8 +199,7 @@ void drm_ht_remove(struct drm_open_hash *ht) if (ht->use_vmalloc) vfree(ht->table); else - drm_free(ht->table, ht->size * sizeof(*ht->table), - DRM_MEM_HASHTAB); + kfree(ht->table); ht->table = NULL; } } diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 1fad76289e66..9b9ff46c2378 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -93,7 +93,7 @@ int drm_setunique(struct drm_device *dev, void *data, master->unique_len = u->unique_len; master->unique_size = u->unique_len + 1; - master->unique = drm_alloc(master->unique_size, DRM_MEM_DRIVER); + master->unique = kmalloc(master->unique_size, GFP_KERNEL); if (!master->unique) return -ENOMEM; if (copy_from_user(master->unique, u->unique, master->unique_len)) @@ -101,9 +101,8 @@ int drm_setunique(struct drm_device *dev, void *data, master->unique[master->unique_len] = '\0'; - dev->devname = - drm_alloc(strlen(dev->driver->pci_driver.name) + - strlen(master->unique) + 2, DRM_MEM_DRIVER); + dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) + + strlen(master->unique) + 2, GFP_KERNEL); if (!dev->devname) return -ENOMEM; @@ -138,7 +137,7 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv) master->unique_len = 40; master->unique_size = master->unique_len; - master->unique = drm_alloc(master->unique_size, DRM_MEM_DRIVER); + master->unique = kmalloc(master->unique_size, GFP_KERNEL); if (master->unique == NULL) return -ENOMEM; @@ -152,9 +151,8 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv) else master->unique_len = len; - dev->devname = - drm_alloc(strlen(dev->driver->pci_driver.name) + master->unique_len + - 2, DRM_MEM_DRIVER); + dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) + + master->unique_len + 2, GFP_KERNEL); if (dev->devname == NULL) return -ENOMEM; diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index fc8e5acd9d9a..b4a3dbcebe9b 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -104,21 +104,13 @@ void drm_vblank_cleanup(struct drm_device *dev) vblank_disable_fn((unsigned long)dev); - drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs, - DRM_MEM_DRIVER); - drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) * - dev->num_crtcs, DRM_MEM_DRIVER); - drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) * - dev->num_crtcs, DRM_MEM_DRIVER); - drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) * - dev->num_crtcs, DRM_MEM_DRIVER); - drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs, - DRM_MEM_DRIVER); - drm_free(dev->last_vblank_wait, - sizeof(*dev->last_vblank_wait) * dev->num_crtcs, - DRM_MEM_DRIVER); - drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) * - dev->num_crtcs, 
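In drm_setunique() and drm_set_busid() above, the devname buffer is sized as the two string lengths plus two. A sketch of where those two extra bytes go; the '@' separator is an assumption of the sketch, not something visible in these hunks:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>

static char *demo_build_devname(const char *drv_name, const char *unique)
{
	char *name = kmalloc(strlen(drv_name) + strlen(unique) + 2, GFP_KERNEL);

	if (!name)
		return NULL;
	sprintf(name, "%s@%s", drv_name, unique);	/* separator + '\0' use the +2 */
	return name;
}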
DRM_MEM_DRIVER); + kfree(dev->vbl_queue); + kfree(dev->_vblank_count); + kfree(dev->vblank_refcount); + kfree(dev->vblank_enabled); + kfree(dev->last_vblank); + kfree(dev->last_vblank_wait); + kfree(dev->vblank_inmodeset); dev->num_crtcs = 0; } @@ -132,37 +124,33 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs) spin_lock_init(&dev->vbl_lock); dev->num_crtcs = num_crtcs; - dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs, - DRM_MEM_DRIVER); + dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs, + GFP_KERNEL); if (!dev->vbl_queue) goto err; - dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs, - DRM_MEM_DRIVER); + dev->_vblank_count = kmalloc(sizeof(atomic_t) * num_crtcs, GFP_KERNEL); if (!dev->_vblank_count) goto err; - dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs, - DRM_MEM_DRIVER); + dev->vblank_refcount = kmalloc(sizeof(atomic_t) * num_crtcs, + GFP_KERNEL); if (!dev->vblank_refcount) goto err; - dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int), - DRM_MEM_DRIVER); + dev->vblank_enabled = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL); if (!dev->vblank_enabled) goto err; - dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER); + dev->last_vblank = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL); if (!dev->last_vblank) goto err; - dev->last_vblank_wait = drm_calloc(num_crtcs, sizeof(u32), - DRM_MEM_DRIVER); + dev->last_vblank_wait = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL); if (!dev->last_vblank_wait) goto err; - dev->vblank_inmodeset = drm_calloc(num_crtcs, sizeof(int), - DRM_MEM_DRIVER); + dev->vblank_inmodeset = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL); if (!dev->vblank_inmodeset) goto err; diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c index 0c707f533eab..e4865f99989c 100644 --- a/drivers/gpu/drm/drm_memory.c +++ b/drivers/gpu/drm/drm_memory.c @@ -36,15 +36,6 @@ #include <linux/highmem.h> #include "drmP.h" -#ifdef DEBUG_MEMORY -#include "drm_memory_debug.h" -#else - -/** No-op. */ -void drm_mem_init(void) -{ -} - /** * Called when "/proc/dri/%dev%/mem" is read. 
* @@ -64,28 +55,15 @@ int drm_mem_info(char *buf, char **start, off_t offset, return 0; } -/** Wrapper around kmalloc() and kfree() */ -void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area) -{ - void *pt; - - if (!(pt = kmalloc(size, GFP_KERNEL))) - return NULL; - if (oldpt && oldsize) { - memcpy(pt, oldpt, oldsize); - kfree(oldpt); - } - return pt; -} - #if __OS_HAS_AGP static void *agp_remap(unsigned long offset, unsigned long size, struct drm_device * dev) { - unsigned long *phys_addr_map, i, num_pages = + unsigned long i, num_pages = PAGE_ALIGN(size) / PAGE_SIZE; struct drm_agp_mem *agpmem; struct page **page_map; + struct page **phys_page_map; void *addr; size = PAGE_ALIGN(size); @@ -112,10 +90,9 @@ static void *agp_remap(unsigned long offset, unsigned long size, if (!page_map) return NULL; - phys_addr_map = - agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE; + phys_page_map = (agpmem->memory->pages + (offset - agpmem->bound) / PAGE_SIZE); for (i = 0; i < num_pages; ++i) - page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT); + page_map[i] = phys_page_map[i]; addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP); vfree(page_map); @@ -157,8 +134,6 @@ static inline void *agp_remap(unsigned long offset, unsigned long size, #endif /* agp */ -#endif /* debug_memory */ - void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev) { if (drm_core_has_AGP(dev) && diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index a912a0ff11cc..3e47869d6dae 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -187,9 +187,10 @@ static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent, } - -struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *node, - unsigned long size, unsigned alignment) +struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node, + unsigned long size, + unsigned alignment, + int atomic) { struct drm_mm_node *align_splitoff = NULL; @@ -200,7 +201,7 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *node, if (tmp) { align_splitoff = - drm_mm_split_at_start(node, alignment - tmp, 0); + drm_mm_split_at_start(node, alignment - tmp, atomic); if (unlikely(align_splitoff == NULL)) return NULL; } @@ -209,7 +210,7 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *node, list_del_init(&node->fl_entry); node->free = 0; } else { - node = drm_mm_split_at_start(node, size, 0); + node = drm_mm_split_at_start(node, size, atomic); } if (align_splitoff) @@ -217,42 +218,7 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *node, return node; } - -EXPORT_SYMBOL(drm_mm_get_block); - -struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent, - unsigned long size, - unsigned alignment) -{ - - struct drm_mm_node *align_splitoff = NULL; - struct drm_mm_node *child; - unsigned tmp = 0; - - if (alignment) - tmp = parent->start % alignment; - - if (tmp) { - align_splitoff = - drm_mm_split_at_start(parent, alignment - tmp, 1); - if (unlikely(align_splitoff == NULL)) - return NULL; - } - - if (parent->size == size) { - list_del_init(&parent->fl_entry); - parent->free = 0; - return parent; - } else { - child = drm_mm_split_at_start(parent, size, 1); - } - - if (align_splitoff) - drm_mm_put_block(align_splitoff); - - return child; -} -EXPORT_SYMBOL(drm_mm_get_block_atomic); +EXPORT_SYMBOL(drm_mm_get_block_generic); /* * Put a block. Merge with the previous and / or next block if they are free. 
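drm_mm_get_block() and drm_mm_get_block_atomic() above collapse into a single drm_mm_get_block_generic() that takes an atomic flag, so the two behaviours can no longer drift apart. Callers keep their old entry points as thin wrappers; a sketch of that shape (the demo_ wrappers below are illustrative, the real ones live in the DRM headers):

#include "drmP.h"

static inline struct drm_mm_node *demo_mm_get_block(struct drm_mm_node *parent,
						    unsigned long size,
						    unsigned alignment)
{
	return drm_mm_get_block_generic(parent, size, alignment, 0);
}

static inline struct drm_mm_node *demo_mm_get_block_atomic(struct drm_mm_node *parent,
							    unsigned long size,
							    unsigned alignment)
{
	return drm_mm_get_block_generic(parent, size, alignment, 1);
}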
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index b55d5bc6ea61..577094fb1995 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c @@ -55,17 +55,6 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali unsigned long addr; size_t sz; #endif -#ifdef DRM_DEBUG_MEMORY - int area = DRM_MEM_DMA; - - spin_lock(&drm_mem_lock); - if ((drm_ram_used >> PAGE_SHIFT) - > (DRM_RAM_PERCENT * drm_ram_available) / 100) { - spin_unlock(&drm_mem_lock); - return 0; - } - spin_unlock(&drm_mem_lock); -#endif /* pci_alloc_consistent only guarantees alignment to the smallest * PAGE_SIZE order which is greater than or equal to the requested size. @@ -86,26 +75,10 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali dmah->size = size; dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP); -#ifdef DRM_DEBUG_MEMORY - if (dmah->vaddr == NULL) { - spin_lock(&drm_mem_lock); - ++drm_mem_stats[area].fail_count; - spin_unlock(&drm_mem_lock); - kfree(dmah); - return NULL; - } - - spin_lock(&drm_mem_lock); - ++drm_mem_stats[area].succeed_count; - drm_mem_stats[area].bytes_allocated += size; - drm_ram_used += size; - spin_unlock(&drm_mem_lock); -#else if (dmah->vaddr == NULL) { kfree(dmah); return NULL; } -#endif memset(dmah->vaddr, 0, size); @@ -132,17 +105,8 @@ void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah) unsigned long addr; size_t sz; #endif -#ifdef DRM_DEBUG_MEMORY - int area = DRM_MEM_DMA; - int alloc_count; - int free_count; -#endif - if (!dmah->vaddr) { -#ifdef DRM_DEBUG_MEMORY - DRM_MEM_ERROR(area, "Attempt to free address 0\n"); -#endif - } else { + if (dmah->vaddr) { /* XXX - Is virt_to_page() legal for consistent mem? 
*/ /* Unreserve */ for (addr = (unsigned long)dmah->vaddr, sz = dmah->size; @@ -152,21 +116,6 @@ void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah) dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr, dmah->busaddr); } - -#ifdef DRM_DEBUG_MEMORY - spin_lock(&drm_mem_lock); - free_count = ++drm_mem_stats[area].free_count; - alloc_count = drm_mem_stats[area].succeed_count; - drm_mem_stats[area].bytes_freed += size; - drm_ram_used -= size; - spin_unlock(&drm_mem_lock); - if (free_count > alloc_count) { - DRM_MEM_ERROR(area, - "Excess frees: %d frees, %d allocs\n", - free_count, alloc_count); - } -#endif - } /** diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c index bae5391165ac..bbd4b3d1074a 100644 --- a/drivers/gpu/drm/drm_proc.c +++ b/drivers/gpu/drm/drm_proc.c @@ -105,13 +105,12 @@ int drm_proc_create_files(struct drm_info_list *files, int count, (dev->driver->driver_features & features) != features) continue; - tmp = drm_alloc(sizeof(struct drm_info_node), _DRM_DRIVER); + tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL); ent = create_proc_entry(files[i].name, S_IFREG | S_IRUGO, root); if (!ent) { DRM_ERROR("Cannot create /proc/dri/%s/%s\n", name, files[i].name); - drm_free(tmp, sizeof(struct drm_info_node), - _DRM_DRIVER); + kfree(tmp); ret = -1; goto fail; } @@ -192,8 +191,7 @@ int drm_proc_remove_files(struct drm_info_list *files, int count, remove_proc_entry(files[i].name, minor->proc_root); list_del(pos); - drm_free(tmp, sizeof(struct drm_info_node), - _DRM_DRIVER); + kfree(tmp); } } } diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c index b2b0f3d41714..c7823c863d4f 100644 --- a/drivers/gpu/drm/drm_scatter.c +++ b/drivers/gpu/drm/drm_scatter.c @@ -58,11 +58,9 @@ void drm_sg_cleanup(struct drm_sg_mem * entry) vfree(entry->virtual); - drm_free(entry->busaddr, - entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES); - drm_free(entry->pagelist, - entry->pages * sizeof(*entry->pagelist), DRM_MEM_PAGES); - drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS); + kfree(entry->busaddr); + kfree(entry->pagelist); + kfree(entry); } #ifdef _LP64 @@ -84,7 +82,7 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request) if (dev->sg) return -EINVAL; - entry = drm_alloc(sizeof(*entry), DRM_MEM_SGLISTS); + entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return -ENOMEM; @@ -93,34 +91,27 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request) DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages); entry->pages = pages; - entry->pagelist = drm_alloc(pages * sizeof(*entry->pagelist), - DRM_MEM_PAGES); + entry->pagelist = kmalloc(pages * sizeof(*entry->pagelist), GFP_KERNEL); if (!entry->pagelist) { - drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS); + kfree(entry); return -ENOMEM; } memset(entry->pagelist, 0, pages * sizeof(*entry->pagelist)); - entry->busaddr = drm_alloc(pages * sizeof(*entry->busaddr), - DRM_MEM_PAGES); + entry->busaddr = kmalloc(pages * sizeof(*entry->busaddr), GFP_KERNEL); if (!entry->busaddr) { - drm_free(entry->pagelist, - entry->pages * sizeof(*entry->pagelist), - DRM_MEM_PAGES); - drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS); + kfree(entry->pagelist); + kfree(entry); return -ENOMEM; } memset((void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr)); entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT); if (!entry->virtual) { - drm_free(entry->busaddr, - entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES); - drm_free(entry->pagelist, - 
entry->pages * sizeof(*entry->pagelist), - DRM_MEM_PAGES); - drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS); + kfree(entry->busaddr); + kfree(entry->pagelist); + kfree(entry); return -ENOMEM; } diff --git a/drivers/gpu/drm/drm_sman.c b/drivers/gpu/drm/drm_sman.c index 926f146390ce..463aed9403db 100644 --- a/drivers/gpu/drm/drm_sman.c +++ b/drivers/gpu/drm/drm_sman.c @@ -48,9 +48,7 @@ void drm_sman_takedown(struct drm_sman * sman) { drm_ht_remove(&sman->user_hash_tab); drm_ht_remove(&sman->owner_hash_tab); - if (sman->mm) - drm_free(sman->mm, sman->num_managers * sizeof(*sman->mm), - DRM_MEM_MM); + kfree(sman->mm); } EXPORT_SYMBOL(drm_sman_takedown); @@ -61,8 +59,9 @@ drm_sman_init(struct drm_sman * sman, unsigned int num_managers, { int ret = 0; - sman->mm = (struct drm_sman_mm *) drm_calloc(num_managers, sizeof(*sman->mm), - DRM_MEM_MM); + sman->mm = (struct drm_sman_mm *) kcalloc(num_managers, + sizeof(*sman->mm), + GFP_KERNEL); if (!sman->mm) { ret = -ENOMEM; goto out; @@ -78,7 +77,7 @@ drm_sman_init(struct drm_sman * sman, unsigned int num_managers, drm_ht_remove(&sman->owner_hash_tab); out1: - drm_free(sman->mm, num_managers * sizeof(*sman->mm), DRM_MEM_MM); + kfree(sman->mm); out: return ret; } @@ -110,7 +109,7 @@ static void drm_sman_mm_destroy(void *private) { struct drm_mm *mm = (struct drm_mm *) private; drm_mm_takedown(mm); - drm_free(mm, sizeof(*mm), DRM_MEM_MM); + kfree(mm); } static unsigned long drm_sman_mm_offset(void *private, void *ref) @@ -130,7 +129,7 @@ drm_sman_set_range(struct drm_sman * sman, unsigned int manager, BUG_ON(manager >= sman->num_managers); sman_mm = &sman->mm[manager]; - mm = drm_calloc(1, sizeof(*mm), DRM_MEM_MM); + mm = kzalloc(sizeof(*mm), GFP_KERNEL); if (!mm) { return -ENOMEM; } @@ -138,7 +137,7 @@ drm_sman_set_range(struct drm_sman * sman, unsigned int manager, ret = drm_mm_init(mm, start, size); if (ret) { - drm_free(mm, sizeof(*mm), DRM_MEM_MM); + kfree(mm); return ret; } @@ -176,7 +175,7 @@ static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman, owner_hash); } - owner_item = drm_calloc(1, sizeof(*owner_item), DRM_MEM_MM); + owner_item = kzalloc(sizeof(*owner_item), GFP_KERNEL); if (!owner_item) goto out; @@ -189,7 +188,7 @@ static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman, return owner_item; out1: - drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM); + kfree(owner_item); out: return NULL; } @@ -212,7 +211,7 @@ struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int man return NULL; } - memblock = drm_calloc(1, sizeof(*memblock), DRM_MEM_MM); + memblock = kzalloc(sizeof(*memblock), GFP_KERNEL); if (!memblock) goto out; @@ -237,7 +236,7 @@ struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int man out2: drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash); out1: - drm_free(memblock, sizeof(*memblock), DRM_MEM_MM); + kfree(memblock); out: sman_mm->free(sman_mm->private, tmp); @@ -253,7 +252,7 @@ static void drm_sman_free(struct drm_memblock_item *item) list_del(&item->owner_list); drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash); item->mm->free(item->mm->private, item->mm_info); - drm_free(item, sizeof(*item), DRM_MEM_MM); + kfree(item); } int drm_sman_free_key(struct drm_sman *sman, unsigned int key) @@ -277,7 +276,7 @@ static void drm_sman_remove_owner(struct drm_sman *sman, { list_del(&owner_item->sman_list); drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash); - drm_free(owner_item, sizeof(*owner_item), 
DRM_MEM_MM); + kfree(owner_item); } int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner) diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index 387a8de1bc7e..155a5bbce680 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c @@ -107,7 +107,7 @@ struct drm_master *drm_master_create(struct drm_minor *minor) { struct drm_master *master; - master = drm_calloc(1, sizeof(*master), DRM_MEM_DRIVER); + master = kzalloc(sizeof(*master), GFP_KERNEL); if (!master) return NULL; @@ -149,7 +149,7 @@ static void drm_master_destroy(struct kref *kref) } if (master->unique) { - drm_free(master->unique, master->unique_size, DRM_MEM_DRIVER); + kfree(master->unique); master->unique = NULL; master->unique_len = 0; } @@ -157,12 +157,12 @@ static void drm_master_destroy(struct kref *kref) list_for_each_entry_safe(pt, next, &master->magicfree, head) { list_del(&pt->head); drm_ht_remove_item(&master->magiclist, &pt->hash_item); - drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC); + kfree(pt); } drm_ht_remove(&master->magiclist); - drm_free(master, sizeof(*master), DRM_MEM_DRIVER); + kfree(master); } void drm_master_put(struct drm_master **master) @@ -390,7 +390,7 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, DRM_DEBUG("\n"); - dev = drm_calloc(1, sizeof(*dev), DRM_MEM_STUB); + dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; @@ -443,7 +443,7 @@ err_g3: err_g2: pci_disable_device(pdev); err_g1: - drm_free(dev, sizeof(*dev), DRM_MEM_STUB); + kfree(dev); return ret; } EXPORT_SYMBOL(drm_get_dev); @@ -516,7 +516,7 @@ void drm_put_dev(struct drm_device *dev) dev->driver->unload(dev); if (drm_core_has_AGP(dev) && dev->agp) { - drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS); + kfree(dev->agp); dev->agp = NULL; } @@ -535,10 +535,9 @@ void drm_put_dev(struct drm_device *dev) drm_put_minor(&dev->primary); if (dev->devname) { - drm_free(dev->devname, strlen(dev->devname) + 1, - DRM_MEM_DRIVER); + kfree(dev->devname); dev->devname = NULL; } - drm_free(dev, sizeof(*dev), DRM_MEM_STUB); + kfree(dev); } EXPORT_SYMBOL(drm_put_dev); diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index 22f76567ac7d..7e1fbe5d4779 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c @@ -144,14 +144,14 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) * Get the page, inc the use count, and return it */ offset = (baddr - agpmem->bound) >> PAGE_SHIFT; - page = virt_to_page(__va(agpmem->memory->memory[offset])); + page = agpmem->memory->pages[offset]; get_page(page); vmf->page = page; DRM_DEBUG ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n", (unsigned long long)baddr, - __va(agpmem->memory->memory[offset]), + agpmem->memory->pages[offset], (unsigned long long)offset, page_count(page)); return 0; @@ -227,7 +227,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma) found_maps++; if (pt->vma == vma) { list_del(&pt->head); - drm_free(pt, sizeof(*pt), DRM_MEM_VMAS); + kfree(pt); } } @@ -273,7 +273,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma) DRM_ERROR("tried to rmmap GEM object\n"); break; } - drm_free(map, sizeof(*map), DRM_MEM_MAPS); + kfree(map); } } mutex_unlock(&dev->struct_mutex); @@ -414,7 +414,7 @@ void drm_vm_open_locked(struct vm_area_struct *vma) vma->vm_start, vma->vm_end - vma->vm_start); atomic_inc(&dev->vma_count); - vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS); + vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL); if (vma_entry) { 
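The drm_vm.c fault path above (and agp_remap() earlier) now indexes agpmem->memory->pages[] directly instead of translating a stored physical address back into its struct page, following the switch of AGP to a page array in this merge. Roughly, with illustrative parameter names:

#include <linux/mm.h>

/* Old bookkeeping: an array of physical addresses that must be converted. */
static struct page *demo_agp_page_old(const unsigned long *phys_addrs, pgoff_t off)
{
	return pfn_to_page(phys_addrs[off] >> PAGE_SHIFT);
}

/* New bookkeeping: an array of struct page pointers, no translation needed. */
static struct page *demo_agp_page_new(struct page * const *pages, pgoff_t off)
{
	return pages[off];
}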
vma_entry->vma = vma; vma_entry->pid = current->pid; @@ -454,7 +454,7 @@ static void drm_vm_close(struct vm_area_struct *vma) list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { if (pt->vma == vma) { list_del(&pt->head); - drm_free(pt, sizeof(*pt), DRM_MEM_VMAS); + kfree(pt); break; } } diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c index e5de8ea41544..7d1d88cdf2dc 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ b/drivers/gpu/drm/i810/i810_dma.c @@ -227,8 +227,7 @@ static int i810_dma_cleanup(struct drm_device * dev) /* Need to rewrite hardware status page */ I810_WRITE(0x02080, 0x1ffff000); } - drm_free(dev->dev_private, sizeof(drm_i810_private_t), - DRM_MEM_DRIVER); + kfree(dev->dev_private); dev->dev_private = NULL; for (i = 0; i < dma->buf_count; i++) { @@ -439,8 +438,7 @@ static int i810_dma_init(struct drm_device *dev, void *data, switch (init->func) { case I810_INIT_DMA_1_4: DRM_INFO("Using v1.4 init.\n"); - dev_priv = drm_alloc(sizeof(drm_i810_private_t), - DRM_MEM_DRIVER); + dev_priv = kmalloc(sizeof(drm_i810_private_t), GFP_KERNEL); if (dev_priv == NULL) return -ENOMEM; retcode = i810_dma_initialize(dev, dev_priv, init); diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c index a86ab30b4620..877bf6cb14a4 100644 --- a/drivers/gpu/drm/i830/i830_dma.c +++ b/drivers/gpu/drm/i830/i830_dma.c @@ -232,8 +232,7 @@ static int i830_dma_cleanup(struct drm_device * dev) I830_WRITE(0x02080, 0x1ffff000); } - drm_free(dev->dev_private, sizeof(drm_i830_private_t), - DRM_MEM_DRIVER); + kfree(dev->dev_private); dev->dev_private = NULL; for (i = 0; i < dma->buf_count; i++) { @@ -459,8 +458,7 @@ static int i830_dma_init(struct drm_device *dev, void *data, switch (init->func) { case I830_INIT_DMA: - dev_priv = drm_alloc(sizeof(drm_i830_private_t), - DRM_MEM_DRIVER); + dev_priv = kmalloc(sizeof(drm_i830_private_t), GFP_KERNEL); if (dev_priv == NULL) return -ENOMEM; retcode = i830_dma_initialize(dev, dev_priv, init); diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 1a60626f6803..f112c769d533 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -643,9 +643,9 @@ static int i915_batchbuffer(struct drm_device *dev, void *data, return -EINVAL; if (batch->num_cliprects) { - cliprects = drm_calloc(batch->num_cliprects, - sizeof(struct drm_clip_rect), - DRM_MEM_DRIVER); + cliprects = kcalloc(batch->num_cliprects, + sizeof(struct drm_clip_rect), + GFP_KERNEL); if (cliprects == NULL) return -ENOMEM; @@ -664,9 +664,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data, sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); fail_free: - drm_free(cliprects, - batch->num_cliprects * sizeof(struct drm_clip_rect), - DRM_MEM_DRIVER); + kfree(cliprects); return ret; } @@ -692,7 +690,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data, if (cmdbuf->num_cliprects < 0) return -EINVAL; - batch_data = drm_alloc(cmdbuf->sz, DRM_MEM_DRIVER); + batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL); if (batch_data == NULL) return -ENOMEM; @@ -701,9 +699,8 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data, goto fail_batch_free; if (cmdbuf->num_cliprects) { - cliprects = drm_calloc(cmdbuf->num_cliprects, - sizeof(struct drm_clip_rect), - DRM_MEM_DRIVER); + cliprects = kcalloc(cmdbuf->num_cliprects, + sizeof(struct drm_clip_rect), GFP_KERNEL); if (cliprects == NULL) goto fail_batch_free; @@ -726,11 +723,9 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data, 
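The i915 batchbuffer and cmdbuffer paths above allocate their cliprect array with kcalloc() and then copy it in from user space. A sketch of that copy-in step; the helper name and its error-pointer return are inventions of the sketch, not the driver's interface:

#include "drmP.h"
#include <linux/slab.h>
#include <linux/err.h>

static struct drm_clip_rect *demo_copy_cliprects(const void __user *user_rects,
						 unsigned int num)
{
	struct drm_clip_rect *rects;

	rects = kcalloc(num, sizeof(*rects), GFP_KERNEL);
	if (!rects)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(rects, user_rects, num * sizeof(*rects))) {
		kfree(rects);
		return ERR_PTR(-EFAULT);
	}
	return rects;
}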
sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); fail_clip_free: - drm_free(cliprects, - cmdbuf->num_cliprects * sizeof(struct drm_clip_rect), - DRM_MEM_DRIVER); + kfree(cliprects); fail_batch_free: - drm_free(batch_data, cmdbuf->sz, DRM_MEM_DRIVER); + kfree(batch_data); return ret; } @@ -1067,7 +1062,7 @@ int i915_master_create(struct drm_device *dev, struct drm_master *master) { struct drm_i915_master_private *master_priv; - master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER); + master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL); if (!master_priv) return -ENOMEM; @@ -1082,7 +1077,7 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master) if (!master_priv) return; - drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER); + kfree(master_priv); master->driver_priv = NULL; } @@ -1111,12 +1106,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) dev->types[8] = _DRM_STAT_SECONDARY; dev->types[9] = _DRM_STAT_DMA; - dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER); + dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL); if (dev_priv == NULL) return -ENOMEM; - memset(dev_priv, 0, sizeof(drm_i915_private_t)); - dev->dev_private = (void *)dev_priv; dev_priv->dev = dev; @@ -1153,13 +1146,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) "performance may suffer.\n"); } -#ifdef CONFIG_HIGHMEM64G - /* don't enable GEM on PAE - needs agp + set_memory_* interface fixes */ - dev_priv->has_gem = 0; -#else /* enable GEM by default */ dev_priv->has_gem = 1; -#endif dev->driver->get_vblank_counter = i915_get_vblank_counter; dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ @@ -1221,7 +1209,7 @@ out_iomapfree: out_rmmap: iounmap(dev_priv->regs); free_priv: - drm_free(dev_priv, sizeof(struct drm_i915_private), DRM_MEM_DRIVER); + kfree(dev_priv); return ret; } @@ -1261,8 +1249,7 @@ int i915_driver_unload(struct drm_device *dev) i915_gem_lastclose(dev); } - drm_free(dev->dev_private, sizeof(drm_i915_private_t), - DRM_MEM_DRIVER); + kfree(dev->dev_private); return 0; } @@ -1273,7 +1260,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv) DRM_DEBUG_DRIVER(I915_DRV, "\n"); i915_file_priv = (struct drm_i915_file_private *) - drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES); + kmalloc(sizeof(*i915_file_priv), GFP_KERNEL); if (!i915_file_priv) return -ENOMEM; @@ -1326,7 +1313,7 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv) { struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; - drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES); + kfree(i915_file_priv); } struct drm_ioctl_desc i915_ioctls[] = { diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 8ef6bcec211b..7a84f04e8439 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -646,6 +646,8 @@ void i915_gem_object_unpin(struct drm_gem_object *obj); int i915_gem_object_unbind(struct drm_gem_object *obj); void i915_gem_lastclose(struct drm_device *dev); uint32_t i915_get_gem_seqno(struct drm_device *dev); +int i915_gem_object_get_fence_reg(struct drm_gem_object *obj); +int i915_gem_object_put_fence_reg(struct drm_gem_object *obj); void i915_gem_retire_requests(struct drm_device *dev); void i915_gem_retire_work_handler(struct work_struct *work); void i915_gem_clflush_object(struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 
c0ae6bbbd9b5..fd2b8bdffe3f 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -46,7 +46,6 @@ static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *o static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment); -static int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write); static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); static int i915_gem_evict_something(struct drm_device *dev); static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, @@ -1158,7 +1157,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) /* Need a new fence register? */ if (obj_priv->fence_reg == I915_FENCE_REG_NONE && obj_priv->tiling_mode != I915_TILING_NONE) { - ret = i915_gem_object_get_fence_reg(obj, write); + ret = i915_gem_object_get_fence_reg(obj); if (ret) { mutex_unlock(&dev->struct_mutex); return VM_FAULT_SIGBUS; @@ -1208,8 +1207,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj) /* Set the object up for mmap'ing */ list = &obj->map_list; - list->map = drm_calloc(1, sizeof(struct drm_map_list), - DRM_MEM_DRIVER); + list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL); if (!list->map) return -ENOMEM; @@ -1249,7 +1247,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj) out_free_mm: drm_mm_put_block(list->file_offset_node); out_free_list: - drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER); + kfree(list->map); return ret; } @@ -1271,7 +1269,7 @@ i915_gem_free_mmap_offset(struct drm_gem_object *obj) } if (list->map) { - drm_free(list->map, sizeof(struct drm_map), DRM_MEM_DRIVER); + kfree(list->map); list->map = NULL; } @@ -1494,7 +1492,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, if (file_priv != NULL) i915_file_priv = file_priv->driver_priv; - request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER); + request = kzalloc(sizeof(*request), GFP_KERNEL); if (request == NULL) return 0; @@ -1676,7 +1674,7 @@ i915_gem_retire_requests(struct drm_device *dev) list_del(&request->list); list_del(&request->client_list); - drm_free(request, sizeof(*request), DRM_MEM_DRIVER); + kfree(request); } else break; } @@ -2163,13 +2161,11 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg) val |= I830_FENCE_REG_VALID; I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val); - } /** * i915_gem_object_get_fence_reg - set up a fence reg for an object * @obj: object to map through a fence reg - * @write: object is about to be written * * When mapping objects through the GTT, userspace wants to be able to write * to them without having to worry about swizzling if the object is tiled. @@ -2180,8 +2176,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg) * It then sets up the reg based on the object's properties: address, pitch * and tiling format. */ -static int -i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write) +int +i915_gem_object_get_fence_reg(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -2331,6 +2327,42 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj) } /** + * i915_gem_object_put_fence_reg - waits on outstanding fenced access + * to the buffer to finish, and then resets the fence register. + * @obj: tiled object holding a fence register. 
+ * + * Zeroes out the fence register itself and clears out the associated + * data structures in dev_priv and obj_priv. + */ +int +i915_gem_object_put_fence_reg(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + + if (obj_priv->fence_reg == I915_FENCE_REG_NONE) + return 0; + + /* On the i915, GPU access to tiled buffers is via a fence, + * therefore we must wait for any outstanding access to complete + * before clearing the fence. + */ + if (!IS_I965G(dev)) { + int ret; + + i915_gem_object_flush_gpu_write_domain(obj); + i915_gem_object_flush_gtt_write_domain(obj); + ret = i915_gem_object_wait_rendering(obj); + if (ret != 0) + return ret; + } + + i915_gem_clear_fence_reg (obj); + + return 0; +} + +/** * Finds free space in the GTT aperture and binds the object there. */ static int @@ -2800,8 +2832,7 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) /* Free the page_cpu_valid mappings which are now stale, whether * or not we've got I915_GEM_DOMAIN_CPU. */ - drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE, - DRM_MEM_DRIVER); + kfree(obj_priv->page_cpu_valid); obj_priv->page_cpu_valid = NULL; } @@ -2843,8 +2874,8 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, * newly adding I915_GEM_DOMAIN_CPU */ if (obj_priv->page_cpu_valid == NULL) { - obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE, - DRM_MEM_DRIVER); + obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE, + GFP_KERNEL); if (obj_priv->page_cpu_valid == NULL) return -ENOMEM; } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) @@ -3267,8 +3298,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, } if (args->num_cliprects != 0) { - cliprects = drm_calloc(args->num_cliprects, sizeof(*cliprects), - DRM_MEM_DRIVER); + cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects), + GFP_KERNEL); if (cliprects == NULL) goto pre_mutex_err; @@ -3521,8 +3552,7 @@ err: pre_mutex_err: drm_free_large(object_list); drm_free_large(exec_list); - drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects, - DRM_MEM_DRIVER); + kfree(cliprects); return ret; } @@ -3550,7 +3580,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) if (!IS_I965G(dev) && obj_priv->fence_reg == I915_FENCE_REG_NONE && obj_priv->tiling_mode != I915_TILING_NONE) { - ret = i915_gem_object_get_fence_reg(obj, true); + ret = i915_gem_object_get_fence_reg(obj); if (ret != 0) { if (ret != -EBUSY && ret != -ERESTARTSYS) DRM_ERROR("Failure to install fence: %d\n", @@ -3739,7 +3769,7 @@ int i915_gem_init_object(struct drm_gem_object *obj) { struct drm_i915_gem_object *obj_priv; - obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER); + obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL); if (obj_priv == NULL) return -ENOMEM; @@ -3777,9 +3807,9 @@ void i915_gem_free_object(struct drm_gem_object *obj) i915_gem_free_mmap_offset(obj); - drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER); + kfree(obj_priv->page_cpu_valid); kfree(obj_priv->bit_17); - drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); + kfree(obj->driver_private); } /** Unbinds all objects that are on the given buffer list. 
*/ @@ -4233,7 +4263,7 @@ int i915_gem_init_phys_object(struct drm_device *dev, if (dev_priv->mm.phys_objs[id - 1] || !size) return 0; - phys_obj = drm_calloc(1, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER); + phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL); if (!phys_obj) return -ENOMEM; @@ -4252,7 +4282,7 @@ int i915_gem_init_phys_object(struct drm_device *dev, return 0; kfree_obj: - drm_free(phys_obj, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER); + kfree(phys_obj); return ret; } @@ -4312,6 +4342,8 @@ void i915_gem_detach_phys_object(struct drm_device *dev, } drm_clflush_pages(obj_priv->pages, page_count); drm_agp_chipset_flush(dev); + + i915_gem_object_put_pages(obj); out: obj_priv->phys_obj->cur_obj = NULL; obj_priv->phys_obj = NULL; @@ -4369,6 +4401,8 @@ i915_gem_attach_phys_object(struct drm_device *dev, kunmap_atomic(src, KM_USER0); } + i915_gem_object_put_pages(obj); + return 0; out: return ret; diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c index 986f1082c596..28146e405e87 100644 --- a/drivers/gpu/drm/i915/i915_gem_debugfs.c +++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c @@ -104,7 +104,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) if (obj->name) seq_printf(m, " (name: %d)", obj->name); if (obj_priv->fence_reg != I915_FENCE_REG_NONE) - seq_printf(m, " (fence: %d\n", obj_priv->fence_reg); + seq_printf(m, " (fence: %d)\n", obj_priv->fence_reg); seq_printf(m, "\n"); } @@ -318,7 +318,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data) seq_printf(m, "RingTail : %08x\n", tail); seq_printf(m, "RingMask : %08x\n", mask); seq_printf(m, "RingSize : %08lx\n", dev_priv->ring.Size); - seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD)); + seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD)); return 0; } diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 9a05cadaa4ad..5c1ceec49f5b 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -408,7 +408,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) if (stride & (stride - 1)) return false; - /* We don't handle the aperture area covered by the fence being bigger + /* We don't 0handle the aperture area covered by the fence being bigger * than the object size. */ if (i915_get_fence_size(dev, size) != size) @@ -417,6 +417,33 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) return true; } +static bool +i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode) +{ + struct drm_device *dev = obj->dev; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + + if (obj_priv->gtt_space == NULL) + return true; + + if (tiling_mode == I915_TILING_NONE) + return true; + + if (!IS_I965G(dev)) { + if (obj_priv->gtt_offset & (obj->size - 1)) + return false; + if (IS_I9XX(dev)) { + if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK) + return false; + } else { + if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK) + return false; + } + } + + return true; +} + /** * Sets the tiling mode of an object, returning the required swizzling of * bit 6 of addresses in the object. 
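i915_gem_object_fence_offset_ok() above relies on the power-of-two alignment trick: for a power-of-two fence size, offset & (size - 1) is non-zero exactly when the offset is not aligned to that size. A standalone demonstration with arbitrary numbers:

#include <stdio.h>

static int fence_offset_ok(unsigned long offset, unsigned long size)
{
	return (offset & (size - 1)) == 0;	/* size must be a power of two */
}

int main(void)
{
	printf("%d\n", fence_offset_ok(2UL << 20, 1UL << 20));		/* 1: 2 MiB is 1 MiB aligned */
	printf("%d\n", fence_offset_ok(1536UL * 1024, 1UL << 20));	/* 0: 1.5 MiB is not */
	return 0;
}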
@@ -429,6 +456,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, drm_i915_private_t *dev_priv = dev->dev_private; struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; + int ret = 0; obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) @@ -436,14 +464,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, obj_priv = obj->driver_private; if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) { + mutex_lock(&dev->struct_mutex); drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); return -EINVAL; } - mutex_lock(&dev->struct_mutex); - if (args->tiling_mode == I915_TILING_NONE) { args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; + args->stride = 0; } else { if (args->tiling_mode == I915_TILING_X) args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; @@ -466,32 +495,38 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) { args->tiling_mode = I915_TILING_NONE; args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; + args->stride = 0; } } - if (args->tiling_mode != obj_priv->tiling_mode) { - int ret; - /* Unbind the object, as switching tiling means we're - * switching the cache organization due to fencing, probably. + mutex_lock(&dev->struct_mutex); + if (args->tiling_mode != obj_priv->tiling_mode || + args->stride != obj_priv->stride) { + /* We need to rebind the object if its current allocation + * no longer meets the alignment restrictions for its new + * tiling mode. Otherwise we can just leave it alone, but + * need to ensure that any fence register is cleared. */ - ret = i915_gem_object_unbind(obj); + if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode)) + ret = i915_gem_object_unbind(obj); + else + ret = i915_gem_object_put_fence_reg(obj); if (ret != 0) { WARN(ret != -ERESTARTSYS, - "failed to unbind object for tiling switch"); + "failed to reset object for tiling switch"); args->tiling_mode = obj_priv->tiling_mode; - mutex_unlock(&dev->struct_mutex); - drm_gem_object_unreference(obj); - - return ret; + args->stride = obj_priv->stride; + goto err; } + obj_priv->tiling_mode = args->tiling_mode; + obj_priv->stride = args->stride; } - obj_priv->stride = args->stride; - +err: drm_gem_object_unreference(obj); mutex_unlock(&dev->struct_mutex); - return 0; + return ret; } /** diff --git a/drivers/gpu/drm/i915/i915_mem.c b/drivers/gpu/drm/i915/i915_mem.c index 96e271986d2a..83b7b81bb2b8 100644 --- a/drivers/gpu/drm/i915/i915_mem.c +++ b/drivers/gpu/drm/i915/i915_mem.c @@ -94,8 +94,8 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size, { /* Maybe cut off the start of an existing block */ if (start > p->start) { - struct mem_block *newblock = - drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS); + struct mem_block *newblock = kmalloc(sizeof(*newblock), + GFP_KERNEL); if (!newblock) goto out; newblock->start = start; @@ -111,8 +111,8 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size, /* Maybe cut off the end of an existing block */ if (size < p->size) { - struct mem_block *newblock = - drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS); + struct mem_block *newblock = kmalloc(sizeof(*newblock), + GFP_KERNEL); if (!newblock) goto out; newblock->start = start + size; @@ -169,7 +169,7 @@ static void free_block(struct mem_block *p) p->size += q->size; p->next = q->next; p->next->prev = p; - drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS); + kfree(q); } if (p->prev->file_priv == NULL) { @@ -177,7 +177,7 @@ static void 
free_block(struct mem_block *p) q->size += p->size; q->next = p->next; q->next->prev = q; - drm_free(p, sizeof(*q), DRM_MEM_BUFLISTS); + kfree(p); } } @@ -185,14 +185,14 @@ static void free_block(struct mem_block *p) */ static int init_heap(struct mem_block **heap, int start, int size) { - struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFLISTS); + struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL); if (!blocks) return -ENOMEM; - *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFLISTS); + *heap = kmalloc(sizeof(**heap), GFP_KERNEL); if (!*heap) { - drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFLISTS); + kfree(blocks); return -ENOMEM; } @@ -233,7 +233,7 @@ void i915_mem_release(struct drm_device * dev, struct drm_file *file_priv, p->size += q->size; p->next = q->next; p->next->prev = p; - drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS); + kfree(q); } } } @@ -250,10 +250,10 @@ void i915_mem_takedown(struct mem_block **heap) for (p = (*heap)->next; p != *heap;) { struct mem_block *q = p; p = p->next; - drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS); + kfree(q); } - drm_free(*heap, sizeof(**heap), DRM_MEM_BUFLISTS); + kfree(*heap); *heap = NULL; } diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 754dd22fdd77..cdd126d068a7 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -124,8 +124,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, entry = &lvds_lfp_data->data[lvds_options->panel_type]; dvo_timing = &entry->dvo_timing; - panel_fixed_mode = drm_calloc(1, sizeof(*panel_fixed_mode), - DRM_MEM_DRIVER); + panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); fill_detail_timing_data(panel_fixed_mode, dvo_timing); @@ -156,8 +155,7 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv, if (!dvo_timing) return; - panel_fixed_mode = drm_calloc(1, sizeof(*panel_fixed_mode), - DRM_MEM_DRIVER); + panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); if (!panel_fixed_mode) return; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 028f5b66e3d8..3e1c78162119 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -828,19 +828,31 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, } mutex_lock(&dev->struct_mutex); - ret = i915_gem_object_pin(intel_fb->obj, alignment); + ret = i915_gem_object_pin(obj, alignment); if (ret != 0) { mutex_unlock(&dev->struct_mutex); return ret; } - ret = i915_gem_object_set_to_gtt_domain(intel_fb->obj, 1); + ret = i915_gem_object_set_to_gtt_domain(obj, 1); if (ret != 0) { - i915_gem_object_unpin(intel_fb->obj); + i915_gem_object_unpin(obj); mutex_unlock(&dev->struct_mutex); return ret; } + /* Pre-i965 needs to install a fence for tiled scan-out */ + if (!IS_I965G(dev) && + obj_priv->fence_reg == I915_FENCE_REG_NONE && + obj_priv->tiling_mode != I915_TILING_NONE) { + ret = i915_gem_object_get_fence_reg(obj); + if (ret != 0) { + i915_gem_object_unpin(obj); + mutex_unlock(&dev->struct_mutex); + return ret; + } + } + dspcntr = I915_READ(dspcntr_reg); /* Mask out pixel format bits in case we change it */ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; @@ -860,7 +872,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, break; default: DRM_ERROR("Unknown color depth\n"); - i915_gem_object_unpin(intel_fb->obj); + i915_gem_object_unpin(obj); mutex_unlock(&dev->struct_mutex); return -EINVAL; } diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c 
index 8e28e5993df5..1af7d68e3807 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -870,7 +870,11 @@ static int intelfb_single_fb_probe(struct drm_device *dev) */ void intelfb_restore(void) { - drm_crtc_helper_set_config(&kernelfb_mode); + int ret; + if ((ret = drm_crtc_helper_set_config(&kernelfb_mode)) != 0) { + printk(KERN_ERR "Failed to restore crtc configuration: %d\n", + ret); + } } static void intelfb_restore_work_fn(struct work_struct *ignored) diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 50d7ed70b338..ea68992e4416 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1561,8 +1561,7 @@ intel_tv_destroy (struct drm_connector *connector) drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); - drm_free(intel_output, sizeof(struct intel_output) + sizeof(struct intel_tv_priv), - DRM_MEM_DRIVER); + kfree(intel_output); } @@ -1695,8 +1694,8 @@ intel_tv_init(struct drm_device *dev) (tv_dac_off & TVDAC_STATE_CHG_EN) != 0) return; - intel_output = drm_calloc(1, sizeof(struct intel_output) + - sizeof(struct intel_tv_priv), DRM_MEM_DRIVER); + intel_output = kzalloc(sizeof(struct intel_output) + + sizeof(struct intel_tv_priv), GFP_KERNEL); if (!intel_output) { return; } @@ -1730,8 +1729,8 @@ intel_tv_init(struct drm_device *dev) connector->doublescan_allowed = false; /* Create TV properties then attach current values */ - tv_format_names = drm_alloc(sizeof(char *) * NUM_TV_MODES, - DRM_MEM_DRIVER); + tv_format_names = kmalloc(sizeof(char *) * NUM_TV_MODES, + GFP_KERNEL); if (!tv_format_names) goto out; for (i = 0; i < NUM_TV_MODES; i++) diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c index 7a6bf9ffc5a3..6c67a02910c8 100644 --- a/drivers/gpu/drm/mga/mga_dma.c +++ b/drivers/gpu/drm/mga/mga_dma.c @@ -254,23 +254,20 @@ static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_pr int i; DRM_DEBUG("count=%d\n", dma->buf_count); - dev_priv->head = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); + dev_priv->head = kzalloc(sizeof(drm_mga_freelist_t), GFP_KERNEL); if (dev_priv->head == NULL) return -ENOMEM; - memset(dev_priv->head, 0, sizeof(drm_mga_freelist_t)); SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0); for (i = 0; i < dma->buf_count; i++) { buf = dma->buflist[i]; buf_priv = buf->dev_private; - entry = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); + entry = kzalloc(sizeof(drm_mga_freelist_t), GFP_KERNEL); if (entry == NULL) return -ENOMEM; - memset(entry, 0, sizeof(drm_mga_freelist_t)); - entry->next = dev_priv->head->next; entry->prev = dev_priv->head; SET_AGE(&entry->age, MGA_BUFFER_FREE, 0); @@ -301,7 +298,7 @@ static void mga_freelist_cleanup(struct drm_device * dev) entry = dev_priv->head; while (entry) { next = entry->next; - drm_free(entry, sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); + kfree(entry); entry = next; } @@ -399,12 +396,11 @@ int mga_driver_load(struct drm_device * dev, unsigned long flags) drm_mga_private_t *dev_priv; int ret; - dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER); + dev_priv = kzalloc(sizeof(drm_mga_private_t), GFP_KERNEL); if (!dev_priv) return -ENOMEM; dev->dev_private = (void *)dev_priv; - memset(dev_priv, 0, sizeof(drm_mga_private_t)); dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT; dev_priv->chipset = flags; @@ -1150,7 +1146,7 @@ int mga_dma_buffers(struct drm_device *dev, void *data, */ int mga_driver_unload(struct drm_device * dev) { - 
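intel_tv_init() above allocates struct intel_output and its TV private data with a single kzalloc(), which is why intel_tv_destroy() can release both with one kfree(). A sketch of the layout; the structures are placeholders and the placement of the private data directly after the base object is inferred from the allocation size:

#include <linux/slab.h>

struct demo_output {
	int type;
	void *dev_priv;
};

struct demo_tv_priv {
	int tv_format;
};

static struct demo_output *demo_tv_output_create(void)
{
	struct demo_output *output;

	output = kzalloc(sizeof(struct demo_output) + sizeof(struct demo_tv_priv),
			 GFP_KERNEL);
	if (!output)
		return NULL;
	output->dev_priv = output + 1;	/* private data sits right after the base */
	return output;
}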
drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER); + kfree(dev->dev_private); dev->dev_private = NULL; return 0; diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c index 077c0455a6b9..c75fd3564040 100644 --- a/drivers/gpu/drm/r128/r128_cce.c +++ b/drivers/gpu/drm/r128/r128_cce.c @@ -353,12 +353,10 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init) DRM_DEBUG("\n"); - dev_priv = drm_alloc(sizeof(drm_r128_private_t), DRM_MEM_DRIVER); + dev_priv = kzalloc(sizeof(drm_r128_private_t), GFP_KERNEL); if (dev_priv == NULL) return -ENOMEM; - memset(dev_priv, 0, sizeof(drm_r128_private_t)); - dev_priv->is_pci = init->is_pci; if (dev_priv->is_pci && !dev->sg) { @@ -619,8 +617,7 @@ int r128_do_cleanup_cce(struct drm_device * dev) ("failed to cleanup PCI GART!\n"); } - drm_free(dev->dev_private, sizeof(drm_r128_private_t), - DRM_MEM_DRIVER); + kfree(dev->dev_private); dev->dev_private = NULL; } @@ -768,18 +765,17 @@ static int r128_freelist_init(struct drm_device * dev) drm_r128_freelist_t *entry; int i; - dev_priv->head = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER); + dev_priv->head = kzalloc(sizeof(drm_r128_freelist_t), GFP_KERNEL); if (dev_priv->head == NULL) return -ENOMEM; - memset(dev_priv->head, 0, sizeof(drm_r128_freelist_t)); dev_priv->head->age = R128_BUFFER_USED; for (i = 0; i < dma->buf_count; i++) { buf = dma->buflist[i]; buf_priv = buf->dev_private; - entry = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER); + entry = kmalloc(sizeof(drm_r128_freelist_t), GFP_KERNEL); if (!entry) return -ENOMEM; diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c index f7a5b5740764..026a48c95c8f 100644 --- a/drivers/gpu/drm/r128/r128_state.c +++ b/drivers/gpu/drm/r128/r128_state.c @@ -910,24 +910,24 @@ static int r128_cce_dispatch_write_span(struct drm_device * dev, } buffer_size = depth->n * sizeof(u32); - buffer = drm_alloc(buffer_size, DRM_MEM_BUFS); + buffer = kmalloc(buffer_size, GFP_KERNEL); if (buffer == NULL) return -ENOMEM; if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) { - drm_free(buffer, buffer_size, DRM_MEM_BUFS); + kfree(buffer); return -EFAULT; } mask_size = depth->n * sizeof(u8); if (depth->mask) { - mask = drm_alloc(mask_size, DRM_MEM_BUFS); + mask = kmalloc(mask_size, GFP_KERNEL); if (mask == NULL) { - drm_free(buffer, buffer_size, DRM_MEM_BUFS); + kfree(buffer); return -ENOMEM; } if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) { - drm_free(buffer, buffer_size, DRM_MEM_BUFS); - drm_free(mask, mask_size, DRM_MEM_BUFS); + kfree(buffer); + kfree(mask); return -EFAULT; } @@ -954,7 +954,7 @@ static int r128_cce_dispatch_write_span(struct drm_device * dev, } } - drm_free(mask, mask_size, DRM_MEM_BUFS); + kfree(mask); } else { for (i = 0; i < count; i++, x++) { BEGIN_RING(6); @@ -978,7 +978,7 @@ static int r128_cce_dispatch_write_span(struct drm_device * dev, } } - drm_free(buffer, buffer_size, DRM_MEM_BUFS); + kfree(buffer); return 0; } @@ -1000,54 +1000,54 @@ static int r128_cce_dispatch_write_pixels(struct drm_device * dev, xbuf_size = count * sizeof(*x); ybuf_size = count * sizeof(*y); - x = drm_alloc(xbuf_size, DRM_MEM_BUFS); + x = kmalloc(xbuf_size, GFP_KERNEL); if (x == NULL) { return -ENOMEM; } - y = drm_alloc(ybuf_size, DRM_MEM_BUFS); + y = kmalloc(ybuf_size, GFP_KERNEL); if (y == NULL) { - drm_free(x, xbuf_size, DRM_MEM_BUFS); + kfree(x); return -ENOMEM; } if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) { - drm_free(x, xbuf_size, DRM_MEM_BUFS); - 
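The r128 depth-span and pixel routines above free every earlier allocation by hand on each failure path. As a point of comparison only, the same copy-in sequence can be written with goto-based unwinding, the more common kernel idiom; struct demo_depth below is a stand-in for drm_r128_depth_t, not its real layout:

#include <linux/slab.h>
#include <linux/uaccess.h>

struct demo_depth {
	int n;
	int __user *x;
	int __user *y;
	unsigned int __user *buffer;
};

static int demo_copy_depth_arrays(const struct demo_depth *depth,
				  int **out_x, int **out_y, unsigned int **out_buf)
{
	int *x, *y;
	unsigned int *buf;
	int ret = -ENOMEM;

	x = kmalloc(depth->n * sizeof(*x), GFP_KERNEL);
	if (!x)
		return -ENOMEM;
	y = kmalloc(depth->n * sizeof(*y), GFP_KERNEL);
	if (!y)
		goto err_x;
	buf = kmalloc(depth->n * sizeof(*buf), GFP_KERNEL);
	if (!buf)
		goto err_y;

	ret = -EFAULT;
	if (copy_from_user(x, depth->x, depth->n * sizeof(*x)) ||
	    copy_from_user(y, depth->y, depth->n * sizeof(*y)) ||
	    copy_from_user(buf, depth->buffer, depth->n * sizeof(*buf)))
		goto err_buf;

	*out_x = x;
	*out_y = y;
	*out_buf = buf;
	return 0;

err_buf:
	kfree(buf);
err_y:
	kfree(y);
err_x:
	kfree(x);
	return ret;
}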
drm_free(y, ybuf_size, DRM_MEM_BUFS); + kfree(x); + kfree(y); return -EFAULT; } if (DRM_COPY_FROM_USER(y, depth->y, xbuf_size)) { - drm_free(x, xbuf_size, DRM_MEM_BUFS); - drm_free(y, ybuf_size, DRM_MEM_BUFS); + kfree(x); + kfree(y); return -EFAULT; } buffer_size = depth->n * sizeof(u32); - buffer = drm_alloc(buffer_size, DRM_MEM_BUFS); + buffer = kmalloc(buffer_size, GFP_KERNEL); if (buffer == NULL) { - drm_free(x, xbuf_size, DRM_MEM_BUFS); - drm_free(y, ybuf_size, DRM_MEM_BUFS); + kfree(x); + kfree(y); return -ENOMEM; } if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) { - drm_free(x, xbuf_size, DRM_MEM_BUFS); - drm_free(y, ybuf_size, DRM_MEM_BUFS); - drm_free(buffer, buffer_size, DRM_MEM_BUFS); + kfree(x); + kfree(y); + kfree(buffer); return -EFAULT; } if (depth->mask) { mask_size = depth->n * sizeof(u8); - mask = drm_alloc(mask_size, DRM_MEM_BUFS); + mask = kmalloc(mask_size, GFP_KERNEL); if (mask == NULL) { - drm_free(x, xbuf_size, DRM_MEM_BUFS); - drm_free(y, ybuf_size, DRM_MEM_BUFS); - drm_free(buffer, buffer_size, DRM_MEM_BUFS); + kfree(x); + kfree(y); + kfree(buffer); return -ENOMEM; } if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) { - drm_free(x, xbuf_size, DRM_MEM_BUFS); - drm_free(y, ybuf_size, DRM_MEM_BUFS); - drm_free(buffer, buffer_size, DRM_MEM_BUFS); - drm_free(mask, mask_size, DRM_MEM_BUFS); + kfree(x); + kfree(y); + kfree(buffer); + kfree(mask); return -EFAULT; } @@ -1074,7 +1074,7 @@ static int r128_cce_dispatch_write_pixels(struct drm_device * dev, } } - drm_free(mask, mask_size, DRM_MEM_BUFS); + kfree(mask); } else { for (i = 0; i < count; i++) { BEGIN_RING(6); @@ -1098,9 +1098,9 @@ static int r128_cce_dispatch_write_pixels(struct drm_device * dev, } } - drm_free(x, xbuf_size, DRM_MEM_BUFS); - drm_free(y, ybuf_size, DRM_MEM_BUFS); - drm_free(buffer, buffer_size, DRM_MEM_BUFS); + kfree(x); + kfree(y); + kfree(buffer); return 0; } @@ -1167,23 +1167,23 @@ static int r128_cce_dispatch_read_pixels(struct drm_device * dev, xbuf_size = count * sizeof(*x); ybuf_size = count * sizeof(*y); - x = drm_alloc(xbuf_size, DRM_MEM_BUFS); + x = kmalloc(xbuf_size, GFP_KERNEL); if (x == NULL) { return -ENOMEM; } - y = drm_alloc(ybuf_size, DRM_MEM_BUFS); + y = kmalloc(ybuf_size, GFP_KERNEL); if (y == NULL) { - drm_free(x, xbuf_size, DRM_MEM_BUFS); + kfree(x); return -ENOMEM; } if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) { - drm_free(x, xbuf_size, DRM_MEM_BUFS); - drm_free(y, ybuf_size, DRM_MEM_BUFS); + kfree(x); + kfree(y); return -EFAULT; } if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) { - drm_free(x, xbuf_size, DRM_MEM_BUFS); - drm_free(y, ybuf_size, DRM_MEM_BUFS); + kfree(x); + kfree(y); return -EFAULT; } @@ -1210,8 +1210,8 @@ static int r128_cce_dispatch_read_pixels(struct drm_device * dev, ADVANCE_RING(); } - drm_free(x, xbuf_size, DRM_MEM_BUFS); - drm_free(y, ybuf_size, DRM_MEM_BUFS); + kfree(x); + kfree(y); return 0; } diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 5225f5be7ea7..c550932a108f 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -551,6 +551,9 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) /* cp setup */ WREG32(0x718, pre_write_timer | (pre_write_limit << 28)); WREG32(RADEON_CP_RB_CNTL, +#ifdef __BIG_ENDIAN + RADEON_BUF_SWAP_32BIT | +#endif REG_SET(RADEON_RB_BUFSZ, rb_bufsz) | REG_SET(RADEON_RB_BLKSZ, rb_blksz) | REG_SET(RADEON_MAX_FETCH, max_fetch) | @@ -644,7 +647,7 @@ int r100_cp_reset(struct radeon_device *rdev) */ int r100_cs_parse_packet0(struct 
radeon_cs_parser *p, struct radeon_cs_packet *pkt, - unsigned *auth, unsigned n, + const unsigned *auth, unsigned n, radeon_packet0_check_t check) { unsigned reg; @@ -654,6 +657,10 @@ int r100_cs_parse_packet0(struct radeon_cs_parser *p, idx = pkt->idx + 1; reg = pkt->reg; + /* Check that register fall into register range + * determined by the number of entry (n) in the + * safe register bitmap. + */ if (pkt->one_reg_wr) { if ((reg >> 7) > n) { return -EINVAL; @@ -683,24 +690,6 @@ int r100_cs_parse_packet0(struct radeon_cs_parser *p, return 0; } -int r100_cs_parse_packet3(struct radeon_cs_parser *p, - struct radeon_cs_packet *pkt, - unsigned *auth, unsigned n, - radeon_packet3_check_t check) -{ - unsigned i, m; - - if ((pkt->opcode >> 5) > n) { - return -EINVAL; - } - i = pkt->opcode >> 5; - m = 1 << (pkt->opcode & 31); - if (auth[i] & m) { - return check(p, pkt); - } - return 0; -} - void r100_cs_dump_packet(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt) { @@ -901,6 +890,25 @@ static int r100_packet0_check(struct radeon_cs_parser *p, return 0; } +int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, + struct radeon_cs_packet *pkt, + struct radeon_object *robj) +{ + struct radeon_cs_chunk *ib_chunk; + unsigned idx; + + ib_chunk = &p->chunks[p->chunk_ib_idx]; + idx = pkt->idx + 1; + if ((ib_chunk->kdata[idx+2] + 1) > radeon_object_size(robj)) { + DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER " + "(need %u have %lu) !\n", + ib_chunk->kdata[idx+2] + 1, + radeon_object_size(robj)); + return -EINVAL; + } + return 0; +} + static int r100_packet3_check(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt) { @@ -954,6 +962,10 @@ static int r100_packet3_check(struct radeon_cs_parser *p, return r; } ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); + r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); + if (r) { + return r; + } break; case 0x23: /* FIXME: cleanup */ @@ -999,18 +1011,18 @@ int r100_cs_parse(struct radeon_cs_parser *p) } p->idx += pkt.count + 2; switch (pkt.type) { - case PACKET_TYPE0: - r = r100_packet0_check(p, &pkt); - break; - case PACKET_TYPE2: - break; - case PACKET_TYPE3: - r = r100_packet3_check(p, &pkt); - break; - default: - DRM_ERROR("Unknown packet type %d !\n", - pkt.type); - return -EINVAL; + case PACKET_TYPE0: + r = r100_packet0_check(p, &pkt); + break; + case PACKET_TYPE2: + break; + case PACKET_TYPE3: + r = r100_packet3_check(p, &pkt); + break; + default: + DRM_ERROR("Unknown packet type %d !\n", + pkt.type); + return -EINVAL; } if (r) { return r; @@ -1267,12 +1279,6 @@ void r100_vram_info(struct radeon_device *rdev) rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); - if (rdev->mc.aper_size > rdev->mc.vram_size) { - /* Why does some hw doesn't have CONFIG_MEMSIZE properly - * setup ? 
*/ - rdev->mc.vram_size = rdev->mc.aper_size; - WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); - } } @@ -1352,6 +1358,11 @@ void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) } } +int r100_init(struct radeon_device *rdev) +{ + return 0; +} + /* * Debugfs info */ diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index f5870a099d4f..e2ed5bc08170 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -48,14 +48,13 @@ int r100_cs_packet_next_reloc(struct radeon_cs_parser *p, struct radeon_cs_reloc **cs_reloc); int r100_cs_parse_packet0(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt, - unsigned *auth, unsigned n, + const unsigned *auth, unsigned n, radeon_packet0_check_t check); -int r100_cs_parse_packet3(struct radeon_cs_parser *p, - struct radeon_cs_packet *pkt, - unsigned *auth, unsigned n, - radeon_packet3_check_t check); void r100_cs_dump_packet(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt); +int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, + struct radeon_cs_packet *pkt, + struct radeon_object *robj); /* This files gather functions specifics to: * r300,r350,rv350,rv370,rv380 @@ -288,7 +287,7 @@ int r300_copy_dma(struct radeon_device *rdev, return r; } /* Must wait for 2D idle & clean before DMA or hangs might happen */ - radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0)); + radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0 )); radeon_ring_write(rdev, (1 << 16)); for (i = 0; i < num_loops; i++) { cur_size = size; @@ -319,7 +318,7 @@ void r300_ring_start(struct radeon_device *rdev) /* Sub pixel 1/12 so we can have 4K rendering according to doc */ gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16); - switch (rdev->num_gb_pipes) { + switch(rdev->num_gb_pipes) { case 2: gb_tile_config |= R300_PIPE_COUNT_R300; break; @@ -452,8 +451,8 @@ void r300_gpu_init(struct radeon_device *rdev) case 4: gb_tile_config |= R300_PIPE_COUNT_R420; break; - case 1: default: + case 1: gb_tile_config |= R300_PIPE_COUNT_RV350; break; } @@ -725,18 +724,120 @@ struct r300_cs_track_cb { unsigned offset; }; +struct r300_cs_track_array { + struct radeon_object *robj; + unsigned esize; +}; + +struct r300_cs_track_texture { + struct radeon_object *robj; + unsigned pitch; + unsigned width; + unsigned height; + unsigned num_levels; + unsigned cpp; + unsigned tex_coord_type; + unsigned txdepth; + unsigned width_11; + unsigned height_11; + bool use_pitch; + bool enabled; + bool roundup_w; + bool roundup_h; +}; + struct r300_cs_track { - unsigned num_cb; - unsigned maxy; - struct r300_cs_track_cb cb[4]; - struct r300_cs_track_cb zb; - bool z_enabled; + unsigned num_cb; + unsigned maxy; + unsigned vtx_size; + unsigned vap_vf_cntl; + unsigned immd_dwords; + unsigned num_arrays; + unsigned max_indx; + struct r300_cs_track_array arrays[11]; + struct r300_cs_track_cb cb[4]; + struct r300_cs_track_cb zb; + struct r300_cs_track_texture textures[16]; + bool z_enabled; }; +static inline void r300_cs_track_texture_print(struct r300_cs_track_texture *t) +{ + DRM_ERROR("pitch %d\n", t->pitch); + DRM_ERROR("width %d\n", t->width); + DRM_ERROR("height %d\n", t->height); + DRM_ERROR("num levels %d\n", t->num_levels); + DRM_ERROR("depth %d\n", t->txdepth); + DRM_ERROR("bpp %d\n", t->cpp); + DRM_ERROR("coordinate type %d\n", t->tex_coord_type); + DRM_ERROR("width round to power of 2 %d\n", t->roundup_w); + DRM_ERROR("height round to power of 2 %d\n", t->roundup_h); +} + +static inline int 
r300_cs_track_texture_check(struct radeon_device *rdev, + struct r300_cs_track *track) +{ + struct radeon_object *robj; + unsigned long size; + unsigned u, i, w, h; + + for (u = 0; u < 16; u++) { + if (!track->textures[u].enabled) + continue; + robj = track->textures[u].robj; + if (robj == NULL) { + DRM_ERROR("No texture bound to unit %u\n", u); + return -EINVAL; + } + size = 0; + for (i = 0; i <= track->textures[u].num_levels; i++) { + if (track->textures[u].use_pitch) { + w = track->textures[u].pitch / (1 << i); + } else { + w = track->textures[u].width / (1 << i); + if (rdev->family >= CHIP_RV515) + w |= track->textures[u].width_11; + if (track->textures[u].roundup_w) + w = roundup_pow_of_two(w); + } + h = track->textures[u].height / (1 << i); + if (rdev->family >= CHIP_RV515) + h |= track->textures[u].height_11; + if (track->textures[u].roundup_h) + h = roundup_pow_of_two(h); + size += w * h; + } + size *= track->textures[u].cpp; + switch (track->textures[u].tex_coord_type) { + case 0: + break; + case 1: + size *= (1 << track->textures[u].txdepth); + break; + case 2: + size *= 6; + break; + default: + DRM_ERROR("Invalid texture coordinate type %u for unit " + "%u\n", track->textures[u].tex_coord_type, u); + return -EINVAL; + } + if (size > radeon_object_size(robj)) { + DRM_ERROR("Texture of unit %u needs %lu bytes but is " + "%lu\n", u, size, radeon_object_size(robj)); + r300_cs_track_texture_print(&track->textures[u]); + return -EINVAL; + } + } + return 0; +} + int r300_cs_track_check(struct radeon_device *rdev, struct r300_cs_track *track) { unsigned i; unsigned long size; + unsigned prim_walk; + unsigned nverts; for (i = 0; i < track->num_cb; i++) { if (track->cb[i].robj == NULL) { @@ -769,7 +870,59 @@ int r300_cs_track_check(struct radeon_device *rdev, struct r300_cs_track *track) return -EINVAL; } } - return 0; + prim_walk = (track->vap_vf_cntl >> 4) & 0x3; + nverts = (track->vap_vf_cntl >> 16) & 0xFFFF; + switch (prim_walk) { + case 1: + for (i = 0; i < track->num_arrays; i++) { + size = track->arrays[i].esize * track->max_indx * 4; + if (track->arrays[i].robj == NULL) { + DRM_ERROR("(PW %u) Vertex array %u no buffer " + "bound\n", prim_walk, i); + return -EINVAL; + } + if (size > radeon_object_size(track->arrays[i].robj)) { + DRM_ERROR("(PW %u) Vertex array %u need %lu dwords " + "have %lu dwords\n", prim_walk, i, + size >> 2, + radeon_object_size(track->arrays[i].robj) >> 2); + DRM_ERROR("Max indices %u\n", track->max_indx); + return -EINVAL; + } + } + break; + case 2: + for (i = 0; i < track->num_arrays; i++) { + size = track->arrays[i].esize * (nverts - 1) * 4; + if (track->arrays[i].robj == NULL) { + DRM_ERROR("(PW %u) Vertex array %u no buffer " + "bound\n", prim_walk, i); + return -EINVAL; + } + if (size > radeon_object_size(track->arrays[i].robj)) { + DRM_ERROR("(PW %u) Vertex array %u need %lu dwords " + "have %lu dwords\n", prim_walk, i, size >> 2, + radeon_object_size(track->arrays[i].robj) >> 2); + return -EINVAL; + } + } + break; + case 3: + size = track->vtx_size * nverts; + if (size != track->immd_dwords) { + DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n", + track->immd_dwords, size); + DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n", + nverts, track->vtx_size); + return -EINVAL; + } + break; + default: + DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n", + prim_walk); + return -EINVAL; + } + return r300_cs_track_texture_check(rdev, track); } static inline void r300_cs_track_clear(struct r300_cs_track *track) @@ -789,9 +942,33 @@ static 
inline void r300_cs_track_clear(struct r300_cs_track *track) track->zb.pitch = 8192; track->zb.cpp = 4; track->zb.offset = 0; + track->vtx_size = 0x7F; + track->immd_dwords = 0xFFFFFFFFUL; + track->num_arrays = 11; + track->max_indx = 0x00FFFFFFUL; + for (i = 0; i < track->num_arrays; i++) { + track->arrays[i].robj = NULL; + track->arrays[i].esize = 0x7F; + } + for (i = 0; i < 16; i++) { + track->textures[i].pitch = 16536; + track->textures[i].width = 16536; + track->textures[i].height = 16536; + track->textures[i].width_11 = 1 << 11; + track->textures[i].height_11 = 1 << 11; + track->textures[i].num_levels = 12; + track->textures[i].txdepth = 16; + track->textures[i].cpp = 64; + track->textures[i].tex_coord_type = 1; + track->textures[i].robj = NULL; + /* CS IB emission code makes sure texture unit are disabled */ + track->textures[i].enabled = false; + track->textures[i].roundup_w = true; + track->textures[i].roundup_h = true; + } } -static unsigned r300_auth_reg[] = { +static const unsigned r300_reg_safe_bm[159] = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, @@ -808,7 +985,7 @@ static unsigned r300_auth_reg[] = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, - 0xFFFFFFFF, 0xFFFFCFCC, 0xF00E9FFF, 0x007C0000, + 0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000, 0xF0000078, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, @@ -824,9 +1001,9 @@ static unsigned r300_auth_reg[] = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, - 0xFFFFFC78, 0xFFFFFFFF, 0xFFFFFFFC, 0xFFFFFFFF, + 0xFFFFFC78, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFF, 0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE009FF, - 0x00000000, 0x00000000, 0xFFFF0000, 0x00000000, + 0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, 0x0000C100, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF, @@ -848,8 +1025,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p, ib = p->ib->ptr; ib_chunk = &p->chunks[p->chunk_ib_idx]; - track = (struct r300_cs_track *)p->track; - switch (reg) { + track = (struct r300_cs_track*)p->track; + switch(reg) { case RADEON_DST_PITCH_OFFSET: case RADEON_SRC_PITCH_OFFSET: r = r100_cs_packet_next_reloc(p, &reloc); @@ -907,6 +1084,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, case R300_TX_OFFSET_0+52: case R300_TX_OFFSET_0+56: case R300_TX_OFFSET_0+60: + i = (reg - R300_TX_OFFSET_0) >> 2; r = r100_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("No reloc for ib[%d]=0x%04X\n", @@ -915,11 +1093,23 @@ static int r300_packet0_check(struct radeon_cs_parser *p, return r; } ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); + track->textures[i].robj = reloc->robj; break; /* Tracked registers */ + case 0x2084: + /* VAP_VF_CNTL */ + track->vap_vf_cntl = ib_chunk->kdata[idx]; + break; + case 0x20B4: + /* VAP_VTX_SIZE */ + track->vtx_size = ib_chunk->kdata[idx] & 0x7F; + break; + case 0x2134: + /* VAP_VF_MAX_VTX_INDX */ + track->max_indx = ib_chunk->kdata[idx] & 0x00FFFFFFUL; + break; case 0x43E4: /* SC_SCISSOR1 */ - track->maxy = ((ib_chunk->kdata[idx] >> 13) & 0x1FFF) + 1; if (p->rdev->family < CHIP_RV515) { track->maxy -= 1440; @@ -994,8 +1184,166 @@ 
static int r300_packet0_check(struct radeon_cs_parser *p, /* ZB_DEPTHPITCH */ track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC; break; + case 0x4104: + for (i = 0; i < 16; i++) { + bool enabled; + + enabled = !!(ib_chunk->kdata[idx] & (1 << i)); + track->textures[i].enabled = enabled; + } + break; + case 0x44C0: + case 0x44C4: + case 0x44C8: + case 0x44CC: + case 0x44D0: + case 0x44D4: + case 0x44D8: + case 0x44DC: + case 0x44E0: + case 0x44E4: + case 0x44E8: + case 0x44EC: + case 0x44F0: + case 0x44F4: + case 0x44F8: + case 0x44FC: + /* TX_FORMAT1_[0-15] */ + i = (reg - 0x44C0) >> 2; + tmp = (ib_chunk->kdata[idx] >> 25) & 0x3; + track->textures[i].tex_coord_type = tmp; + switch ((ib_chunk->kdata[idx] & 0x1F)) { + case 0: + case 2: + case 5: + case 18: + case 20: + case 21: + track->textures[i].cpp = 1; + break; + case 1: + case 3: + case 6: + case 7: + case 10: + case 11: + case 19: + case 22: + case 24: + track->textures[i].cpp = 2; + break; + case 4: + case 8: + case 9: + case 12: + case 13: + case 23: + case 25: + case 27: + case 30: + track->textures[i].cpp = 4; + break; + case 14: + case 26: + case 28: + track->textures[i].cpp = 8; + break; + case 29: + track->textures[i].cpp = 16; + break; + default: + DRM_ERROR("Invalid texture format %u\n", + (ib_chunk->kdata[idx] & 0x1F)); + return -EINVAL; + break; + } + break; + case 0x4400: + case 0x4404: + case 0x4408: + case 0x440C: + case 0x4410: + case 0x4414: + case 0x4418: + case 0x441C: + case 0x4420: + case 0x4424: + case 0x4428: + case 0x442C: + case 0x4430: + case 0x4434: + case 0x4438: + case 0x443C: + /* TX_FILTER0_[0-15] */ + i = (reg - 0x4400) >> 2; + tmp = ib_chunk->kdata[idx] & 0x7;; + if (tmp == 2 || tmp == 4 || tmp == 6) { + track->textures[i].roundup_w = false; + } + tmp = (ib_chunk->kdata[idx] >> 3) & 0x7;; + if (tmp == 2 || tmp == 4 || tmp == 6) { + track->textures[i].roundup_h = false; + } + break; + case 0x4500: + case 0x4504: + case 0x4508: + case 0x450C: + case 0x4510: + case 0x4514: + case 0x4518: + case 0x451C: + case 0x4520: + case 0x4524: + case 0x4528: + case 0x452C: + case 0x4530: + case 0x4534: + case 0x4538: + case 0x453C: + /* TX_FORMAT2_[0-15] */ + i = (reg - 0x4500) >> 2; + tmp = ib_chunk->kdata[idx] & 0x3FFF; + track->textures[i].pitch = tmp + 1; + if (p->rdev->family >= CHIP_RV515) { + tmp = ((ib_chunk->kdata[idx] >> 15) & 1) << 11; + track->textures[i].width_11 = tmp; + tmp = ((ib_chunk->kdata[idx] >> 16) & 1) << 11; + track->textures[i].height_11 = tmp; + } + break; + case 0x4480: + case 0x4484: + case 0x4488: + case 0x448C: + case 0x4490: + case 0x4494: + case 0x4498: + case 0x449C: + case 0x44A0: + case 0x44A4: + case 0x44A8: + case 0x44AC: + case 0x44B0: + case 0x44B4: + case 0x44B8: + case 0x44BC: + /* TX_FORMAT0_[0-15] */ + i = (reg - 0x4480) >> 2; + tmp = ib_chunk->kdata[idx] & 0x7FF; + track->textures[i].width = tmp + 1; + tmp = (ib_chunk->kdata[idx] >> 11) & 0x7FF; + track->textures[i].height = tmp + 1; + tmp = (ib_chunk->kdata[idx] >> 26) & 0xF; + track->textures[i].num_levels = tmp; + tmp = ib_chunk->kdata[idx] & (1 << 31); + track->textures[i].use_pitch = !!tmp; + tmp = (ib_chunk->kdata[idx] >> 22) & 0xF; + track->textures[i].txdepth = tmp; + break; default: - printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", reg, idx); + printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", + reg, idx); return -EINVAL; } return 0; @@ -1015,11 +1363,12 @@ static int r300_packet3_check(struct radeon_cs_parser *p, ib = p->ib->ptr; ib_chunk = &p->chunks[p->chunk_ib_idx]; idx = pkt->idx + 1; - track = 
(struct r300_cs_track *)p->track; - switch (pkt->opcode) { + track = (struct r300_cs_track*)p->track; + switch(pkt->opcode) { case PACKET3_3D_LOAD_VBPNTR: - c = ib_chunk->kdata[idx++]; - for (i = 0; i < (c - 1); i += 2, idx += 3) { + c = ib_chunk->kdata[idx++] & 0x1F; + track->num_arrays = c; + for (i = 0; i < (c - 1); i+=2, idx+=3) { r = r100_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("No reloc for packet3 %d\n", @@ -1028,6 +1377,9 @@ static int r300_packet3_check(struct radeon_cs_parser *p, return r; } ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); + track->arrays[i + 0].robj = reloc->robj; + track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8; + track->arrays[i + 0].esize &= 0x7F; r = r100_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("No reloc for packet3 %d\n", @@ -1036,6 +1388,9 @@ static int r300_packet3_check(struct radeon_cs_parser *p, return r; } ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset); + track->arrays[i + 1].robj = reloc->robj; + track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24; + track->arrays[i + 1].esize &= 0x7F; } if (c & 1) { r = r100_cs_packet_next_reloc(p, &reloc); @@ -1046,6 +1401,9 @@ static int r300_packet3_check(struct radeon_cs_parser *p, return r; } ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); + track->arrays[i + 0].robj = reloc->robj; + track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8; + track->arrays[i + 0].esize &= 0x7F; } break; case PACKET3_INDX_BUFFER: @@ -1056,14 +1414,65 @@ static int r300_packet3_check(struct radeon_cs_parser *p, return r; } ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); + r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); + if (r) { + return r; + } break; /* Draw packet */ - case PACKET3_3D_DRAW_VBUF: case PACKET3_3D_DRAW_IMMD: - case PACKET3_3D_DRAW_INDX: - case PACKET3_3D_DRAW_VBUF_2: + /* Number of dwords is vtx_size * (num_vertices - 1) + * PRIM_WALK must be equal to 3 vertex data in embedded + * in cmd stream */ + if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) { + DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); + return -EINVAL; + } + track->vap_vf_cntl = ib_chunk->kdata[idx+1]; + track->immd_dwords = pkt->count - 1; + r = r300_cs_track_check(p->rdev, track); + if (r) { + return r; + } + break; case PACKET3_3D_DRAW_IMMD_2: + /* Number of dwords is vtx_size * (num_vertices - 1) + * PRIM_WALK must be equal to 3 vertex data in embedded + * in cmd stream */ + if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) { + DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); + return -EINVAL; + } + track->vap_vf_cntl = ib_chunk->kdata[idx]; + track->immd_dwords = pkt->count; + r = r300_cs_track_check(p->rdev, track); + if (r) { + return r; + } + break; + case PACKET3_3D_DRAW_VBUF: + track->vap_vf_cntl = ib_chunk->kdata[idx + 1]; + r = r300_cs_track_check(p->rdev, track); + if (r) { + return r; + } + break; + case PACKET3_3D_DRAW_VBUF_2: + track->vap_vf_cntl = ib_chunk->kdata[idx]; + r = r300_cs_track_check(p->rdev, track); + if (r) { + return r; + } + break; + case PACKET3_3D_DRAW_INDX: + track->vap_vf_cntl = ib_chunk->kdata[idx + 1]; + r = r300_cs_track_check(p->rdev, track); + if (r) { + return r; + } + break; case PACKET3_3D_DRAW_INDX_2: + track->vap_vf_cntl = ib_chunk->kdata[idx]; r = r300_cs_track_check(p->rdev, track); if (r) { return r; @@ -1095,8 +1504,8 @@ int r300_cs_parse(struct radeon_cs_parser *p) switch (pkt.type) { case PACKET_TYPE0: r = r100_cs_parse_packet0(p, &pkt, - r300_auth_reg, - 
ARRAY_SIZE(r300_auth_reg), + p->rdev->config.r300.reg_safe_bm, + p->rdev->config.r300.reg_safe_bm_size, &r300_packet0_check); break; case PACKET_TYPE2: @@ -1114,3 +1523,10 @@ int r300_cs_parse(struct radeon_cs_parser *p) } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); return 0; } + +int r300_init(struct radeon_device *rdev) +{ + rdev->config.r300.reg_safe_bm = r300_reg_safe_bm; + rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm); + return 0; +} diff --git a/drivers/gpu/drm/radeon/r300.h b/drivers/gpu/drm/radeon/r300.h new file mode 100644 index 000000000000..8486b4da9d69 --- /dev/null +++ b/drivers/gpu/drm/radeon/r300.h @@ -0,0 +1,36 @@ +/* + * Copyright 2008 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * Copyright 2009 Jerome Glisse. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Dave Airlie + * Alex Deucher + * Jerome Glisse + */ +#ifndef R300_H +#define R300_H + +struct r300_asic { + const unsigned *reg_safe_bm; + unsigned reg_safe_bm_size; +}; + +#endif diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index c3f24cc56009..d61f2fc61df5 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -51,7 +51,7 @@ #include "radeon_mode.h" #include "radeon_reg.h" - +#include "r300.h" /* * Modules parameters. @@ -496,6 +496,7 @@ int r100_debugfs_cp_init(struct radeon_device *rdev); * ASIC specific functions. */ struct radeon_asic { + int (*init)(struct radeon_device *rdev); void (*errata)(struct radeon_device *rdev); void (*vram_info)(struct radeon_device *rdev); int (*gpu_reset)(struct radeon_device *rdev); @@ -536,6 +537,10 @@ struct radeon_asic { void (*set_clock_gating)(struct radeon_device *rdev, int enable); }; +union radeon_asic_config { + struct r300_asic r300; +}; + /* * IOCTL. @@ -573,6 +578,7 @@ struct radeon_device { struct drm_device *ddev; struct pci_dev *pdev; /* ASIC */ + union radeon_asic_config config; enum radeon_family family; unsigned long flags; int usec_timeout; @@ -763,6 +769,7 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) /* * ASICs macro. 
*/ +#define radeon_init(rdev) (rdev)->asic->init((rdev)) #define radeon_cs_parse(p) rdev->asic->cs_parse((p)) #define radeon_errata(rdev) (rdev)->asic->errata((rdev)) #define radeon_vram_info(rdev) (rdev)->asic->vram_info((rdev)) diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index e57d8a784e9f..e2e567395df8 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -41,6 +41,7 @@ void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); /* * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ +int r100_init(struct radeon_device *rdev); uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); void r100_errata(struct radeon_device *rdev); @@ -72,6 +73,7 @@ int r100_copy_blit(struct radeon_device *rdev, struct radeon_fence *fence); static struct radeon_asic r100_asic = { + .init = &r100_init, .errata = &r100_errata, .vram_info = &r100_vram_info, .gpu_reset = &r100_gpu_reset, @@ -104,6 +106,7 @@ static struct radeon_asic r100_asic = { /* * r300,r350,rv350,rv380 */ +int r300_init(struct radeon_device *rdev); void r300_errata(struct radeon_device *rdev); void r300_vram_info(struct radeon_device *rdev); int r300_gpu_reset(struct radeon_device *rdev); @@ -126,6 +129,7 @@ int r300_copy_dma(struct radeon_device *rdev, unsigned num_pages, struct radeon_fence *fence); static struct radeon_asic r300_asic = { + .init = &r300_init, .errata = &r300_errata, .vram_info = &r300_vram_info, .gpu_reset = &r300_gpu_reset, @@ -162,6 +166,7 @@ void r420_vram_info(struct radeon_device *rdev); int r420_mc_init(struct radeon_device *rdev); void r420_mc_fini(struct radeon_device *rdev); static struct radeon_asic r420_asic = { + .init = &r300_init, .errata = &r420_errata, .vram_info = &r420_vram_info, .gpu_reset = &r300_gpu_reset, @@ -205,6 +210,7 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); static struct radeon_asic rs400_asic = { + .init = &r300_init, .errata = &rs400_errata, .vram_info = &rs400_vram_info, .gpu_reset = &r300_gpu_reset, @@ -249,6 +255,7 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); static struct radeon_asic rs600_asic = { + .init = &r300_init, .errata = &rs600_errata, .vram_info = &rs600_vram_info, .gpu_reset = &r300_gpu_reset, @@ -288,6 +295,7 @@ void rs690_mc_fini(struct radeon_device *rdev); uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); static struct radeon_asic rs690_asic = { + .init = &r300_init, .errata = &rs690_errata, .vram_info = &rs690_vram_info, .gpu_reset = &r300_gpu_reset, @@ -320,6 +328,7 @@ static struct radeon_asic rs690_asic = { /* * rv515 */ +int rv515_init(struct radeon_device *rdev); void rv515_errata(struct radeon_device *rdev); void rv515_vram_info(struct radeon_device *rdev); int rv515_gpu_reset(struct radeon_device *rdev); @@ -331,6 +340,7 @@ void rv515_ring_start(struct radeon_device *rdev); uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg); void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); static struct radeon_asic rv515_asic 
= { + .init = &rv515_init, .errata = &rv515_errata, .vram_info = &rv515_vram_info, .gpu_reset = &rv515_gpu_reset, @@ -349,7 +359,7 @@ static struct radeon_asic rv515_asic = { .irq_set = &r100_irq_set, .irq_process = &r100_irq_process, .fence_ring_emit = &r300_fence_ring_emit, - .cs_parse = &r100_cs_parse, + .cs_parse = &r300_cs_parse, .copy_blit = &r100_copy_blit, .copy_dma = &r300_copy_dma, .copy = &r100_copy_blit, @@ -368,6 +378,7 @@ void r520_vram_info(struct radeon_device *rdev); int r520_mc_init(struct radeon_device *rdev); void r520_mc_fini(struct radeon_device *rdev); static struct radeon_asic r520_asic = { + .init = &rv515_init, .errata = &r520_errata, .vram_info = &r520_vram_info, .gpu_reset = &rv515_gpu_reset, @@ -386,7 +397,7 @@ static struct radeon_asic r520_asic = { .irq_set = &r100_irq_set, .irq_process = &r100_irq_process, .fence_ring_emit = &r300_fence_ring_emit, - .cs_parse = &r100_cs_parse, + .cs_parse = &r300_cs_parse, .copy_blit = &r100_copy_blit, .copy_dma = &r300_copy_dma, .copy = &r100_copy_blit, diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 786632d3e378..1f5a1a490984 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -835,7 +835,6 @@ radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder) struct _COMPASSIONATE_DATA *dac_info; uint8_t frev, crev; uint8_t bg, dac; - int i; struct radeon_encoder_primary_dac *p_dac = NULL; atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset); @@ -867,7 +866,6 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder) struct _COMPASSIONATE_DATA *dac_info; uint8_t frev, crev; uint8_t bg, dac; - int i; struct radeon_encoder_tv_dac *tv_dac = NULL; atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset); diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 06e8038bc4ac..afc4db280b94 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -799,6 +799,7 @@ static struct radeon_encoder_lvds *radeon_legacy_get_lvds_info_from_regs(struct struct radeon_encoder_lvds *lvds = NULL; uint32_t fp_vert_stretch, fp_horz_stretch; uint32_t ppll_div_sel, ppll_val; + uint32_t lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL); lvds = kzalloc(sizeof(struct radeon_encoder_lvds), GFP_KERNEL); @@ -808,6 +809,14 @@ static struct radeon_encoder_lvds *radeon_legacy_get_lvds_info_from_regs(struct fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH); fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH); + /* These should be fail-safe defaults, fingers crossed */ + lvds->panel_pwr_delay = 200; + lvds->panel_vcc_delay = 2000; + + lvds->lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); + lvds->panel_digon_delay = (lvds_ss_gen_cntl >> RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) & 0xf; + lvds->panel_blon_delay = (lvds_ss_gen_cntl >> RADEON_LVDS_PWRSEQ_DELAY2_SHIFT) & 0xf; + if (fp_vert_stretch & RADEON_VERT_STRETCH_ENABLE) lvds->native_mode.panel_yres = ((fp_vert_stretch & RADEON_VERT_PANEL_SIZE) >> diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index 89c4c44169f7..d8356827ef17 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c @@ -2045,11 +2045,10 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags) drm_radeon_private_t *dev_priv; int ret = 0; - dev_priv = drm_alloc(sizeof(drm_radeon_private_t), DRM_MEM_DRIVER); + 
dev_priv = kzalloc(sizeof(drm_radeon_private_t), GFP_KERNEL); if (dev_priv == NULL) return -ENOMEM; - memset(dev_priv, 0, sizeof(drm_radeon_private_t)); dev->dev_private = (void *)dev_priv; dev_priv->flags = flags; @@ -2103,7 +2102,7 @@ int radeon_master_create(struct drm_device *dev, struct drm_master *master) unsigned long sareapage; int ret; - master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER); + master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL); if (!master_priv) return -ENOMEM; @@ -2137,7 +2136,7 @@ void radeon_master_destroy(struct drm_device *dev, struct drm_master *master) if (master_priv->sarea) drm_rmmap_locked(dev, master_priv->sarea); - drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER); + kfree(master_priv); master->driver_priv = NULL; } @@ -2171,7 +2170,7 @@ int radeon_driver_unload(struct drm_device *dev) drm_rmmap(dev, dev_priv->mmio); - drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); + kfree(dev_priv); dev->dev_private = NULL; return 0; diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 5fd2b639bf66..f30aa7274a54 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -470,6 +470,10 @@ int radeon_device_init(struct radeon_device *rdev, if (r) { return r; } + r = radeon_init(rdev); + if (r) { + return r; + } /* Report DMA addressing limitation */ r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(32)); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 5452bb9d925e..3efcf1a526be 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -351,7 +351,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) radeon_i2c_do_lock(radeon_connector, 0); if (edid) { /* update digital bits here */ - if (edid->digital) + if (edid->input & DRM_EDID_INPUT_DIGITAL) radeon_connector->use_digital = 1; else radeon_connector->use_digital = 0; diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index c815a2cbf7b3..09c9fb9f6210 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -313,7 +313,7 @@ static int __init radeon_init(void) { driver = &driver_old; driver->num_ioctls = radeon_max_ioctl; -#if defined(CONFIG_DRM_RADEON_KMS) && defined(CONFIG_X86) +#if defined(CONFIG_DRM_RADEON_KMS) /* if enabled by default */ if (radeon_modeset == -1) { DRM_INFO("radeon default to kernel modesetting.\n"); diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index 71465ed2688a..dd438d32e5c0 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c @@ -162,7 +162,7 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, struct radeon_i2c_chan *i2c; int ret; - i2c = drm_calloc(1, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER); + i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL); if (i2c == NULL) return NULL; @@ -189,7 +189,7 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, return i2c; out_free: - drm_free(i2c, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER); + kfree(i2c); return NULL; } @@ -200,7 +200,7 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c) return; i2c_del_adapter(&i2c->adapter); - drm_free(i2c, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER); + kfree(i2c); } struct drm_encoder *radeon_best_encoder(struct drm_connector *connector) diff --git a/drivers/gpu/drm/radeon/radeon_kms.c 
b/drivers/gpu/drm/radeon/radeon_kms.c index 64f42b19cbfa..4612a7c146d1 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -169,7 +169,7 @@ int radeon_master_create_kms(struct drm_device *dev, struct drm_master *master) unsigned long sareapage; int ret; - master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER); + master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL); if (master_priv == NULL) { return -ENOMEM; } @@ -199,7 +199,7 @@ void radeon_master_destroy_kms(struct drm_device *dev, if (master_priv->sarea) { drm_rmmap_locked(dev, master_priv->sarea); } - drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER); + kfree(master_priv); master->driver_priv = NULL; } diff --git a/drivers/gpu/drm/radeon/radeon_mem.c b/drivers/gpu/drm/radeon/radeon_mem.c index 4af5286a36fb..ed95155c4b1d 100644 --- a/drivers/gpu/drm/radeon/radeon_mem.c +++ b/drivers/gpu/drm/radeon/radeon_mem.c @@ -43,8 +43,8 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size, { /* Maybe cut off the start of an existing block */ if (start > p->start) { - struct mem_block *newblock = - drm_alloc(sizeof(*newblock), DRM_MEM_BUFS); + struct mem_block *newblock = kmalloc(sizeof(*newblock), + GFP_KERNEL); if (!newblock) goto out; newblock->start = start; @@ -60,8 +60,8 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size, /* Maybe cut off the end of an existing block */ if (size < p->size) { - struct mem_block *newblock = - drm_alloc(sizeof(*newblock), DRM_MEM_BUFS); + struct mem_block *newblock = kmalloc(sizeof(*newblock), + GFP_KERNEL); if (!newblock) goto out; newblock->start = start + size; @@ -118,7 +118,7 @@ static void free_block(struct mem_block *p) p->size += q->size; p->next = q->next; p->next->prev = p; - drm_free(q, sizeof(*q), DRM_MEM_BUFS); + kfree(q); } if (p->prev->file_priv == NULL) { @@ -126,7 +126,7 @@ static void free_block(struct mem_block *p) q->size += p->size; q->next = p->next; q->next->prev = q; - drm_free(p, sizeof(*q), DRM_MEM_BUFS); + kfree(p); } } @@ -134,14 +134,14 @@ static void free_block(struct mem_block *p) */ static int init_heap(struct mem_block **heap, int start, int size) { - struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS); + struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL); if (!blocks) return -ENOMEM; - *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS); + *heap = kmalloc(sizeof(**heap), GFP_KERNEL); if (!*heap) { - drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS); + kfree(blocks); return -ENOMEM; } @@ -179,7 +179,7 @@ void radeon_mem_release(struct drm_file *file_priv, struct mem_block *heap) p->size += q->size; p->next = q->next; p->next->prev = p; - drm_free(q, sizeof(*q), DRM_MEM_DRIVER); + kfree(q); } } } @@ -196,10 +196,10 @@ void radeon_mem_takedown(struct mem_block **heap) for (p = (*heap)->next; p != *heap;) { struct mem_block *q = p; p = p->next; - drm_free(q, sizeof(*q), DRM_MEM_DRIVER); + kfree(q); } - drm_free(*heap, sizeof(**heap), DRM_MEM_DRIVER); + kfree(*heap); *heap = NULL; } diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h index 6d3d90406a24..e1b618574461 100644 --- a/drivers/gpu/drm/radeon/radeon_reg.h +++ b/drivers/gpu/drm/radeon/radeon_reg.h @@ -3184,6 +3184,7 @@ # define RADEON_RB_BUFSZ_MASK (0x3f << 0) # define RADEON_RB_BLKSZ_SHIFT 8 # define RADEON_RB_BLKSZ_MASK (0x3f << 8) +# define RADEON_BUF_SWAP_32BIT (1 << 17) # define RADEON_MAX_FETCH_SHIFT 18 # define RADEON_MAX_FETCH_MASK (0x3 << 
18) # define RADEON_RB_NO_UPDATE (1 << 27) diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c index fa728ec6ed34..46645f3e0328 100644 --- a/drivers/gpu/drm/radeon/radeon_state.c +++ b/drivers/gpu/drm/radeon/radeon_state.c @@ -2866,12 +2866,12 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file */ orig_bufsz = cmdbuf->bufsz; if (orig_bufsz != 0) { - kbuf = drm_alloc(cmdbuf->bufsz, DRM_MEM_DRIVER); + kbuf = kmalloc(cmdbuf->bufsz, GFP_KERNEL); if (kbuf == NULL) return -ENOMEM; if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf->buf, cmdbuf->bufsz)) { - drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); + kfree(kbuf); return -EFAULT; } cmdbuf->buf = kbuf; @@ -2884,7 +2884,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf); if (orig_bufsz != 0) - drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); + kfree(kbuf); return temp; } @@ -2991,7 +2991,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file } if (orig_bufsz != 0) - drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); + kfree(kbuf); DRM_DEBUG("DONE\n"); COMMIT_RING(); @@ -2999,7 +2999,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file err: if (orig_bufsz != 0) - drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); + kfree(kbuf); return -EINVAL; } @@ -3175,9 +3175,7 @@ int radeon_driver_open(struct drm_device *dev, struct drm_file *file_priv) struct drm_radeon_driver_file_fields *radeon_priv; DRM_DEBUG("\n"); - radeon_priv = - (struct drm_radeon_driver_file_fields *) - drm_alloc(sizeof(*radeon_priv), DRM_MEM_FILES); + radeon_priv = kmalloc(sizeof(*radeon_priv), GFP_KERNEL); if (!radeon_priv) return -ENOMEM; @@ -3196,7 +3194,7 @@ void radeon_driver_postclose(struct drm_device *dev, struct drm_file *file_priv) struct drm_radeon_driver_file_fields *radeon_priv = file_priv->driver_priv; - drm_free(radeon_priv, sizeof(*radeon_priv), DRM_MEM_FILES); + kfree(radeon_priv); } struct drm_ioctl_desc radeon_ioctls[] = { diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 4c087c1510d7..1227a97f5169 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -133,6 +133,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, man->gpu_offset = 0; man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; + man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; #if __OS_HAS_AGP if (rdev->flags & RADEON_IS_AGP) { if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) { @@ -143,8 +144,9 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, man->io_offset = rdev->mc.agp_base; man->io_size = rdev->mc.gtt_size; man->io_addr = NULL; - man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | - TTM_MEMTYPE_FLAG_MAPPABLE; + if (!rdev->ddev->agp->cant_use_aperture) + man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | + TTM_MEMTYPE_FLAG_MAPPABLE; man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; man->default_caching = TTM_PL_FLAG_WC; @@ -154,8 +156,6 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, man->io_offset = 0; man->io_size = 0; man->io_addr = NULL; - man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | - TTM_MEMTYPE_FLAG_CMA; } break; case TTM_PL_VRAM: diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 7eab95db58ac..ffea37b1b3e2 100644 --- a/drivers/gpu/drm/radeon/rv515.c 
+++ b/drivers/gpu/drm/radeon/rv515.c @@ -225,6 +225,8 @@ void rv515_ring_start(struct radeon_device *rdev) radeon_ring_write(rdev, R300_GEOMETRY_ROUND_NEAREST | R300_COLOR_ROUND_NEAREST); + radeon_ring_write(rdev, PACKET0(0x20C8, 0)); + radeon_ring_write(rdev, 0); radeon_ring_unlock_commit(rdev); } @@ -502,3 +504,59 @@ int rv515_debugfs_ga_info_init(struct radeon_device *rdev) return 0; #endif } + + +/* + * Asic initialization + */ +static const unsigned r500_reg_safe_bm[159] = { + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF, + 0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000, + 0xF0000038, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x1FFFFC78, 0xFFFFE000, 0xFFFFFFFE, 0xFFFFFFFF, + 0x38CF8F50, 0xFFF88082, 0xFF0000FC, 0xFAE009FF, + 0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, + 0xFFFF8CFC, 0xFFFFC1FF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF80FFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0003FC01, 0x3FFFFCF8, 0xFE800B19, +}; + + + +int rv515_init(struct radeon_device *rdev) +{ + rdev->config.r300.reg_safe_bm = r500_reg_safe_bm; + rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r500_reg_safe_bm); + return 0; +} diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c index 456cd040f31a..bff6fc2524c8 100644 --- a/drivers/gpu/drm/savage/savage_bci.c +++ b/drivers/gpu/drm/savage/savage_bci.c @@ -298,8 +298,8 @@ static int savage_dma_init(drm_savage_private_t * dev_priv) dev_priv->nr_dma_pages = dev_priv->cmd_dma->size / (SAVAGE_DMA_PAGE_SIZE * 4); - dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) * - dev_priv->nr_dma_pages, DRM_MEM_DRIVER); + dev_priv->dma_pages = kmalloc(sizeof(drm_savage_dma_page_t) * + dev_priv->nr_dma_pages, GFP_KERNEL); if (dev_priv->dma_pages == NULL) return -ENOMEM; @@ -539,7 +539,7 @@ int savage_driver_load(struct drm_device *dev, unsigned long chipset) { drm_savage_private_t *dev_priv; - dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER); + dev_priv = kmalloc(sizeof(drm_savage_private_t), GFP_KERNEL); 
if (dev_priv == NULL) return -ENOMEM; @@ -671,7 +671,7 @@ int savage_driver_unload(struct drm_device *dev) { drm_savage_private_t *dev_priv = dev->dev_private; - drm_free(dev_priv, sizeof(drm_savage_private_t), DRM_MEM_DRIVER); + kfree(dev_priv); return 0; } @@ -804,8 +804,8 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init) dev_priv->fake_dma.offset = 0; dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE; dev_priv->fake_dma.type = _DRM_SHM; - dev_priv->fake_dma.handle = drm_alloc(SAVAGE_FAKE_DMA_SIZE, - DRM_MEM_DRIVER); + dev_priv->fake_dma.handle = kmalloc(SAVAGE_FAKE_DMA_SIZE, + GFP_KERNEL); if (!dev_priv->fake_dma.handle) { DRM_ERROR("could not allocate faked DMA buffer!\n"); savage_do_cleanup_bci(dev); @@ -903,9 +903,7 @@ static int savage_do_cleanup_bci(struct drm_device * dev) drm_savage_private_t *dev_priv = dev->dev_private; if (dev_priv->cmd_dma == &dev_priv->fake_dma) { - if (dev_priv->fake_dma.handle) - drm_free(dev_priv->fake_dma.handle, - SAVAGE_FAKE_DMA_SIZE, DRM_MEM_DRIVER); + kfree(dev_priv->fake_dma.handle); } else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle && dev_priv->cmd_dma->type == _DRM_AGP && dev_priv->dma_type == SAVAGE_DMA_AGP) @@ -920,10 +918,7 @@ static int savage_do_cleanup_bci(struct drm_device * dev) dev->agp_buffer_map = NULL; } - if (dev_priv->dma_pages) - drm_free(dev_priv->dma_pages, - sizeof(drm_savage_dma_page_t) * dev_priv->nr_dma_pages, - DRM_MEM_DRIVER); + kfree(dev_priv->dma_pages); return 0; } diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c index 5f6238fdf1fa..8a3e31599c94 100644 --- a/drivers/gpu/drm/savage/savage_state.c +++ b/drivers/gpu/drm/savage/savage_state.c @@ -988,20 +988,20 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_ * for locking on FreeBSD. 
*/ if (cmdbuf->size) { - kcmd_addr = drm_alloc(cmdbuf->size * 8, DRM_MEM_DRIVER); + kcmd_addr = kmalloc(cmdbuf->size * 8, GFP_KERNEL); if (kcmd_addr == NULL) return -ENOMEM; if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf->cmd_addr, cmdbuf->size * 8)) { - drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER); + kfree(kcmd_addr); return -EFAULT; } cmdbuf->cmd_addr = kcmd_addr; } if (cmdbuf->vb_size) { - kvb_addr = drm_alloc(cmdbuf->vb_size, DRM_MEM_DRIVER); + kvb_addr = kmalloc(cmdbuf->vb_size, GFP_KERNEL); if (kvb_addr == NULL) { ret = -ENOMEM; goto done; @@ -1015,8 +1015,8 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_ cmdbuf->vb_addr = kvb_addr; } if (cmdbuf->nbox) { - kbox_addr = drm_alloc(cmdbuf->nbox * sizeof(struct drm_clip_rect), - DRM_MEM_DRIVER); + kbox_addr = kmalloc(cmdbuf->nbox * sizeof(struct drm_clip_rect), + GFP_KERNEL); if (kbox_addr == NULL) { ret = -ENOMEM; goto done; @@ -1154,10 +1154,9 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_ done: /* If we didn't need to allocate them, these'll be NULL */ - drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER); - drm_free(kvb_addr, cmdbuf->vb_size, DRM_MEM_DRIVER); - drm_free(kbox_addr, cmdbuf->nbox * sizeof(struct drm_clip_rect), - DRM_MEM_DRIVER); + kfree(kcmd_addr); + kfree(kvb_addr); + kfree(kbox_addr); return ret; } diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c index 7dacc64e9b56..e725cc0b1155 100644 --- a/drivers/gpu/drm/sis/sis_drv.c +++ b/drivers/gpu/drm/sis/sis_drv.c @@ -40,7 +40,7 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset) drm_sis_private_t *dev_priv; int ret; - dev_priv = drm_calloc(1, sizeof(drm_sis_private_t), DRM_MEM_DRIVER); + dev_priv = kzalloc(sizeof(drm_sis_private_t), GFP_KERNEL); if (dev_priv == NULL) return -ENOMEM; @@ -48,7 +48,7 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset) dev_priv->chipset = chipset; ret = drm_sman_init(&dev_priv->sman, 2, 12, 8); if (ret) { - drm_free(dev_priv, sizeof(dev_priv), DRM_MEM_DRIVER); + kfree(dev_priv); } return ret; @@ -59,7 +59,7 @@ static int sis_driver_unload(struct drm_device *dev) drm_sis_private_t *dev_priv = dev->dev_private; drm_sman_takedown(&dev_priv->sman); - drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); + kfree(dev_priv); return 0; } diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c index e8f6d2229d8c..4648ed2f0143 100644 --- a/drivers/gpu/drm/ttm/ttm_agp_backend.c +++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c @@ -63,8 +63,7 @@ static int ttm_agp_populate(struct ttm_backend *backend, if (!page) page = dummy_read_page; - mem->memory[mem->page_count++] = - phys_to_gart(page_to_phys(page)); + mem->pages[mem->page_count++] = page; } agp_be->mem = mem; return 0; diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 1587aeca7bea..c1c407f7cca3 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -282,7 +282,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement); if (ret) - return ret; + goto out_err; if (mem->mem_type != TTM_PL_SYSTEM) { ret = ttm_tt_bind(bo->ttm, mem); @@ -527,9 +527,12 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type, ret = ttm_bo_wait(bo, false, interruptible, no_wait); spin_unlock(&bo->lock); - if (ret && ret != -ERESTART) { - printk(KERN_ERR TTM_PFX "Failed to expire sync object before " - 
"buffer eviction.\n"); + if (unlikely(ret != 0)) { + if (ret != -ERESTART) { + printk(KERN_ERR TTM_PFX + "Failed to expire sync object before " + "buffer eviction.\n"); + } goto out; } diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index c27ab3a877ad..0331fa74cd3f 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -68,7 +68,7 @@ static void ttm_tt_cache_flush_clflush(struct page *pages[], ttm_tt_clflush_page(*pages++); mb(); } -#else +#elif !defined(__powerpc__) static void ttm_tt_ipi_handler(void *null) { ; @@ -83,6 +83,15 @@ void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages) ttm_tt_cache_flush_clflush(pages, num_pages); return; } +#elif defined(__powerpc__) + unsigned long i; + + for (i = 0; i < num_pages; ++i) { + if (pages[i]) { + unsigned long start = (unsigned long)page_address(pages[i]); + flush_dcache_range(start, start + PAGE_SIZE); + } + } #else if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0) printk(KERN_ERR TTM_PFX diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c index 2c4f0b485792..6e6f91591639 100644 --- a/drivers/gpu/drm/via/via_map.c +++ b/drivers/gpu/drm/via/via_map.c @@ -96,7 +96,7 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset) drm_via_private_t *dev_priv; int ret = 0; - dev_priv = drm_calloc(1, sizeof(drm_via_private_t), DRM_MEM_DRIVER); + dev_priv = kzalloc(sizeof(drm_via_private_t), GFP_KERNEL); if (dev_priv == NULL) return -ENOMEM; @@ -106,14 +106,14 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset) ret = drm_sman_init(&dev_priv->sman, 2, 12, 8); if (ret) { - drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); + kfree(dev_priv); return ret; } ret = drm_vblank_init(dev, 1); if (ret) { drm_sman_takedown(&dev_priv->sman); - drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER); + kfree(dev_priv); return ret; } @@ -126,7 +126,7 @@ int via_driver_unload(struct drm_device *dev) drm_sman_takedown(&dev_priv->sman); - drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER); + kfree(dev_priv); return 0; } |