From c62d81bcfe82526cc3da10cf4fc63faad368bc60 Mon Sep 17 00:00:00 2001 From: Alessandro Rubini Date: Sun, 20 Sep 2009 23:28:04 +0200 Subject: mtd: use bbm.h in nand.h This consolidates common code in nand.h and bbm.h. The comments and data structures were the same, this keeps the comment from nand.h as it fits 80 columns, while the one in bbm.h did not. Signed-off-by: Alessandro Rubini Signed-off-by: David Woodhouse --- include/linux/mtd/bbm.h | 35 +++++++++++----------- include/linux/mtd/nand.h | 76 +----------------------------------------------- 2 files changed, 19 insertions(+), 92 deletions(-) (limited to 'include') diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h index fff8c53e5434..9c3757c5759d 100644 --- a/include/linux/mtd/bbm.h +++ b/include/linux/mtd/bbm.h @@ -19,22 +19,21 @@ /** * struct nand_bbt_descr - bad block table descriptor - * @options: options for this descriptor - * @pages: the page(s) where we find the bbt, used with - * option BBT_ABSPAGE when bbt is searched, - * then we store the found bbts pages here. - * Its an array and supports up to 8 chips now - * @offs: offset of the pattern in the oob area of the page - * @veroffs: offset of the bbt version counter in the oob area of the page - * @version: version read from the bbt page during scan - * @len: length of the pattern, if 0 no pattern check is performed - * @maxblocks: maximum number of blocks to search for a bbt. This - * number of blocks is reserved at the end of the device - * where the tables are written. - * @reserved_block_code: if non-0, this pattern denotes a reserved - * (rather than bad) block in the stored bbt - * @pattern: pattern to identify bad block table or factory marked - * good / bad blocks, can be NULL, if len = 0 + * @options: options for this descriptor + * @pages: the page(s) where we find the bbt, used with option BBT_ABSPAGE + * when bbt is searched, then we store the found bbts pages here. + * Its an array and supports up to 8 chips now + * @offs: offset of the pattern in the oob area of the page + * @veroffs: offset of the bbt version counter in the oob are of the page + * @version: version read from the bbt page during scan + * @len: length of the pattern, if 0 no pattern check is performed + * @maxblocks: maximum number of blocks to search for a bbt. This number of + * blocks is reserved at the end of the device where the tables are + * written. + * @reserved_block_code: if non-0, this pattern denotes a reserved (rather than + * bad) block in the stored bbt + * @pattern: pattern to identify bad block table or factory marked good / + * bad blocks, can be NULL, if len = 0 * * Descriptor for the bad block table marker and the descriptor for the * pattern which identifies good and bad blocks. 
The assumption is made @@ -90,7 +89,9 @@ struct nand_bbt_descr { /* * Constants for oob configuration */ -#define ONENAND_BADBLOCK_POS 0 +#define NAND_SMALL_BADBLOCK_POS 5 +#define NAND_LARGE_BADBLOCK_POS 0 +#define ONENAND_BADBLOCK_POS 0 /* * Bad block scanning errors diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 7a232a9bdd62..d87ada538d17 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -21,6 +21,7 @@ #include #include #include +#include struct mtd_info; /* Scan and identify a NAND device */ @@ -470,75 +471,6 @@ struct nand_manufacturers { extern struct nand_flash_dev nand_flash_ids[]; extern struct nand_manufacturers nand_manuf_ids[]; -/** - * struct nand_bbt_descr - bad block table descriptor - * @options: options for this descriptor - * @pages: the page(s) where we find the bbt, used with option BBT_ABSPAGE - * when bbt is searched, then we store the found bbts pages here. - * Its an array and supports up to 8 chips now - * @offs: offset of the pattern in the oob area of the page - * @veroffs: offset of the bbt version counter in the oob are of the page - * @version: version read from the bbt page during scan - * @len: length of the pattern, if 0 no pattern check is performed - * @maxblocks: maximum number of blocks to search for a bbt. This number of - * blocks is reserved at the end of the device where the tables are - * written. - * @reserved_block_code: if non-0, this pattern denotes a reserved (rather than - * bad) block in the stored bbt - * @pattern: pattern to identify bad block table or factory marked good / - * bad blocks, can be NULL, if len = 0 - * - * Descriptor for the bad block table marker and the descriptor for the - * pattern which identifies good and bad blocks. The assumption is made - * that the pattern and the version count are always located in the oob area - * of the first block. 
- */ -struct nand_bbt_descr { - int options; - int pages[NAND_MAX_CHIPS]; - int offs; - int veroffs; - uint8_t version[NAND_MAX_CHIPS]; - int len; - int maxblocks; - int reserved_block_code; - uint8_t *pattern; -}; - -/* Options for the bad block table descriptors */ - -/* The number of bits used per block in the bbt on the device */ -#define NAND_BBT_NRBITS_MSK 0x0000000F -#define NAND_BBT_1BIT 0x00000001 -#define NAND_BBT_2BIT 0x00000002 -#define NAND_BBT_4BIT 0x00000004 -#define NAND_BBT_8BIT 0x00000008 -/* The bad block table is in the last good block of the device */ -#define NAND_BBT_LASTBLOCK 0x00000010 -/* The bbt is at the given page, else we must scan for the bbt */ -#define NAND_BBT_ABSPAGE 0x00000020 -/* The bbt is at the given page, else we must scan for the bbt */ -#define NAND_BBT_SEARCH 0x00000040 -/* bbt is stored per chip on multichip devices */ -#define NAND_BBT_PERCHIP 0x00000080 -/* bbt has a version counter at offset veroffs */ -#define NAND_BBT_VERSION 0x00000100 -/* Create a bbt if none axists */ -#define NAND_BBT_CREATE 0x00000200 -/* Search good / bad pattern through all pages of a block */ -#define NAND_BBT_SCANALLPAGES 0x00000400 -/* Scan block empty during good / bad block scan */ -#define NAND_BBT_SCANEMPTY 0x00000800 -/* Write bbt if neccecary */ -#define NAND_BBT_WRITE 0x00001000 -/* Read and write back block contents when writing bbt */ -#define NAND_BBT_SAVECONTENT 0x00002000 -/* Search good / bad pattern on the first and the second page */ -#define NAND_BBT_SCAN2NDPAGE 0x00004000 - -/* The maximum number of blocks to scan for a bbt */ -#define NAND_BBT_SCAN_MAXBLOCKS 4 - extern int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd); extern int nand_update_bbt(struct mtd_info *mtd, loff_t offs); extern int nand_default_bbt(struct mtd_info *mtd); @@ -548,12 +480,6 @@ extern int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len, size_t * retlen, uint8_t * buf); -/* -* Constants for oob configuration -*/ -#define NAND_SMALL_BADBLOCK_POS 5 -#define NAND_LARGE_BADBLOCK_POS 0 - /** * struct platform_nand_chip - chip level device structure * @nr_chips: max. number of chips to scan for -- cgit v1.2.3 From 30631cb82d5c6c662d5ec682beaa834c1f9f0987 Mon Sep 17 00:00:00 2001 From: Alessandro Rubini Date: Sun, 20 Sep 2009 23:28:14 +0200 Subject: mtd: unify status enum from three headers nand.h, onenand.h and flashchip.h defined enumeration types for chip status using the same symbolic names. This prevented a board file to include more than one of them. In particular, no nand and onenand platform devices could live in the same file. This patch augments flashchip.h with a few status values in order to cover all cases, so nand.h and onenand.h can use flstate_t without declaring their own status enum. 
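As a rough illustration (a hypothetical board file, not part of this patch), both headers can now be pulled in side by side because the chip states live in a single flstate_t in flashchip.h instead of two conflicting per-header enums:

	#include <linux/mtd/nand.h>	/* nand_chip.state is now an flstate_t */
	#include <linux/mtd/onenand.h>	/* onenand_chip.state is now an flstate_t */

	/* NAND platform data for this board */
	static struct platform_nand_data board_nand_data = {
		.chip = { .nr_chips = 1 },
	};

	/* OneNAND platform data for the same board can be declared here as
	 * well; before this change the duplicate FL_* enumerators made the
	 * double include fail to compile. */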
Signed-off-by: Alessandro Rubini Signed-off-by: David Woodhouse --- include/linux/mtd/flashchip.h | 7 +++++++ include/linux/mtd/nand.h | 17 ++--------------- include/linux/mtd/onenand.h | 19 ++----------------- 3 files changed, 11 insertions(+), 32 deletions(-) (limited to 'include') diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h index d4f38c5fd44e..f350a4879f75 100644 --- a/include/linux/mtd/flashchip.h +++ b/include/linux/mtd/flashchip.h @@ -38,6 +38,13 @@ typedef enum { FL_XIP_WHILE_ERASING, FL_XIP_WHILE_WRITING, FL_SHUTDOWN, + /* These 2 come from nand_state_t, which has been unified here */ + FL_READING, + FL_CACHEDPRG, + /* These 2 come from onenand_state_t, which has been unified here */ + FL_RESETING, + FL_OTPING, + FL_UNKNOWN } flstate_t; diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index d87ada538d17..2476078a032f 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -21,6 +21,7 @@ #include #include #include +#include #include struct mtd_info; @@ -203,20 +204,6 @@ typedef enum { #define NAND_CI_CHIPNR_MSK 0x03 #define NAND_CI_CELLTYPE_MSK 0x0C -/* - * nand_state_t - chip states - * Enumeration for NAND flash chip state - */ -typedef enum { - FL_READY, - FL_READING, - FL_WRITING, - FL_ERASING, - FL_SYNCING, - FL_CACHEDPRG, - FL_PM_SUSPENDED, -} nand_state_t; - /* Keep gcc happy */ struct nand_chip; @@ -403,7 +390,7 @@ struct nand_chip { uint8_t cellinfo; int badblockpos; - nand_state_t state; + flstate_t state; uint8_t *oob_poi; struct nand_hw_control *controller; diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h index 4e49f3350678..f57e29e17bb0 100644 --- a/include/linux/mtd/onenand.h +++ b/include/linux/mtd/onenand.h @@ -14,6 +14,7 @@ #include #include +#include #include #include @@ -25,22 +26,6 @@ extern int onenand_scan(struct mtd_info *mtd, int max_chips); /* Free resources held by the OneNAND device */ extern void onenand_release(struct mtd_info *mtd); -/* - * onenand_state_t - chip states - * Enumeration for OneNAND flash chip state - */ -typedef enum { - FL_READY, - FL_READING, - FL_WRITING, - FL_ERASING, - FL_SYNCING, - FL_LOCKING, - FL_RESETING, - FL_OTPING, - FL_PM_SUSPENDED, -} onenand_state_t; - /** * struct onenand_bufferram - OneNAND BufferRAM Data * @blockpage: block & page address in BufferRAM @@ -137,7 +122,7 @@ struct onenand_chip { spinlock_t chip_lock; wait_queue_head_t wq; - onenand_state_t state; + flstate_t state; unsigned char *page_buf; unsigned char *oob_buf; -- cgit v1.2.3 From 23fb064bb96f001ecb8682129f7ee1bc1ca691bc Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 21 Jul 2009 21:18:35 +0900 Subject: percpu: kill legacy percpu allocator With ia64 converted, there's no arch left which still uses legacy percpu allocator. Kill it. 
Signed-off-by: Tejun Heo Delightedly-acked-by: Rusty Russell Cc: Ingo Molnar Cc: Christoph Lameter --- include/linux/percpu.h | 24 ------- kernel/module.c | 150 ----------------------------------------- mm/Makefile | 4 -- mm/allocpercpu.c | 177 ------------------------------------------------- mm/percpu.c | 2 - 5 files changed, 357 deletions(-) delete mode 100644 mm/allocpercpu.c (limited to 'include') diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 878836ca999c..5baf5b8788fb 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -34,8 +34,6 @@ #ifdef CONFIG_SMP -#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA - /* minimum unit size, also is the maximum supported allocation size */ #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10) @@ -130,28 +128,6 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size, #define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))) extern void *__alloc_reserved_percpu(size_t size, size_t align); - -#else /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */ - -struct percpu_data { - void *ptrs[1]; -}; - -/* pointer disguising messes up the kmemleak objects tracking */ -#ifndef CONFIG_DEBUG_KMEMLEAK -#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata) -#else -#define __percpu_disguise(pdata) (struct percpu_data *)(pdata) -#endif - -#define per_cpu_ptr(ptr, cpu) \ -({ \ - struct percpu_data *__p = __percpu_disguise(ptr); \ - (__typeof__(ptr))__p->ptrs[(cpu)]; \ -}) - -#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */ - extern void *__alloc_percpu(size_t size, size_t align); extern void free_percpu(void *__pdata); diff --git a/kernel/module.c b/kernel/module.c index 8b7d8805819d..64787cddeb5e 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -370,8 +370,6 @@ EXPORT_SYMBOL_GPL(find_module); #ifdef CONFIG_SMP -#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA - static void *percpu_modalloc(unsigned long size, unsigned long align, const char *name) { @@ -395,154 +393,6 @@ static void percpu_modfree(void *freeme) free_percpu(freeme); } -#else /* ... CONFIG_HAVE_LEGACY_PER_CPU_AREA */ - -/* Number of blocks used and allocated. */ -static unsigned int pcpu_num_used, pcpu_num_allocated; -/* Size of each block. -ve means used. */ -static int *pcpu_size; - -static int split_block(unsigned int i, unsigned short size) -{ - /* Reallocation required? */ - if (pcpu_num_used + 1 > pcpu_num_allocated) { - int *new; - - new = krealloc(pcpu_size, sizeof(new[0])*pcpu_num_allocated*2, - GFP_KERNEL); - if (!new) - return 0; - - pcpu_num_allocated *= 2; - pcpu_size = new; - } - - /* Insert a new subblock */ - memmove(&pcpu_size[i+1], &pcpu_size[i], - sizeof(pcpu_size[0]) * (pcpu_num_used - i)); - pcpu_num_used++; - - pcpu_size[i+1] -= size; - pcpu_size[i] = size; - return 1; -} - -static inline unsigned int block_size(int val) -{ - if (val < 0) - return -val; - return val; -} - -static void *percpu_modalloc(unsigned long size, unsigned long align, - const char *name) -{ - unsigned long extra; - unsigned int i; - void *ptr; - int cpu; - - if (align > PAGE_SIZE) { - printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n", - name, align, PAGE_SIZE); - align = PAGE_SIZE; - } - - ptr = __per_cpu_start; - for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) { - /* Extra for alignment requirement. */ - extra = ALIGN((unsigned long)ptr, align) - (unsigned long)ptr; - BUG_ON(i == 0 && extra != 0); - - if (pcpu_size[i] < 0 || pcpu_size[i] < extra + size) - continue; - - /* Transfer extra to previous block. 
*/ - if (pcpu_size[i-1] < 0) - pcpu_size[i-1] -= extra; - else - pcpu_size[i-1] += extra; - pcpu_size[i] -= extra; - ptr += extra; - - /* Split block if warranted */ - if (pcpu_size[i] - size > sizeof(unsigned long)) - if (!split_block(i, size)) - return NULL; - - /* add the per-cpu scanning areas */ - for_each_possible_cpu(cpu) - kmemleak_alloc(ptr + per_cpu_offset(cpu), size, 0, - GFP_KERNEL); - - /* Mark allocated */ - pcpu_size[i] = -pcpu_size[i]; - return ptr; - } - - printk(KERN_WARNING "Could not allocate %lu bytes percpu data\n", - size); - return NULL; -} - -static void percpu_modfree(void *freeme) -{ - unsigned int i; - void *ptr = __per_cpu_start + block_size(pcpu_size[0]); - int cpu; - - /* First entry is core kernel percpu data. */ - for (i = 1; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) { - if (ptr == freeme) { - pcpu_size[i] = -pcpu_size[i]; - goto free; - } - } - BUG(); - - free: - /* remove the per-cpu scanning areas */ - for_each_possible_cpu(cpu) - kmemleak_free(freeme + per_cpu_offset(cpu)); - - /* Merge with previous? */ - if (pcpu_size[i-1] >= 0) { - pcpu_size[i-1] += pcpu_size[i]; - pcpu_num_used--; - memmove(&pcpu_size[i], &pcpu_size[i+1], - (pcpu_num_used - i) * sizeof(pcpu_size[0])); - i--; - } - /* Merge with next? */ - if (i+1 < pcpu_num_used && pcpu_size[i+1] >= 0) { - pcpu_size[i] += pcpu_size[i+1]; - pcpu_num_used--; - memmove(&pcpu_size[i+1], &pcpu_size[i+2], - (pcpu_num_used - (i+1)) * sizeof(pcpu_size[0])); - } -} - -static int percpu_modinit(void) -{ - pcpu_num_used = 2; - pcpu_num_allocated = 2; - pcpu_size = kmalloc(sizeof(pcpu_size[0]) * pcpu_num_allocated, - GFP_KERNEL); - /* Static in-kernel percpu data (used). */ - pcpu_size[0] = -(__per_cpu_end-__per_cpu_start); - /* Free room. */ - pcpu_size[1] = PERCPU_ENOUGH_ROOM + pcpu_size[0]; - if (pcpu_size[1] < 0) { - printk(KERN_ERR "No per-cpu room for modules.\n"); - pcpu_num_used = 1; - } - - return 0; -} -__initcall(percpu_modinit); - -#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */ - static unsigned int find_pcpusec(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, const char *secstrings) diff --git a/mm/Makefile b/mm/Makefile index ebf849042ed3..82131d0f8d85 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -34,11 +34,7 @@ obj-$(CONFIG_FAILSLAB) += failslab.o obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o obj-$(CONFIG_FS_XIP) += filemap_xip.o obj-$(CONFIG_MIGRATION) += migrate.o -ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA obj-$(CONFIG_SMP) += percpu.o -else -obj-$(CONFIG_SMP) += allocpercpu.o -endif obj-$(CONFIG_QUICKLIST) += quicklist.o obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c deleted file mode 100644 index df34ceae0c67..000000000000 --- a/mm/allocpercpu.c +++ /dev/null @@ -1,177 +0,0 @@ -/* - * linux/mm/allocpercpu.c - * - * Separated from slab.c August 11, 2006 Christoph Lameter - */ -#include -#include -#include -#include - -#ifndef cache_line_size -#define cache_line_size() L1_CACHE_BYTES -#endif - -/** - * percpu_depopulate - depopulate per-cpu data for given cpu - * @__pdata: per-cpu data to depopulate - * @cpu: depopulate per-cpu data for this cpu - * - * Depopulating per-cpu data for a cpu going offline would be a typical - * use case. You need to register a cpu hotplug handler for that purpose. 
- */ -static void percpu_depopulate(void *__pdata, int cpu) -{ - struct percpu_data *pdata = __percpu_disguise(__pdata); - - kfree(pdata->ptrs[cpu]); - pdata->ptrs[cpu] = NULL; -} - -/** - * percpu_depopulate_mask - depopulate per-cpu data for some cpu's - * @__pdata: per-cpu data to depopulate - * @mask: depopulate per-cpu data for cpu's selected through mask bits - */ -static void __percpu_depopulate_mask(void *__pdata, const cpumask_t *mask) -{ - int cpu; - for_each_cpu_mask_nr(cpu, *mask) - percpu_depopulate(__pdata, cpu); -} - -#define percpu_depopulate_mask(__pdata, mask) \ - __percpu_depopulate_mask((__pdata), &(mask)) - -/** - * percpu_populate - populate per-cpu data for given cpu - * @__pdata: per-cpu data to populate further - * @size: size of per-cpu object - * @gfp: may sleep or not etc. - * @cpu: populate per-data for this cpu - * - * Populating per-cpu data for a cpu coming online would be a typical - * use case. You need to register a cpu hotplug handler for that purpose. - * Per-cpu object is populated with zeroed buffer. - */ -static void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu) -{ - struct percpu_data *pdata = __percpu_disguise(__pdata); - int node = cpu_to_node(cpu); - - /* - * We should make sure each CPU gets private memory. - */ - size = roundup(size, cache_line_size()); - - BUG_ON(pdata->ptrs[cpu]); - if (node_online(node)) - pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node); - else - pdata->ptrs[cpu] = kzalloc(size, gfp); - return pdata->ptrs[cpu]; -} - -/** - * percpu_populate_mask - populate per-cpu data for more cpu's - * @__pdata: per-cpu data to populate further - * @size: size of per-cpu object - * @gfp: may sleep or not etc. - * @mask: populate per-cpu data for cpu's selected through mask bits - * - * Per-cpu objects are populated with zeroed buffers. - */ -static int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp, - cpumask_t *mask) -{ - cpumask_t populated; - int cpu; - - cpus_clear(populated); - for_each_cpu_mask_nr(cpu, *mask) - if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) { - __percpu_depopulate_mask(__pdata, &populated); - return -ENOMEM; - } else - cpu_set(cpu, populated); - return 0; -} - -#define percpu_populate_mask(__pdata, size, gfp, mask) \ - __percpu_populate_mask((__pdata), (size), (gfp), &(mask)) - -/** - * alloc_percpu - initial setup of per-cpu data - * @size: size of per-cpu object - * @align: alignment - * - * Allocate dynamic percpu area. Percpu objects are populated with - * zeroed buffers. - */ -void *__alloc_percpu(size_t size, size_t align) -{ - /* - * We allocate whole cache lines to avoid false sharing - */ - size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size()); - void *pdata = kzalloc(sz, GFP_KERNEL); - void *__pdata = __percpu_disguise(pdata); - - /* - * Can't easily make larger alignment work with kmalloc. WARN - * on it. Larger alignment should only be used for module - * percpu sections on SMP for which this path isn't used. - */ - WARN_ON_ONCE(align > SMP_CACHE_BYTES); - - if (unlikely(!pdata)) - return NULL; - if (likely(!__percpu_populate_mask(__pdata, size, GFP_KERNEL, - &cpu_possible_map))) - return __pdata; - kfree(pdata); - return NULL; -} -EXPORT_SYMBOL_GPL(__alloc_percpu); - -/** - * free_percpu - final cleanup of per-cpu data - * @__pdata: object to clean up - * - * We simply clean up any per-cpu object left. No need for the client to - * track and specify through a bis mask which per-cpu objects are to free. 
- */ -void free_percpu(void *__pdata) -{ - if (unlikely(!__pdata)) - return; - __percpu_depopulate_mask(__pdata, cpu_possible_mask); - kfree(__percpu_disguise(__pdata)); -} -EXPORT_SYMBOL_GPL(free_percpu); - -/* - * Generic percpu area setup. - */ -#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; - -EXPORT_SYMBOL(__per_cpu_offset); - -void __init setup_per_cpu_areas(void) -{ - unsigned long size, i; - char *ptr; - unsigned long nr_possible_cpus = num_possible_cpus(); - - /* Copy section for each CPU (we discard the original) */ - size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE); - ptr = alloc_bootmem_pages(size * nr_possible_cpus); - - for_each_possible_cpu(i) { - __per_cpu_offset[i] = ptr - __per_cpu_start; - memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); - ptr += size; - } -} -#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ diff --git a/mm/percpu.c b/mm/percpu.c index 4a048abad043..e4e08b87b77e 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -46,8 +46,6 @@ * * To use this allocator, arch code should do the followings. * - * - drop CONFIG_HAVE_LEGACY_PER_CPU_AREA - * * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate * regular address to percpu pointer and back if they need to be * different from the default -- cgit v1.2.3 From 7340a0b15280c9d902c7dd0608b8e751b5a7c403 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Sat, 3 Oct 2009 19:48:22 +0900 Subject: this_cpu: Introduce this_cpu_ptr() and generic this_cpu_* operations This patch introduces two things: First this_cpu_ptr and then per cpu atomic operations. this_cpu_ptr ------------ A common operation when dealing with cpu data is to get the instance of the cpu data associated with the currently executing processor. This can be optimized by this_cpu_ptr(xx) = per_cpu_ptr(xx, smp_processor_id). The problem with per_cpu_ptr(x, smp_processor_id) is that it requires an array lookup to find the offset for the cpu. Processors typically have the offset for the current cpu area in some kind of (arch dependent) efficiently accessible register or memory location. We can use that instead of doing the array lookup to speed up the determination of the address of the percpu variable. This is particularly significant because these lookups occur in performance critical paths of the core kernel. this_cpu_ptr() can avoid memory accesses and this_cpu_ptr comes in two flavors. The preemption context matters since we are referring the the currently executing processor. In many cases we must insure that the processor does not change while a code segment is executed. __this_cpu_ptr -> Do not check for preemption context this_cpu_ptr -> Check preemption context The parameter to these operations is a per cpu pointer. This can be the address of a statically defined per cpu variable (&per_cpu_var(xxx)) or the address of a per cpu variable allocated with the per cpu allocator. per cpu atomic operations: this_cpu_*(var, val) ----------------------------------------------- this_cpu_* operations (like this_cpu_add(struct->y, value) operate on abitrary scalars that are members of structures allocated with the new per cpu allocator. They can also operate on static per_cpu variables if they are passed to per_cpu_var() (See patch to use this_cpu_* operations for vm statistics). These operations are guaranteed to be atomic vs preemption when modifying the scalar. The calculation of the per cpu offset is also guaranteed to be atomic at the same time. 
This means that a this_cpu_* operation can be safely used to modify a per cpu variable in a context where interrupts are enabled and preemption is allowed. Many architectures can perform such a per cpu atomic operation with a single instruction. Note that the atomicity here is different from regular atomic operations. Atomicity is only guaranteed for data accessed from the currently executing processor. Modifications from other processors are still possible. There must be other guarantees that the per cpu data is not modified from another processor when using these instructions. The per cpu atomicity is created by the fact that the processor either executes an instruction or not. Embedded in the instruction is the relocation of the per cpu address to the area reserved for the current processor and the RMW action. Therefore interrupts or preemption cannot occur in the midst of this processing. Generic fallback functions are used if an arch does not define optimized this_cpu operations. The functions also come in the two flavors used for this_cpu_ptr(). The first parameter is a scalar that is a member of a structure allocated through allocpercpu or a per cpu variable (use per_cpu_var(xxx)). The operations are similar to what percpu_add() and friends do.

this_cpu_read(scalar)
this_cpu_write(scalar, value)
this_cpu_add(scalar, value)
this_cpu_sub(scalar, value)
this_cpu_inc(scalar)
this_cpu_dec(scalar)
this_cpu_and(scalar, value)
this_cpu_or(scalar, value)
this_cpu_xor(scalar, value)

Arch code can override the generic functions and provide optimized atomic per cpu operations. These atomic operations must provide both the relocation (x86 does it through a segment override) and the operation on the data in a single instruction. Otherwise preempt needs to be disabled and there is no gain from providing arch implementations. A third variant is provided prefixed by irqsafe_. These variants are safe against hardware interrupts on the *same* processor (all per cpu atomic primitives are *always* *only* providing safety for code running on the *same* processor!). The increment needs to be implemented by the hardware in such a way that it is a single RMW instruction that is either processed before or after an interrupt.
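A minimal usage sketch (hypothetical structure and names, shown only to illustrate the generic operations on data obtained from the per cpu allocator):

	struct demo_stats {
		unsigned long packets;
		unsigned long bytes;
	};

	/* assumed to be set up elsewhere with alloc_percpu(struct demo_stats) */
	static struct demo_stats *demo_stats;

	static void demo_account(unsigned int len)
	{
		/* relocation to this cpu's copy and the RMW happen as one
		 * preempt safe step, no get_cpu()/put_cpu() pair needed */
		this_cpu_inc(demo_stats->packets);
		this_cpu_add(demo_stats->bytes, len);
	}

	static void demo_account_nopreempt(unsigned int len)
	{
		/* __this_cpu_* skips the preemption handling; the caller is
		 * assumed to already run in a non-preemptible context */
		__this_cpu_add(demo_stats->bytes, len);
	}

	static void demo_account_irq(void)
	{
		/* safe against an interrupt handler on the local cpu that
		 * updates the same counter */
		irqsafe_cpu_inc(demo_stats->packets);
	}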
cc: David Howells cc: Ingo Molnar cc: Rusty Russell cc: Eric Dumazet Signed-off-by: Christoph Lameter Signed-off-by: Tejun Heo --- include/asm-generic/percpu.h | 5 + include/linux/percpu.h | 400 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 405 insertions(+) (limited to 'include') diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h index 90079c373f1c..8087b90d4673 100644 --- a/include/asm-generic/percpu.h +++ b/include/asm-generic/percpu.h @@ -56,6 +56,9 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; #define __raw_get_cpu_var(var) \ (*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset)) +#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset) +#define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset) + #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA extern void setup_per_cpu_areas(void); @@ -66,6 +69,8 @@ extern void setup_per_cpu_areas(void); #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var))) #define __get_cpu_var(var) per_cpu_var(var) #define __raw_get_cpu_var(var) per_cpu_var(var) +#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0) +#define __this_cpu_ptr(ptr) this_cpu_ptr(ptr) #endif /* SMP */ diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 5baf5b8788fb..3d9ba92b104f 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -219,4 +219,404 @@ do { \ # define percpu_xor(var, val) __percpu_generic_to_op(var, (val), ^=) #endif +/* + * Branching function to split up a function into a set of functions that + * are called for different scalar sizes of the objects handled. + */ + +extern void __bad_size_call_parameter(void); + +#define __size_call_return(stem, variable) \ +({ typeof(variable) ret__; \ + switch(sizeof(variable)) { \ + case 1: ret__ = stem##1(variable);break; \ + case 2: ret__ = stem##2(variable);break; \ + case 4: ret__ = stem##4(variable);break; \ + case 8: ret__ = stem##8(variable);break; \ + default: \ + __bad_size_call_parameter();break; \ + } \ + ret__; \ +}) + +#define __size_call(stem, variable, ...) \ +do { \ + switch(sizeof(variable)) { \ + case 1: stem##1(variable, __VA_ARGS__);break; \ + case 2: stem##2(variable, __VA_ARGS__);break; \ + case 4: stem##4(variable, __VA_ARGS__);break; \ + case 8: stem##8(variable, __VA_ARGS__);break; \ + default: \ + __bad_size_call_parameter();break; \ + } \ +} while (0) + +/* + * Optimized manipulation for memory allocated through the per cpu + * allocator or for addresses of per cpu variables (can be determined + * using per_cpu_var(xx). + * + * These operation guarantee exclusivity of access for other operations + * on the *same* processor. The assumption is that per cpu data is only + * accessed by a single processor instance (the current one). + * + * The first group is used for accesses that must be done in a + * preemption safe way since we know that the context is not preempt + * safe. Interrupts may occur. If the interrupt modifies the variable + * too then RMW actions will not be reliable. + * + * The arch code can provide optimized functions in two ways: + * + * 1. Override the function completely. F.e. define this_cpu_add(). + * The arch must then ensure that the various scalar format passed + * are handled correctly. + * + * 2. Provide functions for certain scalar sizes. F.e. provide + * this_cpu_add_2() to provide per cpu atomic operations for 2 byte + * sized RMW actions. If arch code does not provide operations for + * a scalar size then the fallback in the generic code will be + * used. 
+ */ + +#define _this_cpu_generic_read(pcp) \ +({ typeof(pcp) ret__; \ + preempt_disable(); \ + ret__ = *this_cpu_ptr(&(pcp)); \ + preempt_enable(); \ + ret__; \ +}) + +#ifndef this_cpu_read +# ifndef this_cpu_read_1 +# define this_cpu_read_1(pcp) _this_cpu_generic_read(pcp) +# endif +# ifndef this_cpu_read_2 +# define this_cpu_read_2(pcp) _this_cpu_generic_read(pcp) +# endif +# ifndef this_cpu_read_4 +# define this_cpu_read_4(pcp) _this_cpu_generic_read(pcp) +# endif +# ifndef this_cpu_read_8 +# define this_cpu_read_8(pcp) _this_cpu_generic_read(pcp) +# endif +# define this_cpu_read(pcp) __size_call_return(this_cpu_read_, (pcp)) +#endif + +#define _this_cpu_generic_to_op(pcp, val, op) \ +do { \ + preempt_disable(); \ + *__this_cpu_ptr(&pcp) op val; \ + preempt_enable(); \ +} while (0) + +#ifndef this_cpu_write +# ifndef this_cpu_write_1 +# define this_cpu_write_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), =) +# endif +# ifndef this_cpu_write_2 +# define this_cpu_write_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), =) +# endif +# ifndef this_cpu_write_4 +# define this_cpu_write_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), =) +# endif +# ifndef this_cpu_write_8 +# define this_cpu_write_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), =) +# endif +# define this_cpu_write(pcp, val) __size_call(this_cpu_write_, (pcp), (val)) +#endif + +#ifndef this_cpu_add +# ifndef this_cpu_add_1 +# define this_cpu_add_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=) +# endif +# ifndef this_cpu_add_2 +# define this_cpu_add_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=) +# endif +# ifndef this_cpu_add_4 +# define this_cpu_add_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=) +# endif +# ifndef this_cpu_add_8 +# define this_cpu_add_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=) +# endif +# define this_cpu_add(pcp, val) __size_call(this_cpu_add_, (pcp), (val)) +#endif + +#ifndef this_cpu_sub +# define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(val)) +#endif + +#ifndef this_cpu_inc +# define this_cpu_inc(pcp) this_cpu_add((pcp), 1) +#endif + +#ifndef this_cpu_dec +# define this_cpu_dec(pcp) this_cpu_sub((pcp), 1) +#endif + +#ifndef this_cpu_and +# ifndef this_cpu_and_1 +# define this_cpu_and_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=) +# endif +# ifndef this_cpu_and_2 +# define this_cpu_and_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=) +# endif +# ifndef this_cpu_and_4 +# define this_cpu_and_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=) +# endif +# ifndef this_cpu_and_8 +# define this_cpu_and_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=) +# endif +# define this_cpu_and(pcp, val) __size_call(this_cpu_and_, (pcp), (val)) +#endif + +#ifndef this_cpu_or +# ifndef this_cpu_or_1 +# define this_cpu_or_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=) +# endif +# ifndef this_cpu_or_2 +# define this_cpu_or_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=) +# endif +# ifndef this_cpu_or_4 +# define this_cpu_or_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=) +# endif +# ifndef this_cpu_or_8 +# define this_cpu_or_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=) +# endif +# define this_cpu_or(pcp, val) __size_call(this_cpu_or_, (pcp), (val)) +#endif + +#ifndef this_cpu_xor +# ifndef this_cpu_xor_1 +# define this_cpu_xor_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=) +# endif +# ifndef this_cpu_xor_2 +# define this_cpu_xor_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=) +# endif +# ifndef this_cpu_xor_4 +# define 
this_cpu_xor_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=) +# endif +# ifndef this_cpu_xor_8 +# define this_cpu_xor_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=) +# endif +# define this_cpu_xor(pcp, val) __size_call(this_cpu_or_, (pcp), (val)) +#endif + +/* + * Generic percpu operations that do not require preemption handling. + * Either we do not care about races or the caller has the + * responsibility of handling preemptions issues. Arch code can still + * override these instructions since the arch per cpu code may be more + * efficient and may actually get race freeness for free (that is the + * case for x86 for example). + * + * If there is no other protection through preempt disable and/or + * disabling interupts then one of these RMW operations can show unexpected + * behavior because the execution thread was rescheduled on another processor + * or an interrupt occurred and the same percpu variable was modified from + * the interrupt context. + */ +#ifndef __this_cpu_read +# ifndef __this_cpu_read_1 +# define __this_cpu_read_1(pcp) (*__this_cpu_ptr(&(pcp))) +# endif +# ifndef __this_cpu_read_2 +# define __this_cpu_read_2(pcp) (*__this_cpu_ptr(&(pcp))) +# endif +# ifndef __this_cpu_read_4 +# define __this_cpu_read_4(pcp) (*__this_cpu_ptr(&(pcp))) +# endif +# ifndef __this_cpu_read_8 +# define __this_cpu_read_8(pcp) (*__this_cpu_ptr(&(pcp))) +# endif +# define __this_cpu_read(pcp) __size_call_return(__this_cpu_read_, (pcp)) +#endif + +#define __this_cpu_generic_to_op(pcp, val, op) \ +do { \ + *__this_cpu_ptr(&(pcp)) op val; \ +} while (0) + +#ifndef __this_cpu_write +# ifndef __this_cpu_write_1 +# define __this_cpu_write_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), =) +# endif +# ifndef __this_cpu_write_2 +# define __this_cpu_write_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), =) +# endif +# ifndef __this_cpu_write_4 +# define __this_cpu_write_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), =) +# endif +# ifndef __this_cpu_write_8 +# define __this_cpu_write_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), =) +# endif +# define __this_cpu_write(pcp, val) __size_call(__this_cpu_write_, (pcp), (val)) +#endif + +#ifndef __this_cpu_add +# ifndef __this_cpu_add_1 +# define __this_cpu_add_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=) +# endif +# ifndef __this_cpu_add_2 +# define __this_cpu_add_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=) +# endif +# ifndef __this_cpu_add_4 +# define __this_cpu_add_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=) +# endif +# ifndef __this_cpu_add_8 +# define __this_cpu_add_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=) +# endif +# define __this_cpu_add(pcp, val) __size_call(__this_cpu_add_, (pcp), (val)) +#endif + +#ifndef __this_cpu_sub +# define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(val)) +#endif + +#ifndef __this_cpu_inc +# define __this_cpu_inc(pcp) __this_cpu_add((pcp), 1) +#endif + +#ifndef __this_cpu_dec +# define __this_cpu_dec(pcp) __this_cpu_sub((pcp), 1) +#endif + +#ifndef __this_cpu_and +# ifndef __this_cpu_and_1 +# define __this_cpu_and_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=) +# endif +# ifndef __this_cpu_and_2 +# define __this_cpu_and_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=) +# endif +# ifndef __this_cpu_and_4 +# define __this_cpu_and_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=) +# endif +# ifndef __this_cpu_and_8 +# define __this_cpu_and_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=) +# endif +# define __this_cpu_and(pcp, 
val) __size_call(__this_cpu_and_, (pcp), (val)) +#endif + +#ifndef __this_cpu_or +# ifndef __this_cpu_or_1 +# define __this_cpu_or_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=) +# endif +# ifndef __this_cpu_or_2 +# define __this_cpu_or_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=) +# endif +# ifndef __this_cpu_or_4 +# define __this_cpu_or_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=) +# endif +# ifndef __this_cpu_or_8 +# define __this_cpu_or_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=) +# endif +# define __this_cpu_or(pcp, val) __size_call(__this_cpu_or_, (pcp), (val)) +#endif + +#ifndef __this_cpu_xor +# ifndef __this_cpu_xor_1 +# define __this_cpu_xor_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=) +# endif +# ifndef __this_cpu_xor_2 +# define __this_cpu_xor_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=) +# endif +# ifndef __this_cpu_xor_4 +# define __this_cpu_xor_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=) +# endif +# ifndef __this_cpu_xor_8 +# define __this_cpu_xor_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=) +# endif +# define __this_cpu_xor(pcp, val) __size_call(__this_cpu_xor_, (pcp), (val)) +#endif + +/* + * IRQ safe versions of the per cpu RMW operations. Note that these operations + * are *not* safe against modification of the same variable from another + * processors (which one gets when using regular atomic operations) + . They are guaranteed to be atomic vs. local interrupts and + * preemption only. + */ +#define irqsafe_cpu_generic_to_op(pcp, val, op) \ +do { \ + unsigned long flags; \ + local_irq_save(flags); \ + *__this_cpu_ptr(&(pcp)) op val; \ + local_irq_restore(flags); \ +} while (0) + +#ifndef irqsafe_cpu_add +# ifndef irqsafe_cpu_add_1 +# define irqsafe_cpu_add_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=) +# endif +# ifndef irqsafe_cpu_add_2 +# define irqsafe_cpu_add_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=) +# endif +# ifndef irqsafe_cpu_add_4 +# define irqsafe_cpu_add_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=) +# endif +# ifndef irqsafe_cpu_add_8 +# define irqsafe_cpu_add_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=) +# endif +# define irqsafe_cpu_add(pcp, val) __size_call(irqsafe_cpu_add_, (pcp), (val)) +#endif + +#ifndef irqsafe_cpu_sub +# define irqsafe_cpu_sub(pcp, val) irqsafe_cpu_add((pcp), -(val)) +#endif + +#ifndef irqsafe_cpu_inc +# define irqsafe_cpu_inc(pcp) irqsafe_cpu_add((pcp), 1) +#endif + +#ifndef irqsafe_cpu_dec +# define irqsafe_cpu_dec(pcp) irqsafe_cpu_sub((pcp), 1) +#endif + +#ifndef irqsafe_cpu_and +# ifndef irqsafe_cpu_and_1 +# define irqsafe_cpu_and_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=) +# endif +# ifndef irqsafe_cpu_and_2 +# define irqsafe_cpu_and_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=) +# endif +# ifndef irqsafe_cpu_and_4 +# define irqsafe_cpu_and_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=) +# endif +# ifndef irqsafe_cpu_and_8 +# define irqsafe_cpu_and_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=) +# endif +# define irqsafe_cpu_and(pcp, val) __size_call(irqsafe_cpu_and_, (val)) +#endif + +#ifndef irqsafe_cpu_or +# ifndef irqsafe_cpu_or_1 +# define irqsafe_cpu_or_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=) +# endif +# ifndef irqsafe_cpu_or_2 +# define irqsafe_cpu_or_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=) +# endif +# ifndef irqsafe_cpu_or_4 +# define irqsafe_cpu_or_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=) +# endif +# ifndef 
irqsafe_cpu_or_8 +# define irqsafe_cpu_or_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=) +# endif +# define irqsafe_cpu_or(pcp, val) __size_call(irqsafe_cpu_or_, (val)) +#endif + +#ifndef irqsafe_cpu_xor +# ifndef irqsafe_cpu_xor_1 +# define irqsafe_cpu_xor_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=) +# endif +# ifndef irqsafe_cpu_xor_2 +# define irqsafe_cpu_xor_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=) +# endif +# ifndef irqsafe_cpu_xor_4 +# define irqsafe_cpu_xor_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=) +# endif +# ifndef irqsafe_cpu_xor_8 +# define irqsafe_cpu_xor_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=) +# endif +# define irqsafe_cpu_xor(pcp, val) __size_call(irqsafe_cpu_xor_, (val)) +#endif + #endif /* __LINUX_PERCPU_H */ -- cgit v1.2.3 From 4eb41d10c7ab419a1408bed2e63a9c0fdfa38844 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Sat, 3 Oct 2009 19:48:22 +0900 Subject: this_cpu: Use this_cpu operations for SNMP statistics SNMP statistic macros can be signficantly simplified. This will also reduce code size if the arch supports these operations in hardware. Acked-by: Tejun Heo Signed-off-by: Christoph Lameter Signed-off-by: Tejun Heo --- include/net/snmp.h | 50 ++++++++++++++++++-------------------------------- 1 file changed, 18 insertions(+), 32 deletions(-) (limited to 'include') diff --git a/include/net/snmp.h b/include/net/snmp.h index 8c842e06bec8..f0d756f2ac99 100644 --- a/include/net/snmp.h +++ b/include/net/snmp.h @@ -136,45 +136,31 @@ struct linux_xfrm_mib { #define SNMP_STAT_BHPTR(name) (name[0]) #define SNMP_STAT_USRPTR(name) (name[1]) -#define SNMP_INC_STATS_BH(mib, field) \ - (per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field]++) -#define SNMP_INC_STATS_USER(mib, field) \ - do { \ - per_cpu_ptr(mib[1], get_cpu())->mibs[field]++; \ - put_cpu(); \ - } while (0) -#define SNMP_INC_STATS(mib, field) \ - do { \ - per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field]++; \ - put_cpu(); \ - } while (0) -#define SNMP_DEC_STATS(mib, field) \ - do { \ - per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field]--; \ - put_cpu(); \ - } while (0) -#define SNMP_ADD_STATS(mib, field, addend) \ - do { \ - per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field] += addend; \ - put_cpu(); \ - } while (0) -#define SNMP_ADD_STATS_BH(mib, field, addend) \ - (per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field] += addend) -#define SNMP_ADD_STATS_USER(mib, field, addend) \ - do { \ - per_cpu_ptr(mib[1], get_cpu())->mibs[field] += addend; \ - put_cpu(); \ - } while (0) +#define SNMP_INC_STATS_BH(mib, field) \ + __this_cpu_inc(mib[0]->mibs[field]) +#define SNMP_INC_STATS_USER(mib, field) \ + this_cpu_inc(mib[1]->mibs[field]) +#define SNMP_INC_STATS(mib, field) \ + this_cpu_inc(mib[!in_softirq()]->mibs[field]) +#define SNMP_DEC_STATS(mib, field) \ + this_cpu_dec(mib[!in_softirq()]->mibs[field]) +#define SNMP_ADD_STATS_BH(mib, field, addend) \ + __this_cpu_add(mib[0]->mibs[field], addend) +#define SNMP_ADD_STATS_USER(mib, field, addend) \ + this_cpu_add(mib[1]->mibs[field], addend) #define SNMP_UPD_PO_STATS(mib, basefield, addend) \ do { \ - __typeof__(mib[0]) ptr = per_cpu_ptr(mib[!in_softirq()], get_cpu());\ + __typeof__(mib[0]) ptr; \ + preempt_disable(); \ + ptr = this_cpu_ptr((mib)[!in_softirq()]); \ ptr->mibs[basefield##PKTS]++; \ ptr->mibs[basefield##OCTETS] += addend;\ - put_cpu(); \ + preempt_enable(); \ } while (0) #define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \ do { \ - __typeof__(mib[0]) ptr = 
per_cpu_ptr(mib[!in_softirq()], raw_smp_processor_id());\ + __typeof__(mib[0]) ptr = \ + __this_cpu_ptr((mib)[!in_softirq()]); \ ptr->mibs[basefield##PKTS]++; \ ptr->mibs[basefield##OCTETS] += addend;\ } while (0) -- cgit v1.2.3 From 4ea7334b6de818b0123fa4be32af4cb8ac65174c Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Sat, 3 Oct 2009 19:48:22 +0900 Subject: this_cpu: Use this_cpu ops for network statistics Acked-by: Tejun Heo Acked-by: David Miller Signed-off-by: Christoph Lameter Signed-off-by: Tejun Heo --- include/net/neighbour.h | 7 +------ include/net/netfilter/nf_conntrack.h | 4 ++-- 2 files changed, 3 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 3817fda82a80..f28403ff7648 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -90,12 +90,7 @@ struct neigh_statistics unsigned long unres_discards; /* number of unresolved drops */ }; -#define NEIGH_CACHE_STAT_INC(tbl, field) \ - do { \ - preempt_disable(); \ - (per_cpu_ptr((tbl)->stats, smp_processor_id())->field)++; \ - preempt_enable(); \ - } while (0) +#define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field) struct neighbour { diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index cbdd6284996d..dde549779e42 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -295,11 +295,11 @@ extern unsigned int nf_conntrack_htable_size; extern unsigned int nf_conntrack_max; #define NF_CT_STAT_INC(net, count) \ - (per_cpu_ptr((net)->ct.stat, raw_smp_processor_id())->count++) + __this_cpu_inc((net)->ct.stat->count) #define NF_CT_STAT_INC_ATOMIC(net, count) \ do { \ local_bh_disable(); \ - per_cpu_ptr((net)->ct.stat, raw_smp_processor_id())->count++; \ + __this_cpu_inc((net)->ct.stat->count); \ local_bh_enable(); \ } while (0) -- cgit v1.2.3 From 4dac3e98840f11bb2d8d52fd375150c7c1912117 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Sat, 3 Oct 2009 19:48:23 +0900 Subject: this_cpu: Use this_cpu ops for VM statistics Using per cpu atomics for the vm statistics reduces their overhead. And in the case of x86 we are guaranteed that they will never race even in the lax form used for vm statistics. 
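For a statically defined per cpu counter the conversion looks roughly like this (hypothetical counter name, shown only to illustrate the pattern this patch applies to vm_event_states):

	DEFINE_PER_CPU(unsigned long, demo_events);

	/* old style: take and release the cpu explicitly */
	static inline void count_demo_event_old(void)
	{
		get_cpu_var(demo_events)++;
		put_cpu_var(demo_events);
	}

	/* new style: a single preempt safe per cpu increment */
	static inline void count_demo_event(void)
	{
		this_cpu_inc(per_cpu_var(demo_events));
	}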
Acked-by: Tejun Heo Signed-off-by: Christoph Lameter Signed-off-by: Tejun Heo --- include/linux/vmstat.h | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 2d0f222388a8..d85889710f9b 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -76,24 +76,22 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states); static inline void __count_vm_event(enum vm_event_item item) { - __get_cpu_var(vm_event_states).event[item]++; + __this_cpu_inc(per_cpu_var(vm_event_states).event[item]); } static inline void count_vm_event(enum vm_event_item item) { - get_cpu_var(vm_event_states).event[item]++; - put_cpu(); + this_cpu_inc(per_cpu_var(vm_event_states).event[item]); } static inline void __count_vm_events(enum vm_event_item item, long delta) { - __get_cpu_var(vm_event_states).event[item] += delta; + __this_cpu_add(per_cpu_var(vm_event_states).event[item], delta); } static inline void count_vm_events(enum vm_event_item item, long delta) { - get_cpu_var(vm_event_states).event[item] += delta; - put_cpu(); + this_cpu_add(per_cpu_var(vm_event_states).event[item], delta); } extern void all_vm_events(unsigned long *); -- cgit v1.2.3 From ee34b32d8c2950f66038c8975747ef9aec855289 Mon Sep 17 00:00:00 2001 From: Suresh Siddha Date: Fri, 2 Oct 2009 11:01:21 -0700 Subject: dmar: support for parsing Remapping Hardware Static Affinity structure Add support for parsing Remapping Hardware Static Affinity (RHSA) structure. This enables identifying the association between remapping hardware units and the corresponding proximity domain. This enables to allocate transalation structures closer to the remapping hardware unit. Signed-off-by: Suresh Siddha Signed-off-by: David Woodhouse --- drivers/pci/dmar.c | 24 +++++++++++++++++++++++- include/linux/intel-iommu.h | 1 + 2 files changed, 24 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c index 22b02c6df854..577956566a0b 100644 --- a/drivers/pci/dmar.c +++ b/drivers/pci/dmar.c @@ -348,6 +348,26 @@ found: } #endif +static int __init +dmar_parse_one_rhsa(struct acpi_dmar_header *header) +{ + struct acpi_dmar_rhsa *rhsa; + struct dmar_drhd_unit *drhd; + + rhsa = (struct acpi_dmar_rhsa *)header; + for_each_drhd_unit(drhd) + if (drhd->reg_base_addr == rhsa->base_address) { + int node = acpi_map_pxm_to_node(rhsa->proximity_domain); + + if (!node_online(node)) + node = -1; + drhd->iommu->node = node; + return 0; + } + + return -ENODEV; +} + static void __init dmar_table_print_dmar_entry(struct acpi_dmar_header *header) { @@ -467,7 +487,7 @@ parse_dmar_table(void) #endif break; case ACPI_DMAR_HARDWARE_AFFINITY: - /* We don't do anything with RHSA (yet?) 
*/ + ret = dmar_parse_one_rhsa(entry_header); break; default: printk(KERN_WARNING PREFIX @@ -677,6 +697,8 @@ int alloc_iommu(struct dmar_drhd_unit *drhd) iommu->agaw = agaw; iommu->msagaw = msagaw; + iommu->node = -1; + /* the registers might be more than one page */ map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap), cap_max_fault_reg_offset(iommu->cap)); diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 4f0a72a9740c..9310c699a37d 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -332,6 +332,7 @@ struct intel_iommu { #ifdef CONFIG_INTR_REMAP struct ir_table *ir_table; /* Interrupt remapping info */ #endif + int node; }; static inline void __iommu_flush_cache( -- cgit v1.2.3 From c182be37ed7cb04c344501b88b8fdb747016e6cf Mon Sep 17 00:00:00 2001 From: Kristian Høgsberg Date: Sat, 12 Sep 2009 04:33:34 +1000 Subject: drm: Add async event synchronization for drmWaitVblank MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch adds a new flag to the drmWaitVblank ioctl, which asks the drm to return immediately and notify userspace when the specified vblank sequence happens by sending an event back on the drm fd. The event mechanism works with the other flags supported by the ioctls, specifically, the vblank sequence can be specified relatively or absolutely, and works for primary and seconday crtc. The signal field of the vblank request is used to provide user data, which will be sent back to user space in the vblank event. Signed-off-by: Kristian Høgsberg Reviewed-by: Jesse Barnes Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_fops.c | 98 ++++++++++++++++++++++++++++++++++++++++- drivers/gpu/drm/drm_irq.c | 95 +++++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/drm_stub.c | 2 + drivers/gpu/drm/i915/i915_drv.c | 1 + include/drm/drm.h | 33 +++++++++++++- include/drm/drmP.h | 26 +++++++++++ 6 files changed, 251 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 251bc0e3b5ec..8ac7fbf6b2b7 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -257,6 +257,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp, INIT_LIST_HEAD(&priv->lhead); INIT_LIST_HEAD(&priv->fbs); + INIT_LIST_HEAD(&priv->event_list); + init_waitqueue_head(&priv->event_wait); + priv->event_space = 4096; /* set aside 4k for event buffer */ if (dev->driver->driver_features & DRIVER_GEM) drm_gem_open(dev, priv); @@ -413,6 +416,30 @@ static void drm_master_release(struct drm_device *dev, struct file *filp) } } +static void drm_events_release(struct drm_file *file_priv) +{ + struct drm_device *dev = file_priv->minor->dev; + struct drm_pending_event *e, *et; + struct drm_pending_vblank_event *v, *vt; + unsigned long flags; + + spin_lock_irqsave(&dev->event_lock, flags); + + /* Remove pending flips */ + list_for_each_entry_safe(v, vt, &dev->vblank_event_list, base.link) + if (v->base.file_priv == file_priv) { + list_del(&v->base.link); + drm_vblank_put(dev, v->pipe); + v->base.destroy(&v->base); + } + + /* Remove unconsumed events */ + list_for_each_entry_safe(e, et, &file_priv->event_list, link) + e->destroy(e); + + spin_unlock_irqrestore(&dev->event_lock, flags); +} + /** * Release file. 
* @@ -451,6 +478,8 @@ int drm_release(struct inode *inode, struct file *filp) if (file_priv->minor->master) drm_master_release(dev, filp); + drm_events_release(file_priv); + if (dev->driver->driver_features & DRIVER_GEM) drm_gem_release(dev, file_priv); @@ -544,9 +573,74 @@ int drm_release(struct inode *inode, struct file *filp) } EXPORT_SYMBOL(drm_release); -/** No-op. */ +static bool +drm_dequeue_event(struct drm_file *file_priv, + size_t total, size_t max, struct drm_pending_event **out) +{ + struct drm_device *dev = file_priv->minor->dev; + struct drm_pending_event *e; + unsigned long flags; + bool ret = false; + + spin_lock_irqsave(&dev->event_lock, flags); + + *out = NULL; + if (list_empty(&file_priv->event_list)) + goto out; + e = list_first_entry(&file_priv->event_list, + struct drm_pending_event, link); + if (e->event->length + total > max) + goto out; + + file_priv->event_space += e->event->length; + list_del(&e->link); + *out = e; + ret = true; + +out: + spin_unlock_irqrestore(&dev->event_lock, flags); + return ret; +} + +ssize_t drm_read(struct file *filp, char __user *buffer, + size_t count, loff_t *offset) +{ + struct drm_file *file_priv = filp->private_data; + struct drm_pending_event *e; + size_t total; + ssize_t ret; + + ret = wait_event_interruptible(file_priv->event_wait, + !list_empty(&file_priv->event_list)); + if (ret < 0) + return ret; + + total = 0; + while (drm_dequeue_event(file_priv, total, count, &e)) { + if (copy_to_user(buffer + total, + e->event, e->event->length)) { + total = -EFAULT; + break; + } + + total += e->event->length; + e->destroy(e); + } + + return total; +} +EXPORT_SYMBOL(drm_read); + unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait) { - return 0; + struct drm_file *file_priv = filp->private_data; + unsigned int mask = 0; + + poll_wait(filp, &file_priv->event_wait, wait); + + if (!list_empty(&file_priv->event_list)) + mask |= POLLIN | POLLRDNORM; + + return mask; } EXPORT_SYMBOL(drm_poll); diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index f85aaf21e783..d9af7964f81c 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -523,6 +523,62 @@ out: return ret; } +static int drm_queue_vblank_event(struct drm_device *dev, int pipe, + union drm_wait_vblank *vblwait, + struct drm_file *file_priv) +{ + struct drm_pending_vblank_event *e; + struct timeval now; + unsigned long flags; + unsigned int seq; + + e = kzalloc(sizeof *e, GFP_KERNEL); + if (e == NULL) + return -ENOMEM; + + e->pipe = pipe; + e->event.base.type = DRM_EVENT_VBLANK; + e->event.base.length = sizeof e->event; + e->event.user_data = vblwait->request.signal; + e->base.event = &e->event.base; + e->base.file_priv = file_priv; + e->base.destroy = (void (*) (struct drm_pending_event *)) kfree; + + do_gettimeofday(&now); + spin_lock_irqsave(&dev->event_lock, flags); + + if (file_priv->event_space < sizeof e->event) { + spin_unlock_irqrestore(&dev->event_lock, flags); + kfree(e); + return -ENOMEM; + } + + file_priv->event_space -= sizeof e->event; + seq = drm_vblank_count(dev, pipe); + if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) && + (seq - vblwait->request.sequence) <= (1 << 23)) { + vblwait->request.sequence = seq + 1; + } + + DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n", + vblwait->request.sequence, seq, pipe); + + e->event.sequence = vblwait->request.sequence; + if ((seq - vblwait->request.sequence) <= (1 << 23)) { + e->event.tv_sec = now.tv_sec; + e->event.tv_usec = now.tv_usec; + drm_vblank_put(dev, 
e->pipe); + list_add_tail(&e->base.link, &e->base.file_priv->event_list); + wake_up_interruptible(&e->base.file_priv->event_wait); + } else { + list_add_tail(&e->base.link, &dev->vblank_event_list); + } + + spin_unlock_irqrestore(&dev->event_lock, flags); + + return 0; +} + /** * Wait for VBLANK. * @@ -582,6 +638,9 @@ int drm_wait_vblank(struct drm_device *dev, void *data, goto done; } + if (flags & _DRM_VBLANK_EVENT) + return drm_queue_vblank_event(dev, crtc, vblwait, file_priv); + if ((flags & _DRM_VBLANK_NEXTONMISS) && (seq - vblwait->request.sequence) <= (1<<23)) { vblwait->request.sequence = seq + 1; @@ -614,6 +673,38 @@ done: return ret; } +void drm_handle_vblank_events(struct drm_device *dev, int crtc) +{ + struct drm_pending_vblank_event *e, *t; + struct timeval now; + unsigned long flags; + unsigned int seq; + + do_gettimeofday(&now); + seq = drm_vblank_count(dev, crtc); + + spin_lock_irqsave(&dev->event_lock, flags); + + list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) { + if (e->pipe != crtc) + continue; + if ((seq - e->event.sequence) > (1<<23)) + continue; + + DRM_DEBUG("vblank event on %d, current %d\n", + e->event.sequence, seq); + + e->event.sequence = seq; + e->event.tv_sec = now.tv_sec; + e->event.tv_usec = now.tv_usec; + drm_vblank_put(dev, e->pipe); + list_move_tail(&e->base.link, &e->base.file_priv->event_list); + wake_up_interruptible(&e->base.file_priv->event_wait); + } + + spin_unlock_irqrestore(&dev->event_lock, flags); +} + /** * drm_handle_vblank - handle a vblank event * @dev: DRM device @@ -624,7 +715,11 @@ done: */ void drm_handle_vblank(struct drm_device *dev, int crtc) { + if (!dev->num_crtcs) + return; + atomic_inc(&dev->_vblank_count[crtc]); DRM_WAKEUP(&dev->vbl_queue[crtc]); + drm_handle_vblank_events(dev, crtc); } EXPORT_SYMBOL(drm_handle_vblank); diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index 55bb8a82d612..adb864dfef3e 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c @@ -220,9 +220,11 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, INIT_LIST_HEAD(&dev->ctxlist); INIT_LIST_HEAD(&dev->vmalist); INIT_LIST_HEAD(&dev->maplist); + INIT_LIST_HEAD(&dev->vblank_event_list); spin_lock_init(&dev->count_lock); spin_lock_init(&dev->drw_lock); + spin_lock_init(&dev->event_lock); init_timer(&dev->timer); mutex_init(&dev->struct_mutex); mutex_init(&dev->ctxlist_mutex); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index dbe568c9327b..b81305e33c79 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -206,6 +206,7 @@ static struct drm_driver driver = { .mmap = drm_gem_mmap, .poll = drm_poll, .fasync = drm_fasync, + .read = drm_read, #ifdef CONFIG_COMPAT .compat_ioctl = i915_compat_ioctl, #endif diff --git a/include/drm/drm.h b/include/drm/drm.h index 7cb50bdde46d..fa6d9155873d 100644 --- a/include/drm/drm.h +++ b/include/drm/drm.h @@ -454,6 +454,7 @@ struct drm_irq_busid { enum drm_vblank_seq_type { _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */ _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */ + _DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */ _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */ _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */ _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */ @@ -461,8 +462,8 @@ enum drm_vblank_seq_type { }; #define 
_DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE) -#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \ - _DRM_VBLANK_NEXTONMISS) +#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \ + _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS) struct drm_wait_vblank_request { enum drm_vblank_seq_type type; @@ -698,6 +699,34 @@ struct drm_gem_open { #define DRM_COMMAND_BASE 0x40 #define DRM_COMMAND_END 0xA0 +/** + * Header for events written back to userspace on the drm fd. The + * type defines the type of event, the length specifies the total + * length of the event (including the header), and user_data is + * typically a 64 bit value passed with the ioctl that triggered the + * event. A read on the drm fd will always only return complete + * events, that is, if for example the read buffer is 100 bytes, and + * there are two 64 byte events pending, only one will be returned. + * + * Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and + * up are chipset specific. + */ +struct drm_event { + __u32 type; + __u32 length; +}; + +#define DRM_EVENT_VBLANK 0x01 + +struct drm_event_vblank { + struct drm_event base; + __u64 user_data; + __u32 tv_sec; + __u32 tv_usec; + __u32 sequence; + __u32 reserved; +}; + /* typedef area */ #ifndef __KERNEL__ typedef struct drm_clip_rect drm_clip_rect_t; diff --git a/include/drm/drmP.h b/include/drm/drmP.h index eeefb6369e19..fe52254df60c 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -426,6 +426,14 @@ struct drm_buf_entry { struct drm_freelist freelist; }; +/* Event queued up for userspace to read */ +struct drm_pending_event { + struct drm_event *event; + struct list_head link; + struct drm_file *file_priv; + void (*destroy)(struct drm_pending_event *event); +}; + /** File private data */ struct drm_file { int authenticated; @@ -449,6 +457,10 @@ struct drm_file { struct drm_master *master; /* master this node is currently associated with N.B. not always minor->master */ struct list_head fbs; + + wait_queue_head_t event_wait; + struct list_head event_list; + int event_space; }; /** Wait queue */ @@ -897,6 +909,12 @@ struct drm_minor { struct drm_mode_group mode_group; }; +struct drm_pending_vblank_event { + struct drm_pending_event base; + int pipe; + struct drm_event_vblank event; +}; + /** * DRM device structure. This structure represent a complete card that * may contain multiple heads. @@ -996,6 +1014,12 @@ struct drm_device { u32 max_vblank_count; /**< size of vblank counter register */ + /** + * List of events + */ + struct list_head vblank_event_list; + spinlock_t event_lock; + /*@} */ cycles_t ctx_start; cycles_t lck_start; @@ -1132,6 +1156,8 @@ extern int drm_lastclose(struct drm_device *dev); extern int drm_open(struct inode *inode, struct file *filp); extern int drm_stub_open(struct inode *inode, struct file *filp); extern int drm_fasync(int fd, struct file *filp, int on); +extern ssize_t drm_read(struct file *filp, char __user *buffer, + size_t count, loff_t *offset); extern int drm_release(struct inode *inode, struct file *filp); /* Mapping support (drm_vm.h) */ -- cgit v1.2.3 From dc7a08166f3a5f23e79e839a8a88849bd3397c32 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Tue, 27 Oct 2009 14:41:35 -0400 Subject: nfs: new subdir Documentation/filesystems/nfs We're adding enough nfs documentation that it may as well have its own subdirectory. Acked-by: Randy Dunlap Signed-off-by: J. 
Bruce Fields --- Documentation/filesystems/00-INDEX | 10 +- Documentation/filesystems/Exporting | 147 -------------- Documentation/filesystems/nfs-rdma.txt | 271 ------------------------- Documentation/filesystems/nfs.txt | 98 --------- Documentation/filesystems/nfs/00-INDEX | 12 ++ Documentation/filesystems/nfs/Exporting | 147 ++++++++++++++ Documentation/filesystems/nfs/nfs-rdma.txt | 271 +++++++++++++++++++++++++ Documentation/filesystems/nfs/nfs.txt | 98 +++++++++ Documentation/filesystems/nfs/nfs41-server.txt | 222 ++++++++++++++++++++ Documentation/filesystems/nfs/nfsroot.txt | 270 ++++++++++++++++++++++++ Documentation/filesystems/nfs41-server.txt | 222 -------------------- Documentation/filesystems/nfsroot.txt | 270 ------------------------ Documentation/filesystems/porting | 2 +- Documentation/kernel-parameters.txt | 6 +- fs/cifs/export.c | 2 +- fs/exportfs/expfs.c | 2 +- fs/isofs/export.c | 2 +- fs/nfs/Kconfig | 2 +- include/linux/exportfs.h | 2 +- net/ipv4/Kconfig | 6 +- net/ipv4/ipconfig.c | 2 +- 21 files changed, 1035 insertions(+), 1029 deletions(-) delete mode 100644 Documentation/filesystems/Exporting delete mode 100644 Documentation/filesystems/nfs-rdma.txt delete mode 100644 Documentation/filesystems/nfs.txt create mode 100644 Documentation/filesystems/nfs/00-INDEX create mode 100644 Documentation/filesystems/nfs/Exporting create mode 100644 Documentation/filesystems/nfs/nfs-rdma.txt create mode 100644 Documentation/filesystems/nfs/nfs.txt create mode 100644 Documentation/filesystems/nfs/nfs41-server.txt create mode 100644 Documentation/filesystems/nfs/nfsroot.txt delete mode 100644 Documentation/filesystems/nfs41-server.txt delete mode 100644 Documentation/filesystems/nfsroot.txt (limited to 'include') diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX index f15621ee5599..482151c883a5 100644 --- a/Documentation/filesystems/00-INDEX +++ b/Documentation/filesystems/00-INDEX @@ -1,7 +1,5 @@ 00-INDEX - this file (info on some of the filesystems supported by linux). -Exporting - - explanation of how to make filesystems exportable. Locking - info on locking rules as they pertain to Linux VFS. 9p.txt @@ -66,12 +64,8 @@ mandatory-locking.txt - info on the Linux implementation of Sys V mandatory file locking. ncpfs.txt - info on Novell Netware(tm) filesystem using NCP protocol. -nfs41-server.txt - - info on the Linux server implementation of NFSv4 minor version 1. -nfs-rdma.txt - - how to install and setup the Linux NFS/RDMA client and server software. -nfsroot.txt - - short guide on setting up a diskless box with NFS root filesystem. +nfs/ + - nfs-related documentation. nilfs2.txt - info and mount options for the NILFS2 filesystem. ntfs.txt diff --git a/Documentation/filesystems/Exporting b/Documentation/filesystems/Exporting deleted file mode 100644 index 87019d2b5981..000000000000 --- a/Documentation/filesystems/Exporting +++ /dev/null @@ -1,147 +0,0 @@ - -Making Filesystems Exportable -============================= - -Overview --------- - -All filesystem operations require a dentry (or two) as a starting -point. Local applications have a reference-counted hold on suitable -dentries via open file descriptors or cwd/root. However remote -applications that access a filesystem via a remote filesystem protocol -such as NFS may not be able to hold such a reference, and so need a -different way to refer to a particular dentry. 
As the alternative -form of reference needs to be stable across renames, truncates, and -server-reboot (among other things, though these tend to be the most -problematic), there is no simple answer like 'filename'. - -The mechanism discussed here allows each filesystem implementation to -specify how to generate an opaque (outside of the filesystem) byte -string for any dentry, and how to find an appropriate dentry for any -given opaque byte string. -This byte string will be called a "filehandle fragment" as it -corresponds to part of an NFS filehandle. - -A filesystem which supports the mapping between filehandle fragments -and dentries will be termed "exportable". - - - -Dcache Issues -------------- - -The dcache normally contains a proper prefix of any given filesystem -tree. This means that if any filesystem object is in the dcache, then -all of the ancestors of that filesystem object are also in the dcache. -As normal access is by filename this prefix is created naturally and -maintained easily (by each object maintaining a reference count on -its parent). - -However when objects are included into the dcache by interpreting a -filehandle fragment, there is no automatic creation of a path prefix -for the object. This leads to two related but distinct features of -the dcache that are not needed for normal filesystem access. - -1/ The dcache must sometimes contain objects that are not part of the - proper prefix. i.e that are not connected to the root. -2/ The dcache must be prepared for a newly found (via ->lookup) directory - to already have a (non-connected) dentry, and must be able to move - that dentry into place (based on the parent and name in the - ->lookup). This is particularly needed for directories as - it is a dcache invariant that directories only have one dentry. - -To implement these features, the dcache has: - -a/ A dentry flag DCACHE_DISCONNECTED which is set on - any dentry that might not be part of the proper prefix. - This is set when anonymous dentries are created, and cleared when a - dentry is noticed to be a child of a dentry which is in the proper - prefix. - -b/ A per-superblock list "s_anon" of dentries which are the roots of - subtrees that are not in the proper prefix. These dentries, as - well as the proper prefix, need to be released at unmount time. As - these dentries will not be hashed, they are linked together on the - d_hash list_head. - -c/ Helper routines to allocate anonymous dentries, and to help attach - loose directory dentries at lookup time. They are: - d_alloc_anon(inode) will return a dentry for the given inode. - If the inode already has a dentry, one of those is returned. - If it doesn't, a new anonymous (IS_ROOT and - DCACHE_DISCONNECTED) dentry is allocated and attached. - In the case of a directory, care is taken that only one dentry - can ever be attached. - d_splice_alias(inode, dentry) will make sure that there is a - dentry with the same name and parent as the given dentry, and - which refers to the given inode. - If the inode is a directory and already has a dentry, then that - dentry is d_moved over the given dentry. - If the passed dentry gets attached, care is taken that this is - mutually exclusive to a d_alloc_anon operation. - If the passed dentry is used, NULL is returned, else the used - dentry is returned. This corresponds to the calling pattern of - ->lookup. - - -Filesystem Issues ------------------ - -For a filesystem to be exportable it must: - - 1/ provide the filehandle fragment routines described below. 
- 2/ make sure that d_splice_alias is used rather than d_add - when ->lookup finds an inode for a given parent and name. - Typically the ->lookup routine will end with a: - - return d_splice_alias(inode, dentry); - } - - - - A file system implementation declares that instances of the filesystem -are exportable by setting the s_export_op field in the struct -super_block. This field must point to a "struct export_operations" -struct which has the following members: - - encode_fh (optional) - Takes a dentry and creates a filehandle fragment which can later be used - to find or create a dentry for the same object. The default - implementation creates a filehandle fragment that encodes a 32bit inode - and generation number for the inode encoded, and if necessary the - same information for the parent. - - fh_to_dentry (mandatory) - Given a filehandle fragment, this should find the implied object and - create a dentry for it (possibly with d_alloc_anon). - - fh_to_parent (optional but strongly recommended) - Given a filehandle fragment, this should find the parent of the - implied object and create a dentry for it (possibly with d_alloc_anon). - May fail if the filehandle fragment is too small. - - get_parent (optional but strongly recommended) - When given a dentry for a directory, this should return a dentry for - the parent. Quite possibly the parent dentry will have been allocated - by d_alloc_anon. The default get_parent function just returns an error - so any filehandle lookup that requires finding a parent will fail. - ->lookup("..") is *not* used as a default as it can leave ".." entries - in the dcache which are too messy to work with. - - get_name (optional) - When given a parent dentry and a child dentry, this should find a name - in the directory identified by the parent dentry, which leads to the - object identified by the child dentry. If no get_name function is - supplied, a default implementation is provided which uses vfs_readdir - to find potential names, and matches inode numbers to find the correct - match. - - -A filehandle fragment consists of an array of 1 or more 4byte words, -together with a one byte "type". -The decode_fh routine should not depend on the stated size that is -passed to it. This size may be larger than the original filehandle -generated by encode_fh, in which case it will have been padded with -nuls. Rather, the encode_fh routine should choose a "type" which -indicates the decode_fh how much of the filehandle is valid, and how -it should be interpreted. diff --git a/Documentation/filesystems/nfs-rdma.txt b/Documentation/filesystems/nfs-rdma.txt deleted file mode 100644 index e386f7e4bcee..000000000000 --- a/Documentation/filesystems/nfs-rdma.txt +++ /dev/null @@ -1,271 +0,0 @@ -################################################################################ -# # -# NFS/RDMA README # -# # -################################################################################ - - Author: NetApp and Open Grid Computing - Date: May 29, 2008 - -Table of Contents -~~~~~~~~~~~~~~~~~ - - Overview - - Getting Help - - Installation - - Check RDMA and NFS Setup - - NFS/RDMA Setup - -Overview -~~~~~~~~ - - This document describes how to install and setup the Linux NFS/RDMA client - and server software. - - The NFS/RDMA client was first included in Linux 2.6.24. The NFS/RDMA server - was first included in the following release, Linux 2.6.25. 
- - In our testing, we have obtained excellent performance results (full 10Gbit - wire bandwidth at minimal client CPU) under many workloads. The code passes - the full Connectathon test suite and operates over both Infiniband and iWARP - RDMA adapters. - -Getting Help -~~~~~~~~~~~~ - - If you get stuck, you can ask questions on the - - nfs-rdma-devel@lists.sourceforge.net - - mailing list. - -Installation -~~~~~~~~~~~~ - - These instructions are a step by step guide to building a machine for - use with NFS/RDMA. - - - Install an RDMA device - - Any device supported by the drivers in drivers/infiniband/hw is acceptable. - - Testing has been performed using several Mellanox-based IB cards, the - Ammasso AMS1100 iWARP adapter, and the Chelsio cxgb3 iWARP adapter. - - - Install a Linux distribution and tools - - The first kernel release to contain both the NFS/RDMA client and server was - Linux 2.6.25 Therefore, a distribution compatible with this and subsequent - Linux kernel release should be installed. - - The procedures described in this document have been tested with - distributions from Red Hat's Fedora Project (http://fedora.redhat.com/). - - - Install nfs-utils-1.1.2 or greater on the client - - An NFS/RDMA mount point can be obtained by using the mount.nfs command in - nfs-utils-1.1.2 or greater (nfs-utils-1.1.1 was the first nfs-utils - version with support for NFS/RDMA mounts, but for various reasons we - recommend using nfs-utils-1.1.2 or greater). To see which version of - mount.nfs you are using, type: - - $ /sbin/mount.nfs -V - - If the version is less than 1.1.2 or the command does not exist, - you should install the latest version of nfs-utils. - - Download the latest package from: - - http://www.kernel.org/pub/linux/utils/nfs - - Uncompress the package and follow the installation instructions. - - If you will not need the idmapper and gssd executables (you do not need - these to create an NFS/RDMA enabled mount command), the installation - process can be simplified by disabling these features when running - configure: - - $ ./configure --disable-gss --disable-nfsv4 - - To build nfs-utils you will need the tcp_wrappers package installed. For - more information on this see the package's README and INSTALL files. - - After building the nfs-utils package, there will be a mount.nfs binary in - the utils/mount directory. This binary can be used to initiate NFS v2, v3, - or v4 mounts. To initiate a v4 mount, the binary must be called - mount.nfs4. The standard technique is to create a symlink called - mount.nfs4 to mount.nfs. - - This mount.nfs binary should be installed at /sbin/mount.nfs as follows: - - $ sudo cp utils/mount/mount.nfs /sbin/mount.nfs - - In this location, mount.nfs will be invoked automatically for NFS mounts - by the system mount command. - - NOTE: mount.nfs and therefore nfs-utils-1.1.2 or greater is only needed - on the NFS client machine. You do not need this specific version of - nfs-utils on the server. Furthermore, only the mount.nfs command from - nfs-utils-1.1.2 is needed on the client. - - - Install a Linux kernel with NFS/RDMA - - The NFS/RDMA client and server are both included in the mainline Linux - kernel version 2.6.25 and later. This and other versions of the 2.6 Linux - kernel can be found at: - - ftp://ftp.kernel.org/pub/linux/kernel/v2.6/ - - Download the sources and place them in an appropriate location. - - - Configure the RDMA stack - - Make sure your kernel configuration has RDMA support enabled. 
Under - Device Drivers -> InfiniBand support, update the kernel configuration - to enable InfiniBand support [NOTE: the option name is misleading. Enabling - InfiniBand support is required for all RDMA devices (IB, iWARP, etc.)]. - - Enable the appropriate IB HCA support (mlx4, mthca, ehca, ipath, etc.) or - iWARP adapter support (amso, cxgb3, etc.). - - If you are using InfiniBand, be sure to enable IP-over-InfiniBand support. - - - Configure the NFS client and server - - Your kernel configuration must also have NFS file system support and/or - NFS server support enabled. These and other NFS related configuration - options can be found under File Systems -> Network File Systems. - - - Build, install, reboot - - The NFS/RDMA code will be enabled automatically if NFS and RDMA - are turned on. The NFS/RDMA client and server are configured via the hidden - SUNRPC_XPRT_RDMA config option that depends on SUNRPC and INFINIBAND. The - value of SUNRPC_XPRT_RDMA will be: - - - N if either SUNRPC or INFINIBAND are N, in this case the NFS/RDMA client - and server will not be built - - M if both SUNRPC and INFINIBAND are on (M or Y) and at least one is M, - in this case the NFS/RDMA client and server will be built as modules - - Y if both SUNRPC and INFINIBAND are Y, in this case the NFS/RDMA client - and server will be built into the kernel - - Therefore, if you have followed the steps above and turned no NFS and RDMA, - the NFS/RDMA client and server will be built. - - Build a new kernel, install it, boot it. - -Check RDMA and NFS Setup -~~~~~~~~~~~~~~~~~~~~~~~~ - - Before configuring the NFS/RDMA software, it is a good idea to test - your new kernel to ensure that the kernel is working correctly. - In particular, it is a good idea to verify that the RDMA stack - is functioning as expected and standard NFS over TCP/IP and/or UDP/IP - is working properly. - - - Check RDMA Setup - - If you built the RDMA components as modules, load them at - this time. For example, if you are using a Mellanox Tavor/Sinai/Arbel - card: - - $ modprobe ib_mthca - $ modprobe ib_ipoib - - If you are using InfiniBand, make sure there is a Subnet Manager (SM) - running on the network. If your IB switch has an embedded SM, you can - use it. Otherwise, you will need to run an SM, such as OpenSM, on one - of your end nodes. - - If an SM is running on your network, you should see the following: - - $ cat /sys/class/infiniband/driverX/ports/1/state - 4: ACTIVE - - where driverX is mthca0, ipath5, ehca3, etc. - - To further test the InfiniBand software stack, use IPoIB (this - assumes you have two IB hosts named host1 and host2): - - host1$ ifconfig ib0 a.b.c.x - host2$ ifconfig ib0 a.b.c.y - host1$ ping a.b.c.y - host2$ ping a.b.c.x - - For other device types, follow the appropriate procedures. - - - Check NFS Setup - - For the NFS components enabled above (client and/or server), - test their functionality over standard Ethernet using TCP/IP or UDP/IP. - -NFS/RDMA Setup -~~~~~~~~~~~~~~ - - We recommend that you use two machines, one to act as the client and - one to act as the server. - - One time configuration: - - - On the server system, configure the /etc/exports file and - start the NFS/RDMA server. - - Exports entries with the following formats have been tested: - - /vol0 192.168.0.47(fsid=0,rw,async,insecure,no_root_squash) - /vol0 192.168.0.0/255.255.255.0(fsid=0,rw,async,insecure,no_root_squash) - - The IP address(es) is(are) the client's IPoIB address for an InfiniBand - HCA or the cleint's iWARP address(es) for an RNIC. 
- - NOTE: The "insecure" option must be used because the NFS/RDMA client does - not use a reserved port. - - Each time a machine boots: - - - Load and configure the RDMA drivers - - For InfiniBand using a Mellanox adapter: - - $ modprobe ib_mthca - $ modprobe ib_ipoib - $ ifconfig ib0 a.b.c.d - - NOTE: use unique addresses for the client and server - - - Start the NFS server - - If the NFS/RDMA server was built as a module (CONFIG_SUNRPC_XPRT_RDMA=m in - kernel config), load the RDMA transport module: - - $ modprobe svcrdma - - Regardless of how the server was built (module or built-in), start the - server: - - $ /etc/init.d/nfs start - - or - - $ service nfs start - - Instruct the server to listen on the RDMA transport: - - $ echo rdma 20049 > /proc/fs/nfsd/portlist - - - On the client system - - If the NFS/RDMA client was built as a module (CONFIG_SUNRPC_XPRT_RDMA=m in - kernel config), load the RDMA client module: - - $ modprobe xprtrdma.ko - - Regardless of how the client was built (module or built-in), use this - command to mount the NFS/RDMA server: - - $ mount -o rdma,port=20049 :/ /mnt - - To verify that the mount is using RDMA, run "cat /proc/mounts" and check - the "proto" field for the given mount. - - Congratulations! You're using NFS/RDMA! diff --git a/Documentation/filesystems/nfs.txt b/Documentation/filesystems/nfs.txt deleted file mode 100644 index f50f26ce6cd0..000000000000 --- a/Documentation/filesystems/nfs.txt +++ /dev/null @@ -1,98 +0,0 @@ - -The NFS client -============== - -The NFS version 2 protocol was first documented in RFC1094 (March 1989). -Since then two more major releases of NFS have been published, with NFSv3 -being documented in RFC1813 (June 1995), and NFSv4 in RFC3530 (April -2003). - -The Linux NFS client currently supports all the above published versions, -and work is in progress on adding support for minor version 1 of the NFSv4 -protocol. - -The purpose of this document is to provide information on some of the -upcall interfaces that are used in order to provide the NFS client with -some of the information that it requires in order to fully comply with -the NFS spec. - -The DNS resolver -================ - -NFSv4 allows for one server to refer the NFS client to data that has been -migrated onto another server by means of the special "fs_locations" -attribute. See - http://tools.ietf.org/html/rfc3530#section-6 -and - http://tools.ietf.org/html/draft-ietf-nfsv4-referrals-00 - -The fs_locations information can take the form of either an ip address and -a path, or a DNS hostname and a path. The latter requires the NFS client to -do a DNS lookup in order to mount the new volume, and hence the need for an -upcall to allow userland to provide this service. - -Assuming that the user has the 'rpc_pipefs' filesystem mounted in the usual -/var/lib/nfs/rpc_pipefs, the upcall consists of the following steps: - - (1) The process checks the dns_resolve cache to see if it contains a - valid entry. If so, it returns that entry and exits. 
- - (2) If no valid entry exists, the helper script '/sbin/nfs_cache_getent' - (may be changed using the 'nfs.cache_getent' kernel boot parameter) - is run, with two arguments: - - the cache name, "dns_resolve" - - the hostname to resolve - - (3) After looking up the corresponding ip address, the helper script - writes the result into the rpc_pipefs pseudo-file - '/var/lib/nfs/rpc_pipefs/cache/dns_resolve/channel' - in the following (text) format: - - " \n" - - Where is in the usual IPv4 (123.456.78.90) or IPv6 - (ffee:ddcc:bbaa:9988:7766:5544:3322:1100, ffee::1100, ...) format. - is identical to the second argument of the helper - script, and is the 'time to live' of this cache entry (in - units of seconds). - - Note: If is invalid, say the string "0", then a negative - entry is created, which will cause the kernel to treat the hostname - as having no valid DNS translation. - - - - -A basic sample /sbin/nfs_cache_getent -===================================== - -#!/bin/bash -# -ttl=600 -# -cut=/usr/bin/cut -getent=/usr/bin/getent -rpc_pipefs=/var/lib/nfs/rpc_pipefs -# -die() -{ - echo "Usage: $0 cache_name entry_name" - exit 1 -} - -[ $# -lt 2 ] && die -cachename="$1" -cache_path=${rpc_pipefs}/cache/${cachename}/channel - -case "${cachename}" in - dns_resolve) - name="$2" - result="$(${getent} hosts ${name} | ${cut} -f1 -d\ )" - [ -z "${result}" ] && result="0" - ;; - *) - die - ;; -esac -echo "${result} ${name} ${ttl}" >${cache_path} - diff --git a/Documentation/filesystems/nfs/00-INDEX b/Documentation/filesystems/nfs/00-INDEX new file mode 100644 index 000000000000..6ff3d212027b --- /dev/null +++ b/Documentation/filesystems/nfs/00-INDEX @@ -0,0 +1,12 @@ +00-INDEX + - this file (nfs-related documentation). +Exporting + - explanation of how to make filesystems exportable. +nfs.txt + - nfs client, and DNS resolution for fs_locations. +nfs41-server.txt + - info on the Linux server implementation of NFSv4 minor version 1. +nfs-rdma.txt + - how to install and setup the Linux NFS/RDMA client and server software +nfsroot.txt + - short guide on setting up a diskless box with NFS root filesystem. diff --git a/Documentation/filesystems/nfs/Exporting b/Documentation/filesystems/nfs/Exporting new file mode 100644 index 000000000000..87019d2b5981 --- /dev/null +++ b/Documentation/filesystems/nfs/Exporting @@ -0,0 +1,147 @@ + +Making Filesystems Exportable +============================= + +Overview +-------- + +All filesystem operations require a dentry (or two) as a starting +point. Local applications have a reference-counted hold on suitable +dentries via open file descriptors or cwd/root. However remote +applications that access a filesystem via a remote filesystem protocol +such as NFS may not be able to hold such a reference, and so need a +different way to refer to a particular dentry. As the alternative +form of reference needs to be stable across renames, truncates, and +server-reboot (among other things, though these tend to be the most +problematic), there is no simple answer like 'filename'. + +The mechanism discussed here allows each filesystem implementation to +specify how to generate an opaque (outside of the filesystem) byte +string for any dentry, and how to find an appropriate dentry for any +given opaque byte string. +This byte string will be called a "filehandle fragment" as it +corresponds to part of an NFS filehandle. + +A filesystem which supports the mapping between filehandle fragments +and dentries will be termed "exportable". 
+ + + +Dcache Issues +------------- + +The dcache normally contains a proper prefix of any given filesystem +tree. This means that if any filesystem object is in the dcache, then +all of the ancestors of that filesystem object are also in the dcache. +As normal access is by filename this prefix is created naturally and +maintained easily (by each object maintaining a reference count on +its parent). + +However when objects are included into the dcache by interpreting a +filehandle fragment, there is no automatic creation of a path prefix +for the object. This leads to two related but distinct features of +the dcache that are not needed for normal filesystem access. + +1/ The dcache must sometimes contain objects that are not part of the + proper prefix. i.e that are not connected to the root. +2/ The dcache must be prepared for a newly found (via ->lookup) directory + to already have a (non-connected) dentry, and must be able to move + that dentry into place (based on the parent and name in the + ->lookup). This is particularly needed for directories as + it is a dcache invariant that directories only have one dentry. + +To implement these features, the dcache has: + +a/ A dentry flag DCACHE_DISCONNECTED which is set on + any dentry that might not be part of the proper prefix. + This is set when anonymous dentries are created, and cleared when a + dentry is noticed to be a child of a dentry which is in the proper + prefix. + +b/ A per-superblock list "s_anon" of dentries which are the roots of + subtrees that are not in the proper prefix. These dentries, as + well as the proper prefix, need to be released at unmount time. As + these dentries will not be hashed, they are linked together on the + d_hash list_head. + +c/ Helper routines to allocate anonymous dentries, and to help attach + loose directory dentries at lookup time. They are: + d_alloc_anon(inode) will return a dentry for the given inode. + If the inode already has a dentry, one of those is returned. + If it doesn't, a new anonymous (IS_ROOT and + DCACHE_DISCONNECTED) dentry is allocated and attached. + In the case of a directory, care is taken that only one dentry + can ever be attached. + d_splice_alias(inode, dentry) will make sure that there is a + dentry with the same name and parent as the given dentry, and + which refers to the given inode. + If the inode is a directory and already has a dentry, then that + dentry is d_moved over the given dentry. + If the passed dentry gets attached, care is taken that this is + mutually exclusive to a d_alloc_anon operation. + If the passed dentry is used, NULL is returned, else the used + dentry is returned. This corresponds to the calling pattern of + ->lookup. + + +Filesystem Issues +----------------- + +For a filesystem to be exportable it must: + + 1/ provide the filehandle fragment routines described below. + 2/ make sure that d_splice_alias is used rather than d_add + when ->lookup finds an inode for a given parent and name. + Typically the ->lookup routine will end with a: + + return d_splice_alias(inode, dentry); + } + + + + A file system implementation declares that instances of the filesystem +are exportable by setting the s_export_op field in the struct +super_block. This field must point to a "struct export_operations" +struct which has the following members: + + encode_fh (optional) + Takes a dentry and creates a filehandle fragment which can later be used + to find or create a dentry for the same object. 
The default + implementation creates a filehandle fragment that encodes a 32bit inode + and generation number for the inode encoded, and if necessary the + same information for the parent. + + fh_to_dentry (mandatory) + Given a filehandle fragment, this should find the implied object and + create a dentry for it (possibly with d_alloc_anon). + + fh_to_parent (optional but strongly recommended) + Given a filehandle fragment, this should find the parent of the + implied object and create a dentry for it (possibly with d_alloc_anon). + May fail if the filehandle fragment is too small. + + get_parent (optional but strongly recommended) + When given a dentry for a directory, this should return a dentry for + the parent. Quite possibly the parent dentry will have been allocated + by d_alloc_anon. The default get_parent function just returns an error + so any filehandle lookup that requires finding a parent will fail. + ->lookup("..") is *not* used as a default as it can leave ".." entries + in the dcache which are too messy to work with. + + get_name (optional) + When given a parent dentry and a child dentry, this should find a name + in the directory identified by the parent dentry, which leads to the + object identified by the child dentry. If no get_name function is + supplied, a default implementation is provided which uses vfs_readdir + to find potential names, and matches inode numbers to find the correct + match. + + +A filehandle fragment consists of an array of 1 or more 4byte words, +together with a one byte "type". +The decode_fh routine should not depend on the stated size that is +passed to it. This size may be larger than the original filehandle +generated by encode_fh, in which case it will have been padded with +nuls. Rather, the encode_fh routine should choose a "type" which +indicates the decode_fh how much of the filehandle is valid, and how +it should be interpreted. diff --git a/Documentation/filesystems/nfs/nfs-rdma.txt b/Documentation/filesystems/nfs/nfs-rdma.txt new file mode 100644 index 000000000000..e386f7e4bcee --- /dev/null +++ b/Documentation/filesystems/nfs/nfs-rdma.txt @@ -0,0 +1,271 @@ +################################################################################ +# # +# NFS/RDMA README # +# # +################################################################################ + + Author: NetApp and Open Grid Computing + Date: May 29, 2008 + +Table of Contents +~~~~~~~~~~~~~~~~~ + - Overview + - Getting Help + - Installation + - Check RDMA and NFS Setup + - NFS/RDMA Setup + +Overview +~~~~~~~~ + + This document describes how to install and setup the Linux NFS/RDMA client + and server software. + + The NFS/RDMA client was first included in Linux 2.6.24. The NFS/RDMA server + was first included in the following release, Linux 2.6.25. + + In our testing, we have obtained excellent performance results (full 10Gbit + wire bandwidth at minimal client CPU) under many workloads. The code passes + the full Connectathon test suite and operates over both Infiniband and iWARP + RDMA adapters. + +Getting Help +~~~~~~~~~~~~ + + If you get stuck, you can ask questions on the + + nfs-rdma-devel@lists.sourceforge.net + + mailing list. + +Installation +~~~~~~~~~~~~ + + These instructions are a step by step guide to building a machine for + use with NFS/RDMA. + + - Install an RDMA device + + Any device supported by the drivers in drivers/infiniband/hw is acceptable. 
+ + Testing has been performed using several Mellanox-based IB cards, the + Ammasso AMS1100 iWARP adapter, and the Chelsio cxgb3 iWARP adapter. + + - Install a Linux distribution and tools + + The first kernel release to contain both the NFS/RDMA client and server was + Linux 2.6.25 Therefore, a distribution compatible with this and subsequent + Linux kernel release should be installed. + + The procedures described in this document have been tested with + distributions from Red Hat's Fedora Project (http://fedora.redhat.com/). + + - Install nfs-utils-1.1.2 or greater on the client + + An NFS/RDMA mount point can be obtained by using the mount.nfs command in + nfs-utils-1.1.2 or greater (nfs-utils-1.1.1 was the first nfs-utils + version with support for NFS/RDMA mounts, but for various reasons we + recommend using nfs-utils-1.1.2 or greater). To see which version of + mount.nfs you are using, type: + + $ /sbin/mount.nfs -V + + If the version is less than 1.1.2 or the command does not exist, + you should install the latest version of nfs-utils. + + Download the latest package from: + + http://www.kernel.org/pub/linux/utils/nfs + + Uncompress the package and follow the installation instructions. + + If you will not need the idmapper and gssd executables (you do not need + these to create an NFS/RDMA enabled mount command), the installation + process can be simplified by disabling these features when running + configure: + + $ ./configure --disable-gss --disable-nfsv4 + + To build nfs-utils you will need the tcp_wrappers package installed. For + more information on this see the package's README and INSTALL files. + + After building the nfs-utils package, there will be a mount.nfs binary in + the utils/mount directory. This binary can be used to initiate NFS v2, v3, + or v4 mounts. To initiate a v4 mount, the binary must be called + mount.nfs4. The standard technique is to create a symlink called + mount.nfs4 to mount.nfs. + + This mount.nfs binary should be installed at /sbin/mount.nfs as follows: + + $ sudo cp utils/mount/mount.nfs /sbin/mount.nfs + + In this location, mount.nfs will be invoked automatically for NFS mounts + by the system mount command. + + NOTE: mount.nfs and therefore nfs-utils-1.1.2 or greater is only needed + on the NFS client machine. You do not need this specific version of + nfs-utils on the server. Furthermore, only the mount.nfs command from + nfs-utils-1.1.2 is needed on the client. + + - Install a Linux kernel with NFS/RDMA + + The NFS/RDMA client and server are both included in the mainline Linux + kernel version 2.6.25 and later. This and other versions of the 2.6 Linux + kernel can be found at: + + ftp://ftp.kernel.org/pub/linux/kernel/v2.6/ + + Download the sources and place them in an appropriate location. + + - Configure the RDMA stack + + Make sure your kernel configuration has RDMA support enabled. Under + Device Drivers -> InfiniBand support, update the kernel configuration + to enable InfiniBand support [NOTE: the option name is misleading. Enabling + InfiniBand support is required for all RDMA devices (IB, iWARP, etc.)]. + + Enable the appropriate IB HCA support (mlx4, mthca, ehca, ipath, etc.) or + iWARP adapter support (amso, cxgb3, etc.). + + If you are using InfiniBand, be sure to enable IP-over-InfiniBand support. + + - Configure the NFS client and server + + Your kernel configuration must also have NFS file system support and/or + NFS server support enabled. 
These and other NFS related configuration + options can be found under File Systems -> Network File Systems. + + - Build, install, reboot + + The NFS/RDMA code will be enabled automatically if NFS and RDMA + are turned on. The NFS/RDMA client and server are configured via the hidden + SUNRPC_XPRT_RDMA config option that depends on SUNRPC and INFINIBAND. The + value of SUNRPC_XPRT_RDMA will be: + + - N if either SUNRPC or INFINIBAND are N, in this case the NFS/RDMA client + and server will not be built + - M if both SUNRPC and INFINIBAND are on (M or Y) and at least one is M, + in this case the NFS/RDMA client and server will be built as modules + - Y if both SUNRPC and INFINIBAND are Y, in this case the NFS/RDMA client + and server will be built into the kernel + + Therefore, if you have followed the steps above and turned no NFS and RDMA, + the NFS/RDMA client and server will be built. + + Build a new kernel, install it, boot it. + +Check RDMA and NFS Setup +~~~~~~~~~~~~~~~~~~~~~~~~ + + Before configuring the NFS/RDMA software, it is a good idea to test + your new kernel to ensure that the kernel is working correctly. + In particular, it is a good idea to verify that the RDMA stack + is functioning as expected and standard NFS over TCP/IP and/or UDP/IP + is working properly. + + - Check RDMA Setup + + If you built the RDMA components as modules, load them at + this time. For example, if you are using a Mellanox Tavor/Sinai/Arbel + card: + + $ modprobe ib_mthca + $ modprobe ib_ipoib + + If you are using InfiniBand, make sure there is a Subnet Manager (SM) + running on the network. If your IB switch has an embedded SM, you can + use it. Otherwise, you will need to run an SM, such as OpenSM, on one + of your end nodes. + + If an SM is running on your network, you should see the following: + + $ cat /sys/class/infiniband/driverX/ports/1/state + 4: ACTIVE + + where driverX is mthca0, ipath5, ehca3, etc. + + To further test the InfiniBand software stack, use IPoIB (this + assumes you have two IB hosts named host1 and host2): + + host1$ ifconfig ib0 a.b.c.x + host2$ ifconfig ib0 a.b.c.y + host1$ ping a.b.c.y + host2$ ping a.b.c.x + + For other device types, follow the appropriate procedures. + + - Check NFS Setup + + For the NFS components enabled above (client and/or server), + test their functionality over standard Ethernet using TCP/IP or UDP/IP. + +NFS/RDMA Setup +~~~~~~~~~~~~~~ + + We recommend that you use two machines, one to act as the client and + one to act as the server. + + One time configuration: + + - On the server system, configure the /etc/exports file and + start the NFS/RDMA server. + + Exports entries with the following formats have been tested: + + /vol0 192.168.0.47(fsid=0,rw,async,insecure,no_root_squash) + /vol0 192.168.0.0/255.255.255.0(fsid=0,rw,async,insecure,no_root_squash) + + The IP address(es) is(are) the client's IPoIB address for an InfiniBand + HCA or the cleint's iWARP address(es) for an RNIC. + + NOTE: The "insecure" option must be used because the NFS/RDMA client does + not use a reserved port. 
+ + Each time a machine boots: + + - Load and configure the RDMA drivers + + For InfiniBand using a Mellanox adapter: + + $ modprobe ib_mthca + $ modprobe ib_ipoib + $ ifconfig ib0 a.b.c.d + + NOTE: use unique addresses for the client and server + + - Start the NFS server + + If the NFS/RDMA server was built as a module (CONFIG_SUNRPC_XPRT_RDMA=m in + kernel config), load the RDMA transport module: + + $ modprobe svcrdma + + Regardless of how the server was built (module or built-in), start the + server: + + $ /etc/init.d/nfs start + + or + + $ service nfs start + + Instruct the server to listen on the RDMA transport: + + $ echo rdma 20049 > /proc/fs/nfsd/portlist + + - On the client system + + If the NFS/RDMA client was built as a module (CONFIG_SUNRPC_XPRT_RDMA=m in + kernel config), load the RDMA client module: + + $ modprobe xprtrdma.ko + + Regardless of how the client was built (module or built-in), use this + command to mount the NFS/RDMA server: + + $ mount -o rdma,port=20049 :/ /mnt + + To verify that the mount is using RDMA, run "cat /proc/mounts" and check + the "proto" field for the given mount. + + Congratulations! You're using NFS/RDMA! diff --git a/Documentation/filesystems/nfs/nfs.txt b/Documentation/filesystems/nfs/nfs.txt new file mode 100644 index 000000000000..f50f26ce6cd0 --- /dev/null +++ b/Documentation/filesystems/nfs/nfs.txt @@ -0,0 +1,98 @@ + +The NFS client +============== + +The NFS version 2 protocol was first documented in RFC1094 (March 1989). +Since then two more major releases of NFS have been published, with NFSv3 +being documented in RFC1813 (June 1995), and NFSv4 in RFC3530 (April +2003). + +The Linux NFS client currently supports all the above published versions, +and work is in progress on adding support for minor version 1 of the NFSv4 +protocol. + +The purpose of this document is to provide information on some of the +upcall interfaces that are used in order to provide the NFS client with +some of the information that it requires in order to fully comply with +the NFS spec. + +The DNS resolver +================ + +NFSv4 allows for one server to refer the NFS client to data that has been +migrated onto another server by means of the special "fs_locations" +attribute. See + http://tools.ietf.org/html/rfc3530#section-6 +and + http://tools.ietf.org/html/draft-ietf-nfsv4-referrals-00 + +The fs_locations information can take the form of either an ip address and +a path, or a DNS hostname and a path. The latter requires the NFS client to +do a DNS lookup in order to mount the new volume, and hence the need for an +upcall to allow userland to provide this service. + +Assuming that the user has the 'rpc_pipefs' filesystem mounted in the usual +/var/lib/nfs/rpc_pipefs, the upcall consists of the following steps: + + (1) The process checks the dns_resolve cache to see if it contains a + valid entry. If so, it returns that entry and exits. + + (2) If no valid entry exists, the helper script '/sbin/nfs_cache_getent' + (may be changed using the 'nfs.cache_getent' kernel boot parameter) + is run, with two arguments: + - the cache name, "dns_resolve" + - the hostname to resolve + + (3) After looking up the corresponding ip address, the helper script + writes the result into the rpc_pipefs pseudo-file + '/var/lib/nfs/rpc_pipefs/cache/dns_resolve/channel' + in the following (text) format: + + " \n" + + Where is in the usual IPv4 (123.456.78.90) or IPv6 + (ffee:ddcc:bbaa:9988:7766:5544:3322:1100, ffee::1100, ...) format. 
+ is identical to the second argument of the helper + script, and is the 'time to live' of this cache entry (in + units of seconds). + + Note: If is invalid, say the string "0", then a negative + entry is created, which will cause the kernel to treat the hostname + as having no valid DNS translation. + + + + +A basic sample /sbin/nfs_cache_getent +===================================== + +#!/bin/bash +# +ttl=600 +# +cut=/usr/bin/cut +getent=/usr/bin/getent +rpc_pipefs=/var/lib/nfs/rpc_pipefs +# +die() +{ + echo "Usage: $0 cache_name entry_name" + exit 1 +} + +[ $# -lt 2 ] && die +cachename="$1" +cache_path=${rpc_pipefs}/cache/${cachename}/channel + +case "${cachename}" in + dns_resolve) + name="$2" + result="$(${getent} hosts ${name} | ${cut} -f1 -d\ )" + [ -z "${result}" ] && result="0" + ;; + *) + die + ;; +esac +echo "${result} ${name} ${ttl}" >${cache_path} + diff --git a/Documentation/filesystems/nfs/nfs41-server.txt b/Documentation/filesystems/nfs/nfs41-server.txt new file mode 100644 index 000000000000..1bd0d0c05171 --- /dev/null +++ b/Documentation/filesystems/nfs/nfs41-server.txt @@ -0,0 +1,222 @@ +NFSv4.1 Server Implementation + +Server support for minorversion 1 can be controlled using the +/proc/fs/nfsd/versions control file. The string output returned +by reading this file will contain either "+4.1" or "-4.1" +correspondingly. + +Currently, server support for minorversion 1 is disabled by default. +It can be enabled at run time by writing the string "+4.1" to +the /proc/fs/nfsd/versions control file. Note that to write this +control file, the nfsd service must be taken down. Use your user-mode +nfs-utils to set this up; see rpc.nfsd(8) + +(Warning: older servers will interpret "+4.1" and "-4.1" as "+4" and +"-4", respectively. Therefore, code meant to work on both new and old +kernels must turn 4.1 on or off *before* turning support for version 4 +on or off; rpc.nfsd does this correctly.) + +The NFSv4 minorversion 1 (NFSv4.1) implementation in nfsd is based +on the latest NFSv4.1 Internet Draft: +http://tools.ietf.org/html/draft-ietf-nfsv4-minorversion1-29 + +From the many new features in NFSv4.1 the current implementation +focuses on the mandatory-to-implement NFSv4.1 Sessions, providing +"exactly once" semantics and better control and throttling of the +resources allocated for each client. + +Other NFSv4.1 features, Parallel NFS operations in particular, +are still under development out of tree. +See http://wiki.linux-nfs.org/wiki/index.php/PNFS_prototype_design +for more information. + +The current implementation is intended for developers only: while it +does support ordinary file operations on clients we have tested against +(including the linux client), it is incomplete in ways which may limit +features unexpectedly, cause known bugs in rare cases, or cause +interoperability problems with future clients. Known issues: + + - gss support is questionable: currently mounts with kerberos + from a linux client are possible, but we aren't really + conformant with the spec (for example, we don't use kerberos + on the backchannel correctly). + - no trunking support: no clients currently take advantage of + trunking, but this is a mandatory feature, and its use is + recommended to clients in a number of places. (E.g. to ensure + timely renewal in case an existing connection's retry timeouts + have gotten too long; see section 8.3 of the draft.) + Therefore, lack of this feature may cause future clients to + fail. 
+ - Incomplete backchannel support: incomplete backchannel gss + support and no support for BACKCHANNEL_CTL mean that + callbacks (hence delegations and layouts) may not be + available and clients confused by the incomplete + implementation may fail. + - Server reboot recovery is unsupported; if the server reboots, + clients may fail. + - We do not support SSV, which provides security for shared + client-server state (thus preventing unauthorized tampering + with locks and opens, for example). It is mandatory for + servers to support this, though no clients use it yet. + - Mandatory operations which we do not support, such as + DESTROY_CLIENTID, FREE_STATEID, SECINFO_NO_NAME, and + TEST_STATEID, are not currently used by clients, but will be + (and the spec recommends their uses in common cases), and + clients should not be expected to know how to recover from the + case where they are not supported. This will eventually cause + interoperability failures. + +In addition, some limitations are inherited from the current NFSv4 +implementation: + + - Incomplete delegation enforcement: if a file is renamed or + unlinked, a client holding a delegation may continue to + indefinitely allow opens of the file under the old name. + +The table below, taken from the NFSv4.1 document, lists +the operations that are mandatory to implement (REQ), optional +(OPT), and NFSv4.0 operations that are required not to implement (MNI) +in minor version 1. The first column indicates the operations that +are not supported yet by the linux server implementation. + +The OPTIONAL features identified and their abbreviations are as follows: + pNFS Parallel NFS + FDELG File Delegations + DDELG Directory Delegations + +The following abbreviations indicate the linux server implementation status. + I Implemented NFSv4.1 operations. + NS Not Supported. + NS* unimplemented optional feature. + P pNFS features implemented out of tree. + PNS pNFS features that are not supported yet (out of tree). 
+ +Operations + + +----------------------+------------+--------------+----------------+ + | Operation | REQ, REC, | Feature | Definition | + | | OPT, or | (REQ, REC, | | + | | MNI | or OPT) | | + +----------------------+------------+--------------+----------------+ + | ACCESS | REQ | | Section 18.1 | +NS | BACKCHANNEL_CTL | REQ | | Section 18.33 | +NS | BIND_CONN_TO_SESSION | REQ | | Section 18.34 | + | CLOSE | REQ | | Section 18.2 | + | COMMIT | REQ | | Section 18.3 | + | CREATE | REQ | | Section 18.4 | +I | CREATE_SESSION | REQ | | Section 18.36 | +NS*| DELEGPURGE | OPT | FDELG (REQ) | Section 18.5 | + | DELEGRETURN | OPT | FDELG, | Section 18.6 | + | | | DDELG, pNFS | | + | | | (REQ) | | +NS | DESTROY_CLIENTID | REQ | | Section 18.50 | +I | DESTROY_SESSION | REQ | | Section 18.37 | +I | EXCHANGE_ID | REQ | | Section 18.35 | +NS | FREE_STATEID | REQ | | Section 18.38 | + | GETATTR | REQ | | Section 18.7 | +P | GETDEVICEINFO | OPT | pNFS (REQ) | Section 18.40 | +P | GETDEVICELIST | OPT | pNFS (OPT) | Section 18.41 | + | GETFH | REQ | | Section 18.8 | +NS*| GET_DIR_DELEGATION | OPT | DDELG (REQ) | Section 18.39 | +P | LAYOUTCOMMIT | OPT | pNFS (REQ) | Section 18.42 | +P | LAYOUTGET | OPT | pNFS (REQ) | Section 18.43 | +P | LAYOUTRETURN | OPT | pNFS (REQ) | Section 18.44 | + | LINK | OPT | | Section 18.9 | + | LOCK | REQ | | Section 18.10 | + | LOCKT | REQ | | Section 18.11 | + | LOCKU | REQ | | Section 18.12 | + | LOOKUP | REQ | | Section 18.13 | + | LOOKUPP | REQ | | Section 18.14 | + | NVERIFY | REQ | | Section 18.15 | + | OPEN | REQ | | Section 18.16 | +NS*| OPENATTR | OPT | | Section 18.17 | + | OPEN_CONFIRM | MNI | | N/A | + | OPEN_DOWNGRADE | REQ | | Section 18.18 | + | PUTFH | REQ | | Section 18.19 | + | PUTPUBFH | REQ | | Section 18.20 | + | PUTROOTFH | REQ | | Section 18.21 | + | READ | REQ | | Section 18.22 | + | READDIR | REQ | | Section 18.23 | + | READLINK | OPT | | Section 18.24 | +NS | RECLAIM_COMPLETE | REQ | | Section 18.51 | + | RELEASE_LOCKOWNER | MNI | | N/A | + | REMOVE | REQ | | Section 18.25 | + | RENAME | REQ | | Section 18.26 | + | RENEW | MNI | | N/A | + | RESTOREFH | REQ | | Section 18.27 | + | SAVEFH | REQ | | Section 18.28 | + | SECINFO | REQ | | Section 18.29 | +NS | SECINFO_NO_NAME | REC | pNFS files | Section 18.45, | + | | | layout (REQ) | Section 13.12 | +I | SEQUENCE | REQ | | Section 18.46 | + | SETATTR | REQ | | Section 18.30 | + | SETCLIENTID | MNI | | N/A | + | SETCLIENTID_CONFIRM | MNI | | N/A | +NS | SET_SSV | REQ | | Section 18.47 | +NS | TEST_STATEID | REQ | | Section 18.48 | + | VERIFY | REQ | | Section 18.31 | +NS*| WANT_DELEGATION | OPT | FDELG (OPT) | Section 18.49 | + | WRITE | REQ | | Section 18.32 | + +Callback Operations + + +-------------------------+-----------+-------------+---------------+ + | Operation | REQ, REC, | Feature | Definition | + | | OPT, or | (REQ, REC, | | + | | MNI | or OPT) | | + +-------------------------+-----------+-------------+---------------+ + | CB_GETATTR | OPT | FDELG (REQ) | Section 20.1 | +P | CB_LAYOUTRECALL | OPT | pNFS (REQ) | Section 20.3 | +NS*| CB_NOTIFY | OPT | DDELG (REQ) | Section 20.4 | +P | CB_NOTIFY_DEVICEID | OPT | pNFS (OPT) | Section 20.12 | +NS*| CB_NOTIFY_LOCK | OPT | | Section 20.11 | +NS*| CB_PUSH_DELEG | OPT | FDELG (OPT) | Section 20.5 | + | CB_RECALL | OPT | FDELG, | Section 20.2 | + | | | DDELG, pNFS | | + | | | (REQ) | | +NS*| CB_RECALL_ANY | OPT | FDELG, | Section 20.6 | + | | | DDELG, pNFS | | + | | | (REQ) | | +NS | CB_RECALL_SLOT | REQ | | Section 20.8 | +NS*| 
CB_RECALLABLE_OBJ_AVAIL | OPT | DDELG, pNFS | Section 20.7 | + | | | (REQ) | | +I | CB_SEQUENCE | OPT | FDELG, | Section 20.9 | + | | | DDELG, pNFS | | + | | | (REQ) | | +NS*| CB_WANTS_CANCELLED | OPT | FDELG, | Section 20.10 | + | | | DDELG, pNFS | | + | | | (REQ) | | + +-------------------------+-----------+-------------+---------------+ + +Implementation notes: + +DELEGPURGE: +* mandatory only for servers that support CLAIM_DELEGATE_PREV and/or + CLAIM_DELEG_PREV_FH (which allows clients to keep delegations that + persist across client reboots). Thus we need not implement this for + now. + +EXCHANGE_ID: +* only SP4_NONE state protection supported +* implementation ids are ignored + +CREATE_SESSION: +* backchannel attributes are ignored +* backchannel security parameters are ignored + +SEQUENCE: +* no support for dynamic slot table renegotiation (optional) + +nfsv4.1 COMPOUND rules: +The following cases aren't supported yet: +* Enforcing of NFS4ERR_NOT_ONLY_OP for: BIND_CONN_TO_SESSION, CREATE_SESSION, + DESTROY_CLIENTID, DESTROY_SESSION, EXCHANGE_ID. +* DESTROY_SESSION MUST be the final operation in the COMPOUND request. + +Nonstandard compound limitations: +* No support for a sessions fore channel RPC compound that requires both a + ca_maxrequestsize request and a ca_maxresponsesize reply, so we may + fail to live up to the promise we made in CREATE_SESSION fore channel + negotiation. +* No more than one IO operation (read, write, readdir) allowed per + compound. diff --git a/Documentation/filesystems/nfs/nfsroot.txt b/Documentation/filesystems/nfs/nfsroot.txt new file mode 100644 index 000000000000..3ba0b945aaf8 --- /dev/null +++ b/Documentation/filesystems/nfs/nfsroot.txt @@ -0,0 +1,270 @@ +Mounting the root filesystem via NFS (nfsroot) +=============================================== + +Written 1996 by Gero Kuhlmann +Updated 1997 by Martin Mares +Updated 2006 by Nico Schottelius +Updated 2006 by Horms + + + +In order to use a diskless system, such as an X-terminal or printer server +for example, it is necessary for the root filesystem to be present on a +non-disk device. This may be an initramfs (see Documentation/filesystems/ +ramfs-rootfs-initramfs.txt), a ramdisk (see Documentation/initrd.txt) or a +filesystem mounted via NFS. The following text describes on how to use NFS +for the root filesystem. For the rest of this text 'client' means the +diskless system, and 'server' means the NFS server. + + + + +1.) Enabling nfsroot capabilities + ----------------------------- + +In order to use nfsroot, NFS client support needs to be selected as +built-in during configuration. Once this has been selected, the nfsroot +option will become available, which should also be selected. + +In the networking options, kernel level autoconfiguration can be selected, +along with the types of autoconfiguration to support. Selecting all of +DHCP, BOOTP and RARP is safe. + + + + +2.) Kernel command line + ------------------- + +When the kernel has been loaded by a boot loader (see below) it needs to be +told what root fs device to use. And in the case of nfsroot, where to find +both the server and the name of the directory on the server to mount as root. +This can be established using the following kernel command line parameters: + + +root=/dev/nfs + + This is necessary to enable the pseudo-NFS-device. Note that it's not a + real device but just a synonym to tell the kernel to use NFS instead of + a real device. 
+ + +nfsroot=[:][,] + + If the `nfsroot' parameter is NOT given on the command line, + the default "/tftpboot/%s" will be used. + + Specifies the IP address of the NFS server. + The default address is determined by the `ip' parameter + (see below). This parameter allows the use of different + servers for IP autoconfiguration and NFS. + + Name of the directory on the server to mount as root. + If there is a "%s" token in the string, it will be + replaced by the ASCII-representation of the client's + IP address. + + Standard NFS options. All options are separated by commas. + The following defaults are used: + port = as given by server portmap daemon + rsize = 4096 + wsize = 4096 + timeo = 7 + retrans = 3 + acregmin = 3 + acregmax = 60 + acdirmin = 30 + acdirmax = 60 + flags = hard, nointr, noposix, cto, ac + + +ip=:::::: + + This parameter tells the kernel how to configure IP addresses of devices + and also how to set up the IP routing table. It was originally called + `nfsaddrs', but now the boot-time IP configuration works independently of + NFS, so it was renamed to `ip' and the old name remained as an alias for + compatibility reasons. + + If this parameter is missing from the kernel command line, all fields are + assumed to be empty, and the defaults mentioned below apply. In general + this means that the kernel tries to configure everything using + autoconfiguration. + + The parameter can appear alone as the value to the `ip' + parameter (without all the ':' characters before). If the value is + "ip=off" or "ip=none", no autoconfiguration will take place, otherwise + autoconfiguration will take place. The most common way to use this + is "ip=dhcp". + + IP address of the client. + + Default: Determined using autoconfiguration. + + IP address of the NFS server. If RARP is used to determine + the client address and this parameter is NOT empty only + replies from the specified server are accepted. + + Only required for NFS root. That is autoconfiguration + will not be triggered if it is missing and NFS root is not + in operation. + + Default: Determined using autoconfiguration. + The address of the autoconfiguration server is used. + + IP address of a gateway if the server is on a different subnet. + + Default: Determined using autoconfiguration. + + Netmask for local network interface. If unspecified + the netmask is derived from the client IP address assuming + classful addressing. + + Default: Determined using autoconfiguration. + + Name of the client. May be supplied by autoconfiguration, + but its absence will not trigger autoconfiguration. + + Default: Client IP address is used in ASCII notation. + + Name of network device to use. + + Default: If the host only has one device, it is used. + Otherwise the device is determined using + autoconfiguration. This is done by sending + autoconfiguration requests out of all devices, + and using the device that received the first reply. + + Method to use for autoconfiguration. In the case of options + which specify multiple autoconfiguration protocols, + requests are sent using all protocols, and the first one + to reply is used. + + Only autoconfiguration protocols that have been compiled + into the kernel will be used, regardless of the value of + this option. 
+ + off or none: don't use autoconfiguration + (do static IP assignment instead) + on or any: use any protocol available in the kernel + (default) + dhcp: use DHCP + bootp: use BOOTP + rarp: use RARP + both: use both BOOTP and RARP but not DHCP + (old option kept for backwards compatibility) + + Default: any + + + + +3.) Boot Loader + ---------- + +To get the kernel into memory different approaches can be used. +They depend on various facilities being available: + + +3.1) Booting from a floppy using syslinux + + When building kernels, an easy way to create a boot floppy that uses + syslinux is to use the zdisk or bzdisk make targets which use zimage + and bzimage images respectively. Both targets accept the + FDARGS parameter which can be used to set the kernel command line. + + e.g. + make bzdisk FDARGS="root=/dev/nfs" + + Note that the user running this command will need to have + access to the floppy drive device, /dev/fd0 + + For more information on syslinux, including how to create bootdisks + for prebuilt kernels, see http://syslinux.zytor.com/ + + N.B: Previously it was possible to write a kernel directly to + a floppy using dd, configure the boot device using rdev, and + boot using the resulting floppy. Linux no longer supports this + method of booting. + +3.2) Booting from a cdrom using isolinux + + When building kernels, an easy way to create a bootable cdrom that + uses isolinux is to use the isoimage target which uses a bzimage + image. Like zdisk and bzdisk, this target accepts the FDARGS + parameter which can be used to set the kernel command line. + + e.g. + make isoimage FDARGS="root=/dev/nfs" + + The resulting iso image will be arch//boot/image.iso + This can be written to a cdrom using a variety of tools including + cdrecord. + + e.g. + cdrecord dev=ATAPI:1,0,0 arch/i386/boot/image.iso + + For more information on isolinux, including how to create bootdisks + for prebuilt kernels, see http://syslinux.zytor.com/ + +3.2) Using LILO + When using LILO all the necessary command line parameters may be + specified using the 'append=' directive in the LILO configuration + file. + + However, to use the 'root=' directive you also need to create + a dummy root device, which may be removed after LILO is run. + + mknod /dev/boot255 c 0 255 + + For information on configuring LILO, please refer to its documentation. + +3.3) Using GRUB + When using GRUB, kernel parameter are simply appended after the kernel + specification: kernel + +3.4) Using loadlin + loadlin may be used to boot Linux from a DOS command prompt without + requiring a local hard disk to mount as root. This has not been + thoroughly tested by the authors of this document, but in general + it should be possible configure the kernel command line similarly + to the configuration of LILO. + + Please refer to the loadlin documentation for further information. + +3.5) Using a boot ROM + This is probably the most elegant way of booting a diskless client. + With a boot ROM the kernel is loaded using the TFTP protocol. The + authors of this document are not aware of any no commercial boot + ROMs that support booting Linux over the network. However, there + are two free implementations of a boot ROM, netboot-nfs and + etherboot, both of which are available on sunsite.unc.edu, and both + of which contain everything you need to boot a diskless Linux client. + +3.6) Using pxelinux + Pxelinux may be used to boot linux using the PXE boot loader + which is present on many modern network cards. 
+ + When using pxelinux, the kernel image is specified using + "kernel ". The nfsroot parameters + are passed to the kernel by adding them to the "append" line. + It is common to use serial console in conjunction with pxeliunx, + see Documentation/serial-console.txt for more information. + + For more information on isolinux, including how to create bootdisks + for prebuilt kernels, see http://syslinux.zytor.com/ + + + + +4.) Credits + ------- + + The nfsroot code in the kernel and the RARP support have been written + by Gero Kuhlmann . + + The rest of the IP layer autoconfiguration code has been written + by Martin Mares . + + In order to write the initial version of nfsroot I would like to thank + Jens-Uwe Mager for his help. diff --git a/Documentation/filesystems/nfs41-server.txt b/Documentation/filesystems/nfs41-server.txt deleted file mode 100644 index 1bd0d0c05171..000000000000 --- a/Documentation/filesystems/nfs41-server.txt +++ /dev/null @@ -1,222 +0,0 @@ -NFSv4.1 Server Implementation - -Server support for minorversion 1 can be controlled using the -/proc/fs/nfsd/versions control file. The string output returned -by reading this file will contain either "+4.1" or "-4.1" -correspondingly. - -Currently, server support for minorversion 1 is disabled by default. -It can be enabled at run time by writing the string "+4.1" to -the /proc/fs/nfsd/versions control file. Note that to write this -control file, the nfsd service must be taken down. Use your user-mode -nfs-utils to set this up; see rpc.nfsd(8) - -(Warning: older servers will interpret "+4.1" and "-4.1" as "+4" and -"-4", respectively. Therefore, code meant to work on both new and old -kernels must turn 4.1 on or off *before* turning support for version 4 -on or off; rpc.nfsd does this correctly.) - -The NFSv4 minorversion 1 (NFSv4.1) implementation in nfsd is based -on the latest NFSv4.1 Internet Draft: -http://tools.ietf.org/html/draft-ietf-nfsv4-minorversion1-29 - -From the many new features in NFSv4.1 the current implementation -focuses on the mandatory-to-implement NFSv4.1 Sessions, providing -"exactly once" semantics and better control and throttling of the -resources allocated for each client. - -Other NFSv4.1 features, Parallel NFS operations in particular, -are still under development out of tree. -See http://wiki.linux-nfs.org/wiki/index.php/PNFS_prototype_design -for more information. - -The current implementation is intended for developers only: while it -does support ordinary file operations on clients we have tested against -(including the linux client), it is incomplete in ways which may limit -features unexpectedly, cause known bugs in rare cases, or cause -interoperability problems with future clients. Known issues: - - - gss support is questionable: currently mounts with kerberos - from a linux client are possible, but we aren't really - conformant with the spec (for example, we don't use kerberos - on the backchannel correctly). - - no trunking support: no clients currently take advantage of - trunking, but this is a mandatory feature, and its use is - recommended to clients in a number of places. (E.g. to ensure - timely renewal in case an existing connection's retry timeouts - have gotten too long; see section 8.3 of the draft.) - Therefore, lack of this feature may cause future clients to - fail. 
- - Incomplete backchannel support: incomplete backchannel gss - support and no support for BACKCHANNEL_CTL mean that - callbacks (hence delegations and layouts) may not be - available and clients confused by the incomplete - implementation may fail. - - Server reboot recovery is unsupported; if the server reboots, - clients may fail. - - We do not support SSV, which provides security for shared - client-server state (thus preventing unauthorized tampering - with locks and opens, for example). It is mandatory for - servers to support this, though no clients use it yet. - - Mandatory operations which we do not support, such as - DESTROY_CLIENTID, FREE_STATEID, SECINFO_NO_NAME, and - TEST_STATEID, are not currently used by clients, but will be - (and the spec recommends their uses in common cases), and - clients should not be expected to know how to recover from the - case where they are not supported. This will eventually cause - interoperability failures. - -In addition, some limitations are inherited from the current NFSv4 -implementation: - - - Incomplete delegation enforcement: if a file is renamed or - unlinked, a client holding a delegation may continue to - indefinitely allow opens of the file under the old name. - -The table below, taken from the NFSv4.1 document, lists -the operations that are mandatory to implement (REQ), optional -(OPT), and NFSv4.0 operations that are required not to implement (MNI) -in minor version 1. The first column indicates the operations that -are not supported yet by the linux server implementation. - -The OPTIONAL features identified and their abbreviations are as follows: - pNFS Parallel NFS - FDELG File Delegations - DDELG Directory Delegations - -The following abbreviations indicate the linux server implementation status. - I Implemented NFSv4.1 operations. - NS Not Supported. - NS* unimplemented optional feature. - P pNFS features implemented out of tree. - PNS pNFS features that are not supported yet (out of tree). 
- -Operations - - +----------------------+------------+--------------+----------------+ - | Operation | REQ, REC, | Feature | Definition | - | | OPT, or | (REQ, REC, | | - | | MNI | or OPT) | | - +----------------------+------------+--------------+----------------+ - | ACCESS | REQ | | Section 18.1 | -NS | BACKCHANNEL_CTL | REQ | | Section 18.33 | -NS | BIND_CONN_TO_SESSION | REQ | | Section 18.34 | - | CLOSE | REQ | | Section 18.2 | - | COMMIT | REQ | | Section 18.3 | - | CREATE | REQ | | Section 18.4 | -I | CREATE_SESSION | REQ | | Section 18.36 | -NS*| DELEGPURGE | OPT | FDELG (REQ) | Section 18.5 | - | DELEGRETURN | OPT | FDELG, | Section 18.6 | - | | | DDELG, pNFS | | - | | | (REQ) | | -NS | DESTROY_CLIENTID | REQ | | Section 18.50 | -I | DESTROY_SESSION | REQ | | Section 18.37 | -I | EXCHANGE_ID | REQ | | Section 18.35 | -NS | FREE_STATEID | REQ | | Section 18.38 | - | GETATTR | REQ | | Section 18.7 | -P | GETDEVICEINFO | OPT | pNFS (REQ) | Section 18.40 | -P | GETDEVICELIST | OPT | pNFS (OPT) | Section 18.41 | - | GETFH | REQ | | Section 18.8 | -NS*| GET_DIR_DELEGATION | OPT | DDELG (REQ) | Section 18.39 | -P | LAYOUTCOMMIT | OPT | pNFS (REQ) | Section 18.42 | -P | LAYOUTGET | OPT | pNFS (REQ) | Section 18.43 | -P | LAYOUTRETURN | OPT | pNFS (REQ) | Section 18.44 | - | LINK | OPT | | Section 18.9 | - | LOCK | REQ | | Section 18.10 | - | LOCKT | REQ | | Section 18.11 | - | LOCKU | REQ | | Section 18.12 | - | LOOKUP | REQ | | Section 18.13 | - | LOOKUPP | REQ | | Section 18.14 | - | NVERIFY | REQ | | Section 18.15 | - | OPEN | REQ | | Section 18.16 | -NS*| OPENATTR | OPT | | Section 18.17 | - | OPEN_CONFIRM | MNI | | N/A | - | OPEN_DOWNGRADE | REQ | | Section 18.18 | - | PUTFH | REQ | | Section 18.19 | - | PUTPUBFH | REQ | | Section 18.20 | - | PUTROOTFH | REQ | | Section 18.21 | - | READ | REQ | | Section 18.22 | - | READDIR | REQ | | Section 18.23 | - | READLINK | OPT | | Section 18.24 | -NS | RECLAIM_COMPLETE | REQ | | Section 18.51 | - | RELEASE_LOCKOWNER | MNI | | N/A | - | REMOVE | REQ | | Section 18.25 | - | RENAME | REQ | | Section 18.26 | - | RENEW | MNI | | N/A | - | RESTOREFH | REQ | | Section 18.27 | - | SAVEFH | REQ | | Section 18.28 | - | SECINFO | REQ | | Section 18.29 | -NS | SECINFO_NO_NAME | REC | pNFS files | Section 18.45, | - | | | layout (REQ) | Section 13.12 | -I | SEQUENCE | REQ | | Section 18.46 | - | SETATTR | REQ | | Section 18.30 | - | SETCLIENTID | MNI | | N/A | - | SETCLIENTID_CONFIRM | MNI | | N/A | -NS | SET_SSV | REQ | | Section 18.47 | -NS | TEST_STATEID | REQ | | Section 18.48 | - | VERIFY | REQ | | Section 18.31 | -NS*| WANT_DELEGATION | OPT | FDELG (OPT) | Section 18.49 | - | WRITE | REQ | | Section 18.32 | - -Callback Operations - - +-------------------------+-----------+-------------+---------------+ - | Operation | REQ, REC, | Feature | Definition | - | | OPT, or | (REQ, REC, | | - | | MNI | or OPT) | | - +-------------------------+-----------+-------------+---------------+ - | CB_GETATTR | OPT | FDELG (REQ) | Section 20.1 | -P | CB_LAYOUTRECALL | OPT | pNFS (REQ) | Section 20.3 | -NS*| CB_NOTIFY | OPT | DDELG (REQ) | Section 20.4 | -P | CB_NOTIFY_DEVICEID | OPT | pNFS (OPT) | Section 20.12 | -NS*| CB_NOTIFY_LOCK | OPT | | Section 20.11 | -NS*| CB_PUSH_DELEG | OPT | FDELG (OPT) | Section 20.5 | - | CB_RECALL | OPT | FDELG, | Section 20.2 | - | | | DDELG, pNFS | | - | | | (REQ) | | -NS*| CB_RECALL_ANY | OPT | FDELG, | Section 20.6 | - | | | DDELG, pNFS | | - | | | (REQ) | | -NS | CB_RECALL_SLOT | REQ | | Section 20.8 | -NS*| 
CB_RECALLABLE_OBJ_AVAIL | OPT | DDELG, pNFS | Section 20.7 | - | | | (REQ) | | -I | CB_SEQUENCE | OPT | FDELG, | Section 20.9 | - | | | DDELG, pNFS | | - | | | (REQ) | | -NS*| CB_WANTS_CANCELLED | OPT | FDELG, | Section 20.10 | - | | | DDELG, pNFS | | - | | | (REQ) | | - +-------------------------+-----------+-------------+---------------+ - -Implementation notes: - -DELEGPURGE: -* mandatory only for servers that support CLAIM_DELEGATE_PREV and/or - CLAIM_DELEG_PREV_FH (which allows clients to keep delegations that - persist across client reboots). Thus we need not implement this for - now. - -EXCHANGE_ID: -* only SP4_NONE state protection supported -* implementation ids are ignored - -CREATE_SESSION: -* backchannel attributes are ignored -* backchannel security parameters are ignored - -SEQUENCE: -* no support for dynamic slot table renegotiation (optional) - -nfsv4.1 COMPOUND rules: -The following cases aren't supported yet: -* Enforcing of NFS4ERR_NOT_ONLY_OP for: BIND_CONN_TO_SESSION, CREATE_SESSION, - DESTROY_CLIENTID, DESTROY_SESSION, EXCHANGE_ID. -* DESTROY_SESSION MUST be the final operation in the COMPOUND request. - -Nonstandard compound limitations: -* No support for a sessions fore channel RPC compound that requires both a - ca_maxrequestsize request and a ca_maxresponsesize reply, so we may - fail to live up to the promise we made in CREATE_SESSION fore channel - negotiation. -* No more than one IO operation (read, write, readdir) allowed per - compound. diff --git a/Documentation/filesystems/nfsroot.txt b/Documentation/filesystems/nfsroot.txt deleted file mode 100644 index 3ba0b945aaf8..000000000000 --- a/Documentation/filesystems/nfsroot.txt +++ /dev/null @@ -1,270 +0,0 @@ -Mounting the root filesystem via NFS (nfsroot) -=============================================== - -Written 1996 by Gero Kuhlmann -Updated 1997 by Martin Mares -Updated 2006 by Nico Schottelius -Updated 2006 by Horms - - - -In order to use a diskless system, such as an X-terminal or printer server -for example, it is necessary for the root filesystem to be present on a -non-disk device. This may be an initramfs (see Documentation/filesystems/ -ramfs-rootfs-initramfs.txt), a ramdisk (see Documentation/initrd.txt) or a -filesystem mounted via NFS. The following text describes on how to use NFS -for the root filesystem. For the rest of this text 'client' means the -diskless system, and 'server' means the NFS server. - - - - -1.) Enabling nfsroot capabilities - ----------------------------- - -In order to use nfsroot, NFS client support needs to be selected as -built-in during configuration. Once this has been selected, the nfsroot -option will become available, which should also be selected. - -In the networking options, kernel level autoconfiguration can be selected, -along with the types of autoconfiguration to support. Selecting all of -DHCP, BOOTP and RARP is safe. - - - - -2.) Kernel command line - ------------------- - -When the kernel has been loaded by a boot loader (see below) it needs to be -told what root fs device to use. And in the case of nfsroot, where to find -both the server and the name of the directory on the server to mount as root. -This can be established using the following kernel command line parameters: - - -root=/dev/nfs - - This is necessary to enable the pseudo-NFS-device. Note that it's not a - real device but just a synonym to tell the kernel to use NFS instead of - a real device. 
- - -nfsroot=[:][,] - - If the `nfsroot' parameter is NOT given on the command line, - the default "/tftpboot/%s" will be used. - - Specifies the IP address of the NFS server. - The default address is determined by the `ip' parameter - (see below). This parameter allows the use of different - servers for IP autoconfiguration and NFS. - - Name of the directory on the server to mount as root. - If there is a "%s" token in the string, it will be - replaced by the ASCII-representation of the client's - IP address. - - Standard NFS options. All options are separated by commas. - The following defaults are used: - port = as given by server portmap daemon - rsize = 4096 - wsize = 4096 - timeo = 7 - retrans = 3 - acregmin = 3 - acregmax = 60 - acdirmin = 30 - acdirmax = 60 - flags = hard, nointr, noposix, cto, ac - - -ip=:::::: - - This parameter tells the kernel how to configure IP addresses of devices - and also how to set up the IP routing table. It was originally called - `nfsaddrs', but now the boot-time IP configuration works independently of - NFS, so it was renamed to `ip' and the old name remained as an alias for - compatibility reasons. - - If this parameter is missing from the kernel command line, all fields are - assumed to be empty, and the defaults mentioned below apply. In general - this means that the kernel tries to configure everything using - autoconfiguration. - - The parameter can appear alone as the value to the `ip' - parameter (without all the ':' characters before). If the value is - "ip=off" or "ip=none", no autoconfiguration will take place, otherwise - autoconfiguration will take place. The most common way to use this - is "ip=dhcp". - - IP address of the client. - - Default: Determined using autoconfiguration. - - IP address of the NFS server. If RARP is used to determine - the client address and this parameter is NOT empty only - replies from the specified server are accepted. - - Only required for NFS root. That is autoconfiguration - will not be triggered if it is missing and NFS root is not - in operation. - - Default: Determined using autoconfiguration. - The address of the autoconfiguration server is used. - - IP address of a gateway if the server is on a different subnet. - - Default: Determined using autoconfiguration. - - Netmask for local network interface. If unspecified - the netmask is derived from the client IP address assuming - classful addressing. - - Default: Determined using autoconfiguration. - - Name of the client. May be supplied by autoconfiguration, - but its absence will not trigger autoconfiguration. - - Default: Client IP address is used in ASCII notation. - - Name of network device to use. - - Default: If the host only has one device, it is used. - Otherwise the device is determined using - autoconfiguration. This is done by sending - autoconfiguration requests out of all devices, - and using the device that received the first reply. - - Method to use for autoconfiguration. In the case of options - which specify multiple autoconfiguration protocols, - requests are sent using all protocols, and the first one - to reply is used. - - Only autoconfiguration protocols that have been compiled - into the kernel will be used, regardless of the value of - this option. 
- - off or none: don't use autoconfiguration - (do static IP assignment instead) - on or any: use any protocol available in the kernel - (default) - dhcp: use DHCP - bootp: use BOOTP - rarp: use RARP - both: use both BOOTP and RARP but not DHCP - (old option kept for backwards compatibility) - - Default: any - - - - -3.) Boot Loader - ---------- - -To get the kernel into memory different approaches can be used. -They depend on various facilities being available: - - -3.1) Booting from a floppy using syslinux - - When building kernels, an easy way to create a boot floppy that uses - syslinux is to use the zdisk or bzdisk make targets which use zimage - and bzimage images respectively. Both targets accept the - FDARGS parameter which can be used to set the kernel command line. - - e.g. - make bzdisk FDARGS="root=/dev/nfs" - - Note that the user running this command will need to have - access to the floppy drive device, /dev/fd0 - - For more information on syslinux, including how to create bootdisks - for prebuilt kernels, see http://syslinux.zytor.com/ - - N.B: Previously it was possible to write a kernel directly to - a floppy using dd, configure the boot device using rdev, and - boot using the resulting floppy. Linux no longer supports this - method of booting. - -3.2) Booting from a cdrom using isolinux - - When building kernels, an easy way to create a bootable cdrom that - uses isolinux is to use the isoimage target which uses a bzimage - image. Like zdisk and bzdisk, this target accepts the FDARGS - parameter which can be used to set the kernel command line. - - e.g. - make isoimage FDARGS="root=/dev/nfs" - - The resulting iso image will be arch//boot/image.iso - This can be written to a cdrom using a variety of tools including - cdrecord. - - e.g. - cdrecord dev=ATAPI:1,0,0 arch/i386/boot/image.iso - - For more information on isolinux, including how to create bootdisks - for prebuilt kernels, see http://syslinux.zytor.com/ - -3.2) Using LILO - When using LILO all the necessary command line parameters may be - specified using the 'append=' directive in the LILO configuration - file. - - However, to use the 'root=' directive you also need to create - a dummy root device, which may be removed after LILO is run. - - mknod /dev/boot255 c 0 255 - - For information on configuring LILO, please refer to its documentation. - -3.3) Using GRUB - When using GRUB, kernel parameter are simply appended after the kernel - specification: kernel - -3.4) Using loadlin - loadlin may be used to boot Linux from a DOS command prompt without - requiring a local hard disk to mount as root. This has not been - thoroughly tested by the authors of this document, but in general - it should be possible configure the kernel command line similarly - to the configuration of LILO. - - Please refer to the loadlin documentation for further information. - -3.5) Using a boot ROM - This is probably the most elegant way of booting a diskless client. - With a boot ROM the kernel is loaded using the TFTP protocol. The - authors of this document are not aware of any no commercial boot - ROMs that support booting Linux over the network. However, there - are two free implementations of a boot ROM, netboot-nfs and - etherboot, both of which are available on sunsite.unc.edu, and both - of which contain everything you need to boot a diskless Linux client. - -3.6) Using pxelinux - Pxelinux may be used to boot linux using the PXE boot loader - which is present on many modern network cards. 
- - When using pxelinux, the kernel image is specified using - "kernel ". The nfsroot parameters - are passed to the kernel by adding them to the "append" line. - It is common to use serial console in conjunction with pxeliunx, - see Documentation/serial-console.txt for more information. - - For more information on isolinux, including how to create bootdisks - for prebuilt kernels, see http://syslinux.zytor.com/ - - - - -4.) Credits - ------- - - The nfsroot code in the kernel and the RARP support have been written - by Gero Kuhlmann . - - The rest of the IP layer autoconfiguration code has been written - by Martin Mares . - - In order to write the initial version of nfsroot I would like to thank - Jens-Uwe Mager for his help. diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting index 92b888d540a6..a7e9746ee7ea 100644 --- a/Documentation/filesystems/porting +++ b/Documentation/filesystems/porting @@ -140,7 +140,7 @@ Callers of notify_change() need ->i_mutex now. New super_block field "struct export_operations *s_export_op" for explicit support for exporting, e.g. via NFS. The structure is fully documented at its declaration in include/linux/fs.h, and in -Documentation/filesystems/Exporting. +Documentation/filesystems/nfs/Exporting. Briefly it allows for the definition of decode_fh and encode_fh operations to encode and decode filehandles, and allows the filesystem to use diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 9107b387e91f..dab0f04b4264 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -1017,7 +1017,7 @@ and is between 256 and 4096 characters. It is defined in the file No delay ip= [IP_PNP] - See Documentation/filesystems/nfsroot.txt. + See Documentation/filesystems/nfs/nfsroot.txt. ip2= [HW] Set IO/IRQ pairs for up to 4 IntelliPort boards See comment before ip2_setup() in @@ -1538,10 +1538,10 @@ and is between 256 and 4096 characters. It is defined in the file going to be removed in 2.6.29. nfsaddrs= [NFS] - See Documentation/filesystems/nfsroot.txt. + See Documentation/filesystems/nfs/nfsroot.txt. nfsroot= [NFS] nfs root filesystem for disk-less boxes. - See Documentation/filesystems/nfsroot.txt. + See Documentation/filesystems/nfs/nfsroot.txt. nfs.callback_tcpport= [NFS] set the TCP port on which the NFSv4 callback diff --git a/fs/cifs/export.c b/fs/cifs/export.c index 75949d6a5f1b..6177f7cca16a 100644 --- a/fs/cifs/export.c +++ b/fs/cifs/export.c @@ -24,7 +24,7 @@ */ /* - * See Documentation/filesystems/Exporting + * See Documentation/filesystems/nfs/Exporting * and examples in fs/exportfs * * Since cifs is a network file system, an "fsid" must be included for diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c index 197c7db583c7..e9e175949a63 100644 --- a/fs/exportfs/expfs.c +++ b/fs/exportfs/expfs.c @@ -6,7 +6,7 @@ * and for mapping back from file handles to dentries. * * For details on why we do all the strange and hairy things in here - * take a look at Documentation/filesystems/Exporting. + * take a look at Documentation/filesystems/nfs/Exporting. */ #include #include diff --git a/fs/isofs/export.c b/fs/isofs/export.c index e81a30593ba9..ed752cb38474 100644 --- a/fs/isofs/export.c +++ b/fs/isofs/export.c @@ -9,7 +9,7 @@ * * The following files are helpful: * - * Documentation/filesystems/Exporting + * Documentation/filesystems/nfs/Exporting * fs/exportfs/expfs.c. 
*/ diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig index 2a77bc25d5af..59e5673b4597 100644 --- a/fs/nfs/Kconfig +++ b/fs/nfs/Kconfig @@ -90,7 +90,7 @@ config ROOT_NFS If you want your system to mount its root file system via NFS, choose Y here. This is common practice for managing systems without local permanent storage. For details, read - . + . Most people say N here. diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index 27e772cefb6a..dc12f416a49f 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h @@ -97,7 +97,7 @@ struct fid { * @get_name: find the name for a given inode in a given directory * @get_parent: find the parent of a given directory * - * See Documentation/filesystems/Exporting for details on how to use + * See Documentation/filesystems/nfs/Exporting for details on how to use * this interface correctly. * * encode_fh: diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 70491d9035eb..0c94a1ac2946 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -166,7 +166,7 @@ config IP_PNP_DHCP If unsure, say Y. Note that if you want to use DHCP, a DHCP server must be operating on your network. Read - for details. + for details. config IP_PNP_BOOTP bool "IP: BOOTP support" @@ -181,7 +181,7 @@ config IP_PNP_BOOTP does BOOTP itself, providing all necessary information on the kernel command line, you can say N here. If unsure, say Y. Note that if you want to use BOOTP, a BOOTP server must be operating on your network. - Read for details. + Read for details. config IP_PNP_RARP bool "IP: RARP support" @@ -194,7 +194,7 @@ config IP_PNP_RARP older protocol which is being obsoleted by BOOTP and DHCP), say Y here. Note that if you want to use RARP, a RARP server must be operating on your network. Read - for details. + for details. # not yet ready.. # bool ' IP: ARP support' CONFIG_IP_PNP_ARP diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index f8d04c256454..7dcbf4706099 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -1447,7 +1447,7 @@ late_initcall(ip_auto_config); /* * Decode any IP configuration options in the "ip=" or "nfsaddrs=" kernel - * command line parameter. See Documentation/filesystems/nfsroot.txt. + * command line parameter. See Documentation/filesystems/nfs/nfsroot.txt. */ static int __init ic_proto_name(char *name) { -- cgit v1.2.3 From c017b4be3e84176cab10eca5e6c4faeb8cfc6f3e Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Wed, 28 Oct 2009 13:33:09 +0000 Subject: kmemleak: Simplify the kmemleak_scan_area() function prototype This function was taking non-necessary arguments which can be determined by kmemleak. The patch also modifies the calling sites. 
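For illustration, a call site that used to describe a scan area as a base pointer plus a manually computed offset/length pair now simply passes the address and size of the area, and kmemleak locates the enclosing object on its own. A minimal before/after sketch, mirroring the slab.c change below (the structure and field names here are placeholders only):

	/* before: base object plus manually computed offset and length */
	kmemleak_scan_area(obj, offsetof(struct foo, list),
			   sizeof(struct list_head), GFP_KERNEL);

	/* after: just the address and size of the area to be scanned */
	kmemleak_scan_area(&obj->list, sizeof(struct list_head), GFP_KERNEL);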
Signed-off-by: Catalin Marinas Cc: Pekka Enberg Cc: Christoph Lameter Cc: Rusty Russell --- include/linux/kmemleak.h | 6 ++---- kernel/module.c | 7 ++----- mm/kmemleak.c | 49 +++++++++++++++++++++--------------------------- mm/slab.c | 4 ++-- 4 files changed, 27 insertions(+), 39 deletions(-) (limited to 'include') diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h index 3c7497d46ee9..99d9a6766f7e 100644 --- a/include/linux/kmemleak.h +++ b/include/linux/kmemleak.h @@ -32,8 +32,7 @@ extern void kmemleak_padding(const void *ptr, unsigned long offset, size_t size) __ref; extern void kmemleak_not_leak(const void *ptr) __ref; extern void kmemleak_ignore(const void *ptr) __ref; -extern void kmemleak_scan_area(const void *ptr, unsigned long offset, - size_t length, gfp_t gfp) __ref; +extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref; extern void kmemleak_no_scan(const void *ptr) __ref; static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, @@ -84,8 +83,7 @@ static inline void kmemleak_not_leak(const void *ptr) static inline void kmemleak_ignore(const void *ptr) { } -static inline void kmemleak_scan_area(const void *ptr, unsigned long offset, - size_t length, gfp_t gfp) +static inline void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) { } static inline void kmemleak_erase(void **ptr) diff --git a/kernel/module.c b/kernel/module.c index 8b7d8805819d..1eb952097077 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -2043,9 +2043,7 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr, unsigned int i; /* only scan the sections containing data */ - kmemleak_scan_area(mod->module_core, (unsigned long)mod - - (unsigned long)mod->module_core, - sizeof(struct module), GFP_KERNEL); + kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL); for (i = 1; i < hdr->e_shnum; i++) { if (!(sechdrs[i].sh_flags & SHF_ALLOC)) @@ -2054,8 +2052,7 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr, && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0) continue; - kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr - - (unsigned long)mod->module_core, + kmemleak_scan_area((void *)sechdrs[i].sh_addr, sechdrs[i].sh_size, GFP_KERNEL); } } diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 8bf765c4f58d..96106358e042 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -119,8 +119,8 @@ /* scanning area inside a memory block */ struct kmemleak_scan_area { struct hlist_node node; - unsigned long offset; - size_t length; + unsigned long start; + size_t size; }; #define KMEMLEAK_GREY 0 @@ -241,8 +241,6 @@ struct early_log { const void *ptr; /* allocated/freed memory block */ size_t size; /* memory block size */ int min_count; /* minimum reference count */ - unsigned long offset; /* scan area offset */ - size_t length; /* scan area length */ unsigned long trace[MAX_TRACE]; /* stack trace */ unsigned int trace_len; /* stack trace length */ }; @@ -720,14 +718,13 @@ static void make_black_object(unsigned long ptr) * Add a scanning area to the object. If at least one such area is added, * kmemleak will only scan these ranges rather than the whole memory block. 
*/ -static void add_scan_area(unsigned long ptr, unsigned long offset, - size_t length, gfp_t gfp) +static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp) { unsigned long flags; struct kmemleak_object *object; struct kmemleak_scan_area *area; - object = find_and_get_object(ptr, 0); + object = find_and_get_object(ptr, 1); if (!object) { kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n", ptr); @@ -741,7 +738,7 @@ static void add_scan_area(unsigned long ptr, unsigned long offset, } spin_lock_irqsave(&object->lock, flags); - if (offset + length > object->size) { + if (ptr + size > object->pointer + object->size) { kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr); dump_object_info(object); kmem_cache_free(scan_area_cache, area); @@ -749,8 +746,8 @@ static void add_scan_area(unsigned long ptr, unsigned long offset, } INIT_HLIST_NODE(&area->node); - area->offset = offset; - area->length = length; + area->start = ptr; + area->size = size; hlist_add_head(&area->node, &object->area_list); out_unlock: @@ -786,7 +783,7 @@ static void object_no_scan(unsigned long ptr) * processed later once kmemleak is fully initialized. */ static void __init log_early(int op_type, const void *ptr, size_t size, - int min_count, unsigned long offset, size_t length) + int min_count) { unsigned long flags; struct early_log *log; @@ -808,8 +805,6 @@ static void __init log_early(int op_type, const void *ptr, size_t size, log->ptr = ptr; log->size = size; log->min_count = min_count; - log->offset = offset; - log->length = length; if (op_type == KMEMLEAK_ALLOC) log->trace_len = __save_stack_trace(log->trace); crt_early_log++; @@ -858,7 +853,7 @@ void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count, if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) create_object((unsigned long)ptr, size, min_count, gfp); else if (atomic_read(&kmemleak_early_log)) - log_early(KMEMLEAK_ALLOC, ptr, size, min_count, 0, 0); + log_early(KMEMLEAK_ALLOC, ptr, size, min_count); } EXPORT_SYMBOL_GPL(kmemleak_alloc); @@ -873,7 +868,7 @@ void __ref kmemleak_free(const void *ptr) if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) delete_object_full((unsigned long)ptr); else if (atomic_read(&kmemleak_early_log)) - log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0); + log_early(KMEMLEAK_FREE, ptr, 0, 0); } EXPORT_SYMBOL_GPL(kmemleak_free); @@ -888,7 +883,7 @@ void __ref kmemleak_free_part(const void *ptr, size_t size) if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) delete_object_part((unsigned long)ptr, size); else if (atomic_read(&kmemleak_early_log)) - log_early(KMEMLEAK_FREE_PART, ptr, size, 0, 0, 0); + log_early(KMEMLEAK_FREE_PART, ptr, size, 0); } EXPORT_SYMBOL_GPL(kmemleak_free_part); @@ -903,7 +898,7 @@ void __ref kmemleak_not_leak(const void *ptr) if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) make_gray_object((unsigned long)ptr); else if (atomic_read(&kmemleak_early_log)) - log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0, 0, 0); + log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0); } EXPORT_SYMBOL(kmemleak_not_leak); @@ -919,22 +914,21 @@ void __ref kmemleak_ignore(const void *ptr) if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) make_black_object((unsigned long)ptr); else if (atomic_read(&kmemleak_early_log)) - log_early(KMEMLEAK_IGNORE, ptr, 0, 0, 0, 0); + log_early(KMEMLEAK_IGNORE, ptr, 0, 0); } EXPORT_SYMBOL(kmemleak_ignore); /* * Limit the range to be scanned in an allocated memory block. 
*/ -void __ref kmemleak_scan_area(const void *ptr, unsigned long offset, - size_t length, gfp_t gfp) +void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) { pr_debug("%s(0x%p)\n", __func__, ptr); if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) - add_scan_area((unsigned long)ptr, offset, length, gfp); + add_scan_area((unsigned long)ptr, size, gfp); else if (atomic_read(&kmemleak_early_log)) - log_early(KMEMLEAK_SCAN_AREA, ptr, 0, 0, offset, length); + log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0); } EXPORT_SYMBOL(kmemleak_scan_area); @@ -948,7 +942,7 @@ void __ref kmemleak_no_scan(const void *ptr) if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) object_no_scan((unsigned long)ptr); else if (atomic_read(&kmemleak_early_log)) - log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0, 0, 0); + log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0); } EXPORT_SYMBOL(kmemleak_no_scan); @@ -1075,9 +1069,9 @@ static void scan_object(struct kmemleak_object *object) } } else hlist_for_each_entry(area, elem, &object->area_list, node) - scan_block((void *)(object->pointer + area->offset), - (void *)(object->pointer + area->offset - + area->length), object, 0); + scan_block((void *)area->start, + (void *)(area->start + area->size), + object, 0); out: spin_unlock_irqrestore(&object->lock, flags); } @@ -1642,8 +1636,7 @@ void __init kmemleak_init(void) kmemleak_ignore(log->ptr); break; case KMEMLEAK_SCAN_AREA: - kmemleak_scan_area(log->ptr, log->offset, log->length, - GFP_KERNEL); + kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL); break; case KMEMLEAK_NO_SCAN: kmemleak_no_scan(log->ptr); diff --git a/mm/slab.c b/mm/slab.c index 646db3085193..d2713a944ebd 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2584,8 +2584,8 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp, * kmemleak does not treat the ->s_mem pointer as a reference * to the object. Otherwise we will not report the leak. */ - kmemleak_scan_area(slabp, offsetof(struct slab, list), - sizeof(struct list_head), local_flags); + kmemleak_scan_area(&slabp->list, sizeof(struct list_head), + local_flags); if (!slabp) return NULL; } else { -- cgit v1.2.3 From 64ef291f46d795917f32a0f5975e2b76f6fe206a Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 29 Oct 2009 22:34:12 +0900 Subject: percpu: make alloc_percpu() handle array types alloc_percpu() couldn't handle array types like "int [100]" due to the way return type was casted. Fix it by using typeof() instead. Signed-off-by: Tejun Heo Reviewed-by: Frederic Weisbecker Reviewed-by: Christoph Lameter --- include/linux/percpu.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 3d9ba92b104f..519d6876590f 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -164,8 +164,8 @@ static inline void *pcpu_lpage_remapped(void *kaddr) #endif /* CONFIG_SMP */ -#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type), \ - __alignof__(type)) +#define alloc_percpu(type) \ + (typeof(type) *)__alloc_percpu(sizeof(type), __alignof__(type)) /* * Optional methods for optimized non-lvalue per-cpu variable access. -- cgit v1.2.3 From 0f5e4816dbf38ce9488e611ca2296925c1e90d5e Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 29 Oct 2009 22:34:12 +0900 Subject: percpu: remove some sparse warnings Make the following changes to remove some sparse warnings. * Make DEFINE_PER_CPU_SECTION() declare __pcpu_unique_* before defining it. 
* Annotate pcpu_extend_area_map() that it is entered with pcpu_lock held, releases it and then reacquires it. * Make percpu related macros use unique nested variable names. * While at it, add pcpu prefix to __size_call[_return]() macros as to-be-implemented sparse annotations will add percpu specific stuff to these macros. Signed-off-by: Tejun Heo Reviewed-by: Christoph Lameter Cc: Rusty Russell --- arch/x86/include/asm/percpu.h | 26 +++++++++++------------ include/linux/percpu-defs.h | 1 + include/linux/percpu.h | 48 +++++++++++++++++++++---------------------- mm/percpu.c | 1 + 4 files changed, 39 insertions(+), 37 deletions(-) (limited to 'include') diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 8b5ec19bdef4..0c44196b78ac 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -74,31 +74,31 @@ extern void __bad_percpu_size(void); #define percpu_to_op(op, var, val) \ do { \ - typedef typeof(var) T__; \ + typedef typeof(var) pto_T__; \ if (0) { \ - T__ tmp__; \ - tmp__ = (val); \ + pto_T__ pto_tmp__; \ + pto_tmp__ = (val); \ } \ switch (sizeof(var)) { \ case 1: \ asm(op "b %1,"__percpu_arg(0) \ : "+m" (var) \ - : "qi" ((T__)(val))); \ + : "qi" ((pto_T__)(val))); \ break; \ case 2: \ asm(op "w %1,"__percpu_arg(0) \ : "+m" (var) \ - : "ri" ((T__)(val))); \ + : "ri" ((pto_T__)(val))); \ break; \ case 4: \ asm(op "l %1,"__percpu_arg(0) \ : "+m" (var) \ - : "ri" ((T__)(val))); \ + : "ri" ((pto_T__)(val))); \ break; \ case 8: \ asm(op "q %1,"__percpu_arg(0) \ : "+m" (var) \ - : "re" ((T__)(val))); \ + : "re" ((pto_T__)(val))); \ break; \ default: __bad_percpu_size(); \ } \ @@ -106,31 +106,31 @@ do { \ #define percpu_from_op(op, var, constraint) \ ({ \ - typeof(var) ret__; \ + typeof(var) pfo_ret__; \ switch (sizeof(var)) { \ case 1: \ asm(op "b "__percpu_arg(1)",%0" \ - : "=q" (ret__) \ + : "=q" (pfo_ret__) \ : constraint); \ break; \ case 2: \ asm(op "w "__percpu_arg(1)",%0" \ - : "=r" (ret__) \ + : "=r" (pfo_ret__) \ : constraint); \ break; \ case 4: \ asm(op "l "__percpu_arg(1)",%0" \ - : "=r" (ret__) \ + : "=r" (pfo_ret__) \ : constraint); \ break; \ case 8: \ asm(op "q "__percpu_arg(1)",%0" \ - : "=r" (ret__) \ + : "=r" (pfo_ret__) \ : constraint); \ break; \ default: __bad_percpu_size(); \ } \ - ret__; \ + pfo_ret__; \ }) /* diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index 9bd03193ecd4..5a5d6ce4bd55 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h @@ -60,6 +60,7 @@ #define DEFINE_PER_CPU_SECTION(type, name, sec) \ __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \ + extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \ __typeof__(type) per_cpu__##name diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 519d6876590f..522f421ec213 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -226,20 +226,20 @@ do { \ extern void __bad_size_call_parameter(void); -#define __size_call_return(stem, variable) \ -({ typeof(variable) ret__; \ +#define __pcpu_size_call_return(stem, variable) \ +({ typeof(variable) pscr_ret__; \ switch(sizeof(variable)) { \ - case 1: ret__ = stem##1(variable);break; \ - case 2: ret__ = stem##2(variable);break; \ - case 4: ret__ = stem##4(variable);break; \ - case 8: ret__ = stem##8(variable);break; \ + case 1: pscr_ret__ = stem##1(variable);break; \ + case 2: pscr_ret__ = stem##2(variable);break; \ + case 4: pscr_ret__ = 
stem##4(variable);break; \ + case 8: pscr_ret__ = stem##8(variable);break; \ default: \ __bad_size_call_parameter();break; \ } \ - ret__; \ + pscr_ret__; \ }) -#define __size_call(stem, variable, ...) \ +#define __pcpu_size_call(stem, variable, ...) \ do { \ switch(sizeof(variable)) { \ case 1: stem##1(variable, __VA_ARGS__);break; \ @@ -299,7 +299,7 @@ do { \ # ifndef this_cpu_read_8 # define this_cpu_read_8(pcp) _this_cpu_generic_read(pcp) # endif -# define this_cpu_read(pcp) __size_call_return(this_cpu_read_, (pcp)) +# define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, (pcp)) #endif #define _this_cpu_generic_to_op(pcp, val, op) \ @@ -322,7 +322,7 @@ do { \ # ifndef this_cpu_write_8 # define this_cpu_write_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), =) # endif -# define this_cpu_write(pcp, val) __size_call(this_cpu_write_, (pcp), (val)) +# define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, (pcp), (val)) #endif #ifndef this_cpu_add @@ -338,7 +338,7 @@ do { \ # ifndef this_cpu_add_8 # define this_cpu_add_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=) # endif -# define this_cpu_add(pcp, val) __size_call(this_cpu_add_, (pcp), (val)) +# define this_cpu_add(pcp, val) __pcpu_size_call(this_cpu_add_, (pcp), (val)) #endif #ifndef this_cpu_sub @@ -366,7 +366,7 @@ do { \ # ifndef this_cpu_and_8 # define this_cpu_and_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=) # endif -# define this_cpu_and(pcp, val) __size_call(this_cpu_and_, (pcp), (val)) +# define this_cpu_and(pcp, val) __pcpu_size_call(this_cpu_and_, (pcp), (val)) #endif #ifndef this_cpu_or @@ -382,7 +382,7 @@ do { \ # ifndef this_cpu_or_8 # define this_cpu_or_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=) # endif -# define this_cpu_or(pcp, val) __size_call(this_cpu_or_, (pcp), (val)) +# define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val)) #endif #ifndef this_cpu_xor @@ -398,7 +398,7 @@ do { \ # ifndef this_cpu_xor_8 # define this_cpu_xor_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=) # endif -# define this_cpu_xor(pcp, val) __size_call(this_cpu_or_, (pcp), (val)) +# define this_cpu_xor(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val)) #endif /* @@ -428,7 +428,7 @@ do { \ # ifndef __this_cpu_read_8 # define __this_cpu_read_8(pcp) (*__this_cpu_ptr(&(pcp))) # endif -# define __this_cpu_read(pcp) __size_call_return(__this_cpu_read_, (pcp)) +# define __this_cpu_read(pcp) __pcpu_size_call_return(__this_cpu_read_, (pcp)) #endif #define __this_cpu_generic_to_op(pcp, val, op) \ @@ -449,7 +449,7 @@ do { \ # ifndef __this_cpu_write_8 # define __this_cpu_write_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), =) # endif -# define __this_cpu_write(pcp, val) __size_call(__this_cpu_write_, (pcp), (val)) +# define __this_cpu_write(pcp, val) __pcpu_size_call(__this_cpu_write_, (pcp), (val)) #endif #ifndef __this_cpu_add @@ -465,7 +465,7 @@ do { \ # ifndef __this_cpu_add_8 # define __this_cpu_add_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=) # endif -# define __this_cpu_add(pcp, val) __size_call(__this_cpu_add_, (pcp), (val)) +# define __this_cpu_add(pcp, val) __pcpu_size_call(__this_cpu_add_, (pcp), (val)) #endif #ifndef __this_cpu_sub @@ -493,7 +493,7 @@ do { \ # ifndef __this_cpu_and_8 # define __this_cpu_and_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=) # endif -# define __this_cpu_and(pcp, val) __size_call(__this_cpu_and_, (pcp), (val)) +# define __this_cpu_and(pcp, val) __pcpu_size_call(__this_cpu_and_, (pcp), (val)) #endif #ifndef __this_cpu_or 
@@ -509,7 +509,7 @@ do { \ # ifndef __this_cpu_or_8 # define __this_cpu_or_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=) # endif -# define __this_cpu_or(pcp, val) __size_call(__this_cpu_or_, (pcp), (val)) +# define __this_cpu_or(pcp, val) __pcpu_size_call(__this_cpu_or_, (pcp), (val)) #endif #ifndef __this_cpu_xor @@ -525,7 +525,7 @@ do { \ # ifndef __this_cpu_xor_8 # define __this_cpu_xor_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=) # endif -# define __this_cpu_xor(pcp, val) __size_call(__this_cpu_xor_, (pcp), (val)) +# define __this_cpu_xor(pcp, val) __pcpu_size_call(__this_cpu_xor_, (pcp), (val)) #endif /* @@ -556,7 +556,7 @@ do { \ # ifndef irqsafe_cpu_add_8 # define irqsafe_cpu_add_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=) # endif -# define irqsafe_cpu_add(pcp, val) __size_call(irqsafe_cpu_add_, (pcp), (val)) +# define irqsafe_cpu_add(pcp, val) __pcpu_size_call(irqsafe_cpu_add_, (pcp), (val)) #endif #ifndef irqsafe_cpu_sub @@ -584,7 +584,7 @@ do { \ # ifndef irqsafe_cpu_and_8 # define irqsafe_cpu_and_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=) # endif -# define irqsafe_cpu_and(pcp, val) __size_call(irqsafe_cpu_and_, (val)) +# define irqsafe_cpu_and(pcp, val) __pcpu_size_call(irqsafe_cpu_and_, (val)) #endif #ifndef irqsafe_cpu_or @@ -600,7 +600,7 @@ do { \ # ifndef irqsafe_cpu_or_8 # define irqsafe_cpu_or_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=) # endif -# define irqsafe_cpu_or(pcp, val) __size_call(irqsafe_cpu_or_, (val)) +# define irqsafe_cpu_or(pcp, val) __pcpu_size_call(irqsafe_cpu_or_, (val)) #endif #ifndef irqsafe_cpu_xor @@ -616,7 +616,7 @@ do { \ # ifndef irqsafe_cpu_xor_8 # define irqsafe_cpu_xor_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=) # endif -# define irqsafe_cpu_xor(pcp, val) __size_call(irqsafe_cpu_xor_, (val)) +# define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (val)) #endif #endif /* __LINUX_PERCPU_H */ diff --git a/mm/percpu.c b/mm/percpu.c index ec158bb5f86d..e2e80fc78601 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -365,6 +365,7 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr) * 0 if noop, 1 if successfully extended, -errno on failure. */ static int pcpu_extend_area_map(struct pcpu_chunk *chunk) + __releases(lock) __acquires(lock) { int new_alloc; int *new; -- cgit v1.2.3 From 4029a91f0c82ae16e5b10b36da51c470895deedf Mon Sep 17 00:00:00 2001 From: Chen Liqin Date: Mon, 26 Oct 2009 11:09:29 +0800 Subject: asm-generic: Fix typo in asm-generic/unistd.h. >>From 9741f7928ef35416e49f329a64e623a109de5c2d Mon Sep 17 00:00:00 2001 From: Chen Liqin Date: Mon, 26 Oct 2009 10:50:50 +0800 Subject: [PATCH] asm-generic: Fix typo in asm-generic/unistd.h. Fixed __NR_ftruncate and __NR_ftruncate64 define in asm-generic/unistd.h. 
Signed-off-by: Chen Liqin Signed-off-by: Arnd Bergmann --- include/asm-generic/unistd.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h index d76b66acea95..2869650fb083 100644 --- a/include/asm-generic/unistd.h +++ b/include/asm-generic/unistd.h @@ -802,7 +802,7 @@ __SYSCALL(__NR_fork, sys_ni_syscall) #define __NR_statfs __NR3264_statfs #define __NR_fstatfs __NR3264_fstatfs #define __NR_truncate __NR3264_truncate -#define __NR_ftruncate __NR3264_truncate +#define __NR_ftruncate __NR3264_ftruncate #define __NR_lseek __NR3264_lseek #define __NR_sendfile __NR3264_sendfile #define __NR_newfstatat __NR3264_fstatat @@ -818,7 +818,7 @@ __SYSCALL(__NR_fork, sys_ni_syscall) #define __NR_statfs64 __NR3264_statfs #define __NR_fstatfs64 __NR3264_fstatfs #define __NR_truncate64 __NR3264_truncate -#define __NR_ftruncate64 __NR3264_truncate +#define __NR_ftruncate64 __NR3264_ftruncate #define __NR_llseek __NR3264_lseek #define __NR_sendfile64 __NR3264_sendfile #define __NR_fstatat64 __NR3264_fstatat -- cgit v1.2.3 From ac1aa47b131416a6ff37eb1005a0a1d2541aad6c Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Mon, 26 Oct 2009 13:20:44 -0700 Subject: PCI: determine CLS more intelligently Till now, CLS has been determined either by arch code or as L1_CACHE_BYTES. Only x86 and ia64 set CLS explicitly and x86 doesn't always get it right. On most configurations, the chance is that firmware configures the correct value during boot. This patch makes pci_init() determine CLS by looking at what firmware has configured. It scans all devices and if all non-zero values agree, the value is used. If none is configured or there is a disagreement, pci_dfl_cache_line_size is used. arch can set the dfl value (via PCI_CACHE_LINE_BYTES or pci_dfl_cache_line_size) or override the actual one. ia64, x86 and sparc64 updated to set the default cls instead of the actual one. While at it, declare pci_cache_line_size and pci_dfl_cache_line_size in pci.h and drop private declarations from arch code. Signed-off-by: Tejun Heo Acked-by: David Miller Acked-by: Greg KH Cc: Ingo Molnar Cc: Thomas Gleixner Cc: Tony Luck Signed-off-by: Jesse Barnes --- arch/ia64/pci/pci.c | 9 +++------ arch/x86/pci/common.c | 8 +++----- drivers/pci/pci.c | 21 +++++++++++++-------- drivers/pci/quirks.c | 28 ++++++++++++++++++++++++++++ include/linux/pci.h | 2 ++ 5 files changed, 49 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c index c0fca2c1c858..d60e7195b7dd 100644 --- a/arch/ia64/pci/pci.c +++ b/arch/ia64/pci/pci.c @@ -720,9 +720,6 @@ int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size) return ret; } -/* It's defined in drivers/pci/pci.c */ -extern u8 pci_cache_line_size; - /** * set_pci_cacheline_size - determine cacheline size for PCI devices * @@ -731,7 +728,7 @@ extern u8 pci_cache_line_size; * * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info(). 
*/ -static void __init set_pci_cacheline_size(void) +static void __init set_pci_dfl_cacheline_size(void) { unsigned long levels, unique_caches; long status; @@ -751,7 +748,7 @@ static void __init set_pci_cacheline_size(void) "(status=%ld)\n", __func__, status); return; } - pci_cache_line_size = (1 << cci.pcci_line_size) / 4; + pci_dfl_cache_line_size = (1 << cci.pcci_line_size) / 4; } u64 ia64_dma_get_required_mask(struct device *dev) @@ -782,7 +779,7 @@ EXPORT_SYMBOL_GPL(dma_get_required_mask); static int __init pcibios_init(void) { - set_pci_cacheline_size(); + set_pci_dfl_cacheline_size(); return 0; } diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index 1331fcf26143..fbeec31316cf 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c @@ -410,8 +410,6 @@ struct pci_bus * __devinit pcibios_scan_root(int busnum) return bus; } -extern u8 pci_cache_line_size; - int __init pcibios_init(void) { struct cpuinfo_x86 *c = &boot_cpu_data; @@ -426,11 +424,11 @@ int __init pcibios_init(void) * and P4. It's also good for 386/486s (which actually have 16) * as quite a few PCI devices do not support smaller values. */ - pci_cache_line_size = 32 >> 2; + pci_dfl_cache_line_size = 32 >> 2; if (c->x86 >= 6 && c->x86_vendor == X86_VENDOR_AMD) - pci_cache_line_size = 64 >> 2; /* K7 & K8 */ + pci_dfl_cache_line_size = 64 >> 2; /* K7 & K8 */ else if (c->x86 > 6 && c->x86_vendor == X86_VENDOR_INTEL) - pci_cache_line_size = 128 >> 2; /* P4 */ + pci_dfl_cache_line_size = 128 >> 2; /* P4 */ pcibios_resource_survey(); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 4e4c295a049f..1f9a7a03847b 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -47,6 +47,19 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE; unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; +#ifndef PCI_CACHE_LINE_BYTES +#define PCI_CACHE_LINE_BYTES L1_CACHE_BYTES +#endif + +/* + * The default CLS is used if arch didn't set CLS explicitly and not + * all pci devices agree on the same value. Arch can override either + * the dfl or actual value as it sees fit. Don't forget this is + * measured in 32-bit words, not bytes. + */ +u8 pci_dfl_cache_line_size __initdata = PCI_CACHE_LINE_BYTES >> 2; +u8 pci_cache_line_size; + /** * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children * @bus: pointer to PCI bus structure to search @@ -1883,14 +1896,6 @@ void pci_clear_mwi(struct pci_dev *dev) #else -#ifndef PCI_CACHE_LINE_BYTES -#define PCI_CACHE_LINE_BYTES L1_CACHE_BYTES -#endif - -/* This can be overridden by arch code. */ -/* Don't forget this is measured in 32-bit words, not bytes */ -u8 pci_cache_line_size = PCI_CACHE_LINE_BYTES / 4; - /** * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed * @dev: the PCI device for which MWI is to be enabled diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 245d2cdb4765..1812ae7698de 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -2595,9 +2595,37 @@ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) static int __init pci_apply_final_quirks(void) { struct pci_dev *dev = NULL; + u8 cls = 0; + u8 tmp; + + if (pci_cache_line_size) + printk(KERN_DEBUG "PCI: CLS %u bytes\n", + pci_cache_line_size << 2); while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { pci_fixup_device(pci_fixup_final, dev); + /* + * If arch hasn't set it explicitly yet, use the CLS + * value shared by all PCI devices. 
If there's a + * mismatch, fall back to the default value. + */ + if (!pci_cache_line_size) { + pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &tmp); + if (!cls) + cls = tmp; + if (!tmp || cls == tmp) + continue; + + printk(KERN_DEBUG "PCI: CLS mismatch (%u != %u), " + "using %u bytes\n", cls << 2, tmp << 2, + pci_dfl_cache_line_size << 2); + pci_cache_line_size = pci_dfl_cache_line_size; + } + } + if (!pci_cache_line_size) { + printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n", + cls << 2, pci_dfl_cache_line_size << 2); + pci_cache_line_size = cls; } return 0; diff --git a/include/linux/pci.h b/include/linux/pci.h index f5c7cd343e56..b849861d78e6 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1246,6 +1246,8 @@ extern int pci_pci_problems; extern unsigned long pci_cardbus_io_size; extern unsigned long pci_cardbus_mem_size; +extern u8 pci_dfl_cache_line_size; +extern u8 pci_cache_line_size; extern unsigned long pci_hotplug_io_size; extern unsigned long pci_hotplug_mem_size; -- cgit v1.2.3 From 15ea76d407d560f985224b65fe59c9db01692a0d Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 22 Sep 2009 17:34:48 +0900 Subject: pccard: configure CLS on attach For non hotplug PCI devices, the system firmware usually configures CLS correctly. For pccard devices system firmware can't do it and Linux PCI layer doesn't do it either. Unfortunately this leads to poor performance for certain devices (sata_sil). Unless MWI, which requires separate configuration, is to be used, CLS doesn't affect correctness, so the configuration should be harmless. This patch makes pci_set_cacheline_size() always built and export it and make pccard call it during attach. Please note that some other PCI hotplug drivers (shpchp and pciehp) also configure CLS on hotplug. Signed-off-by: Tejun Heo Cc: Daniel Ritz Cc: Dominik Brodowski Cc: Greg KH Cc: Kenji Kaneshige Cc: Axel Birndt Signed-off-by: Jesse Barnes --- drivers/pci/pci.c | 40 ++++++++++++++++++++-------------------- drivers/pcmcia/cardbus.c | 23 +++++++++++++++-------- include/linux/pci.h | 1 + 3 files changed, 36 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 01337b7a215f..d1afbae5b1fb 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1875,23 +1875,6 @@ void pci_clear_master(struct pci_dev *dev) __pci_set_master(dev, false); } -#ifdef PCI_DISABLE_MWI -int pci_set_mwi(struct pci_dev *dev) -{ - return 0; -} - -int pci_try_set_mwi(struct pci_dev *dev) -{ - return 0; -} - -void pci_clear_mwi(struct pci_dev *dev) -{ -} - -#else - /** * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed * @dev: the PCI device for which MWI is to be enabled @@ -1902,13 +1885,12 @@ void pci_clear_mwi(struct pci_dev *dev) * * RETURNS: An appropriate -ERRNO error value on error, or zero for success. */ -static int -pci_set_cacheline_size(struct pci_dev *dev) +int pci_set_cacheline_size(struct pci_dev *dev) { u8 cacheline_size; if (!pci_cache_line_size) - return -EINVAL; /* The system doesn't support MWI. */ + return -EINVAL; /* Validate current setting: the PCI_CACHE_LINE_SIZE must be equal to or multiple of the right value. 
*/ @@ -1929,6 +1911,24 @@ pci_set_cacheline_size(struct pci_dev *dev) return -EINVAL; } +EXPORT_SYMBOL_GPL(pci_set_cacheline_size); + +#ifdef PCI_DISABLE_MWI +int pci_set_mwi(struct pci_dev *dev) +{ + return 0; +} + +int pci_try_set_mwi(struct pci_dev *dev) +{ + return 0; +} + +void pci_clear_mwi(struct pci_dev *dev) +{ +} + +#else /** * pci_set_mwi - enables memory-write-invalidate PCI transaction diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c index db77e1f3309a..98789c031a7c 100644 --- a/drivers/pcmcia/cardbus.c +++ b/drivers/pcmcia/cardbus.c @@ -184,26 +184,33 @@ fail: =====================================================================*/ -/* - * Since there is only one interrupt available to CardBus - * devices, all devices downstream of this device must - * be using this IRQ. - */ -static void cardbus_assign_irqs(struct pci_bus *bus, int irq) +static void cardbus_config_irq_and_cls(struct pci_bus *bus, int irq) { struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { u8 irq_pin; + /* + * Since there is only one interrupt available to + * CardBus devices, all devices downstream of this + * device must be using this IRQ. + */ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq_pin); if (irq_pin) { dev->irq = irq; pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); } + /* + * Some controllers transfer very slowly with 0 CLS. + * Configure it. This may fail as CLS configuration + * is mandatory only for MWI. + */ + pci_set_cacheline_size(dev); + if (dev->subordinate) - cardbus_assign_irqs(dev->subordinate, irq); + cardbus_config_irq_and_cls(dev->subordinate, irq); } } @@ -228,7 +235,7 @@ int __ref cb_alloc(struct pcmcia_socket * s) */ pci_bus_size_bridges(bus); pci_bus_assign_resources(bus); - cardbus_assign_irqs(bus, s->pci_irq); + cardbus_config_irq_and_cls(bus, s->pci_irq); /* socket specific tune function */ if (s->tune_bridge) diff --git a/include/linux/pci.h b/include/linux/pci.h index b849861d78e6..da4128f6e916 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -701,6 +701,7 @@ void pci_disable_device(struct pci_dev *dev); void pci_set_master(struct pci_dev *dev); void pci_clear_master(struct pci_dev *dev); int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state); +int pci_set_cacheline_size(struct pci_dev *dev); #define HAVE_PCI_SET_MWI int __must_check pci_set_mwi(struct pci_dev *dev); int pci_try_set_mwi(struct pci_dev *dev); -- cgit v1.2.3 From 1ccbf5344c3daef046d2323190cc6807c44f1917 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Tue, 6 Oct 2009 15:11:14 -0700 Subject: xen: move Xen-testing predicates to common header Move xen_domain and related tests out of asm-x86 to xen/xen.h so they can be included whenever they are necessary. 
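A minimal sketch (editorial, not part of the patch; module and message names are hypothetical) of how code outside arch/x86 can use the relocated predicates simply by including the new header:

	#include <linux/module.h>
	#include <xen/xen.h>

	static int __init example_init(void)
	{
		if (!xen_domain())		/* not running under Xen at all */
			return -ENODEV;

		if (xen_pv_domain())
			pr_info("example: running as a Xen PV guest\n");

		return 0;
	}
	module_init(example_init);
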
Signed-off-by: Jeremy Fitzhardinge Signed-off-by: Jesse Barnes --- arch/x86/include/asm/xen/hypervisor.h | 27 --------------------------- arch/x86/xen/enlighten.c | 1 + drivers/block/xen-blkfront.c | 1 + drivers/char/hvc_xen.c | 2 ++ drivers/input/xen-kbdfront.c | 3 +++ drivers/net/xen-netfront.c | 1 + drivers/video/xen-fbfront.c | 3 +++ drivers/xen/balloon.c | 2 ++ drivers/xen/cpu_hotplug.c | 1 + drivers/xen/evtchn.c | 2 ++ drivers/xen/grant-table.c | 1 + drivers/xen/sys-hypervisor.c | 1 + drivers/xen/xenbus/xenbus_probe.c | 2 ++ drivers/xen/xenfs/super.c | 2 ++ include/xen/xen.h | 32 ++++++++++++++++++++++++++++++++ 15 files changed, 54 insertions(+), 27 deletions(-) create mode 100644 include/xen/xen.h (limited to 'include') diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h index d5b7e90c0edf..396ff4cc8ed4 100644 --- a/arch/x86/include/asm/xen/hypervisor.h +++ b/arch/x86/include/asm/xen/hypervisor.h @@ -37,31 +37,4 @@ extern struct shared_info *HYPERVISOR_shared_info; extern struct start_info *xen_start_info; -enum xen_domain_type { - XEN_NATIVE, /* running on bare hardware */ - XEN_PV_DOMAIN, /* running in a PV domain */ - XEN_HVM_DOMAIN, /* running in a Xen hvm domain */ -}; - -#ifdef CONFIG_XEN -extern enum xen_domain_type xen_domain_type; -#else -#define xen_domain_type XEN_NATIVE -#endif - -#define xen_domain() (xen_domain_type != XEN_NATIVE) -#define xen_pv_domain() (xen_domain() && \ - xen_domain_type == XEN_PV_DOMAIN) -#define xen_hvm_domain() (xen_domain() && \ - xen_domain_type == XEN_HVM_DOMAIN) - -#ifdef CONFIG_XEN_DOM0 -#include - -#define xen_initial_domain() (xen_pv_domain() && \ - xen_start_info->flags & SIF_INITDOMAIN) -#else /* !CONFIG_XEN_DOM0 */ -#define xen_initial_domain() (0) -#endif /* CONFIG_XEN_DOM0 */ - #endif /* _ASM_X86_XEN_HYPERVISOR_H */ diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 23a4d80fb39e..5bccd706232c 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -28,6 +28,7 @@ #include #include +#include #include #include #include diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index b8578bb3f4c9..05a31e55d278 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -42,6 +42,7 @@ #include #include +#include #include #include #include diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c index eba999f8598d..93d33816c9d9 100644 --- a/drivers/char/hvc_xen.c +++ b/drivers/char/hvc_xen.c @@ -25,6 +25,8 @@ #include #include + +#include #include #include #include diff --git a/drivers/input/xen-kbdfront.c b/drivers/input/xen-kbdfront.c index b115726dc088..c721c0a23eb8 100644 --- a/drivers/input/xen-kbdfront.c +++ b/drivers/input/xen-kbdfront.c @@ -21,7 +21,10 @@ #include #include #include + #include + +#include #include #include #include diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index baa051d5bfbe..a869b45d3d37 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -42,6 +42,7 @@ #include #include +#include #include #include #include diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c index 54cd91610174..966b226c858c 100644 --- a/drivers/video/xen-fbfront.c +++ b/drivers/video/xen-fbfront.c @@ -25,7 +25,10 @@ #include #include #include + #include + +#include #include #include #include diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index d31505b6f7a4..826dda414166 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -52,6 +52,8 @@ 
#include #include + +#include #include #include #include diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c index bdfd584ad853..6625ffe1a6fd 100644 --- a/drivers/xen/cpu_hotplug.c +++ b/drivers/xen/cpu_hotplug.c @@ -1,5 +1,6 @@ #include +#include #include #include diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c index 79bedba44fee..f70a4f4698c5 100644 --- a/drivers/xen/evtchn.c +++ b/drivers/xen/evtchn.c @@ -48,6 +48,8 @@ #include #include #include + +#include #include #include #include diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 7d8f531fb8e8..4c6c0bd636a8 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -37,6 +37,7 @@ #include #include +#include #include #include #include diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c index 88a60e03ccf0..ae5cb05a1a1c 100644 --- a/drivers/xen/sys-hypervisor.c +++ b/drivers/xen/sys-hypervisor.c @@ -14,6 +14,7 @@ #include #include +#include #include #include #include diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index d42e25d5968d..08638adec9fc 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c @@ -49,6 +49,8 @@ #include #include #include + +#include #include #include #include diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c index 6559e0c752ce..8924d93136f1 100644 --- a/drivers/xen/xenfs/super.c +++ b/drivers/xen/xenfs/super.c @@ -13,6 +13,8 @@ #include #include +#include + #include "xenfs.h" #include diff --git a/include/xen/xen.h b/include/xen/xen.h new file mode 100644 index 000000000000..a16402418d31 --- /dev/null +++ b/include/xen/xen.h @@ -0,0 +1,32 @@ +#ifndef _XEN_XEN_H +#define _XEN_XEN_H + +enum xen_domain_type { + XEN_NATIVE, /* running on bare hardware */ + XEN_PV_DOMAIN, /* running in a PV domain */ + XEN_HVM_DOMAIN, /* running in a Xen hvm domain */ +}; + +#ifdef CONFIG_XEN +extern enum xen_domain_type xen_domain_type; +#else +#define xen_domain_type XEN_NATIVE +#endif + +#define xen_domain() (xen_domain_type != XEN_NATIVE) +#define xen_pv_domain() (xen_domain() && \ + xen_domain_type == XEN_PV_DOMAIN) +#define xen_hvm_domain() (xen_domain() && \ + xen_domain_type == XEN_HVM_DOMAIN) + +#ifdef CONFIG_XEN_DOM0 +#include +#include + +#define xen_initial_domain() (xen_pv_domain() && \ + xen_start_info->flags & SIF_INITDOMAIN) +#else /* !CONFIG_XEN_DOM0 */ +#define xen_initial_domain() (0) +#endif /* CONFIG_XEN_DOM0 */ + +#endif /* _XEN_XEN_H */ -- cgit v1.2.3 From ae21ee65e8bc228416bbcc8a1da01c56a847a60c Mon Sep 17 00:00:00 2001 From: Allen Kay Date: Wed, 7 Oct 2009 10:27:17 -0700 Subject: PCI: acs p2p upsteram forwarding enabling Note: dom0 checking in v4 has been separated out into 2/2. This patch enables P2P upstream forwarding in ACS capable PCIe switches. It solves two potential problems in virtualization environment where a PCIe device is assigned to a guest domain using a HW iommu such as VT-d: 1) Unintentional failure caused by guest physical address programmed into the device's DMA that happens to match the memory address range of other downstream ports in the same PCIe switch. This causes the PCI transaction to go to the matching downstream port instead of go to the root complex to get translated by VT-d as it should be. 2) Malicious guest software intentionally attacks another downstream PCIe device by programming the DMA address into the assigned device that matches memory address range of the downstream PCIe port. 
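Whether a given port already provides this protection can be read back from its ACS capability; a minimal kernel-side sketch using the register offsets introduced by this patch (the helper name is hypothetical):

	#include <linux/pci.h>

	static bool example_acs_p2p_protected(struct pci_dev *pdev)
	{
		int pos;
		u16 cap, ctrl;

		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
		if (!pos)
			return false;	/* no ACS capability */

		pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
		pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);

		/* upstream forwarding must be both supported and enabled */
		return (cap & PCI_ACS_UF) && (ctrl & PCI_ACS_UF);
	}
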
We are in process of implementing device filtering software in KVM/XEN management software to allow device assignment of PCIe devices behind a PCIe switch only if it has ACS capability and with the P2P upstream forwarding bits enabled. This patch is intended to work for both KVM and Xen environments. Signed-off-by: Allen Kay Reviewed-by: Mathew Wilcox Reviewed-by: Chris Wright Signed-off-by: Jesse Barnes --- drivers/pci/pci.c | 35 +++++++++++++++++++++++++++++++++++ drivers/pci/pci.h | 2 ++ drivers/pci/probe.c | 5 +++++ include/linux/pci_regs.h | 13 +++++++++++++ 4 files changed, 55 insertions(+) (limited to 'include') diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 4859669f0ab5..557218222826 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1545,6 +1545,41 @@ void pci_enable_ari(struct pci_dev *dev) bridge->ari_enabled = 1; } +/** + * pci_enable_acs - enable ACS if hardware support it + * @dev: the PCI device + */ +void pci_enable_acs(struct pci_dev *dev) +{ + int pos; + u16 cap; + u16 ctrl; + + if (!dev->is_pcie) + return; + + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); + if (!pos) + return; + + pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap); + pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl); + + /* Source Validation */ + ctrl |= (cap & PCI_ACS_SV); + + /* P2P Request Redirect */ + ctrl |= (cap & PCI_ACS_RR); + + /* P2P Completion Redirect */ + ctrl |= (cap & PCI_ACS_CR); + + /* Upstream Forwarding */ + ctrl |= (cap & PCI_ACS_UF); + + pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl); +} + /** * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge * @dev: the PCI device diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index d92d1954a2fb..33ed8e0aba1e 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -311,4 +311,6 @@ static inline int pci_resource_alignment(struct pci_dev *dev, return resource_alignment(res); } +extern void pci_enable_acs(struct pci_dev *dev); + #endif /* DRIVERS_PCI_H */ diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 2adb47574d86..aac5b156a5c5 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -10,6 +10,7 @@ #include #include #include +#include #include "pci.h" #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */ @@ -1004,6 +1005,10 @@ static void pci_init_capabilities(struct pci_dev *dev) /* Single Root I/O Virtualization */ pci_iov_init(dev); + + /* Enable ACS P2P upstream forwarding */ + if (iommu_found()) + pci_enable_acs(dev); } void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h index dd0bed4f1cf0..d798770f08cd 100644 --- a/include/linux/pci_regs.h +++ b/include/linux/pci_regs.h @@ -502,6 +502,7 @@ #define PCI_EXT_CAP_ID_VC 2 #define PCI_EXT_CAP_ID_DSN 3 #define PCI_EXT_CAP_ID_PWR 4 +#define PCI_EXT_CAP_ID_ACS 13 #define PCI_EXT_CAP_ID_ARI 14 #define PCI_EXT_CAP_ID_ATS 15 #define PCI_EXT_CAP_ID_SRIOV 16 @@ -662,4 +663,16 @@ #define PCI_SRIOV_VFM_MO 0x2 /* Active.MigrateOut */ #define PCI_SRIOV_VFM_AV 0x3 /* Active.Available */ +/* Access Control Service */ +#define PCI_ACS_CAP 0x04 /* ACS Capability Register */ +#define PCI_ACS_SV 0x01 /* Source Validation */ +#define PCI_ACS_TB 0x02 /* Translation Blocking */ +#define PCI_ACS_RR 0x04 /* P2P Request Redirect */ +#define PCI_ACS_CR 0x08 /* P2P Completion Redirect */ +#define PCI_ACS_UF 0x10 /* Upstream Forwarding */ +#define PCI_ACS_EC 0x20 /* P2P Egress Control */ +#define PCI_ACS_DT 0x40 /* Direct Translated P2P */ +#define PCI_ACS_CTRL 0x06 
/* ACS Control Register */ +#define PCI_ACS_EGRESS_CTL_V 0x08 /* ACS Egress Control Vector */ + #endif /* LINUX_PCI_REGS_H */ -- cgit v1.2.3 From 0584396157ad2d008e2cc76b4ed6254151183a25 Mon Sep 17 00:00:00 2001 From: Matt Domsch Date: Mon, 2 Nov 2009 11:51:24 -0600 Subject: PCI: PCIe AER: honor ACPI HEST FIRMWARE FIRST mode Feedback from Hidetoshi Seto and Kenji Kaneshige incorporated. This correctly handles PCI-X bridges, PCIe root ports and endpoints, and prints debug messages when invalid/reserved types are found in the HEST. PCI devices not in domain/segment 0 are not represented in HEST, thus will be ignored. Today, the PCIe Advanced Error Reporting (AER) driver attaches itself to every PCIe root port for which BIOS reports it should, via ACPI _OSC. However, _OSC alone is insufficient for newer BIOSes. Part of ACPI 4.0 is the new APEI (ACPI Platform Error Interfaces) which is a way for OS and BIOS to handshake over which errors for which components each will handle. One table in ACPI 4.0 is the Hardware Error Source Table (HEST), where BIOS can define that errors for certain PCIe devices (or all devices), should be handled by BIOS ("Firmware First mode"), rather than be handled by the OS. Dell PowerEdge 11G server BIOS defines Firmware First mode in HEST, so that it may manage such errors, log them to the System Event Log, and possibly take other actions. The aer driver should honor this, and not attach itself to devices noted as such. Furthermore, Kenji Kaneshige reminded us to disallow changing the AER registers when respecting Firmware First mode. Platform firmware is expected to manage these, and if changes to them are allowed, it could break that firmware's behavior. The HEST parsing code may be replaced in the future by a more feature-rich implementation. This patch provides the minimum needed to prevent breakage until that implementation is available. Reviewed-by: Kenji Kaneshige Reviewed-by: Hidetoshi Seto Signed-off-by: Matt Domsch Signed-off-by: Jesse Barnes --- drivers/acpi/Makefile | 1 + drivers/acpi/hest.c | 135 +++++++++++++++++++++++++++++++++++++ drivers/pci/pcie/aer/aerdrv_core.c | 24 ++++++- drivers/pci/probe.c | 8 +++ include/acpi/acpi_hest.h | 12 ++++ include/linux/pci.h | 1 + 6 files changed, 179 insertions(+), 2 deletions(-) create mode 100644 drivers/acpi/hest.c create mode 100644 include/acpi/acpi_hest.h (limited to 'include') diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 7702118509a0..c7b10b4298e9 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -19,6 +19,7 @@ obj-y += acpi.o \ # All the builtin files are in the "acpi." module_param namespace. 
acpi-y += osl.o utils.o reboot.o +acpi-y += hest.o # sleep related files acpi-y += wakeup.o diff --git a/drivers/acpi/hest.c b/drivers/acpi/hest.c new file mode 100644 index 000000000000..4bb18c980ac6 --- /dev/null +++ b/drivers/acpi/hest.c @@ -0,0 +1,135 @@ +#include +#include + +#define PREFIX "ACPI: " + +static inline unsigned long parse_acpi_hest_ia_machine_check(struct acpi_hest_ia_machine_check *p) +{ + return sizeof(*p) + + (sizeof(struct acpi_hest_ia_error_bank) * p->num_hardware_banks); +} + +static inline unsigned long parse_acpi_hest_ia_corrected(struct acpi_hest_ia_corrected *p) +{ + return sizeof(*p) + + (sizeof(struct acpi_hest_ia_error_bank) * p->num_hardware_banks); +} + +static inline unsigned long parse_acpi_hest_ia_nmi(struct acpi_hest_ia_nmi *p) +{ + return sizeof(*p); +} + +static inline unsigned long parse_acpi_hest_generic(struct acpi_hest_generic *p) +{ + return sizeof(*p); +} + +static inline unsigned int hest_match_pci(struct acpi_hest_aer_common *p, struct pci_dev *pci) +{ + return (0 == pci_domain_nr(pci->bus) && + p->bus == pci->bus->number && + p->device == PCI_SLOT(pci->devfn) && + p->function == PCI_FUNC(pci->devfn)); +} + +static unsigned long parse_acpi_hest_aer(void *hdr, int type, struct pci_dev *pci, int *firmware_first) +{ + struct acpi_hest_aer_common *p = hdr + sizeof(struct acpi_hest_header); + unsigned long rc=0; + u8 pcie_type = 0; + u8 bridge = 0; + switch (type) { + case ACPI_HEST_TYPE_AER_ROOT_PORT: + rc = sizeof(struct acpi_hest_aer_root); + pcie_type = PCI_EXP_TYPE_ROOT_PORT; + break; + case ACPI_HEST_TYPE_AER_ENDPOINT: + rc = sizeof(struct acpi_hest_aer); + pcie_type = PCI_EXP_TYPE_ENDPOINT; + break; + case ACPI_HEST_TYPE_AER_BRIDGE: + rc = sizeof(struct acpi_hest_aer_bridge); + if ((pci->class >> 16) == PCI_BASE_CLASS_BRIDGE) + bridge = 1; + break; + } + + if (p->flags & ACPI_HEST_GLOBAL) { + if ((pci->is_pcie && (pci->pcie_type == pcie_type)) || bridge) + *firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST); + } + else + if (hest_match_pci(p, pci)) + *firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST); + return rc; +} + +static int acpi_hest_firmware_first(struct acpi_table_header *stdheader, struct pci_dev *pci) +{ + struct acpi_table_hest *hest = (struct acpi_table_hest *)stdheader; + void *p = (void *)hest + sizeof(*hest); /* defined by the ACPI 4.0 spec */ + struct acpi_hest_header *hdr = p; + + int i; + int firmware_first = 0; + static unsigned char printed_unused = 0; + static unsigned char printed_reserved = 0; + + for (i=0, hdr=p; p < (((void *)hest) + hest->header.length) && i < hest->error_source_count; i++) { + switch (hdr->type) { + case ACPI_HEST_TYPE_IA32_CHECK: + p += parse_acpi_hest_ia_machine_check(p); + break; + case ACPI_HEST_TYPE_IA32_CORRECTED_CHECK: + p += parse_acpi_hest_ia_corrected(p); + break; + case ACPI_HEST_TYPE_IA32_NMI: + p += parse_acpi_hest_ia_nmi(p); + break; + /* These three should never appear */ + case ACPI_HEST_TYPE_NOT_USED3: + case ACPI_HEST_TYPE_NOT_USED4: + case ACPI_HEST_TYPE_NOT_USED5: + if (!printed_unused) { + printk(KERN_DEBUG PREFIX + "HEST Error Source list contains an obsolete type (%d).\n", hdr->type); + printed_unused = 1; + } + break; + case ACPI_HEST_TYPE_AER_ROOT_PORT: + case ACPI_HEST_TYPE_AER_ENDPOINT: + case ACPI_HEST_TYPE_AER_BRIDGE: + p += parse_acpi_hest_aer(p, hdr->type, pci, &firmware_first); + break; + case ACPI_HEST_TYPE_GENERIC_ERROR: + p += parse_acpi_hest_generic(p); + break; + /* These should never appear either */ + case ACPI_HEST_TYPE_RESERVED: + default: + 
if (!printed_reserved) { + printk(KERN_DEBUG PREFIX + "HEST Error Source list contains a reserved type (%d).\n", hdr->type); + printed_reserved = 1; + } + break; + } + } + return firmware_first; +} + +int acpi_hest_firmware_first_pci(struct pci_dev *pci) +{ + acpi_status status = AE_NOT_FOUND; + struct acpi_table_header *hest = NULL; + status = acpi_get_table(ACPI_SIG_HEST, 1, &hest); + + if (ACPI_SUCCESS(status)) { + if (acpi_hest_firmware_first(hest, pci)) { + return 1; + } + } + return 0; +} +EXPORT_SYMBOL_GPL(acpi_hest_firmware_first_pci); diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index 9f5ccbeb4fa5..f4512feac12b 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c @@ -35,6 +35,9 @@ int pci_enable_pcie_error_reporting(struct pci_dev *dev) u16 reg16 = 0; int pos; + if (dev->aer_firmware_first) + return -EIO; + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); if (!pos) return -EIO; @@ -60,6 +63,9 @@ int pci_disable_pcie_error_reporting(struct pci_dev *dev) u16 reg16 = 0; int pos; + if (dev->aer_firmware_first) + return -EIO; + pos = pci_find_capability(dev, PCI_CAP_ID_EXP); if (!pos) return -EIO; @@ -874,8 +880,22 @@ void aer_delete_rootport(struct aer_rpc *rpc) */ int aer_init(struct pcie_device *dev) { - if (aer_osc_setup(dev) && !forceload) - return -ENXIO; + if (dev->port->aer_firmware_first) { + dev_printk(KERN_DEBUG, &dev->device, + "PCIe errors handled by platform firmware.\n"); + goto out; + } + + if (aer_osc_setup(dev)) + goto out; return 0; +out: + if (forceload) { + dev_printk(KERN_DEBUG, &dev->device, + "aerdrv forceload requested.\n"); + dev->port->aer_firmware_first = 0; + return 0; + } + return -ENXIO; } diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 9cefc54a0125..118463befef0 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include "pci.h" @@ -706,6 +707,12 @@ static void set_pcie_hotplug_bridge(struct pci_dev *pdev) pdev->is_hotplug_bridge = 1; } +static void set_pci_aer_firmware_first(struct pci_dev *pdev) +{ + if (acpi_hest_firmware_first_pci(pdev)) + pdev->aer_firmware_first = 1; +} + #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) /** @@ -734,6 +741,7 @@ int pci_setup_device(struct pci_dev *dev) dev->multifunction = !!(hdr_type & 0x80); dev->error_state = pci_channel_io_normal; set_pcie_port_type(dev); + set_pci_aer_firmware_first(dev); list_for_each_entry(slot, &dev->bus->slots, list) if (PCI_SLOT(dev->devfn) == slot->number) diff --git a/include/acpi/acpi_hest.h b/include/acpi/acpi_hest.h new file mode 100644 index 000000000000..63194d03cb2d --- /dev/null +++ b/include/acpi/acpi_hest.h @@ -0,0 +1,12 @@ +#ifndef __ACPI_HEST_H +#define __ACPI_HEST_H + +#include + +#ifdef CONFIG_ACPI +extern int acpi_hest_firmware_first_pci(struct pci_dev *pci); +#else +static inline int acpi_hest_firmware_first_pci(struct pci_dev *pci) { return 0; } +#endif + +#endif diff --git a/include/linux/pci.h b/include/linux/pci.h index da4128f6e916..9d646e60cae0 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -280,6 +280,7 @@ struct pci_dev { unsigned int is_virtfn:1; unsigned int reset_fn:1; unsigned int is_hotplug_bridge:1; + unsigned int aer_firmware_first:1; pci_dev_flags_t dev_flags; atomic_t enable_cnt; /* pci_enable_device has been called */ -- cgit v1.2.3 From bc577d2bb98cc44371287fce3e892d26ad4050a8 Mon Sep 17 00:00:00 2001 From: Gabe Black Date: Tue, 6 Oct 2009 10:45:19 -0500 
Subject: PCI: populate subsystem vendor and device IDs for PCI bridges Change to populate the subsystem vendor and subsytem device IDs for PCI-PCI bridges that implement the PCI Subsystem Vendor ID capability. Previously bridges left subsystem vendor IDs unpopulated. Signed-off-by: Gabe Black Signed-off-by: Jesse Barnes --- drivers/pci/probe.c | 6 ++++++ include/linux/pci_regs.h | 5 +++++ 2 files changed, 11 insertions(+) (limited to 'include') diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 118463befef0..4842b09b7f3c 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -730,6 +730,7 @@ int pci_setup_device(struct pci_dev *dev) u32 class; u8 hdr_type; struct pci_slot *slot; + int pos = 0; if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type)) return -EIO; @@ -822,6 +823,11 @@ int pci_setup_device(struct pci_dev *dev) dev->transparent = ((dev->class & 0xff) == 1); pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); set_pcie_hotplug_bridge(dev); + pos = pci_find_capability(dev, PCI_CAP_ID_SSVID); + if (pos) { + pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor); + pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device); + } break; case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */ diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h index d798770f08cd..9f2ad0aa3c39 100644 --- a/include/linux/pci_regs.h +++ b/include/linux/pci_regs.h @@ -365,6 +365,11 @@ #define PCI_X_STATUS_266MHZ 0x40000000 /* 266 MHz capable */ #define PCI_X_STATUS_533MHZ 0x80000000 /* 533 MHz capable */ +/* PCI Bridge Subsystem ID registers */ + +#define PCI_SSVID_VENDOR_ID 4 /* PCI-Bridge subsystem vendor id register */ +#define PCI_SSVID_DEVICE_ID 6 /* PCI-Bridge subsystem device id register */ + /* PCI Express capability registers */ #define PCI_EXP_FLAGS 2 /* Capabilities register */ -- cgit v1.2.3 From 3c299dc22635e500214707aa28be119ff2b3901c Mon Sep 17 00:00:00 2001 From: Andrew Patterson Date: Mon, 12 Oct 2009 13:14:00 -0600 Subject: PCI: add pci_get_domain_bus_and_slot function Added the pci_get_domain_and_slot_function which is analogous to pci_get_bus_and_slot. It returns a pci_dev given a domain (segment) number, bus number, and devnr. Like pci_get_bus_and_slot, pci_get_domain_bus_and_slot holds a reference to the returned pci_dev. Converted pci_get_bus_and_slot to a wrapper that calls pci_get_domain_bus_and_slot with the domain hard-coded to 0. This routine was patterned off code suggested by Bjorn Helgaas. Acked-by: Huang Ying Signed-off-by: Andrew Patterson Signed-off-by: Jesse Barnes --- drivers/pci/search.c | 34 +++++++++++++++++----------------- include/linux/pci.h | 8 +++++++- 2 files changed, 24 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/drivers/pci/search.c b/drivers/pci/search.c index ec415352d9ba..75826482c71a 100644 --- a/drivers/pci/search.c +++ b/drivers/pci/search.c @@ -149,32 +149,33 @@ struct pci_dev * pci_get_slot(struct pci_bus *bus, unsigned int devfn) } /** - * pci_get_bus_and_slot - locate PCI device from a given PCI bus & slot - * @bus: number of PCI bus on which desired PCI device resides - * @devfn: encodes number of PCI slot in which the desired PCI - * device resides and the logical device number within that slot - * in case of multi-function devices. - * - * Note: the bus/slot search is limited to PCI domain (segment) 0. 
+ * pci_get_domain_bus_and_slot - locate PCI device for a given PCI domain (segment), bus, and slot + * @domain: PCI domain/segment on which the PCI device resides. + * @bus: PCI bus on which desired PCI device resides + * @devfn: encodes number of PCI slot in which the desired PCI device + * resides and the logical device number within that slot in case of + * multi-function devices. * - * Given a PCI bus and slot/function number, the desired PCI device - * is located in system global list of PCI devices. If the device - * is found, a pointer to its data structure is returned. If no - * device is found, %NULL is returned. The returned device has its - * reference count bumped by one. + * Given a PCI domain, bus, and slot/function number, the desired PCI + * device is located in the list of PCI devices. If the device is + * found, its reference count is increased and this function returns a + * pointer to its data structure. The caller must decrement the + * reference count by calling pci_dev_put(). If no device is found, + * %NULL is returned. */ - -struct pci_dev * pci_get_bus_and_slot(unsigned int bus, unsigned int devfn) +struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus, + unsigned int devfn) { struct pci_dev *dev = NULL; while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { - if (pci_domain_nr(dev->bus) == 0 && - (dev->bus->number == bus && dev->devfn == devfn)) + if (pci_domain_nr(dev->bus) == domain && + (dev->bus->number == bus && dev->devfn == devfn)) return dev; } return NULL; } +EXPORT_SYMBOL(pci_get_domain_bus_and_slot); static int match_pci_dev_by_id(struct device *dev, void *data) { @@ -354,5 +355,4 @@ EXPORT_SYMBOL(pci_find_next_bus); EXPORT_SYMBOL(pci_get_device); EXPORT_SYMBOL(pci_get_subsys); EXPORT_SYMBOL(pci_get_slot); -EXPORT_SYMBOL(pci_get_bus_and_slot); EXPORT_SYMBOL(pci_get_class); diff --git a/include/linux/pci.h b/include/linux/pci.h index 9d646e60cae0..86c31ac454d1 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -636,7 +636,13 @@ struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, unsigned int ss_vendor, unsigned int ss_device, struct pci_dev *from); struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn); -struct pci_dev *pci_get_bus_and_slot(unsigned int bus, unsigned int devfn); +struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus, + unsigned int devfn); +static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus, + unsigned int devfn) +{ + return pci_get_domain_bus_and_slot(0, bus, devfn); +} struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from); int pci_dev_present(const struct pci_device_id *ids); -- cgit v1.2.3 From 42bbb70980f3720b0ae6da6af862af0e95a04351 Mon Sep 17 00:00:00 2001 From: Grant Likely Date: Wed, 4 Nov 2009 15:34:18 -0700 Subject: powerpc/5200: Add mpc5200-spi (non-PSC) device driver Adds support for the dedicated SPI device on the Freescale MPC5200(b) SoC. 
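As a hint at how the controller is driven, requests arrive through the generic SPI client API and land in this driver's transfer() hook, which queues them for the state machine; a hedged client-side sketch (function name and command bytes are made up):

	#include <linux/spi/spi.h>

	static int example_xfer(struct spi_device *spi)
	{
		u8 cmd[2] = { 0x9f, 0x00 };	/* hypothetical command */
		struct spi_transfer t = {
			.tx_buf	= cmd,
			.len	= sizeof(cmd),
		};
		struct spi_message m;

		spi_message_init(&m);
		spi_message_add_tail(&t, &m);

		/* spi_sync() ends up in the controller's transfer() hook,
		 * which queues the message and schedules the FSM workqueue */
		return spi_sync(spi, &m);
	}
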
Signed-off-by: Grant Likely --- drivers/spi/Kconfig | 8 + drivers/spi/Makefile | 1 + drivers/spi/mpc52xx_spi.c | 520 ++++++++++++++++++++++++++++++++++++++++ include/linux/spi/mpc52xx_spi.h | 10 + 4 files changed, 539 insertions(+) create mode 100644 drivers/spi/mpc52xx_spi.c create mode 100644 include/linux/spi/mpc52xx_spi.h (limited to 'include') diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 4b6f7cba3b3d..2a4ba1993083 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -133,6 +133,14 @@ config SPI_LM70_LLP which interfaces to an LM70 temperature sensor using a parallel port. +config SPI_MPC52xx + tristate "Freescale MPC52xx SPI (non-PSC) controller support" + depends on PPC_MPC52xx && SPI + select SPI_MASTER_OF + help + This drivers supports the MPC52xx SPI controller in master SPI + mode. + config SPI_MPC52xx_PSC tristate "Freescale MPC52xx PSC SPI controller" depends on PPC_MPC52xx && EXPERIMENTAL diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 21a118269cac..e3f092a9afa5 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o obj-$(CONFIG_SPI_ORION) += orion_spi.o obj-$(CONFIG_SPI_PL022) += amba-pl022.o obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o +obj-$(CONFIG_SPI_MPC52xx) += mpc52xx_spi.o obj-$(CONFIG_SPI_MPC8xxx) += spi_mpc8xxx.o obj-$(CONFIG_SPI_PPC4xx) += spi_ppc4xx.o obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o diff --git a/drivers/spi/mpc52xx_spi.c b/drivers/spi/mpc52xx_spi.c new file mode 100644 index 000000000000..ef8379b2c172 --- /dev/null +++ b/drivers/spi/mpc52xx_spi.c @@ -0,0 +1,520 @@ +/* + * MPC52xx SPI bus driver. + * + * Copyright (C) 2008 Secret Lab Technologies Ltd. + * + * This file is released under the GPLv2 + * + * This is the driver for the MPC5200's dedicated SPI controller. + * + * Note: this driver does not support the MPC5200 PSC in SPI mode. For + * that driver see drivers/spi/mpc52xx_psc_spi.c + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +MODULE_AUTHOR("Grant Likely "); +MODULE_DESCRIPTION("MPC52xx SPI (non-PSC) Driver"); +MODULE_LICENSE("GPL"); + +/* Register offsets */ +#define SPI_CTRL1 0x00 +#define SPI_CTRL1_SPIE (1 << 7) +#define SPI_CTRL1_SPE (1 << 6) +#define SPI_CTRL1_MSTR (1 << 4) +#define SPI_CTRL1_CPOL (1 << 3) +#define SPI_CTRL1_CPHA (1 << 2) +#define SPI_CTRL1_SSOE (1 << 1) +#define SPI_CTRL1_LSBFE (1 << 0) + +#define SPI_CTRL2 0x01 +#define SPI_BRR 0x04 + +#define SPI_STATUS 0x05 +#define SPI_STATUS_SPIF (1 << 7) +#define SPI_STATUS_WCOL (1 << 6) +#define SPI_STATUS_MODF (1 << 4) + +#define SPI_DATA 0x09 +#define SPI_PORTDATA 0x0d +#define SPI_DATADIR 0x10 + +/* FSM state return values */ +#define FSM_STOP 0 /* Nothing more for the state machine to */ + /* do. 
If something interesting happens */ + /* then and IRQ will be received */ +#define FSM_POLL 1 /* need to poll for completion, an IRQ is */ + /* not expected */ +#define FSM_CONTINUE 2 /* Keep iterating the state machine */ + +/* Driver internal data */ +struct mpc52xx_spi { + struct spi_master *master; + u32 sysclk; + void __iomem *regs; + int irq0; /* MODF irq */ + int irq1; /* SPIF irq */ + int ipb_freq; + + /* Statistics */ + int msg_count; + int wcol_count; + int wcol_ticks; + u32 wcol_tx_timestamp; + int modf_count; + int byte_count; + + struct list_head queue; /* queue of pending messages */ + spinlock_t lock; + struct work_struct work; + + + /* Details of current transfer (length, and buffer pointers) */ + struct spi_message *message; /* current message */ + struct spi_transfer *transfer; /* current transfer */ + int (*state)(int irq, struct mpc52xx_spi *ms, u8 status, u8 data); + int len; + int timestamp; + u8 *rx_buf; + const u8 *tx_buf; + int cs_change; +}; + +/* + * CS control function + */ +static void mpc52xx_spi_chipsel(struct mpc52xx_spi *ms, int value) +{ + out_8(ms->regs + SPI_PORTDATA, value ? 0 : 0x08); +} + +/* + * Start a new transfer. This is called both by the idle state + * for the first transfer in a message, and by the wait state when the + * previous transfer in a message is complete. + */ +static void mpc52xx_spi_start_transfer(struct mpc52xx_spi *ms) +{ + ms->rx_buf = ms->transfer->rx_buf; + ms->tx_buf = ms->transfer->tx_buf; + ms->len = ms->transfer->len; + + /* Activate the chip select */ + if (ms->cs_change) + mpc52xx_spi_chipsel(ms, 1); + ms->cs_change = ms->transfer->cs_change; + + /* Write out the first byte */ + ms->wcol_tx_timestamp = get_tbl(); + if (ms->tx_buf) + out_8(ms->regs + SPI_DATA, *ms->tx_buf++); + else + out_8(ms->regs + SPI_DATA, 0); +} + +/* Forward declaration of state handlers */ +static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms, + u8 status, u8 data); +static int mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms, + u8 status, u8 data); + +/* + * IDLE state + * + * No transfers are in progress; if another transfer is pending then retrieve + * it and kick it off. Otherwise, stop processing the state machine + */ +static int +mpc52xx_spi_fsmstate_idle(int irq, struct mpc52xx_spi *ms, u8 status, u8 data) +{ + struct spi_device *spi; + int spr, sppr; + u8 ctrl1; + + if (status && (irq != NO_IRQ)) + dev_err(&ms->master->dev, "spurious irq, status=0x%.2x\n", + status); + + /* Check if there is another transfer waiting. */ + if (list_empty(&ms->queue)) + return FSM_STOP; + + /* get the head of the queue */ + ms->message = list_first_entry(&ms->queue, struct spi_message, queue); + list_del_init(&ms->message->queue); + + /* Setup the controller parameters */ + ctrl1 = SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR; + spi = ms->message->spi; + if (spi->mode & SPI_CPHA) + ctrl1 |= SPI_CTRL1_CPHA; + if (spi->mode & SPI_CPOL) + ctrl1 |= SPI_CTRL1_CPOL; + if (spi->mode & SPI_LSB_FIRST) + ctrl1 |= SPI_CTRL1_LSBFE; + out_8(ms->regs + SPI_CTRL1, ctrl1); + + /* Setup the controller speed */ + /* minimum divider is '2'. Also, add '1' to force rounding the + * divider up. 
*/ + sppr = ((ms->ipb_freq / ms->message->spi->max_speed_hz) + 1) >> 1; + spr = 0; + if (sppr < 1) + sppr = 1; + while (((sppr - 1) & ~0x7) != 0) { + sppr = (sppr + 1) >> 1; /* add '1' to force rounding up */ + spr++; + } + sppr--; /* sppr quantity in register is offset by 1 */ + if (spr > 7) { + /* Don't overrun limits of SPI baudrate register */ + spr = 7; + sppr = 7; + } + out_8(ms->regs + SPI_BRR, sppr << 4 | spr); /* Set speed */ + + ms->cs_change = 1; + ms->transfer = container_of(ms->message->transfers.next, + struct spi_transfer, transfer_list); + + mpc52xx_spi_start_transfer(ms); + ms->state = mpc52xx_spi_fsmstate_transfer; + + return FSM_CONTINUE; +} + +/* + * TRANSFER state + * + * In the middle of a transfer. If the SPI core has completed processing + * a byte, then read out the received data and write out the next byte + * (unless this transfer is finished; in which case go on to the wait + * state) + */ +static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms, + u8 status, u8 data) +{ + if (!status) + return ms->irq0 ? FSM_STOP : FSM_POLL; + + if (status & SPI_STATUS_WCOL) { + /* The SPI controller is stoopid. At slower speeds, it may + * raise the SPIF flag before the state machine is actually + * finished, which causes a collision (internal to the state + * machine only). The manual recommends inserting a delay + * between receiving the interrupt and sending the next byte, + * but it can also be worked around simply by retrying the + * transfer which is what we do here. */ + ms->wcol_count++; + ms->wcol_ticks += get_tbl() - ms->wcol_tx_timestamp; + ms->wcol_tx_timestamp = get_tbl(); + data = 0; + if (ms->tx_buf) + data = *(ms->tx_buf-1); + out_8(ms->regs + SPI_DATA, data); /* try again */ + return FSM_CONTINUE; + } else if (status & SPI_STATUS_MODF) { + ms->modf_count++; + dev_err(&ms->master->dev, "mode fault\n"); + mpc52xx_spi_chipsel(ms, 0); + ms->message->status = -EIO; + ms->message->complete(ms->message->context); + ms->state = mpc52xx_spi_fsmstate_idle; + return FSM_CONTINUE; + } + + /* Read data out of the spi device */ + ms->byte_count++; + if (ms->rx_buf) + *ms->rx_buf++ = data; + + /* Is the transfer complete? */ + ms->len--; + if (ms->len == 0) { + ms->timestamp = get_tbl(); + ms->timestamp += ms->transfer->delay_usecs * tb_ticks_per_usec; + ms->state = mpc52xx_spi_fsmstate_wait; + return FSM_CONTINUE; + } + + /* Write out the next byte */ + ms->wcol_tx_timestamp = get_tbl(); + if (ms->tx_buf) + out_8(ms->regs + SPI_DATA, *ms->tx_buf++); + else + out_8(ms->regs + SPI_DATA, 0); + + return FSM_CONTINUE; +} + +/* + * WAIT state + * + * A transfer has completed; need to wait for the delay period to complete + * before starting the next transfer + */ +static int +mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms, u8 status, u8 data) +{ + if (status && irq) + dev_err(&ms->master->dev, "spurious irq, status=0x%.2x\n", + status); + + if (((int)get_tbl()) - ms->timestamp < 0) + return FSM_POLL; + + ms->message->actual_length += ms->transfer->len; + + /* Check if there is another transfer in this message. If there + * aren't then deactivate CS, notify sender, and drop back to idle + * to start the next message. 
*/ + if (ms->transfer->transfer_list.next == &ms->message->transfers) { + ms->msg_count++; + mpc52xx_spi_chipsel(ms, 0); + ms->message->status = 0; + ms->message->complete(ms->message->context); + ms->state = mpc52xx_spi_fsmstate_idle; + return FSM_CONTINUE; + } + + /* There is another transfer; kick it off */ + + if (ms->cs_change) + mpc52xx_spi_chipsel(ms, 0); + + ms->transfer = container_of(ms->transfer->transfer_list.next, + struct spi_transfer, transfer_list); + mpc52xx_spi_start_transfer(ms); + ms->state = mpc52xx_spi_fsmstate_transfer; + return FSM_CONTINUE; +} + +/** + * mpc52xx_spi_fsm_process - Finite State Machine iteration function + * @irq: irq number that triggered the FSM or 0 for polling + * @ms: pointer to mpc52xx_spi driver data + */ +static void mpc52xx_spi_fsm_process(int irq, struct mpc52xx_spi *ms) +{ + int rc = FSM_CONTINUE; + u8 status, data; + + while (rc == FSM_CONTINUE) { + /* Interrupt cleared by read of STATUS followed by + * read of DATA registers */ + status = in_8(ms->regs + SPI_STATUS); + data = in_8(ms->regs + SPI_DATA); + rc = ms->state(irq, ms, status, data); + } + + if (rc == FSM_POLL) + schedule_work(&ms->work); +} + +/** + * mpc52xx_spi_irq - IRQ handler + */ +static irqreturn_t mpc52xx_spi_irq(int irq, void *_ms) +{ + struct mpc52xx_spi *ms = _ms; + spin_lock(&ms->lock); + mpc52xx_spi_fsm_process(irq, ms); + spin_unlock(&ms->lock); + return IRQ_HANDLED; +} + +/** + * mpc52xx_spi_wq - Workqueue function for polling the state machine + */ +static void mpc52xx_spi_wq(struct work_struct *work) +{ + struct mpc52xx_spi *ms = container_of(work, struct mpc52xx_spi, work); + unsigned long flags; + + spin_lock_irqsave(&ms->lock, flags); + mpc52xx_spi_fsm_process(0, ms); + spin_unlock_irqrestore(&ms->lock, flags); +} + +/* + * spi_master ops + */ + +static int mpc52xx_spi_setup(struct spi_device *spi) +{ + if (spi->bits_per_word % 8) + return -EINVAL; + + if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST)) + return -EINVAL; + + if (spi->chip_select >= spi->master->num_chipselect) + return -EINVAL; + + return 0; +} + +static int mpc52xx_spi_transfer(struct spi_device *spi, struct spi_message *m) +{ + struct mpc52xx_spi *ms = spi_master_get_devdata(spi->master); + unsigned long flags; + + m->actual_length = 0; + m->status = -EINPROGRESS; + + spin_lock_irqsave(&ms->lock, flags); + list_add_tail(&m->queue, &ms->queue); + spin_unlock_irqrestore(&ms->lock, flags); + schedule_work(&ms->work); + + return 0; +} + +/* + * OF Platform Bus Binding + */ +static int __devinit mpc52xx_spi_probe(struct of_device *op, + const struct of_device_id *match) +{ + struct spi_master *master; + struct mpc52xx_spi *ms; + void __iomem *regs; + int rc; + + /* MMIO registers */ + dev_dbg(&op->dev, "probing mpc5200 SPI device\n"); + regs = of_iomap(op->node, 0); + if (!regs) + return -ENODEV; + + /* initialize the device */ + out_8(regs+SPI_CTRL1, SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR); + out_8(regs + SPI_CTRL2, 0x0); + out_8(regs + SPI_DATADIR, 0xe); /* Set output pins */ + out_8(regs + SPI_PORTDATA, 0x8); /* Deassert /SS signal */ + + /* Clear the status register and re-read it to check for a MODF + * failure. This driver cannot currently handle multiple masters + * on the SPI bus. 
This fault will also occur if the SPI signals + * are not connected to any pins (port_config setting) */ + in_8(regs + SPI_STATUS); + in_8(regs + SPI_DATA); + if (in_8(regs + SPI_STATUS) & SPI_STATUS_MODF) { + dev_err(&op->dev, "mode fault; is port_config correct?\n"); + rc = -EIO; + goto err_init; + } + + dev_dbg(&op->dev, "allocating spi_master struct\n"); + master = spi_alloc_master(&op->dev, sizeof *ms); + if (!master) { + rc = -ENOMEM; + goto err_alloc; + } + master->bus_num = -1; + master->num_chipselect = 1; + master->setup = mpc52xx_spi_setup; + master->transfer = mpc52xx_spi_transfer; + dev_set_drvdata(&op->dev, master); + + ms = spi_master_get_devdata(master); + ms->master = master; + ms->regs = regs; + ms->irq0 = irq_of_parse_and_map(op->node, 0); + ms->irq1 = irq_of_parse_and_map(op->node, 1); + ms->state = mpc52xx_spi_fsmstate_idle; + ms->ipb_freq = mpc5xxx_get_bus_frequency(op->node); + spin_lock_init(&ms->lock); + INIT_LIST_HEAD(&ms->queue); + INIT_WORK(&ms->work, mpc52xx_spi_wq); + + /* Decide if interrupts can be used */ + if (ms->irq0 && ms->irq1) { + rc = request_irq(ms->irq0, mpc52xx_spi_irq, IRQF_SAMPLE_RANDOM, + "mpc5200-spi-modf", ms); + rc |= request_irq(ms->irq1, mpc52xx_spi_irq, IRQF_SAMPLE_RANDOM, + "mpc5200-spi-spiF", ms); + if (rc) { + free_irq(ms->irq0, ms); + free_irq(ms->irq1, ms); + ms->irq0 = ms->irq1 = 0; + } + } else { + /* operate in polled mode */ + ms->irq0 = ms->irq1 = 0; + } + + if (!ms->irq0) + dev_info(&op->dev, "using polled mode\n"); + + dev_dbg(&op->dev, "registering spi_master struct\n"); + rc = spi_register_master(master); + if (rc) + goto err_register; + + of_register_spi_devices(master, op->node); + dev_info(&ms->master->dev, "registered MPC5200 SPI bus\n"); + + return rc; + + err_register: + dev_err(&ms->master->dev, "initialization failed\n"); + spi_master_put(master); + err_alloc: + err_init: + iounmap(regs); + return rc; +} + +static int __devexit mpc52xx_spi_remove(struct of_device *op) +{ + struct spi_master *master = dev_get_drvdata(&op->dev); + struct mpc52xx_spi *ms = spi_master_get_devdata(master); + + free_irq(ms->irq0, ms); + free_irq(ms->irq1, ms); + + spi_unregister_master(master); + spi_master_put(master); + iounmap(ms->regs); + + return 0; +} + +static struct of_device_id mpc52xx_spi_match[] __devinitdata = { + { .compatible = "fsl,mpc5200-spi", }, + {} +}; +MODULE_DEVICE_TABLE(of, mpc52xx_spi_match); + +static struct of_platform_driver mpc52xx_spi_of_driver = { + .owner = THIS_MODULE, + .name = "mpc52xx-spi", + .match_table = mpc52xx_spi_match, + .probe = mpc52xx_spi_probe, + .remove = __exit_p(mpc52xx_spi_remove), +}; + +static int __init mpc52xx_spi_init(void) +{ + return of_register_platform_driver(&mpc52xx_spi_of_driver); +} +module_init(mpc52xx_spi_init); + +static void __exit mpc52xx_spi_exit(void) +{ + of_unregister_platform_driver(&mpc52xx_spi_of_driver); +} +module_exit(mpc52xx_spi_exit); + diff --git a/include/linux/spi/mpc52xx_spi.h b/include/linux/spi/mpc52xx_spi.h new file mode 100644 index 000000000000..d1004cf09241 --- /dev/null +++ b/include/linux/spi/mpc52xx_spi.h @@ -0,0 +1,10 @@ + +#ifndef INCLUDE_MPC5200_SPI_H +#define INCLUDE_MPC5200_SPI_H + +extern void mpc52xx_spi_set_premessage_hook(struct spi_master *master, + void (*hook)(struct spi_message *m, + void *context), + void *hook_context); + +#endif -- cgit v1.2.3 From 8c10cbdb4af642d9a2efb45ea89251aaab905360 Mon Sep 17 00:00:00 2001 From: Benny Halevy Date: Mon, 19 Oct 2009 12:04:53 +0200 Subject: nfsd: use STATEID_FMT and STATEID_VAL for printing 
stateids Signed-off-by: Benny Halevy Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4state.c | 46 ++++++++++++++++------------------------------ include/linux/nfsd/state.h | 7 +++++++ 2 files changed, 23 insertions(+), 30 deletions(-) (limited to 'include') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 42dab9587afe..c8b621a120cd 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -2416,11 +2416,8 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta memcpy(&open->op_delegate_stateid, &dp->dl_stateid, sizeof(dp->dl_stateid)); - dprintk("NFSD: delegation stateid=(%08x/%08x/%08x/%08x)\n\n", - dp->dl_stateid.si_boot, - dp->dl_stateid.si_stateownerid, - dp->dl_stateid.si_fileid, - dp->dl_stateid.si_generation); + dprintk("NFSD: delegation stateid=" STATEID_FMT "\n", + STATEID_VAL(&dp->dl_stateid)); out: if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS && flag == NFS4_OPEN_DELEGATE_NONE @@ -2510,9 +2507,8 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf status = nfs_ok; - dprintk("nfs4_process_open2: stateid=(%08x/%08x/%08x/%08x)\n", - stp->st_stateid.si_boot, stp->st_stateid.si_stateownerid, - stp->st_stateid.si_fileid, stp->st_stateid.si_generation); + dprintk("%s: stateid=" STATEID_FMT "\n", __func__, + STATEID_VAL(&stp->st_stateid)); out: if (fp) put_nfs4_file(fp); @@ -2678,9 +2674,8 @@ STALE_STATEID(stateid_t *stateid) { if (time_after((unsigned long)boot_time, (unsigned long)stateid->si_boot)) { - dprintk("NFSD: stale stateid (%08x/%08x/%08x/%08x)!\n", - stateid->si_boot, stateid->si_stateownerid, - stateid->si_fileid, stateid->si_generation); + dprintk("NFSD: stale stateid " STATEID_FMT "!\n", + STATEID_VAL(stateid)); return 1; } return 0; @@ -2692,9 +2687,8 @@ EXPIRED_STATEID(stateid_t *stateid) if (time_before((unsigned long)boot_time, ((unsigned long)stateid->si_boot)) && time_before((unsigned long)(stateid->si_boot + lease_time), get_seconds())) { - dprintk("NFSD: expired stateid (%08x/%08x/%08x/%08x)!\n", - stateid->si_boot, stateid->si_stateownerid, - stateid->si_fileid, stateid->si_generation); + dprintk("NFSD: expired stateid " STATEID_FMT "!\n", + STATEID_VAL(stateid)); return 1; } return 0; @@ -2708,9 +2702,8 @@ stateid_error_map(stateid_t *stateid) if (EXPIRED_STATEID(stateid)) return nfserr_expired; - dprintk("NFSD: bad stateid (%08x/%08x/%08x/%08x)!\n", - stateid->si_boot, stateid->si_stateownerid, - stateid->si_fileid, stateid->si_generation); + dprintk("NFSD: bad stateid " STATEID_FMT "!\n", + STATEID_VAL(stateid)); return nfserr_bad_stateid; } @@ -2896,10 +2889,8 @@ nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, struct svc_fh *current_fh = &cstate->current_fh; __be32 status; - dprintk("NFSD: preprocess_seqid_op: seqid=%d " - "stateid = (%08x/%08x/%08x/%08x)\n", seqid, - stateid->si_boot, stateid->si_stateownerid, stateid->si_fileid, - stateid->si_generation); + dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__, + seqid, STATEID_VAL(stateid)); *stpp = NULL; *sopp = NULL; @@ -3031,12 +3022,8 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, sop->so_confirmed = 1; update_stateid(&stp->st_stateid); memcpy(&oc->oc_resp_stateid, &stp->st_stateid, sizeof(stateid_t)); - dprintk("NFSD: nfsd4_open_confirm: success, seqid=%d " - "stateid=(%08x/%08x/%08x/%08x)\n", oc->oc_seqid, - stp->st_stateid.si_boot, - stp->st_stateid.si_stateownerid, - stp->st_stateid.si_fileid, - stp->st_stateid.si_generation); + dprintk("NFSD: 
%s: success, seqid=%d stateid=" STATEID_FMT "\n", + __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stateid)); nfsd4_create_clid_dir(sop->so_client); out: @@ -3295,9 +3282,8 @@ find_delegation_stateid(struct inode *ino, stateid_t *stid) struct nfs4_file *fp; struct nfs4_delegation *dl; - dprintk("NFSD:find_delegation_stateid stateid=(%08x/%08x/%08x/%08x)\n", - stid->si_boot, stid->si_stateownerid, - stid->si_fileid, stid->si_generation); + dprintk("NFSD: %s: stateid=" STATEID_FMT "\n", __func__, + STATEID_VAL(stid)); fp = find_file(ino); if (!fp) diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h index b38d11324189..5aadf8aa3a97 100644 --- a/include/linux/nfsd/state.h +++ b/include/linux/nfsd/state.h @@ -60,6 +60,13 @@ typedef struct { #define si_stateownerid si_opaque.so_stateownerid #define si_fileid si_opaque.so_fileid +#define STATEID_FMT "(%08x/%08x/%08x/%08x)" +#define STATEID_VAL(s) \ + (s)->si_boot, \ + (s)->si_stateownerid, \ + (s)->si_fileid, \ + (s)->si_generation + struct nfsd4_cb_sequence { /* args/res */ u32 cbs_minorversion; -- cgit v1.2.3 From 7a9c906094de8b3dc227de448019dbc386cd25d4 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 15 Sep 2009 22:57:31 +0200 Subject: drm: make drm_mode_object_find typesafe I've wasted half a day hunting a bug that could easily be spotted by gcc. Prevent this from reoccurring. Signed-off-by: Daniel Vetter Signed-off-by: Eric Anholt --- drivers/gpu/drm/drm_crtc.c | 3 ++- include/drm/drm_crtc.h | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 5cae0b3eee9b..ee0cde1ecca5 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -247,7 +247,8 @@ static void drm_mode_object_put(struct drm_device *dev, mutex_unlock(&dev->mode_config.idr_mutex); } -void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type) +struct drm_mode_object *drm_mode_object_find(struct drm_device *dev, + uint32_t id, uint32_t type) { struct drm_mode_object *obj = NULL; diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index b69347b8904f..bfcc60d101db 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -711,7 +711,8 @@ extern void drm_mode_connector_detach_encoder(struct drm_connector *connector, struct drm_encoder *encoder); extern bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, int gamma_size); -extern void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type); +extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev, + uint32_t id, uint32_t type); /* IOCTLs */ extern int drm_mode_getresources(struct drm_device *dev, void *data, struct drm_file *file_priv); -- cgit v1.2.3 From 48764bf43f746113fc77877d7e80f2df23ca4cbb Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 15 Sep 2009 22:57:32 +0200 Subject: drm/i915: add i915_lp_ring_sync helper This just waits until the hw passed the current ring position with cmd execution. This slightly changes the existing i915_wait_request function to make uninterruptible waiting possible - no point in returning to userspace while mucking around with the overlay, that piece of hw is just too fragile. Also replace a magic 0 with the symbolic constant (and kill the then superflous comment) while I was looking at the code. 
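A hedged sketch of the intended call pattern (only i915_lp_ring_sync() itself comes from this patch; the surrounding commands are placeholders): a caller emits its commands to the low-priority ring and then blocks, uninterruptibly, until the ring has executed past them:

	BEGIN_LP_RING(2);
	OUT_RING(MI_NOOP);
	OUT_RING(MI_NOOP);
	ADVANCE_LP_RING();

	ret = i915_lp_ring_sync(dev);	/* wait for the ring to drain */
	if (ret)
		return ret;		/* -ENOMEM, or -EIO on a wedged GPU */
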
Signed-off-by: Daniel Vetter Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_gem.c | 49 ++++++++++++++++++++++++++++++++--------- include/drm/drm_os_linux.h | 2 +- 3 files changed, 41 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 95391191316a..e440f70e477a 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -803,6 +803,7 @@ void i915_gem_cleanup_ringbuffer(struct drm_device *dev); int i915_gem_do_init(struct drm_device *dev, unsigned long start, unsigned long end); int i915_gem_idle(struct drm_device *dev); +int i915_lp_ring_sync(struct drm_device *dev); int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index abfc27b0c2ea..7d1e9adf0f4c 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1820,12 +1820,8 @@ i915_gem_retire_work_handler(struct work_struct *work) mutex_unlock(&dev->struct_mutex); } -/** - * Waits for a sequence number to be signaled, and cleans up the - * request and object lists appropriately for that event. - */ static int -i915_wait_request(struct drm_device *dev, uint32_t seqno) +i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) { drm_i915_private_t *dev_priv = dev->dev_private; u32 ier; @@ -1852,10 +1848,15 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno) dev_priv->mm.waiting_gem_seqno = seqno; i915_user_irq_get(dev); - ret = wait_event_interruptible(dev_priv->irq_queue, - i915_seqno_passed(i915_get_gem_seqno(dev), - seqno) || - atomic_read(&dev_priv->mm.wedged)); + if (interruptible) + ret = wait_event_interruptible(dev_priv->irq_queue, + i915_seqno_passed(i915_get_gem_seqno(dev), seqno) || + atomic_read(&dev_priv->mm.wedged)); + else + wait_event(dev_priv->irq_queue, + i915_seqno_passed(i915_get_gem_seqno(dev), seqno) || + atomic_read(&dev_priv->mm.wedged)); + i915_user_irq_put(dev); dev_priv->mm.waiting_gem_seqno = 0; @@ -1879,6 +1880,34 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno) return ret; } +/** + * Waits for a sequence number to be signaled, and cleans up the + * request and object lists appropriately for that event. + */ +static int +i915_wait_request(struct drm_device *dev, uint32_t seqno) +{ + return i915_do_wait_request(dev, seqno, 1); +} + +/** + * Waits for the ring to finish up to the latest request. Usefull for waiting + * for flip events, e.g for the overlay support. 
*/ +int i915_lp_ring_sync(struct drm_device *dev) +{ + uint32_t seqno; + int ret; + + seqno = i915_add_request(dev, NULL, 0); + + if (seqno == 0) + return -ENOMEM; + + ret = i915_do_wait_request(dev, seqno, 0); + BUG_ON(ret == -ERESTARTSYS); + return ret; +} + static void i915_gem_flush(struct drm_device *dev, uint32_t invalidate_domains, @@ -1947,7 +1976,7 @@ i915_gem_flush(struct drm_device *dev, #endif BEGIN_LP_RING(2); OUT_RING(cmd); - OUT_RING(0); /* noop */ + OUT_RING(MI_NOOP); ADVANCE_LP_RING(); } } diff --git a/include/drm/drm_os_linux.h b/include/drm/drm_os_linux.h index 26641e95e0a4..393369147a2d 100644 --- a/include/drm/drm_os_linux.h +++ b/include/drm/drm_os_linux.h @@ -123,5 +123,5 @@ do { \ remove_wait_queue(&(queue), &entry); \ } while (0) -#define DRM_WAKEUP( queue ) wake_up_interruptible( queue ) +#define DRM_WAKEUP( queue ) wake_up( queue ) #define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue ) -- cgit v1.2.3 From 02e792fbaadb75dec8e476a05b610e49908fc6a4 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 15 Sep 2009 22:57:34 +0200 Subject: drm/i915: implement drmmode overlay support v4 This implements intel overlay support for kms via a device-specific ioctl. Thomas Hellstrom brought up the idea of a general ioctl (on dri-devel). We've reached the conclusion that such an infrastructure only makes sense when multiple kms overlay implementations exists, which atm don't (and it doesn't look like this is gonna change). Open issues: - Runs in sync with the gpu, i.e. unnecessary waiting. I've decided to wait on this because the hw tends to hang when changing something in this area. I left some dummy functions as infrastructure. - polyphase filtering uses a static table. - uses uninterruptible sleeps. Unfortunately the alternatives may unnecessarily wedged the hw if/when we timeout too early (and userspace only overloaded the batch buffers with stuff worth a few secs of gpu time). Changes since v1: - fix off-by-one misconception on my side. This fixes fullscreen playback. Changes since v2: - add underrun detection as spec'ed for i965. - flush caches properly, fixing visual corruptions. Changes since v4: - fix up cache flushing of overlay memory regs. - killed require_pipe_a logic - it hangs the chip. 
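As a hedged illustration of the new interface (not part of the patch itself): userspace fills in struct drm_intel_overlay_put_image and issues the new ioctl roughly as below. The GEM buffer allocation and error handling are omitted, the 720x576 geometry and strides are example values only, and the wrapper name is made up; the ioctl, flags and structure fields come from the patch that follows.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

/* Sketch: display one YUV 4:2:0 planar frame from an existing GEM bo
 * (the bo is assumed to be large enough for all three planes). */
static int example_overlay_put_frame(int drm_fd, uint32_t bo_handle,
				     uint32_t crtc_id)
{
	struct drm_intel_overlay_put_image req;

	memset(&req, 0, sizeof(req));
	req.flags = I915_OVERLAY_ENABLE | I915_OVERLAY_YUV_PLANAR |
		    I915_OVERLAY_YUV420;
	req.bo_handle = bo_handle;
	req.crtc_id = crtc_id;

	/* example 720x576 source, fully scanned out */
	req.src_width = 720;
	req.src_height = 576;
	req.src_scan_width = 720;
	req.src_scan_height = 576;

	/* strides in bytes, 64-byte aligned as the kernel checks require */
	req.stride_Y = 768;
	req.stride_UV = 384;
	req.offset_Y = 0;
	req.offset_U = req.stride_Y * req.src_height;
	req.offset_V = req.offset_U + req.stride_UV * req.src_height / 2;

	/* destination rectangle, must lie within the crtc's current mode */
	req.dst_x = 0;
	req.dst_y = 0;
	req.dst_width = 720;
	req.dst_height = 576;

	return ioctl(drm_fd, DRM_IOCTL_I915_OVERLAY_PUT_IMAGE, &req);
}
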
Tested-By: diego.abelenda@gmail.com (on a 865G) Signed-off-by: Daniel Vetter [anholt: Resolved against the MADVISE ioctl going in before this one] Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/Makefile | 1 + drivers/gpu/drm/i915/i915_dma.c | 7 + drivers/gpu/drm/i915/i915_drv.h | 5 + drivers/gpu/drm/i915/i915_reg.h | 5 + drivers/gpu/drm/i915/intel_display.c | 25 +- drivers/gpu/drm/i915/intel_drv.h | 28 + drivers/gpu/drm/i915/intel_overlay.c | 1293 ++++++++++++++++++++++++++++++++++ include/drm/i915_drm.h | 71 ++ 8 files changed, 1432 insertions(+), 3 deletions(-) create mode 100644 drivers/gpu/drm/i915/intel_overlay.c (limited to 'include') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index fa7b9be096bc..87b21996cd6a 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -23,6 +23,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ intel_fb.o \ intel_tv.o \ intel_dvo.o \ + intel_overlay.o \ dvo_ch7xxx.o \ dvo_ch7017.o \ dvo_ivch.o \ diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index e5b138be45fa..138be49259c3 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -807,6 +807,9 @@ static int i915_getparam(struct drm_device *dev, void *data, case I915_PARAM_NUM_FENCES_AVAIL: value = dev_priv->num_fence_regs - dev_priv->fence_reg_start; break; + case I915_PARAM_HAS_OVERLAY: + value = dev_priv->overlay ? 1 : 0; + break; default: DRM_DEBUG_DRIVER("Unknown parameter %d\n", param->param); @@ -1548,6 +1551,8 @@ int i915_driver_unload(struct drm_device *dev) mutex_unlock(&dev->struct_mutex); drm_mm_takedown(&dev_priv->vram); i915_gem_lastclose(dev); + + intel_cleanup_overlay(dev); } pci_dev_put(dev_priv->bridge_dev); @@ -1656,6 +1661,8 @@ struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0), DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, 0), + DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW), + DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW), }; int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4e8b26161a74..ce03fd5b3f5b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -170,6 +170,8 @@ struct drm_i915_display_funcs { /* clock gating init */ }; +struct intel_overlay; + typedef struct drm_i915_private { struct drm_device *dev; @@ -241,6 +243,9 @@ typedef struct drm_i915_private { struct intel_opregion opregion; + /* overlay */ + struct intel_overlay *overlay; + /* LVDS info */ int backlight_duty_cycle; /* restore backlight to this value */ bool panel_wants_dither; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 51fd152f47f3..d1be1849580d 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -140,6 +140,7 @@ #define MI_NOOP MI_INSTR(0, 0) #define MI_USER_INTERRUPT MI_INSTR(0x02, 0) #define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0) +#define MI_WAIT_FOR_OVERLAY_FLIP (1<<16) #define MI_WAIT_FOR_PLANE_B_FLIP (1<<6) #define MI_WAIT_FOR_PLANE_A_FLIP (1<<2) #define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1) @@ -151,6 +152,10 @@ #define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ #define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) #define 
MI_REPORT_HEAD MI_INSTR(0x07, 0) +#define MI_OVERLAY_FLIP MI_INSTR(0x11,0) +#define MI_OVERLAY_CONTINUE (0x0<<21) +#define MI_OVERLAY_ON (0x1<<21) +#define MI_OVERLAY_OFF (0x2<<21) #define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0) #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 0be624a52e50..6f818fadcbe3 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1781,6 +1781,22 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) } } +static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) +{ + struct intel_overlay *overlay; + + if (!enable && intel_crtc->overlay) { + overlay = intel_crtc->overlay; + mutex_lock(&overlay->dev->struct_mutex); + intel_overlay_switch_off(overlay); + mutex_unlock(&overlay->dev->struct_mutex); + } + /* Let userspace switch the overlay on again. In most cases userspace + * has to recompute where to put it anyway. */ + + return; +} + static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) { struct drm_device *dev = crtc->dev; @@ -1839,12 +1855,13 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) intel_update_fbc(crtc, &crtc->mode); /* Give the overlay scaler a chance to enable if it's on this pipe */ - //intel_crtc_dpms_video(crtc, true); TODO + intel_crtc_dpms_overlay(intel_crtc, true); break; case DRM_MODE_DPMS_OFF: intel_update_watermarks(dev); + /* Give the overlay scaler a chance to disable if it's on this pipe */ - //intel_crtc_dpms_video(crtc, FALSE); TODO + intel_crtc_dpms_overlay(intel_crtc, false); if (dev_priv->cfb_plane == plane && dev_priv->display.disable_fbc) @@ -2039,7 +2056,7 @@ static int i830_get_display_clock_speed(struct drm_device *dev) * Return the pipe currently connected to the panel fitter, * or -1 if the panel fitter is not present or not in use */ -static int intel_panel_fitter_pipe (struct drm_device *dev) +int intel_panel_fitter_pipe (struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 pfit_control; @@ -4458,6 +4475,8 @@ void intel_modeset_init(struct drm_device *dev) INIT_WORK(&dev_priv->idle_work, intel_idle_update); setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, (unsigned long)dev); + + intel_setup_overlay(dev); } void intel_modeset_cleanup(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index ef61fe9507e2..c9b1b97ab792 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -110,6 +110,25 @@ struct intel_output { int clone_mask; }; +struct intel_crtc; +struct intel_overlay { + struct drm_device *dev; + struct intel_crtc *crtc; + struct drm_i915_gem_object *vid_bo; + struct drm_i915_gem_object *old_vid_bo; + int active; + int pfit_active; + u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */ + u32 color_key; + u32 brightness, contrast, saturation; + u32 old_xscale, old_yscale; + /* register access */ + u32 flip_addr; + struct drm_i915_gem_object *reg_bo; + void *virt_addr; + int hw_wedged; +}; + struct intel_crtc { struct drm_crtc base; enum pipe pipe; @@ -121,6 +140,7 @@ struct intel_crtc { bool busy; /* is scanout buffer being updated frequently? 
*/ struct timer_list idle_timer; bool lowfreq_avail; + struct intel_overlay *overlay; }; #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) @@ -148,6 +168,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, extern void intel_edp_link_config (struct intel_output *, int *, int *); +extern int intel_panel_fitter_pipe (struct drm_device *dev); extern void intel_crtc_load_lut(struct drm_crtc *crtc); extern void intel_encoder_prepare (struct drm_encoder *encoder); extern void intel_encoder_commit (struct drm_encoder *encoder); @@ -183,4 +204,11 @@ extern int intel_framebuffer_create(struct drm_device *dev, struct drm_framebuffer **fb, struct drm_gem_object *obj); +extern void intel_setup_overlay(struct drm_device *dev); +extern void intel_cleanup_overlay(struct drm_device *dev); +extern int intel_overlay_switch_off(struct intel_overlay *overlay); +extern int intel_overlay_put_image(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int intel_overlay_attrs(struct drm_device *dev, void *data, + struct drm_file *file_priv); #endif /* __INTEL_DRV_H__ */ diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c new file mode 100644 index 000000000000..3f6f3a36929f --- /dev/null +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -0,0 +1,1293 @@ +/* + * Copyright © 2009 + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Authors: + * Daniel Vetter + * + * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c + */ +#include "drmP.h" +#include "drm.h" +#include "i915_drm.h" +#include "i915_drv.h" +#include "i915_reg.h" +#include "intel_drv.h" + +/* Limits for overlay size. According to intel doc, the real limits are: + * Y width: 4095, UV width (planar): 2047, Y height: 2047, + * UV width (planar): * 1023. But the xorg thinks 2048 for height and width. Use + * the mininum of both. 
*/ +#define IMAGE_MAX_WIDTH 2048 +#define IMAGE_MAX_HEIGHT 2046 /* 2 * 1023 */ +/* on 830 and 845 these large limits result in the card hanging */ +#define IMAGE_MAX_WIDTH_LEGACY 1024 +#define IMAGE_MAX_HEIGHT_LEGACY 1088 + +/* overlay register definitions */ +/* OCMD register */ +#define OCMD_TILED_SURFACE (0x1<<19) +#define OCMD_MIRROR_MASK (0x3<<17) +#define OCMD_MIRROR_MODE (0x3<<17) +#define OCMD_MIRROR_HORIZONTAL (0x1<<17) +#define OCMD_MIRROR_VERTICAL (0x2<<17) +#define OCMD_MIRROR_BOTH (0x3<<17) +#define OCMD_BYTEORDER_MASK (0x3<<14) /* zero for YUYV or FOURCC YUY2 */ +#define OCMD_UV_SWAP (0x1<<14) /* YVYU */ +#define OCMD_Y_SWAP (0x2<<14) /* UYVY or FOURCC UYVY */ +#define OCMD_Y_AND_UV_SWAP (0x3<<14) /* VYUY */ +#define OCMD_SOURCE_FORMAT_MASK (0xf<<10) +#define OCMD_RGB_888 (0x1<<10) /* not in i965 Intel docs */ +#define OCMD_RGB_555 (0x2<<10) /* not in i965 Intel docs */ +#define OCMD_RGB_565 (0x3<<10) /* not in i965 Intel docs */ +#define OCMD_YUV_422_PACKED (0x8<<10) +#define OCMD_YUV_411_PACKED (0x9<<10) /* not in i965 Intel docs */ +#define OCMD_YUV_420_PLANAR (0xc<<10) +#define OCMD_YUV_422_PLANAR (0xd<<10) +#define OCMD_YUV_410_PLANAR (0xe<<10) /* also 411 */ +#define OCMD_TVSYNCFLIP_PARITY (0x1<<9) +#define OCMD_TVSYNCFLIP_ENABLE (0x1<<7) +#define OCMD_BUF_TYPE_MASK (Ox1<<5) +#define OCMD_BUF_TYPE_FRAME (0x0<<5) +#define OCMD_BUF_TYPE_FIELD (0x1<<5) +#define OCMD_TEST_MODE (0x1<<4) +#define OCMD_BUFFER_SELECT (0x3<<2) +#define OCMD_BUFFER0 (0x0<<2) +#define OCMD_BUFFER1 (0x1<<2) +#define OCMD_FIELD_SELECT (0x1<<2) +#define OCMD_FIELD0 (0x0<<1) +#define OCMD_FIELD1 (0x1<<1) +#define OCMD_ENABLE (0x1<<0) + +/* OCONFIG register */ +#define OCONF_PIPE_MASK (0x1<<18) +#define OCONF_PIPE_A (0x0<<18) +#define OCONF_PIPE_B (0x1<<18) +#define OCONF_GAMMA2_ENABLE (0x1<<16) +#define OCONF_CSC_MODE_BT601 (0x0<<5) +#define OCONF_CSC_MODE_BT709 (0x1<<5) +#define OCONF_CSC_BYPASS (0x1<<4) +#define OCONF_CC_OUT_8BIT (0x1<<3) +#define OCONF_TEST_MODE (0x1<<2) +#define OCONF_THREE_LINE_BUFFER (0x1<<0) +#define OCONF_TWO_LINE_BUFFER (0x0<<0) + +/* DCLRKM (dst-key) register */ +#define DST_KEY_ENABLE (0x1<<31) +#define CLK_RGB24_MASK 0x0 +#define CLK_RGB16_MASK 0x070307 +#define CLK_RGB15_MASK 0x070707 +#define CLK_RGB8I_MASK 0xffffff + +#define RGB16_TO_COLORKEY(c) \ + (((c & 0xF800) << 8) | ((c & 0x07E0) << 5) | ((c & 0x001F) << 3)) +#define RGB15_TO_COLORKEY(c) \ + (((c & 0x7c00) << 9) | ((c & 0x03E0) << 6) | ((c & 0x001F) << 3)) + +/* overlay flip addr flag */ +#define OFC_UPDATE 0x1 + +/* polyphase filter coefficients */ +#define N_HORIZ_Y_TAPS 5 +#define N_VERT_Y_TAPS 3 +#define N_HORIZ_UV_TAPS 3 +#define N_VERT_UV_TAPS 3 +#define N_PHASES 17 +#define MAX_TAPS 5 + +/* memory bufferd overlay registers */ +struct overlay_registers { + u32 OBUF_0Y; + u32 OBUF_1Y; + u32 OBUF_0U; + u32 OBUF_0V; + u32 OBUF_1U; + u32 OBUF_1V; + u32 OSTRIDE; + u32 YRGB_VPH; + u32 UV_VPH; + u32 HORZ_PH; + u32 INIT_PHS; + u32 DWINPOS; + u32 DWINSZ; + u32 SWIDTH; + u32 SWIDTHSW; + u32 SHEIGHT; + u32 YRGBSCALE; + u32 UVSCALE; + u32 OCLRC0; + u32 OCLRC1; + u32 DCLRKV; + u32 DCLRKM; + u32 SCLRKVH; + u32 SCLRKVL; + u32 SCLRKEN; + u32 OCONFIG; + u32 OCMD; + u32 RESERVED1; /* 0x6C */ + u32 OSTART_0Y; + u32 OSTART_1Y; + u32 OSTART_0U; + u32 OSTART_0V; + u32 OSTART_1U; + u32 OSTART_1V; + u32 OTILEOFF_0Y; + u32 OTILEOFF_1Y; + u32 OTILEOFF_0U; + u32 OTILEOFF_0V; + u32 OTILEOFF_1U; + u32 OTILEOFF_1V; + u32 FASTHSCALE; /* 0xA0 */ + u32 UVSCALEV; /* 0xA4 */ + u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */ + u16 
Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */ + u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES]; + u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */ + u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES]; + u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */ + u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES]; + u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */ + u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES]; +}; + +/* overlay flip addr flag */ +#define OFC_UPDATE 0x1 + +#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev)) +#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IGDNG(dev)) + + +static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay) +{ + drm_i915_private_t *dev_priv = overlay->dev->dev_private; + struct overlay_registers *regs; + + /* no recursive mappings */ + BUG_ON(overlay->virt_addr); + + if (OVERLAY_NONPHYSICAL(overlay->dev)) { + regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, + overlay->reg_bo->gtt_offset); + + if (!regs) { + DRM_ERROR("failed to map overlay regs in GTT\n"); + return NULL; + } + } else + regs = overlay->reg_bo->phys_obj->handle->vaddr; + + return overlay->virt_addr = regs; +} + +static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay) +{ + struct drm_device *dev = overlay->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + + if (OVERLAY_NONPHYSICAL(overlay->dev)) + io_mapping_unmap_atomic(overlay->virt_addr); + + overlay->virt_addr = NULL; + + I915_READ(OVADD); /* flush wc cashes */ + + return; +} + +/* overlay needs to be disable in OCMD reg */ +static int intel_overlay_on(struct intel_overlay *overlay) +{ + struct drm_device *dev = overlay->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + int ret; + RING_LOCALS; + + BUG_ON(overlay->active); + + BEGIN_LP_RING(6); + OUT_RING(MI_FLUSH); + OUT_RING(MI_NOOP); + OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON); + OUT_RING(overlay->flip_addr | OFC_UPDATE); + OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + OUT_RING(MI_NOOP); + ADVANCE_LP_RING(); + + ret = i915_lp_ring_sync(dev); + if (ret != 0) { + DRM_ERROR("intel overlay: ring sync failed, hw likely wedged\n"); + overlay->hw_wedged = 1; + return 0; + } + + overlay->active = 1; + + return 0; +} + +/* overlay needs to be enabled in OCMD reg */ +static void intel_overlay_continue(struct intel_overlay *overlay, + bool load_polyphase_filter) +{ + struct drm_device *dev = overlay->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + u32 flip_addr = overlay->flip_addr; + u32 tmp; + int ret; + RING_LOCALS; + + BUG_ON(!overlay->active); + + if (load_polyphase_filter) + flip_addr |= OFC_UPDATE; + + /* check for underruns */ + tmp = I915_READ(DOVSTA); + if (tmp & (1 << 17)) + DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); + + BEGIN_LP_RING(6); + OUT_RING(MI_FLUSH); + OUT_RING(MI_NOOP); + OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); + OUT_RING(flip_addr); + OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + OUT_RING(MI_NOOP); + ADVANCE_LP_RING(); + + /* run in lockstep with the hw for easier testing */ + ret = i915_lp_ring_sync(dev); + if (ret != 0) { + DRM_ERROR("intel overlay: ring sync failed, hw likely wedged\n"); + overlay->hw_wedged = 1; + } +} + +static int intel_overlay_wait_flip(struct intel_overlay *overlay) +{ + /* don't overcomplicate things for now with asynchronous operations + * see comment above */ + return 0; +} + +/* overlay needs to be disabled in OCMD reg */ +static int intel_overlay_off(struct intel_overlay 
*overlay) +{ + u32 flip_addr = overlay->flip_addr; + struct drm_device *dev = overlay->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + int ret; + RING_LOCALS; + + BUG_ON(!overlay->active); + + /* According to intel docs the overlay hw may hang (when switching + * off) without loading the filter coeffs. It is however unclear whether + * this applies to the disabling of the overlay or to the switching off + * of the hw. Do it in both cases */ + flip_addr |= OFC_UPDATE; + + /* wait for overlay to go idle */ + BEGIN_LP_RING(6); + OUT_RING(MI_FLUSH); + OUT_RING(MI_NOOP); + OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); + OUT_RING(flip_addr); + OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + OUT_RING(MI_NOOP); + ADVANCE_LP_RING(); + + ret = i915_lp_ring_sync(dev); + if (ret != 0) { + DRM_ERROR("intel overlay: ring sync failed, hw likely wedged\n"); + overlay->hw_wedged = 1; + return ret; + } + + /* turn overlay off */ + /* this is not done in userspace! + BEGIN_LP_RING(6); + OUT_RING(MI_FLUSH); + OUT_RING(MI_NOOP); + OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); + OUT_RING(flip_addr); + OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + OUT_RING(MI_NOOP); + ADVANCE_LP_RING(); + + ret = i915_lp_ring_sync(dev); + if (ret != 0) { + DRM_ERROR("intel overlay: ring sync failed, hw likely wedged\n"); + overlay->hw_wedged = 1; + return ret; + }*/ + + overlay->active = 0; + + return ret; +} + +/* wait for pending overlay flip and release old frame */ +static int intel_overlay_release_old_vid(struct intel_overlay *overlay) +{ + int ret; + struct drm_gem_object *obj; + + ret = intel_overlay_wait_flip(overlay); + if (ret != 0) + return ret; + + if (!overlay->old_vid_bo) + return 0; + + obj = overlay->old_vid_bo->obj; + i915_gem_object_unpin(obj); + drm_gem_object_unreference(obj); + overlay->old_vid_bo = NULL; + + return 0; +} + +struct put_image_params { + int format; + short dst_x; + short dst_y; + short dst_w; + short dst_h; + short src_w; + short src_scan_h; + short src_scan_w; + short src_h; + short stride_Y; + short stride_UV; + int offset_Y; + int offset_U; + int offset_V; +}; + +static int packed_depth_bytes(u32 format) +{ + switch (format & I915_OVERLAY_DEPTH_MASK) { + case I915_OVERLAY_YUV422: + return 4; + case I915_OVERLAY_YUV411: + /* return 6; not implemented */ + default: + return -EINVAL; + } +} + +static int packed_width_bytes(u32 format, short width) +{ + switch (format & I915_OVERLAY_DEPTH_MASK) { + case I915_OVERLAY_YUV422: + return width << 1; + default: + return -EINVAL; + } +} + +static int uv_hsubsampling(u32 format) +{ + switch (format & I915_OVERLAY_DEPTH_MASK) { + case I915_OVERLAY_YUV422: + case I915_OVERLAY_YUV420: + return 2; + case I915_OVERLAY_YUV411: + case I915_OVERLAY_YUV410: + return 4; + default: + return -EINVAL; + } +} + +static int uv_vsubsampling(u32 format) +{ + switch (format & I915_OVERLAY_DEPTH_MASK) { + case I915_OVERLAY_YUV420: + case I915_OVERLAY_YUV410: + return 2; + case I915_OVERLAY_YUV422: + case I915_OVERLAY_YUV411: + return 1; + default: + return -EINVAL; + } +} + +static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width) +{ + u32 mask, shift, ret; + if (IS_I9XX(dev)) { + mask = 0x3f; + shift = 6; + } else { + mask = 0x1f; + shift = 5; + } + ret = ((offset + width + mask) >> shift) - (offset >> shift); + if (IS_I9XX(dev)) + ret <<= 1; + ret -=1; + return ret << 2; +} + +static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = { + 0x3000, 0xb4a0, 0x1930, 0x1920, 0xb4a0, + 0x3000, 0xb500, 0x19d0, 0x1880, 
0xb440, + 0x3000, 0xb540, 0x1a88, 0x2f80, 0xb3e0, + 0x3000, 0xb580, 0x1b30, 0x2e20, 0xb380, + 0x3000, 0xb5c0, 0x1bd8, 0x2cc0, 0xb320, + 0x3020, 0xb5e0, 0x1c60, 0x2b80, 0xb2c0, + 0x3020, 0xb5e0, 0x1cf8, 0x2a20, 0xb260, + 0x3020, 0xb5e0, 0x1d80, 0x28e0, 0xb200, + 0x3020, 0xb5c0, 0x1e08, 0x3f40, 0xb1c0, + 0x3020, 0xb580, 0x1e78, 0x3ce0, 0xb160, + 0x3040, 0xb520, 0x1ed8, 0x3aa0, 0xb120, + 0x3040, 0xb4a0, 0x1f30, 0x3880, 0xb0e0, + 0x3040, 0xb400, 0x1f78, 0x3680, 0xb0a0, + 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060, + 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040, + 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020, + 0xb000, 0x3000, 0x0800, 0x3000, 0xb000}; +static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = { + 0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60, + 0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40, + 0xb040, 0x1b20, 0x29e0, 0xb060, 0x1bd8, 0x2880, + 0xb080, 0x1c88, 0x3e60, 0xb0a0, 0x1d28, 0x3c00, + 0xb0c0, 0x1db8, 0x39e0, 0xb0e0, 0x1e40, 0x37e0, + 0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0, + 0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240, + 0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0, + 0x3000, 0x0800, 0x3000}; + +static void update_polyphase_filter(struct overlay_registers *regs) +{ + memcpy(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs)); + memcpy(regs->UV_HCOEFS, uv_static_hcoeffs, sizeof(uv_static_hcoeffs)); +} + +static bool update_scaling_factors(struct intel_overlay *overlay, + struct overlay_registers *regs, + struct put_image_params *params) +{ + /* fixed point with a 12 bit shift */ + u32 xscale, yscale, xscale_UV, yscale_UV; +#define FP_SHIFT 12 +#define FRACT_MASK 0xfff + bool scale_changed = false; + int uv_hscale = uv_hsubsampling(params->format); + int uv_vscale = uv_vsubsampling(params->format); + + if (params->dst_w > 1) + xscale = ((params->src_scan_w - 1) << FP_SHIFT) + /(params->dst_w); + else + xscale = 1 << FP_SHIFT; + + if (params->dst_h > 1) + yscale = ((params->src_scan_h - 1) << FP_SHIFT) + /(params->dst_h); + else + yscale = 1 << FP_SHIFT; + + /*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/ + xscale_UV = xscale/uv_hscale; + yscale_UV = yscale/uv_vscale; + /* make the Y scale to UV scale ratio an exact multiply */ + xscale = xscale_UV * uv_hscale; + yscale = yscale_UV * uv_vscale; + /*} else { + xscale_UV = 0; + yscale_UV = 0; + }*/ + + if (xscale != overlay->old_xscale || yscale != overlay->old_yscale) + scale_changed = true; + overlay->old_xscale = xscale; + overlay->old_yscale = yscale; + + regs->YRGBSCALE = ((yscale & FRACT_MASK) << 20) + | ((xscale >> FP_SHIFT) << 16) + | ((xscale & FRACT_MASK) << 3); + regs->UVSCALE = ((yscale_UV & FRACT_MASK) << 20) + | ((xscale_UV >> FP_SHIFT) << 16) + | ((xscale_UV & FRACT_MASK) << 3); + regs->UVSCALEV = ((yscale >> FP_SHIFT) << 16) + | ((yscale_UV >> FP_SHIFT) << 0); + + if (scale_changed) + update_polyphase_filter(regs); + + return scale_changed; +} + +static void update_colorkey(struct intel_overlay *overlay, + struct overlay_registers *regs) +{ + u32 key = overlay->color_key; + switch (overlay->crtc->base.fb->bits_per_pixel) { + case 8: + regs->DCLRKV = 0; + regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE; + case 16: + if (overlay->crtc->base.fb->depth == 15) { + regs->DCLRKV = RGB15_TO_COLORKEY(key); + regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE; + } else { + regs->DCLRKV = RGB16_TO_COLORKEY(key); + regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE; + } + case 24: + case 32: + regs->DCLRKV = key; + regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE; + } +} + +static u32 overlay_cmd_reg(struct 
put_image_params *params) +{ + u32 cmd = OCMD_ENABLE | OCMD_BUF_TYPE_FRAME | OCMD_BUFFER0; + + if (params->format & I915_OVERLAY_YUV_PLANAR) { + switch (params->format & I915_OVERLAY_DEPTH_MASK) { + case I915_OVERLAY_YUV422: + cmd |= OCMD_YUV_422_PLANAR; + break; + case I915_OVERLAY_YUV420: + cmd |= OCMD_YUV_420_PLANAR; + break; + case I915_OVERLAY_YUV411: + case I915_OVERLAY_YUV410: + cmd |= OCMD_YUV_410_PLANAR; + break; + } + } else { /* YUV packed */ + switch (params->format & I915_OVERLAY_DEPTH_MASK) { + case I915_OVERLAY_YUV422: + cmd |= OCMD_YUV_422_PACKED; + break; + case I915_OVERLAY_YUV411: + cmd |= OCMD_YUV_411_PACKED; + break; + } + + switch (params->format & I915_OVERLAY_SWAP_MASK) { + case I915_OVERLAY_NO_SWAP: + break; + case I915_OVERLAY_UV_SWAP: + cmd |= OCMD_UV_SWAP; + break; + case I915_OVERLAY_Y_SWAP: + cmd |= OCMD_Y_SWAP; + break; + case I915_OVERLAY_Y_AND_UV_SWAP: + cmd |= OCMD_Y_AND_UV_SWAP; + break; + } + } + + return cmd; +} + +int intel_overlay_do_put_image(struct intel_overlay *overlay, + struct drm_gem_object *new_bo, + struct put_image_params *params) +{ + int ret, tmp_width; + struct overlay_registers *regs; + bool scale_changed = false; + struct drm_i915_gem_object *bo_priv = new_bo->driver_private; + struct drm_device *dev = overlay->dev; + + BUG_ON(!mutex_is_locked(&dev->struct_mutex)); + BUG_ON(!mutex_is_locked(&dev->mode_config.mutex)); + BUG_ON(!overlay); + + if (overlay->hw_wedged) + return -EBUSY; + + ret = intel_overlay_release_old_vid(overlay); + if (ret != 0) + return ret; + + ret = i915_gem_object_pin(new_bo, PAGE_SIZE); + if (ret != 0) + return ret; + + ret = i915_gem_object_set_to_gtt_domain(new_bo, 0); + if (ret != 0) + goto out_unpin; + + if (!overlay->active) { + regs = intel_overlay_map_regs_atomic(overlay); + if (!regs) { + ret = -ENOMEM; + goto out_unpin; + } + regs->OCONFIG = OCONF_CC_OUT_8BIT; + if (IS_I965GM(overlay->dev)) + regs->OCONFIG |= OCONF_CSC_MODE_BT709; + regs->OCONFIG |= overlay->crtc->pipe == 0 ? 
+ OCONF_PIPE_A : OCONF_PIPE_B; + intel_overlay_unmap_regs_atomic(overlay); + + ret = intel_overlay_on(overlay); + if (ret != 0) + goto out_unpin; + } + + regs = intel_overlay_map_regs_atomic(overlay); + if (!regs) { + ret = -ENOMEM; + goto out_unpin; + } + + regs->DWINPOS = (params->dst_y << 16) | params->dst_x; + regs->DWINSZ = (params->dst_h << 16) | params->dst_w; + + if (params->format & I915_OVERLAY_YUV_PACKED) + tmp_width = packed_width_bytes(params->format, params->src_w); + else + tmp_width = params->src_w; + + regs->SWIDTH = params->src_w; + regs->SWIDTHSW = calc_swidthsw(overlay->dev, + params->offset_Y, tmp_width); + regs->SHEIGHT = params->src_h; + regs->OBUF_0Y = bo_priv->gtt_offset + params-> offset_Y; + regs->OSTRIDE = params->stride_Y; + + if (params->format & I915_OVERLAY_YUV_PLANAR) { + int uv_hscale = uv_hsubsampling(params->format); + int uv_vscale = uv_vsubsampling(params->format); + u32 tmp_U, tmp_V; + regs->SWIDTH |= (params->src_w/uv_hscale) << 16; + tmp_U = calc_swidthsw(overlay->dev, params->offset_U, + params->src_w/uv_hscale); + tmp_V = calc_swidthsw(overlay->dev, params->offset_V, + params->src_w/uv_hscale); + regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16; + regs->SHEIGHT |= (params->src_h/uv_vscale) << 16; + regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U; + regs->OBUF_0V = bo_priv->gtt_offset + params->offset_V; + regs->OSTRIDE |= params->stride_UV << 16; + } + + scale_changed = update_scaling_factors(overlay, regs, params); + + update_colorkey(overlay, regs); + + regs->OCMD = overlay_cmd_reg(params); + + intel_overlay_unmap_regs_atomic(overlay); + + intel_overlay_continue(overlay, scale_changed); + + overlay->old_vid_bo = overlay->vid_bo; + overlay->vid_bo = new_bo->driver_private; + + return 0; + +out_unpin: + i915_gem_object_unpin(new_bo); + return ret; +} + +int intel_overlay_switch_off(struct intel_overlay *overlay) +{ + int ret; + struct overlay_registers *regs; + struct drm_gem_object *obj; + struct drm_device *dev = overlay->dev; + + BUG_ON(!mutex_is_locked(&dev->struct_mutex)); + BUG_ON(!mutex_is_locked(&dev->mode_config.mutex)); + + if (!overlay->active) + return 0; + + if (overlay->hw_wedged) + return -EBUSY; + + ret = intel_overlay_release_old_vid(overlay); + if (ret != 0) + return ret; + + regs = intel_overlay_map_regs_atomic(overlay); + regs->OCMD = 0; + intel_overlay_unmap_regs_atomic(overlay); + + ret = intel_overlay_off(overlay); + /* never have the overlay hw on without showing a frame */ + BUG_ON(!overlay->vid_bo); + obj = overlay->vid_bo->obj; + + i915_gem_object_unpin(obj); + drm_gem_object_unreference(obj); + overlay->vid_bo = NULL; + + overlay->crtc->overlay = NULL; + overlay->crtc = NULL; + + return 0; +} + +static int check_overlay_possible_on_crtc(struct intel_overlay *overlay, + struct intel_crtc *crtc) +{ + drm_i915_private_t *dev_priv = overlay->dev->dev_private; + u32 pipeconf; + int pipeconf_reg = (crtc->pipe == 0) ? 
PIPEACONF : PIPEBCONF; + + if (!crtc->base.enabled || crtc->dpms_mode != DRM_MODE_DPMS_ON) + return -EINVAL; + + pipeconf = I915_READ(pipeconf_reg); + + /* can't use the overlay with double wide pipe */ + if (!IS_I965G(overlay->dev) && pipeconf & PIPEACONF_DOUBLE_WIDE) + return -EINVAL; + + return 0; +} + +static void update_pfit_vscale_ratio(struct intel_overlay *overlay) +{ + struct drm_device *dev = overlay->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + u32 ratio; + u32 pfit_control = I915_READ(PFIT_CONTROL); + + /* XXX: This is not the same logic as in the xorg driver, but more in + * line with the intel documentation for the i965 */ + if (!IS_I965G(dev) && (pfit_control & VERT_AUTO_SCALE)) { + ratio = I915_READ(PFIT_AUTO_RATIOS) >> PFIT_VERT_SCALE_SHIFT; + } else { /* on i965 use the PGM reg to read out the autoscaler values */ + ratio = I915_READ(PFIT_PGM_RATIOS); + if (IS_I965G(dev)) + ratio >>= PFIT_VERT_SCALE_SHIFT_965; + else + ratio >>= PFIT_VERT_SCALE_SHIFT; + } + + overlay->pfit_vscale_ratio = ratio; +} + +static int check_overlay_dst(struct intel_overlay *overlay, + struct drm_intel_overlay_put_image *rec) +{ + struct drm_display_mode *mode = &overlay->crtc->base.mode; + + if ((rec->dst_x < mode->crtc_hdisplay) + && (rec->dst_x + rec->dst_width + <= mode->crtc_hdisplay) + && (rec->dst_y < mode->crtc_vdisplay) + && (rec->dst_y + rec->dst_height + <= mode->crtc_vdisplay)) + return 0; + else + return -EINVAL; +} + +static int check_overlay_scaling(struct put_image_params *rec) +{ + u32 tmp; + + /* downscaling limit is 8.0 */ + tmp = ((rec->src_scan_h << 16) / rec->dst_h) >> 16; + if (tmp > 7) + return -EINVAL; + tmp = ((rec->src_scan_w << 16) / rec->dst_w) >> 16; + if (tmp > 7) + return -EINVAL; + + return 0; +} + +static int check_overlay_src(struct drm_device *dev, + struct drm_intel_overlay_put_image *rec, + struct drm_gem_object *new_bo) +{ + u32 stride_mask; + int depth; + int uv_hscale = uv_hsubsampling(rec->flags); + int uv_vscale = uv_vsubsampling(rec->flags); + size_t tmp; + + /* check src dimensions */ + if (IS_845G(dev) || IS_I830(dev)) { + if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY + || rec->src_width > IMAGE_MAX_WIDTH_LEGACY) + return -EINVAL; + } else { + if (rec->src_height > IMAGE_MAX_HEIGHT + || rec->src_width > IMAGE_MAX_WIDTH) + return -EINVAL; + } + /* better safe than sorry, use 4 as the maximal subsampling ratio */ + if (rec->src_height < N_VERT_Y_TAPS*4 + || rec->src_width < N_HORIZ_Y_TAPS*4) + return -EINVAL; + + /* check alingment constrains */ + switch (rec->flags & I915_OVERLAY_TYPE_MASK) { + case I915_OVERLAY_RGB: + /* not implemented */ + return -EINVAL; + case I915_OVERLAY_YUV_PACKED: + depth = packed_depth_bytes(rec->flags); + if (uv_vscale != 1) + return -EINVAL; + if (depth < 0) + return depth; + /* ignore UV planes */ + rec->stride_UV = 0; + rec->offset_U = 0; + rec->offset_V = 0; + /* check pixel alignment */ + if (rec->offset_Y % depth) + return -EINVAL; + break; + case I915_OVERLAY_YUV_PLANAR: + if (uv_vscale < 0 || uv_hscale < 0) + return -EINVAL; + /* no offset restrictions for planar formats */ + break; + default: + return -EINVAL; + } + + if (rec->src_width % uv_hscale) + return -EINVAL; + + /* stride checking */ + stride_mask = 63; + + if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask) + return -EINVAL; + if (IS_I965G(dev) && rec->stride_Y < 512) + return -EINVAL; + + tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ? 
+ 4 : 8; + if (rec->stride_Y > tmp*1024 || rec->stride_UV > 2*1024) + return -EINVAL; + + /* check buffer dimensions */ + switch (rec->flags & I915_OVERLAY_TYPE_MASK) { + case I915_OVERLAY_RGB: + case I915_OVERLAY_YUV_PACKED: + /* always 4 Y values per depth pixels */ + if (packed_width_bytes(rec->flags, rec->src_width) + > rec->stride_Y) + return -EINVAL; + + tmp = rec->stride_Y*rec->src_height; + if (rec->offset_Y + tmp > new_bo->size) + return -EINVAL; + break; + case I915_OVERLAY_YUV_PLANAR: + if (rec->src_width > rec->stride_Y) + return -EINVAL; + if (rec->src_width/uv_hscale > rec->stride_UV) + return -EINVAL; + + tmp = rec->stride_Y*rec->src_height; + if (rec->offset_Y + tmp > new_bo->size) + return -EINVAL; + tmp = rec->stride_UV*rec->src_height; + tmp /= uv_vscale; + if (rec->offset_U + tmp > new_bo->size + || rec->offset_V + tmp > new_bo->size) + return -EINVAL; + break; + } + + return 0; +} + +int intel_overlay_put_image(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_intel_overlay_put_image *put_image_rec = data; + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_overlay *overlay; + struct drm_mode_object *drmmode_obj; + struct intel_crtc *crtc; + struct drm_gem_object *new_bo; + struct put_image_params *params; + int ret; + + if (!dev_priv) { + DRM_ERROR("called with no initialization\n"); + return -EINVAL; + } + + overlay = dev_priv->overlay; + if (!overlay) { + DRM_DEBUG("userspace bug: no overlay\n"); + return -ENODEV; + } + + if (!(put_image_rec->flags & I915_OVERLAY_ENABLE)) { + mutex_lock(&dev->mode_config.mutex); + mutex_lock(&dev->struct_mutex); + + ret = intel_overlay_switch_off(overlay); + + mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->mode_config.mutex); + + return ret; + } + + params = kmalloc(sizeof(struct put_image_params), GFP_KERNEL); + if (!params) + return -ENOMEM; + + drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id, + DRM_MODE_OBJECT_CRTC); + if (!drmmode_obj) + return -ENOENT; + crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); + + new_bo = drm_gem_object_lookup(dev, file_priv, + put_image_rec->bo_handle); + if (!new_bo) + return -ENOENT; + + mutex_lock(&dev->mode_config.mutex); + mutex_lock(&dev->struct_mutex); + + if (overlay->crtc != crtc) { + struct drm_display_mode *mode = &crtc->base.mode; + ret = intel_overlay_switch_off(overlay); + if (ret != 0) + goto out_unlock; + + ret = check_overlay_possible_on_crtc(overlay, crtc); + if (ret != 0) + goto out_unlock; + + overlay->crtc = crtc; + crtc->overlay = overlay; + + if (intel_panel_fitter_pipe(dev) == crtc->pipe + /* and line to wide, i.e. 
one-line-mode */ + && mode->hdisplay > 1024) { + overlay->pfit_active = 1; + update_pfit_vscale_ratio(overlay); + } else + overlay->pfit_active = 0; + } + + ret = check_overlay_dst(overlay, put_image_rec); + if (ret != 0) + goto out_unlock; + + if (overlay->pfit_active) { + params->dst_y = ((((u32)put_image_rec->dst_y) << 12) / + overlay->pfit_vscale_ratio); + /* shifting right rounds downwards, so add 1 */ + params->dst_h = ((((u32)put_image_rec->dst_height) << 12) / + overlay->pfit_vscale_ratio) + 1; + } else { + params->dst_y = put_image_rec->dst_y; + params->dst_h = put_image_rec->dst_height; + } + params->dst_x = put_image_rec->dst_x; + params->dst_w = put_image_rec->dst_width; + + params->src_w = put_image_rec->src_width; + params->src_h = put_image_rec->src_height; + params->src_scan_w = put_image_rec->src_scan_width; + params->src_scan_h = put_image_rec->src_scan_height; + if (params->src_scan_h > params->src_h + || params->src_scan_w > params->src_w) { + ret = -EINVAL; + goto out_unlock; + } + + ret = check_overlay_src(dev, put_image_rec, new_bo); + if (ret != 0) + goto out_unlock; + params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK; + params->stride_Y = put_image_rec->stride_Y; + params->stride_UV = put_image_rec->stride_UV; + params->offset_Y = put_image_rec->offset_Y; + params->offset_U = put_image_rec->offset_U; + params->offset_V = put_image_rec->offset_V; + + /* Check scaling after src size to prevent a divide-by-zero. */ + ret = check_overlay_scaling(params); + if (ret != 0) + goto out_unlock; + + ret = intel_overlay_do_put_image(overlay, new_bo, params); + if (ret != 0) + goto out_unlock; + + mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->mode_config.mutex); + + kfree(params); + + return 0; + +out_unlock: + mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->mode_config.mutex); + drm_gem_object_unreference(new_bo); + kfree(params); + + return ret; +} + +static void update_reg_attrs(struct intel_overlay *overlay, + struct overlay_registers *regs) +{ + regs->OCLRC0 = (overlay->contrast << 18) | (overlay->brightness & 0xff); + regs->OCLRC1 = overlay->saturation; +} + +static bool check_gamma_bounds(u32 gamma1, u32 gamma2) +{ + int i; + + if (gamma1 & 0xff000000 || gamma2 & 0xff000000) + return false; + + for (i = 0; i < 3; i++) { + if (((gamma1 >> i * 8) & 0xff) >= ((gamma2 >> i*8) & 0xff)) + return false; + } + + return true; +} + +static bool check_gamma5_errata(u32 gamma5) +{ + int i; + + for (i = 0; i < 3; i++) { + if (((gamma5 >> i*8) & 0xff) == 0x80) + return false; + } + + return true; +} + +static int check_gamma(struct drm_intel_overlay_attrs *attrs) +{ + if (!check_gamma_bounds(0, attrs->gamma0) + || !check_gamma_bounds(attrs->gamma0, attrs->gamma1) + || !check_gamma_bounds(attrs->gamma1, attrs->gamma2) + || !check_gamma_bounds(attrs->gamma2, attrs->gamma3) + || !check_gamma_bounds(attrs->gamma3, attrs->gamma4) + || !check_gamma_bounds(attrs->gamma4, attrs->gamma5) + || !check_gamma_bounds(attrs->gamma5, 0x00ffffff)) + return -EINVAL; + if (!check_gamma5_errata(attrs->gamma5)) + return -EINVAL; + return 0; +} + +int intel_overlay_attrs(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_intel_overlay_attrs *attrs = data; + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_overlay *overlay; + struct overlay_registers *regs; + int ret; + + if (!dev_priv) { + DRM_ERROR("called with no initialization\n"); + return -EINVAL; + } + + overlay = dev_priv->overlay; + if (!overlay) { + DRM_DEBUG("userspace 
bug: no overlay\n"); + return -ENODEV; + } + + mutex_lock(&dev->mode_config.mutex); + mutex_lock(&dev->struct_mutex); + + if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) { + attrs->color_key = overlay->color_key; + attrs->brightness = overlay->brightness; + attrs->contrast = overlay->contrast; + attrs->saturation = overlay->saturation; + + if (IS_I9XX(dev)) { + attrs->gamma0 = I915_READ(OGAMC0); + attrs->gamma1 = I915_READ(OGAMC1); + attrs->gamma2 = I915_READ(OGAMC2); + attrs->gamma3 = I915_READ(OGAMC3); + attrs->gamma4 = I915_READ(OGAMC4); + attrs->gamma5 = I915_READ(OGAMC5); + } + ret = 0; + } else { + overlay->color_key = attrs->color_key; + if (attrs->brightness >= -128 && attrs->brightness <= 127) { + overlay->brightness = attrs->brightness; + } else { + ret = -EINVAL; + goto out_unlock; + } + if (attrs->contrast <= 255) { + overlay->contrast = attrs->contrast; + } else { + ret = -EINVAL; + goto out_unlock; + } + if (attrs->saturation <= 1023) { + overlay->saturation = attrs->saturation; + } else { + ret = -EINVAL; + goto out_unlock; + } + + regs = intel_overlay_map_regs_atomic(overlay); + if (!regs) { + ret = -ENOMEM; + goto out_unlock; + } + + update_reg_attrs(overlay, regs); + + intel_overlay_unmap_regs_atomic(overlay); + + if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) { + if (!IS_I9XX(dev)) { + ret = -EINVAL; + goto out_unlock; + } + + if (overlay->active) { + ret = -EBUSY; + goto out_unlock; + } + + ret = check_gamma(attrs); + if (ret != 0) + goto out_unlock; + + I915_WRITE(OGAMC0, attrs->gamma0); + I915_WRITE(OGAMC1, attrs->gamma1); + I915_WRITE(OGAMC2, attrs->gamma2); + I915_WRITE(OGAMC3, attrs->gamma3); + I915_WRITE(OGAMC4, attrs->gamma4); + I915_WRITE(OGAMC5, attrs->gamma5); + } + ret = 0; + } + +out_unlock: + mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->mode_config.mutex); + + return ret; +} + +void intel_setup_overlay(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_overlay *overlay; + struct drm_gem_object *reg_bo; + struct overlay_registers *regs; + int ret; + + if (!OVERLAY_EXISTS(dev)) + return; + + overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL); + if (!overlay) + return; + overlay->dev = dev; + + reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE); + if (!reg_bo) + goto out_free; + overlay->reg_bo = reg_bo->driver_private; + + if (OVERLAY_NONPHYSICAL(dev)) { + ret = i915_gem_object_pin(reg_bo, PAGE_SIZE); + if (ret) { + DRM_ERROR("failed to pin overlay register bo\n"); + goto out_free_bo; + } + overlay->flip_addr = overlay->reg_bo->gtt_offset; + } else { + ret = i915_gem_attach_phys_object(dev, reg_bo, + I915_GEM_PHYS_OVERLAY_REGS); + if (ret) { + DRM_ERROR("failed to attach phys overlay regs\n"); + goto out_free_bo; + } + overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr; + } + + /* init all values */ + overlay->color_key = 0x0101fe; + overlay->brightness = -19; + overlay->contrast = 75; + overlay->saturation = 146; + + regs = intel_overlay_map_regs_atomic(overlay); + if (!regs) + goto out_free_bo; + + memset(regs, 0, sizeof(struct overlay_registers)); + update_polyphase_filter(regs); + + update_reg_attrs(overlay, regs); + + intel_overlay_unmap_regs_atomic(overlay); + + dev_priv->overlay = overlay; + DRM_INFO("initialized overlay support\n"); + return; + +out_free_bo: + drm_gem_object_unreference(reg_bo); +out_free: + kfree(overlay); + return; +} + +void intel_cleanup_overlay(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + + if (dev_priv->overlay) { + /* The 
bo's should be free'd by the generic code already. + * Furthermore modesetting teardown happens beforehand so the + * hardware should be off already */ + BUG_ON(dev_priv->overlay->active); + + kfree(dev_priv->overlay); + } +} diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index 7e0cb1da92e6..c900499f2f63 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -186,6 +186,8 @@ typedef struct _drm_i915_sarea { #define DRM_I915_GEM_MMAP_GTT 0x24 #define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25 #define DRM_I915_GEM_MADVISE 0x26 +#define DRM_I915_OVERLAY_PUT_IMAGE 0x27 +#define DRM_I915_OVERLAY_ATTRS 0x28 #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) @@ -223,6 +225,8 @@ typedef struct _drm_i915_sarea { #define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture) #define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_intel_get_pipe_from_crtc_id) #define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise) +#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_IOCTL_I915_OVERLAY_ATTRS, struct drm_intel_overlay_put_image) +#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs) /* Allow drivers to submit batchbuffers directly to hardware, relying * on the security mechanisms provided by hardware. @@ -266,6 +270,7 @@ typedef struct drm_i915_irq_wait { #define I915_PARAM_CHIPSET_ID 4 #define I915_PARAM_HAS_GEM 5 #define I915_PARAM_NUM_FENCES_AVAIL 6 +#define I915_PARAM_HAS_OVERLAY 7 typedef struct drm_i915_getparam { int param; @@ -686,4 +691,70 @@ struct drm_i915_gem_madvise { __u32 retained; }; +/* flags */ +#define I915_OVERLAY_TYPE_MASK 0xff +#define I915_OVERLAY_YUV_PLANAR 0x01 +#define I915_OVERLAY_YUV_PACKED 0x02 +#define I915_OVERLAY_RGB 0x03 + +#define I915_OVERLAY_DEPTH_MASK 0xff00 +#define I915_OVERLAY_RGB24 0x1000 +#define I915_OVERLAY_RGB16 0x2000 +#define I915_OVERLAY_RGB15 0x3000 +#define I915_OVERLAY_YUV422 0x0100 +#define I915_OVERLAY_YUV411 0x0200 +#define I915_OVERLAY_YUV420 0x0300 +#define I915_OVERLAY_YUV410 0x0400 + +#define I915_OVERLAY_SWAP_MASK 0xff0000 +#define I915_OVERLAY_NO_SWAP 0x000000 +#define I915_OVERLAY_UV_SWAP 0x010000 +#define I915_OVERLAY_Y_SWAP 0x020000 +#define I915_OVERLAY_Y_AND_UV_SWAP 0x030000 + +#define I915_OVERLAY_FLAGS_MASK 0xff000000 +#define I915_OVERLAY_ENABLE 0x01000000 + +struct drm_intel_overlay_put_image { + /* various flags and src format description */ + __u32 flags; + /* source picture description */ + __u32 bo_handle; + /* stride values and offsets are in bytes, buffer relative */ + __u16 stride_Y; /* stride for packed formats */ + __u16 stride_UV; + __u32 offset_Y; /* offset for packet formats */ + __u32 offset_U; + __u32 offset_V; + /* in pixels */ + __u16 src_width; + __u16 src_height; + /* to compensate the scaling factors for partially covered surfaces */ + __u16 src_scan_width; + __u16 src_scan_height; + /* output crtc description */ + __u32 crtc_id; + __u16 dst_x; + __u16 dst_y; + __u16 dst_width; + __u16 dst_height; +}; + +/* flags */ +#define I915_OVERLAY_UPDATE_ATTRS (1<<0) +#define I915_OVERLAY_UPDATE_GAMMA (1<<1) +struct drm_intel_overlay_attrs { + __u32 flags; + __u32 color_key; + __s32 brightness; + __u32 contrast; + __u32 saturation; 
+ __u32 gamma0; + __u32 gamma1; + __u32 gamma2; + __u32 gamma3; + __u32 gamma4; + __u32 gamma5; +}; + #endif /* _I915_DRM_H_ */ -- cgit v1.2.3 From d81c45e1c9369855901420f79114852eba2ea16a Mon Sep 17 00:00:00 2001 From: Zhao Yakui Date: Fri, 16 Oct 2009 09:20:41 +0800 Subject: ACPI: Notify the _PPC evaluation status to the platform According to the ACPI spec(section 8.4.4.3) OSPM should convey the _PPC evaluations status to the platform if there exists the _OST object. The _OST contains two arguments: The first is the PERFORMANCE notificatin event. The second is the status of _PPC object. OSPM will convey the _PPC evaluation status to the platform. Of course when the module parameter of "ignore_ppc" is added, OSPM won't evaluate the _PPC object. But it will call the _OST object. At the same time the _OST object will be evaluated only when the PERFORMANCE notification event is received. Signed-off-by: Zhao Yakui Signed-off-by: Len Brown --- drivers/acpi/processor_core.c | 6 ++--- drivers/acpi/processor_perflib.c | 50 +++++++++++++++++++++++++++++++++++++--- include/acpi/processor.h | 5 ++-- 3 files changed, 53 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index c567b46dfa0f..773d7e76f301 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c @@ -722,7 +722,7 @@ static void acpi_processor_notify(struct acpi_device *device, u32 event) switch (event) { case ACPI_PROCESSOR_NOTIFY_PERFORMANCE: saved = pr->performance_platform_limit; - acpi_processor_ppc_has_changed(pr); + acpi_processor_ppc_has_changed(pr, 1); if (saved == pr->performance_platform_limit) break; acpi_bus_generate_proc_event(device, event, @@ -758,7 +758,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb, struct acpi_processor *pr = per_cpu(processors, cpu); if (action == CPU_ONLINE && pr) { - acpi_processor_ppc_has_changed(pr); + acpi_processor_ppc_has_changed(pr, 0); acpi_processor_cst_has_changed(pr); acpi_processor_tstate_has_changed(pr); } @@ -830,7 +830,7 @@ static int acpi_processor_add(struct acpi_device *device) arch_acpi_processor_cleanup_pdc(pr); #ifdef CONFIG_CPU_FREQ - acpi_processor_ppc_has_changed(pr); + acpi_processor_ppc_has_changed(pr, 0); #endif acpi_processor_get_throttling_info(pr); acpi_processor_get_limit_info(pr); diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index 8ba0ed0b9ddb..fc1f49bbbeef 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -152,15 +152,59 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr) return 0; } -int acpi_processor_ppc_has_changed(struct acpi_processor *pr) +#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80 +/* + * acpi_processor_ppc_ost: Notify firmware the _PPC evaluation status + * @handle: ACPI processor handle + * @status: the status code of _PPC evaluation + * 0: success. OSPM is now using the performance state specificed. + * 1: failure. 
OSPM has not changed the number of P-states in use + */ +static void acpi_processor_ppc_ost(acpi_handle handle, int status) +{ + union acpi_object params[2] = { + {.type = ACPI_TYPE_INTEGER,}, + {.type = ACPI_TYPE_INTEGER,}, + }; + struct acpi_object_list arg_list = {2, params}; + acpi_handle temp; + + params[0].integer.value = ACPI_PROCESSOR_NOTIFY_PERFORMANCE; + params[1].integer.value = status; + + /* when there is no _OST , skip it */ + if (ACPI_FAILURE(acpi_get_handle(handle, "_OST", &temp))) + return; + + acpi_evaluate_object(handle, "_OST", &arg_list, NULL); + return; +} + +int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag) { int ret; - if (ignore_ppc) + if (ignore_ppc) { + /* + * Only when it is notification event, the _OST object + * will be evaluated. Otherwise it is skipped. + */ + if (event_flag) + acpi_processor_ppc_ost(pr->handle, 1); return 0; + } ret = acpi_processor_get_platform_limit(pr); - + /* + * Only when it is notification event, the _OST object + * will be evaluated. Otherwise it is skipped. + */ + if (event_flag) { + if (ret < 0) + acpi_processor_ppc_ost(pr->handle, 1); + else + acpi_processor_ppc_ost(pr->handle, 0); + } if (ret < 0) return (ret); else diff --git a/include/acpi/processor.h b/include/acpi/processor.h index 740ac3ad8fd0..a920237f7c62 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h @@ -294,7 +294,7 @@ static inline void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx #ifdef CONFIG_CPU_FREQ void acpi_processor_ppc_init(void); void acpi_processor_ppc_exit(void); -int acpi_processor_ppc_has_changed(struct acpi_processor *pr); +int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag); #else static inline void acpi_processor_ppc_init(void) { @@ -304,7 +304,8 @@ static inline void acpi_processor_ppc_exit(void) { return; } -static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr) +static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr, + int event_flag) { static unsigned int printout = 1; if (printout) { -- cgit v1.2.3 From 0efea0006335a2425b1a12a2ad35efad626fe353 Mon Sep 17 00:00:00 2001 From: Kenji Kaneshige Date: Thu, 5 Nov 2009 12:05:11 +0900 Subject: PCI: cache PCIe capability offset There are a lot of codes that searches PCI express capability offset in the PCI configuration space using pci_find_capability(). Caching it in the struct pci_dev will reduce unncecessary search. This patch adds an additional 'pcie_cap' fields into struct pci_dev, which is initialized at pci device scan time (in set_pcie_port_type()). 
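As an illustration of the intended win (a sketch only; the helper name is invented), a driver that needs a PCI Express register no longer has to re-walk the capability list with pci_find_capability() on every access once the offset is cached:

/* Sketch: read the Device Control register via the cached offset. */
static u16 example_read_exp_devctl(struct pci_dev *dev)
{
	u16 ctl = 0;

	/* pcie_cap is 0 when the device has no PCI Express capability */
	if (dev->pcie_cap)
		pci_read_config_word(dev, dev->pcie_cap + PCI_EXP_DEVCTL, &ctl);

	return ctl;
}
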
Signed-off-by: Kenji Kaneshige Signed-off-by: Jesse Barnes --- drivers/pci/probe.c | 1 + include/linux/pci.h | 1 + 2 files changed, 2 insertions(+) (limited to 'include') diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 623086f9ba84..54b9f1501487 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -692,6 +692,7 @@ static void set_pcie_port_type(struct pci_dev *pdev) if (!pos) return; pdev->is_pcie = 1; + pdev->pcie_cap = pos; pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; } diff --git a/include/linux/pci.h b/include/linux/pci.h index 86c31ac454d1..233b3a092035 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -218,6 +218,7 @@ struct pci_dev { unsigned int class; /* 3 bytes: (base,sub,prog-if) */ u8 revision; /* PCI revision, low byte of class word */ u8 hdr_type; /* PCI header type (`multi' flag masked out) */ + u8 pcie_cap; /* PCI-E capability offset */ u8 pcie_type; /* PCI-E device/port type */ u8 rom_base_reg; /* which config register controls the ROM */ u8 pin; /* which interrupt pin this device uses */ -- cgit v1.2.3 From 87ec0e98cfdd8b68da6a7f9e70142ffc0e404fbb Mon Sep 17 00:00:00 2001 From: Anton Vorontsov Date: Mon, 12 Oct 2009 20:49:25 +0400 Subject: spi_mpc8xxx: Turn qe_mode into flags Soon there will be more flags introduced in subsequent patches, so let's turn qe_mode into flags. Also introduce mpc8xxx_spi_strmode() and print current SPI mode. Signed-off-by: Anton Vorontsov Acked-by: David Brownell Signed-off-by: Kumar Gala --- drivers/spi/spi_mpc8xxx.c | 30 +++++++++++++++++++----------- include/linux/fsl_devices.h | 2 +- 2 files changed, 20 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/drivers/spi/spi_mpc8xxx.c b/drivers/spi/spi_mpc8xxx.c index 4b119eaf4e6b..80374dfa9708 100644 --- a/drivers/spi/spi_mpc8xxx.c +++ b/drivers/spi/spi_mpc8xxx.c @@ -96,7 +96,8 @@ struct mpc8xxx_spi { u32 rx_shift; /* RX data reg shift when in qe mode */ u32 tx_shift; /* TX data reg shift when in qe mode */ - bool qe_mode; + unsigned int flags; +#define SPI_QE_CPU_MODE (1 << 0) /* QE CPU ("PIO") mode */ struct workqueue_struct *workqueue; struct work_struct work; @@ -235,14 +236,14 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) if (bits_per_word <= 8) { cs->get_rx = mpc8xxx_spi_rx_buf_u8; cs->get_tx = mpc8xxx_spi_tx_buf_u8; - if (mpc8xxx_spi->qe_mode) { + if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) { cs->rx_shift = 16; cs->tx_shift = 24; } } else if (bits_per_word <= 16) { cs->get_rx = mpc8xxx_spi_rx_buf_u16; cs->get_tx = mpc8xxx_spi_tx_buf_u16; - if (mpc8xxx_spi->qe_mode) { + if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) { cs->rx_shift = 16; cs->tx_shift = 16; } @@ -252,7 +253,8 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) } else return -EINVAL; - if (mpc8xxx_spi->qe_mode && spi->mode & SPI_LSB_FIRST) { + if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE && + spi->mode & SPI_LSB_FIRST) { cs->tx_shift = 0; if (bits_per_word <= 8) cs->rx_shift = 8; @@ -518,6 +520,13 @@ static void mpc8xxx_spi_cleanup(struct spi_device *spi) kfree(spi->controller_state); } +static const char *mpc8xxx_spi_strmode(unsigned int flags) +{ + if (flags & SPI_QE_CPU_MODE) + return "QE CPU"; + return "CPU"; +} + static struct spi_master * __devinit mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq) { @@ -544,14 +553,14 @@ mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq) master->cleanup = 
mpc8xxx_spi_cleanup; mpc8xxx_spi = spi_master_get_devdata(master); - mpc8xxx_spi->qe_mode = pdata->qe_mode; mpc8xxx_spi->get_rx = mpc8xxx_spi_rx_buf_u8; mpc8xxx_spi->get_tx = mpc8xxx_spi_tx_buf_u8; + mpc8xxx_spi->flags = pdata->flags; mpc8xxx_spi->spibrg = pdata->sysclk; mpc8xxx_spi->rx_shift = 0; mpc8xxx_spi->tx_shift = 0; - if (mpc8xxx_spi->qe_mode) { + if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) { mpc8xxx_spi->rx_shift = 16; mpc8xxx_spi->tx_shift = 24; } @@ -584,7 +593,7 @@ mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq) /* Enable SPI interface */ regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; - if (pdata->qe_mode) + if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) regval |= SPMODE_OP; mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mode, regval); @@ -604,9 +613,8 @@ mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq) if (ret < 0) goto unreg_master; - printk(KERN_INFO - "%s: MPC8xxx SPI Controller driver at 0x%p (irq = %d)\n", - dev_name(dev), mpc8xxx_spi->base, mpc8xxx_spi->irq); + dev_info(dev, "at 0x%p (irq = %d), %s mode\n", mpc8xxx_spi->base, + mpc8xxx_spi->irq, mpc8xxx_spi_strmode(mpc8xxx_spi->flags)); return master; @@ -797,7 +805,7 @@ static int __devinit of_mpc8xxx_spi_probe(struct of_device *ofdev, prop = of_get_property(np, "mode", NULL); if (prop && !strcmp(prop, "cpu-qe")) - pdata->qe_mode = 1; + pdata->flags = SPI_QE_CPU_MODE; ret = of_mpc8xxx_spi_get_chipselects(dev); if (ret) diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h index 43fc95d822d5..39fd94681e74 100644 --- a/include/linux/fsl_devices.h +++ b/include/linux/fsl_devices.h @@ -74,7 +74,7 @@ struct spi_device; struct fsl_spi_platform_data { u32 initial_spmode; /* initial SPMODE value */ s16 bus_num; - bool qe_mode; + unsigned int flags; /* board specific information */ u16 max_chipselect; void (*cs_control)(struct spi_device *spi, bool on); -- cgit v1.2.3 From 2e9d546eda5888962a441da1e96bbf92cb5b1cbb Mon Sep 17 00:00:00 2001 From: Anton Vorontsov Date: Wed, 23 Sep 2009 22:52:38 +0400 Subject: powerpc/fsl: Make fsl_deep_sleep() usable w/ modules and non-83xx builds Export is needed for modular builds, and a static inline stub is needed for non-MPC83xx builds. Signed-off-by: Anton Vorontsov Signed-off-by: Kumar Gala --- arch/powerpc/platforms/83xx/suspend.c | 1 + include/linux/fsl_devices.h | 4 ++++ 2 files changed, 5 insertions(+) (limited to 'include') diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c index 08e65fc8b98c..d306f07b9aa1 100644 --- a/arch/powerpc/platforms/83xx/suspend.c +++ b/arch/powerpc/platforms/83xx/suspend.c @@ -96,6 +96,7 @@ int fsl_deep_sleep(void) { return deep_sleeping; } +EXPORT_SYMBOL(fsl_deep_sleep); static int mpc83xx_change_state(void) { diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h index 39fd94681e74..47188d512b8f 100644 --- a/include/linux/fsl_devices.h +++ b/include/linux/fsl_devices.h @@ -90,6 +90,10 @@ struct mpc8xx_pcmcia_ops { * lead to a deep sleep (i.e. power removed from the core, * instead of just the clock). 
*/ +#if defined(CONFIG_PPC_83xx) && defined(CONFIG_SUSPEND) int fsl_deep_sleep(void); +#else +static inline int fsl_deep_sleep(void) { return 0; } +#endif #endif /* _FSL_DEVICE_H_ */ -- cgit v1.2.3 From 417608c20a4c8397bc5307d949ec01ea0a0dd8e5 Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Thu, 12 Nov 2009 11:19:44 -0800 Subject: IB/mlx4: Remove limitation on LSO header size Current code has a limitation: an LSO header is not allowed to cross a 64 byte boundary. This patch removes this limitation by setting the WQE RR for large headers thus allowing LSO headers of any size. The extra buffer reserved for MLX4_IB_QP_LSO QPs has been doubled, from 64 to 128 bytes, assuming this is reasonable upper limit for header length. Also, this patch will cause IB_DEVICE_UD_TSO to be set only for HCA FW versions that set MLX4_DEV_CAP_FLAG_BLH; e.g. FW version 2.6.000 and higher. Signed-off-by: Eli Cohen Signed-off-by: Roland Dreier --- drivers/infiniband/hw/mlx4/main.c | 2 +- drivers/infiniband/hw/mlx4/qp.c | 24 ++++++++++++------------ drivers/net/mlx4/fw.c | 1 + include/linux/mlx4/device.h | 1 + 4 files changed, 15 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 3cb3f47a10b8..e596537ff353 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -103,7 +103,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE; if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM) props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM; - if (dev->dev->caps.max_gso_sz) + if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH) props->device_cap_flags |= IB_DEVICE_UD_TSO; if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY) props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY; diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 518d561970aa..847030c89a8d 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -54,7 +54,8 @@ enum { /* * Largest possible UD header: send with GRH and immediate data. */ - MLX4_IB_UD_HEADER_SIZE = 72 + MLX4_IB_UD_HEADER_SIZE = 72, + MLX4_IB_LSO_HEADER_SPARE = 128, }; struct mlx4_ib_sqp { @@ -67,7 +68,8 @@ struct mlx4_ib_sqp { }; enum { - MLX4_IB_MIN_SQ_STRIDE = 6 + MLX4_IB_MIN_SQ_STRIDE = 6, + MLX4_IB_CACHE_LINE_SIZE = 64, }; static const __be32 mlx4_ib_opcode[] = { @@ -261,7 +263,7 @@ static int send_wqe_overhead(enum ib_qp_type type, u32 flags) case IB_QPT_UD: return sizeof (struct mlx4_wqe_ctrl_seg) + sizeof (struct mlx4_wqe_datagram_seg) + - ((flags & MLX4_IB_QP_LSO) ? 64 : 0); + ((flags & MLX4_IB_QP_LSO) ? 
MLX4_IB_LSO_HEADER_SPARE : 0); case IB_QPT_UC: return sizeof (struct mlx4_wqe_ctrl_seg) + sizeof (struct mlx4_wqe_raddr_seg); @@ -1466,16 +1468,12 @@ static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg) static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr, struct mlx4_ib_qp *qp, unsigned *lso_seg_len, - __be32 *lso_hdr_sz) + __be32 *lso_hdr_sz, __be32 *blh) { unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16); - /* - * This is a temporary limitation and will be removed in - * a forthcoming FW release: - */ - if (unlikely(halign > 64)) - return -EINVAL; + if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE)) + *blh = cpu_to_be32(1 << 6); if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) && wr->num_sge > qp->sq.max_gs - (halign >> 4))) @@ -1521,6 +1519,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, __be32 dummy; __be32 *lso_wqe; __be32 uninitialized_var(lso_hdr_sz); + __be32 blh; int i; spin_lock_irqsave(&qp->sq.lock, flags); @@ -1529,6 +1528,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, for (nreq = 0; wr; ++nreq, wr = wr->next) { lso_wqe = &dummy; + blh = 0; if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { err = -ENOMEM; @@ -1615,7 +1615,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, size += sizeof (struct mlx4_wqe_datagram_seg) / 16; if (wr->opcode == IB_WR_LSO) { - err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz); + err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz, &blh); if (unlikely(err)) { *bad_wr = wr; goto out; @@ -1686,7 +1686,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, } ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] | - (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0); + (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh; stamp = ind + qp->sq_spare_wqes; ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift); diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c index 3c16602172fc..7194be3a2894 100644 --- a/drivers/net/mlx4/fw.c +++ b/drivers/net/mlx4/fw.c @@ -90,6 +90,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags) [ 9] = "Q_Key violation counter", [10] = "VMM", [12] = "DPDP", + [15] = "Big LSO headers", [16] = "MW support", [17] = "APM support", [18] = "Atomic ops support", diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index ce7cc6c7bcbb..e92d1bfdb330 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -61,6 +61,7 @@ enum { MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1 << 8, MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1 << 9, MLX4_DEV_CAP_FLAG_DPDP = 1 << 12, + MLX4_DEV_CAP_FLAG_BLH = 1 << 15, MLX4_DEV_CAP_FLAG_MEM_WINDOW = 1 << 16, MLX4_DEV_CAP_FLAG_APM = 1 << 17, MLX4_DEV_CAP_FLAG_ATOMIC = 1 << 18, -- cgit v1.2.3 From 0a3adadee42f2865bb867b8c5f4955b7def9baad Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Wed, 4 Nov 2009 18:12:35 -0500 Subject: nfsd: make fs/nfsd/vfs.h for common includes None of this stuff is used outside nfsd, so move it out of the common linux include directory. Actually, probably none of the stuff in include/linux/nfsd/nfsd.h really belongs there, so later we may remove that file entirely. Signed-off-by: J. 
Bruce Fields --- fs/nfsd/lockd.c | 1 + fs/nfsd/nfs2acl.c | 1 + fs/nfsd/nfs3acl.c | 1 + fs/nfsd/nfs3proc.c | 1 + fs/nfsd/nfs4proc.c | 1 + fs/nfsd/nfs4recover.c | 1 + fs/nfsd/nfs4state.c | 1 + fs/nfsd/nfs4xdr.c | 1 + fs/nfsd/nfsfh.c | 1 + fs/nfsd/nfsproc.c | 1 + fs/nfsd/nfssvc.c | 1 + fs/nfsd/vfs.c | 1 + fs/nfsd/vfs.h | 98 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/nfsd/nfsd.h | 87 +---------------------------------------- 14 files changed, 111 insertions(+), 86 deletions(-) create mode 100644 fs/nfsd/vfs.h (limited to 'include') diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c index b2786a5f9afe..812bc64874f6 100644 --- a/fs/nfsd/lockd.c +++ b/fs/nfsd/lockd.c @@ -16,6 +16,7 @@ #include #include #include +#include "vfs.h" #define NFSDDBG_FACILITY NFSDDBG_LOCKD diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c index e2a17f0a96a7..38c883d48b02 100644 --- a/fs/nfsd/nfs2acl.c +++ b/fs/nfsd/nfs2acl.c @@ -14,6 +14,7 @@ #include #include #include +#include "vfs.h" #define NFSDDBG_FACILITY NFSDDBG_PROC #define RETURN_STATUS(st) { resp->status = (st); return (st); } diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c index ff73596eb550..526d85a65f76 100644 --- a/fs/nfsd/nfs3acl.c +++ b/fs/nfsd/nfs3acl.c @@ -13,6 +13,7 @@ #include #include #include +#include "vfs.h" #define RETURN_STATUS(st) { resp->status = (st); return (st); } diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c index a713c418a922..1a259d313e14 100644 --- a/fs/nfsd/nfs3proc.c +++ b/fs/nfsd/nfs3proc.c @@ -25,6 +25,7 @@ #include #include #include +#include "vfs.h" #define NFSDDBG_FACILITY NFSDDBG_PROC diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index bebc0c2e1b0a..60a93cdefef5 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -48,6 +48,7 @@ #include #include #include +#include "vfs.h" #define NFSDDBG_FACILITY NFSDDBG_PROC diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index b5348405046b..c7a6b245c7ad 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -47,6 +47,7 @@ #include #include #include +#include "vfs.h" #define NFSDDBG_FACILITY NFSDDBG_PROC diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index c8b621a120cd..850960e5d626 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -56,6 +56,7 @@ #include #include #include +#include "vfs.h" #define NFSDDBG_FACILITY NFSDDBG_PROC diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 0fbd50cee1f6..db0fc55670b3 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -57,6 +57,7 @@ #include #include #include +#include "vfs.h" #define NFSDDBG_FACILITY NFSDDBG_XDR diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c index 01965b2f3a76..d0d8a217a3ea 100644 --- a/fs/nfsd/nfsfh.c +++ b/fs/nfsd/nfsfh.c @@ -22,6 +22,7 @@ #include #include #include +#include "vfs.h" #include "auth.h" #define NFSDDBG_FACILITY NFSDDBG_FH diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c index c5393d1b8955..6c967e1ba37b 100644 --- a/fs/nfsd/nfsproc.c +++ b/fs/nfsd/nfsproc.c @@ -24,6 +24,7 @@ #include #include #include +#include "vfs.h" typedef struct svc_rqst svc_rqst; typedef struct svc_buf svc_buf; diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index 67ea83eedd43..2944b31dcbe6 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -35,6 +35,7 @@ #include #include #include +#include "vfs.h" #define NFSDDBG_FACILITY NFSDDBG_SVC diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 638573968dcf..a7038ede671a 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -56,6 +56,7 @@ #endif /* CONFIG_NFSD_V4 */ #include #include +#include "vfs.h" #include 
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h new file mode 100644 index 000000000000..b8011fd2fcab --- /dev/null +++ b/fs/nfsd/vfs.h @@ -0,0 +1,98 @@ +/* + * Copyright (C) 1995-1997 Olaf Kirch + */ + +#ifndef LINUX_NFSD_VFS_H +#define LINUX_NFSD_VFS_H + +/* + * Flags for nfsd_permission + */ +#define NFSD_MAY_NOP 0 +#define NFSD_MAY_EXEC 1 /* == MAY_EXEC */ +#define NFSD_MAY_WRITE 2 /* == MAY_WRITE */ +#define NFSD_MAY_READ 4 /* == MAY_READ */ +#define NFSD_MAY_SATTR 8 +#define NFSD_MAY_TRUNC 16 +#define NFSD_MAY_LOCK 32 +#define NFSD_MAY_OWNER_OVERRIDE 64 +#define NFSD_MAY_LOCAL_ACCESS 128 /* IRIX doing local access check on device special file*/ +#define NFSD_MAY_BYPASS_GSS_ON_ROOT 256 + +#define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE) +#define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC) + +/* + * Callback function for readdir + */ +typedef int (*nfsd_dirop_t)(struct inode *, struct dentry *, int, int); + +/* nfsd/vfs.c */ +int fh_lock_parent(struct svc_fh *, struct dentry *); +int nfsd_racache_init(int); +void nfsd_racache_shutdown(void); +int nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp, + struct svc_export **expp); +__be32 nfsd_lookup(struct svc_rqst *, struct svc_fh *, + const char *, unsigned int, struct svc_fh *); +__be32 nfsd_lookup_dentry(struct svc_rqst *, struct svc_fh *, + const char *, unsigned int, + struct svc_export **, struct dentry **); +__be32 nfsd_setattr(struct svc_rqst *, struct svc_fh *, + struct iattr *, int, time_t); +#ifdef CONFIG_NFSD_V4 +__be32 nfsd4_set_nfs4_acl(struct svc_rqst *, struct svc_fh *, + struct nfs4_acl *); +int nfsd4_get_nfs4_acl(struct svc_rqst *, struct dentry *, struct nfs4_acl **); +#endif /* CONFIG_NFSD_V4 */ +__be32 nfsd_create(struct svc_rqst *, struct svc_fh *, + char *name, int len, struct iattr *attrs, + int type, dev_t rdev, struct svc_fh *res); +#ifdef CONFIG_NFSD_V3 +__be32 nfsd_access(struct svc_rqst *, struct svc_fh *, u32 *, u32 *); +__be32 nfsd_create_v3(struct svc_rqst *, struct svc_fh *, + char *name, int len, struct iattr *attrs, + struct svc_fh *res, int createmode, + u32 *verifier, int *truncp, int *created); +__be32 nfsd_commit(struct svc_rqst *, struct svc_fh *, + loff_t, unsigned long); +#endif /* CONFIG_NFSD_V3 */ +__be32 nfsd_open(struct svc_rqst *, struct svc_fh *, int, + int, struct file **); +void nfsd_close(struct file *); +__be32 nfsd_read(struct svc_rqst *, struct svc_fh *, struct file *, + loff_t, struct kvec *, int, unsigned long *); +__be32 nfsd_write(struct svc_rqst *, struct svc_fh *,struct file *, + loff_t, struct kvec *,int, unsigned long *, int *); +__be32 nfsd_readlink(struct svc_rqst *, struct svc_fh *, + char *, int *); +__be32 nfsd_symlink(struct svc_rqst *, struct svc_fh *, + char *name, int len, char *path, int plen, + struct svc_fh *res, struct iattr *); +__be32 nfsd_link(struct svc_rqst *, struct svc_fh *, + char *, int, struct svc_fh *); +__be32 nfsd_rename(struct svc_rqst *, + struct svc_fh *, char *, int, + struct svc_fh *, char *, int); +__be32 nfsd_remove(struct svc_rqst *, + struct svc_fh *, char *, int); +__be32 nfsd_unlink(struct svc_rqst *, struct svc_fh *, int type, + char *name, int len); +int nfsd_truncate(struct svc_rqst *, struct svc_fh *, + unsigned long size); +__be32 nfsd_readdir(struct svc_rqst *, struct svc_fh *, + loff_t *, struct readdir_cd *, filldir_t); +__be32 nfsd_statfs(struct svc_rqst *, struct svc_fh *, + struct kstatfs *, int access); + +int nfsd_notify_change(struct inode *, struct iattr *); +__be32 nfsd_permission(struct svc_rqst *, 
struct svc_export *, + struct dentry *, int); +int nfsd_sync_dir(struct dentry *dp); + +#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) +struct posix_acl *nfsd_get_posix_acl(struct svc_fh *, int); +int nfsd_set_posix_acl(struct svc_fh *, int, struct posix_acl *); +#endif + +#endif /* LINUX_NFSD_VFS_H */ diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h index 510ffdd5020e..e4518d090a8c 100644 --- a/include/linux/nfsd/nfsd.h +++ b/include/linux/nfsd/nfsd.h @@ -25,30 +25,10 @@ */ #define NFSD_SUPPORTED_MINOR_VERSION 1 -/* - * Flags for nfsd_permission - */ -#define NFSD_MAY_NOP 0 -#define NFSD_MAY_EXEC 1 /* == MAY_EXEC */ -#define NFSD_MAY_WRITE 2 /* == MAY_WRITE */ -#define NFSD_MAY_READ 4 /* == MAY_READ */ -#define NFSD_MAY_SATTR 8 -#define NFSD_MAY_TRUNC 16 -#define NFSD_MAY_LOCK 32 -#define NFSD_MAY_OWNER_OVERRIDE 64 -#define NFSD_MAY_LOCAL_ACCESS 128 /* IRIX doing local access check on device special file*/ -#define NFSD_MAY_BYPASS_GSS_ON_ROOT 256 - -#define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE) -#define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC) - -/* - * Callback function for readdir - */ struct readdir_cd { __be32 err; /* 0, nfserr, or nfserr_eof */ }; -typedef int (*nfsd_dirop_t)(struct inode *, struct dentry *, int, int); + extern struct svc_program nfsd_program; extern struct svc_version nfsd_version2, nfsd_version3, @@ -73,69 +53,6 @@ int nfsd_nrpools(void); int nfsd_get_nrthreads(int n, int *); int nfsd_set_nrthreads(int n, int *); -/* nfsd/vfs.c */ -int fh_lock_parent(struct svc_fh *, struct dentry *); -int nfsd_racache_init(int); -void nfsd_racache_shutdown(void); -int nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp, - struct svc_export **expp); -__be32 nfsd_lookup(struct svc_rqst *, struct svc_fh *, - const char *, unsigned int, struct svc_fh *); -__be32 nfsd_lookup_dentry(struct svc_rqst *, struct svc_fh *, - const char *, unsigned int, - struct svc_export **, struct dentry **); -__be32 nfsd_setattr(struct svc_rqst *, struct svc_fh *, - struct iattr *, int, time_t); -#ifdef CONFIG_NFSD_V4 -__be32 nfsd4_set_nfs4_acl(struct svc_rqst *, struct svc_fh *, - struct nfs4_acl *); -int nfsd4_get_nfs4_acl(struct svc_rqst *, struct dentry *, struct nfs4_acl **); -#endif /* CONFIG_NFSD_V4 */ -__be32 nfsd_create(struct svc_rqst *, struct svc_fh *, - char *name, int len, struct iattr *attrs, - int type, dev_t rdev, struct svc_fh *res); -#ifdef CONFIG_NFSD_V3 -__be32 nfsd_access(struct svc_rqst *, struct svc_fh *, u32 *, u32 *); -__be32 nfsd_create_v3(struct svc_rqst *, struct svc_fh *, - char *name, int len, struct iattr *attrs, - struct svc_fh *res, int createmode, - u32 *verifier, int *truncp, int *created); -__be32 nfsd_commit(struct svc_rqst *, struct svc_fh *, - loff_t, unsigned long); -#endif /* CONFIG_NFSD_V3 */ -__be32 nfsd_open(struct svc_rqst *, struct svc_fh *, int, - int, struct file **); -void nfsd_close(struct file *); -__be32 nfsd_read(struct svc_rqst *, struct svc_fh *, struct file *, - loff_t, struct kvec *, int, unsigned long *); -__be32 nfsd_write(struct svc_rqst *, struct svc_fh *,struct file *, - loff_t, struct kvec *,int, unsigned long *, int *); -__be32 nfsd_readlink(struct svc_rqst *, struct svc_fh *, - char *, int *); -__be32 nfsd_symlink(struct svc_rqst *, struct svc_fh *, - char *name, int len, char *path, int plen, - struct svc_fh *res, struct iattr *); -__be32 nfsd_link(struct svc_rqst *, struct svc_fh *, - char *, int, struct svc_fh *); -__be32 nfsd_rename(struct svc_rqst *, - struct svc_fh 
*, char *, int, - struct svc_fh *, char *, int); -__be32 nfsd_remove(struct svc_rqst *, - struct svc_fh *, char *, int); -__be32 nfsd_unlink(struct svc_rqst *, struct svc_fh *, int type, - char *name, int len); -int nfsd_truncate(struct svc_rqst *, struct svc_fh *, - unsigned long size); -__be32 nfsd_readdir(struct svc_rqst *, struct svc_fh *, - loff_t *, struct readdir_cd *, filldir_t); -__be32 nfsd_statfs(struct svc_rqst *, struct svc_fh *, - struct kstatfs *, int access); - -int nfsd_notify_change(struct inode *, struct iattr *); -__be32 nfsd_permission(struct svc_rqst *, struct svc_export *, - struct dentry *, int); -int nfsd_sync_dir(struct dentry *dp); - #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) #ifdef CONFIG_NFSD_V2_ACL extern struct svc_version nfsd_acl_version2; @@ -147,8 +64,6 @@ extern struct svc_version nfsd_acl_version3; #else #define nfsd_acl_version3 NULL #endif -struct posix_acl *nfsd_get_posix_acl(struct svc_fh *, int); -int nfsd_set_posix_acl(struct svc_fh *, int, struct posix_acl *); #endif enum vers_op {NFSD_SET, NFSD_CLEAR, NFSD_TEST, NFSD_AVAIL }; -- cgit v1.2.3 From 31b4ff06e01a9a98a8e6ae6e8c42213648eec1d1 Mon Sep 17 00:00:00 2001 From: Balaji Rao Date: Thu, 5 Nov 2009 00:24:55 +0300 Subject: pcf50633: introduces battery charging current control Implement a new sysfs attribute to allow changing MBC charging limit on the fly independently of usb current limit. It also gets set automatically every time usb current limit is changed. Limiting charging current also prevents violating USB specification in the case when the whole device is shut down and usb current limit is reset to the factory default by the pcf50633 state transition. Signed-off-by: Balaji Rao Signed-off-by: Paul Fertser Signed-off-by: Anton Vorontsov --- drivers/power/pcf50633-charger.c | 78 ++++++++++++++++++++++++++++++++++++--- include/linux/mfd/pcf50633/core.h | 7 ++++ 2 files changed, 80 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/power/pcf50633-charger.c b/drivers/power/pcf50633-charger.c index 48e92c020f4b..21b6e64e7805 100644 --- a/drivers/power/pcf50633-charger.c +++ b/drivers/power/pcf50633-charger.c @@ -48,16 +48,21 @@ int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma) u8 bits; int charging_start = 1; u8 mbcs2, chgmod; + unsigned int mbcc5; - if (ma >= 1000) + if (ma >= 1000) { bits = PCF50633_MBCC7_USB_1000mA; - else if (ma >= 500) + ma = 1000; + } else if (ma >= 500) { bits = PCF50633_MBCC7_USB_500mA; - else if (ma >= 100) + ma = 500; + } else if (ma >= 100) { bits = PCF50633_MBCC7_USB_100mA; - else { + ma = 100; + } else { bits = PCF50633_MBCC7_USB_SUSPEND; charging_start = 0; + ma = 0; } ret = pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC7, @@ -67,7 +72,24 @@ int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma) else dev_info(pcf->dev, "usb curlim to %d mA\n", ma); - /* Manual charging start */ + /* + * We limit the charging current to be the USB current limit. + * The reason is that on pcf50633, when it enters PMU Standby mode, + * which it does when the device goes "off", the USB current limit + * reverts to the variant default. In at least one common case, that + * default is 500mA. 
By setting the charging current to be the same + * as the USB limit we set here before PMU standby, we enforce it only + * using the correct amount of current even when the USB current limit + * gets reset to the wrong thing + */ + + if (mbc->pcf->pdata->charger_reference_current_ma) { + mbcc5 = (ma << 8) / mbc->pcf->pdata->charger_reference_current_ma; + if (mbcc5 > 255) + mbcc5 = 255; + pcf50633_reg_write(mbc->pcf, PCF50633_REG_MBCC5, mbcc5); + } + mbcs2 = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2); chgmod = (mbcs2 & PCF50633_MBCS2_MBC_MASK); @@ -157,9 +179,55 @@ static ssize_t set_usblim(struct device *dev, static DEVICE_ATTR(usb_curlim, S_IRUGO | S_IWUSR, show_usblim, set_usblim); +static ssize_t +show_chglim(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct pcf50633_mbc *mbc = dev_get_drvdata(dev); + u8 mbcc5 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCC5); + unsigned int ma; + + if (!mbc->pcf->pdata->charger_reference_current_ma) + return -ENODEV; + + ma = (mbc->pcf->pdata->charger_reference_current_ma * mbcc5) >> 8; + + return sprintf(buf, "%u\n", ma); +} + +static ssize_t set_chglim(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct pcf50633_mbc *mbc = dev_get_drvdata(dev); + unsigned long ma; + unsigned int mbcc5; + int ret; + + if (!mbc->pcf->pdata->charger_reference_current_ma) + return -ENODEV; + + ret = strict_strtoul(buf, 10, &ma); + if (ret) + return -EINVAL; + + mbcc5 = (ma << 8) / mbc->pcf->pdata->charger_reference_current_ma; + if (mbcc5 > 255) + mbcc5 = 255; + pcf50633_reg_write(mbc->pcf, PCF50633_REG_MBCC5, mbcc5); + + return count; +} + +/* + * This attribute allows to change MBC charging limit on the fly + * independently of usb current limit. It also gets set automatically every + * time usb current limit is changed. + */ +static DEVICE_ATTR(chg_curlim, S_IRUGO | S_IWUSR, show_chglim, set_chglim); + static struct attribute *pcf50633_mbc_sysfs_entries[] = { &dev_attr_chgmode.attr, &dev_attr_usb_curlim.attr, + &dev_attr_chg_curlim.attr, NULL, }; diff --git a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h index 9aba7b779fbc..09af8fdfbb5d 100644 --- a/include/linux/mfd/pcf50633/core.h +++ b/include/linux/mfd/pcf50633/core.h @@ -31,6 +31,13 @@ struct pcf50633_platform_data { int charging_restart_interval; + /* + * Should be set accordingly to the reference resistor used, see + * I_{ch(ref)} charger reference current in the pcf50633 User + * Manual. + */ + int charger_reference_current_ma; + /* Callbacks */ void (*probe_done)(struct pcf50633 *); void (*mbc_event_callback)(struct pcf50633 *, int); -- cgit v1.2.3 From e98c73a24f33d6f54402f5cef2e7bf282d1d1fcc Mon Sep 17 00:00:00 2001 From: Paul Fertser Date: Thu, 5 Nov 2009 00:24:57 +0300 Subject: pcf50633: Get rid of charging restart software auto-triggering After reaching Battery Full condition MBC state machine switches back into charging mode when the battery voltage falls below 96% of a battery float voltage. The voltage drop in Li-Ion batteries is marginal (1-2%) till about 80% of its capacity - which means, after a BATFULL, charging won't be restarted until 75-80%. That is a desired behaviour recommended by battery manufacturers, don't mess with it. 
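As a standalone illustration of the charging-current patch above (not part of either pcf50633 patch here), the 8-bit MBCC5 register encodes the charging limit as a fraction of the board's reference current; this is the arithmetic that set_chglim() and show_chglim() rely on. The helper names below are invented for the sketch:

	/* mbcc5 = limit_ma * 256 / reference_ma, capped at the 8-bit maximum */
	static unsigned int pcf50633_ma_to_mbcc5(unsigned int ma, unsigned int ref_ma)
	{
		unsigned int mbcc5 = (ma << 8) / ref_ma;

		return mbcc5 > 255 ? 255 : mbcc5;
	}

	/* inverse mapping, as used when reporting the limit via sysfs */
	static unsigned int pcf50633_mbcc5_to_ma(unsigned int mbcc5, unsigned int ref_ma)
	{
		return (ref_ma * mbcc5) >> 8;
	}
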
Signed-off-by: Paul Fertser Signed-off-by: Anton Vorontsov --- drivers/power/pcf50633-charger.c | 46 --------------------------------------- include/linux/mfd/pcf50633/core.h | 2 -- 2 files changed, 48 deletions(-) (limited to 'include') diff --git a/drivers/power/pcf50633-charger.c b/drivers/power/pcf50633-charger.c index 21b6e64e7805..338311996eea 100644 --- a/drivers/power/pcf50633-charger.c +++ b/drivers/power/pcf50633-charger.c @@ -37,8 +37,6 @@ struct pcf50633_mbc { struct power_supply usb; struct power_supply adapter; struct power_supply ac; - - struct delayed_work charging_restart_work; }; int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma) @@ -236,44 +234,10 @@ static struct attribute_group mbc_attr_group = { .attrs = pcf50633_mbc_sysfs_entries, }; -/* MBC state machine switches into charging mode when the battery voltage - * falls below 96% of a battery float voltage. But the voltage drop in Li-ion - * batteries is marginal(1~2 %) till about 80% of its capacity - which means, - * after a BATFULL, charging won't be restarted until 80%. - * - * This work_struct function restarts charging at regular intervals to make - * sure we don't discharge too much - */ - -static void pcf50633_mbc_charging_restart(struct work_struct *work) -{ - struct pcf50633_mbc *mbc; - u8 mbcs2, chgmod; - - mbc = container_of(work, struct pcf50633_mbc, - charging_restart_work.work); - - mbcs2 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS2); - chgmod = (mbcs2 & PCF50633_MBCS2_MBC_MASK); - - if (chgmod != PCF50633_MBCS2_MBC_BAT_FULL) - return; - - /* Restart charging */ - pcf50633_reg_set_bit_mask(mbc->pcf, PCF50633_REG_MBCC1, - PCF50633_MBCC1_RESUME, PCF50633_MBCC1_RESUME); - mbc->usb_active = 1; - power_supply_changed(&mbc->usb); - - dev_info(mbc->pcf->dev, "Charging restarted\n"); -} - static void pcf50633_mbc_irq_handler(int irq, void *data) { struct pcf50633_mbc *mbc = data; - int chg_restart_interval = - mbc->pcf->pdata->charging_restart_interval; /* USB */ if (irq == PCF50633_IRQ_USBINS) { @@ -282,7 +246,6 @@ pcf50633_mbc_irq_handler(int irq, void *data) mbc->usb_online = 0; mbc->usb_active = 0; pcf50633_mbc_usb_curlim_set(mbc->pcf, 0); - cancel_delayed_work_sync(&mbc->charging_restart_work); } /* Adapter */ @@ -297,10 +260,6 @@ pcf50633_mbc_irq_handler(int irq, void *data) if (irq == PCF50633_IRQ_BATFULL) { mbc->usb_active = 0; mbc->adapter_active = 0; - - if (chg_restart_interval > 0) - schedule_delayed_work(&mbc->charging_restart_work, - chg_restart_interval); } else if (irq == PCF50633_IRQ_USBLIMON) mbc->usb_active = 0; else if (irq == PCF50633_IRQ_USBLIMOFF) @@ -463,9 +422,6 @@ static int __devinit pcf50633_mbc_probe(struct platform_device *pdev) return ret; } - INIT_DELAYED_WORK(&mbc->charging_restart_work, - pcf50633_mbc_charging_restart); - ret = sysfs_create_group(&pdev->dev.kobj, &mbc_attr_group); if (ret) dev_err(mbc->pcf->dev, "failed to create sysfs entries\n"); @@ -492,8 +448,6 @@ static int __devexit pcf50633_mbc_remove(struct platform_device *pdev) power_supply_unregister(&mbc->adapter); power_supply_unregister(&mbc->ac); - cancel_delayed_work_sync(&mbc->charging_restart_work); - kfree(mbc); return 0; diff --git a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h index 09af8fdfbb5d..46df7f053c29 100644 --- a/include/linux/mfd/pcf50633/core.h +++ b/include/linux/mfd/pcf50633/core.h @@ -29,8 +29,6 @@ struct pcf50633_platform_data { char **batteries; int num_batteries; - int charging_restart_interval; - /* * Should be set accordingly to the reference resistor 
used, see * I_{ch(ref)} charger reference current in the pcf50633 User -- cgit v1.2.3 From c329795052aa339850a45fab649ab97a36905136 Mon Sep 17 00:00:00 2001 From: Paul Fertser Date: Thu, 5 Nov 2009 00:24:59 +0300 Subject: pcf50633: Query charger status directly Current scheme is fragile and is likely to go off sync, especially on batfull->adapter charging automatic MBC transition. Query the status bit every time we need it instead. We need to export another function to query for USB presence because we can't read anything from PCF50633 (via I2C) inside irq context and that is needed by usb gadgets. Signed-off-by: Paul Fertser Signed-off-by: Anton Vorontsov --- drivers/power/pcf50633-charger.c | 50 +++++++++++++++++++++++----------------- include/linux/mfd/pcf50633/mbc.h | 1 + 2 files changed, 30 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/drivers/power/pcf50633-charger.c b/drivers/power/pcf50633-charger.c index 47187337b96c..b915a008f587 100644 --- a/drivers/power/pcf50633-charger.c +++ b/drivers/power/pcf50633-charger.c @@ -29,9 +29,7 @@ struct pcf50633_mbc { struct pcf50633 *pcf; - int adapter_active; int adapter_online; - int usb_active; int usb_online; struct power_supply usb; @@ -88,7 +86,7 @@ int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma) pcf50633_reg_write(mbc->pcf, PCF50633_REG_MBCC5, mbcc5); } - mbcs2 = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2); + mbcs2 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS2); chgmod = (mbcs2 & PCF50633_MBCS2_MBC_MASK); /* If chgmod == BATFULL, setting chgena has no effect. @@ -105,8 +103,6 @@ int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma) PCF50633_MBCC1_CHGENA, PCF50633_MBCC1_CHGENA); } - mbc->usb_active = charging_start; - power_supply_changed(&mbc->usb); return ret; @@ -117,20 +113,44 @@ int pcf50633_mbc_get_status(struct pcf50633 *pcf) { struct pcf50633_mbc *mbc = platform_get_drvdata(pcf->mbc_pdev); int status = 0; + u8 chgmod; + + if (!mbc) + return 0; + + chgmod = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS2) + & PCF50633_MBCS2_MBC_MASK; if (mbc->usb_online) status |= PCF50633_MBC_USB_ONLINE; - if (mbc->usb_active) + if (chgmod == PCF50633_MBCS2_MBC_USB_PRE || + chgmod == PCF50633_MBCS2_MBC_USB_PRE_WAIT || + chgmod == PCF50633_MBCS2_MBC_USB_FAST || + chgmod == PCF50633_MBCS2_MBC_USB_FAST_WAIT) status |= PCF50633_MBC_USB_ACTIVE; if (mbc->adapter_online) status |= PCF50633_MBC_ADAPTER_ONLINE; - if (mbc->adapter_active) + if (chgmod == PCF50633_MBCS2_MBC_ADP_PRE || + chgmod == PCF50633_MBCS2_MBC_ADP_PRE_WAIT || + chgmod == PCF50633_MBCS2_MBC_ADP_FAST || + chgmod == PCF50633_MBCS2_MBC_ADP_FAST_WAIT) status |= PCF50633_MBC_ADAPTER_ACTIVE; return status; } EXPORT_SYMBOL_GPL(pcf50633_mbc_get_status); +int pcf50633_mbc_get_usb_online_status(struct pcf50633 *pcf) +{ + struct pcf50633_mbc *mbc = platform_get_drvdata(pcf->mbc_pdev); + + if (!mbc) + return 0; + + return mbc->usb_online; +} +EXPORT_SYMBOL_GPL(pcf50633_mbc_get_usb_online_status); + static ssize_t show_chgmode(struct device *dev, struct device_attribute *attr, char *buf) { @@ -248,26 +268,14 @@ pcf50633_mbc_irq_handler(int irq, void *data) mbc->usb_online = 1; } else if (irq == PCF50633_IRQ_USBREM) { mbc->usb_online = 0; - mbc->usb_active = 0; pcf50633_mbc_usb_curlim_set(mbc->pcf, 0); } /* Adapter */ - if (irq == PCF50633_IRQ_ADPINS) { + if (irq == PCF50633_IRQ_ADPINS) mbc->adapter_online = 1; - mbc->adapter_active = 1; - } else if (irq == PCF50633_IRQ_ADPREM) { + else if (irq == PCF50633_IRQ_ADPREM) mbc->adapter_online = 0; - 
mbc->adapter_active = 0; - } - - if (irq == PCF50633_IRQ_BATFULL) { - mbc->usb_active = 0; - mbc->adapter_active = 0; - } else if (irq == PCF50633_IRQ_USBLIMON) - mbc->usb_active = 0; - else if (irq == PCF50633_IRQ_USBLIMOFF) - mbc->usb_active = 1; power_supply_changed(&mbc->ac); power_supply_changed(&mbc->usb); diff --git a/include/linux/mfd/pcf50633/mbc.h b/include/linux/mfd/pcf50633/mbc.h index 4119579acf2c..df4f5fa88de3 100644 --- a/include/linux/mfd/pcf50633/mbc.h +++ b/include/linux/mfd/pcf50633/mbc.h @@ -128,6 +128,7 @@ enum pcf50633_reg_mbcs3 { int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma); int pcf50633_mbc_get_status(struct pcf50633 *); +int pcf50633_mbc_get_usb_online_status(struct pcf50633 *); #endif -- cgit v1.2.3 From a7ca1f00ed2921b804d7ebda0f6fca8c9078fa42 Mon Sep 17 00:00:00 2001 From: Sean Hefty Date: Mon, 16 Nov 2009 09:30:33 -0800 Subject: RDMA/ucma: Add option to manually set IB path Export rdma_set_ib_paths to user space to allow applications to manually set the IB path used for connections. This allows alternative ways for a user space application or library to obtain path record information, including retrieving path information from cached data, avoiding direct interaction with the IB SA. The IB SA is a single, centralized entity that can limit scaling on large clusters running MPI applications. Future changes to the rdma cm can expand on this framework to support the full range of features allowed by the IB CM, such as separate forward and reverse paths and APM. Signed-off-by: Sean Hefty Reviewed-By: Jason Gunthorpe Signed-off-by: Roland Dreier --- drivers/infiniband/core/sa_query.c | 6 +++++ drivers/infiniband/core/ucma.c | 49 ++++++++++++++++++++++++++++++++++++++ include/rdma/ib_sa.h | 6 +++++ include/rdma/ib_user_sa.h | 16 +++++++++++++ include/rdma/rdma_user_cm.h | 6 +++-- 5 files changed, 81 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 82543716d59e..7e1ffd8ccd5c 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -604,6 +604,12 @@ retry: return ret ? 
ret : id; } +void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec) +{ + ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec); +} +EXPORT_SYMBOL(ib_sa_unpack_path); + static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query, int status, struct ib_sa_mad *mad) diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index bb96d3c4b0f4..f1cbd26a9de0 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -43,6 +43,7 @@ #include #include #include +#include MODULE_AUTHOR("Sean Hefty"); MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access"); @@ -812,6 +813,51 @@ static int ucma_set_option_id(struct ucma_context *ctx, int optname, return ret; } +static int ucma_set_ib_path(struct ucma_context *ctx, + struct ib_path_rec_data *path_data, size_t optlen) +{ + struct ib_sa_path_rec sa_path; + struct rdma_cm_event event; + int ret; + + if (optlen % sizeof(*path_data)) + return -EINVAL; + + for (; optlen; optlen -= sizeof(*path_data), path_data++) { + if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY | + IB_PATH_BIDIRECTIONAL)) + break; + } + + if (!optlen) + return -EINVAL; + + ib_sa_unpack_path(path_data->path_rec, &sa_path); + ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1); + if (ret) + return ret; + + memset(&event, 0, sizeof event); + event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; + return ucma_event_handler(ctx->cm_id, &event); +} + +static int ucma_set_option_ib(struct ucma_context *ctx, int optname, + void *optval, size_t optlen) +{ + int ret; + + switch (optname) { + case RDMA_OPTION_IB_PATH: + ret = ucma_set_ib_path(ctx, optval, optlen); + break; + default: + ret = -ENOSYS; + } + + return ret; +} + static int ucma_set_option_level(struct ucma_context *ctx, int level, int optname, void *optval, size_t optlen) { @@ -821,6 +867,9 @@ static int ucma_set_option_level(struct ucma_context *ctx, int level, case RDMA_OPTION_ID: ret = ucma_set_option_id(ctx, optname, optval, optlen); break; + case RDMA_OPTION_IB: + ret = ucma_set_option_ib(ctx, optname, optval, optlen); + break; default: ret = -ENOSYS; } diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h index 3841c1aff692..1082afaed158 100644 --- a/include/rdma/ib_sa.h +++ b/include/rdma/ib_sa.h @@ -379,4 +379,10 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num, struct ib_sa_path_rec *rec, struct ib_ah_attr *ah_attr); +/** + * ib_sa_unpack_path - Convert a path record from MAD format to struct + * ib_sa_path_rec. 
+ */ +void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec); + #endif /* IB_SA_H */ diff --git a/include/rdma/ib_user_sa.h b/include/rdma/ib_user_sa.h index 659120157e14..cfc7c9ba781e 100644 --- a/include/rdma/ib_user_sa.h +++ b/include/rdma/ib_user_sa.h @@ -35,6 +35,22 @@ #include +enum { + IB_PATH_GMP = 1, + IB_PATH_PRIMARY = (1<<1), + IB_PATH_ALTERNATE = (1<<2), + IB_PATH_OUTBOUND = (1<<3), + IB_PATH_INBOUND = (1<<4), + IB_PATH_INBOUND_REVERSE = (1<<5), + IB_PATH_BIDIRECTIONAL = IB_PATH_OUTBOUND | IB_PATH_INBOUND_REVERSE +}; + +struct ib_path_rec_data { + __u32 flags; + __u32 reserved; + __u32 path_rec[16]; +}; + struct ib_user_path_rec { __u8 dgid[16]; __u8 sgid[16]; diff --git a/include/rdma/rdma_user_cm.h b/include/rdma/rdma_user_cm.h index c55705460b87..1d165022c02d 100644 --- a/include/rdma/rdma_user_cm.h +++ b/include/rdma/rdma_user_cm.h @@ -215,12 +215,14 @@ struct rdma_ucm_event_resp { /* Option levels */ enum { - RDMA_OPTION_ID = 0 + RDMA_OPTION_ID = 0, + RDMA_OPTION_IB = 1 }; /* Option details */ enum { - RDMA_OPTION_ID_TOS = 0 + RDMA_OPTION_ID_TOS = 0, + RDMA_OPTION_IB_PATH = 1 }; struct rdma_ucm_set_option { -- cgit v1.2.3 From c9a9c5e02aedc1a2815877b0268f886d2640b771 Mon Sep 17 00:00:00 2001 From: Kristian Høgsberg Date: Sat, 12 Sep 2009 04:33:34 +1000 Subject: drm: Add async event synchronization for drmWaitVblank MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch adds a new flag to the drmWaitVblank ioctl, which asks the drm to return immediately and notify userspace when the specified vblank sequence happens by sending an event back on the drm fd. The event mechanism works with the other flags supported by the ioctls, specifically, the vblank sequence can be specified relatively or absolutely, and works for primary and seconday crtc. The signal field of the vblank request is used to provide user data, which will be sent back to user space in the vblank event. 
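A rough userspace sketch of the flow described above — request a vblank event instead of blocking, then read it back from the DRM fd. This is illustrative only: the include path and the 0x12345678 cookie are assumptions, the fd is an already-open DRM device node (a libdrm drmWaitVblank() wrapper would normally issue the ioctl), and error handling is omitted:

	#include <string.h>
	#include <unistd.h>
	#include <poll.h>
	#include <sys/ioctl.h>
	#include <drm/drm.h>	/* header location depends on the libdrm install */

	static void wait_one_vblank_event(int fd)
	{
		union drm_wait_vblank vbl;
		struct pollfd pfd = { .fd = fd, .events = POLLIN };
		char buf[1024];
		struct drm_event_vblank *ev;

		memset(&vbl, 0, sizeof vbl);
		vbl.request.type = _DRM_VBLANK_RELATIVE | _DRM_VBLANK_EVENT;
		vbl.request.sequence = 1;		/* one vblank from now */
		vbl.request.signal = 0x12345678;	/* comes back as user_data */
		ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);	/* returns immediately */

		poll(&pfd, 1, -1);		/* drm_poll() raises POLLIN once the event is queued */
		if (read(fd, buf, sizeof buf) >= (ssize_t)sizeof *ev) {
			ev = (struct drm_event_vblank *)buf;
			/* ev->base.type == DRM_EVENT_VBLANK, ev->user_data == 0x12345678,
			 * ev->sequence and ev->tv_sec/tv_usec time-stamp the vblank */
		}
	}
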
Signed-off-by: Kristian Høgsberg Reviewed-by: Jesse Barnes Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_fops.c | 98 ++++++++++++++++++++++++++++++++++++++++- drivers/gpu/drm/drm_irq.c | 95 +++++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/drm_stub.c | 2 + drivers/gpu/drm/i915/i915_drv.c | 1 + include/drm/drm.h | 33 +++++++++++++- include/drm/drmP.h | 26 +++++++++++ 6 files changed, 251 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 251bc0e3b5ec..8ac7fbf6b2b7 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -257,6 +257,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp, INIT_LIST_HEAD(&priv->lhead); INIT_LIST_HEAD(&priv->fbs); + INIT_LIST_HEAD(&priv->event_list); + init_waitqueue_head(&priv->event_wait); + priv->event_space = 4096; /* set aside 4k for event buffer */ if (dev->driver->driver_features & DRIVER_GEM) drm_gem_open(dev, priv); @@ -413,6 +416,30 @@ static void drm_master_release(struct drm_device *dev, struct file *filp) } } +static void drm_events_release(struct drm_file *file_priv) +{ + struct drm_device *dev = file_priv->minor->dev; + struct drm_pending_event *e, *et; + struct drm_pending_vblank_event *v, *vt; + unsigned long flags; + + spin_lock_irqsave(&dev->event_lock, flags); + + /* Remove pending flips */ + list_for_each_entry_safe(v, vt, &dev->vblank_event_list, base.link) + if (v->base.file_priv == file_priv) { + list_del(&v->base.link); + drm_vblank_put(dev, v->pipe); + v->base.destroy(&v->base); + } + + /* Remove unconsumed events */ + list_for_each_entry_safe(e, et, &file_priv->event_list, link) + e->destroy(e); + + spin_unlock_irqrestore(&dev->event_lock, flags); +} + /** * Release file. * @@ -451,6 +478,8 @@ int drm_release(struct inode *inode, struct file *filp) if (file_priv->minor->master) drm_master_release(dev, filp); + drm_events_release(file_priv); + if (dev->driver->driver_features & DRIVER_GEM) drm_gem_release(dev, file_priv); @@ -544,9 +573,74 @@ int drm_release(struct inode *inode, struct file *filp) } EXPORT_SYMBOL(drm_release); -/** No-op. 
*/ +static bool +drm_dequeue_event(struct drm_file *file_priv, + size_t total, size_t max, struct drm_pending_event **out) +{ + struct drm_device *dev = file_priv->minor->dev; + struct drm_pending_event *e; + unsigned long flags; + bool ret = false; + + spin_lock_irqsave(&dev->event_lock, flags); + + *out = NULL; + if (list_empty(&file_priv->event_list)) + goto out; + e = list_first_entry(&file_priv->event_list, + struct drm_pending_event, link); + if (e->event->length + total > max) + goto out; + + file_priv->event_space += e->event->length; + list_del(&e->link); + *out = e; + ret = true; + +out: + spin_unlock_irqrestore(&dev->event_lock, flags); + return ret; +} + +ssize_t drm_read(struct file *filp, char __user *buffer, + size_t count, loff_t *offset) +{ + struct drm_file *file_priv = filp->private_data; + struct drm_pending_event *e; + size_t total; + ssize_t ret; + + ret = wait_event_interruptible(file_priv->event_wait, + !list_empty(&file_priv->event_list)); + if (ret < 0) + return ret; + + total = 0; + while (drm_dequeue_event(file_priv, total, count, &e)) { + if (copy_to_user(buffer + total, + e->event, e->event->length)) { + total = -EFAULT; + break; + } + + total += e->event->length; + e->destroy(e); + } + + return total; +} +EXPORT_SYMBOL(drm_read); + unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait) { - return 0; + struct drm_file *file_priv = filp->private_data; + unsigned int mask = 0; + + poll_wait(filp, &file_priv->event_wait, wait); + + if (!list_empty(&file_priv->event_list)) + mask |= POLLIN | POLLRDNORM; + + return mask; } EXPORT_SYMBOL(drm_poll); diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 0a6f0b3bdc78..72754aca7abf 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -550,6 +550,62 @@ out: return ret; } +static int drm_queue_vblank_event(struct drm_device *dev, int pipe, + union drm_wait_vblank *vblwait, + struct drm_file *file_priv) +{ + struct drm_pending_vblank_event *e; + struct timeval now; + unsigned long flags; + unsigned int seq; + + e = kzalloc(sizeof *e, GFP_KERNEL); + if (e == NULL) + return -ENOMEM; + + e->pipe = pipe; + e->event.base.type = DRM_EVENT_VBLANK; + e->event.base.length = sizeof e->event; + e->event.user_data = vblwait->request.signal; + e->base.event = &e->event.base; + e->base.file_priv = file_priv; + e->base.destroy = (void (*) (struct drm_pending_event *)) kfree; + + do_gettimeofday(&now); + spin_lock_irqsave(&dev->event_lock, flags); + + if (file_priv->event_space < sizeof e->event) { + spin_unlock_irqrestore(&dev->event_lock, flags); + kfree(e); + return -ENOMEM; + } + + file_priv->event_space -= sizeof e->event; + seq = drm_vblank_count(dev, pipe); + if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) && + (seq - vblwait->request.sequence) <= (1 << 23)) { + vblwait->request.sequence = seq + 1; + } + + DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n", + vblwait->request.sequence, seq, pipe); + + e->event.sequence = vblwait->request.sequence; + if ((seq - vblwait->request.sequence) <= (1 << 23)) { + e->event.tv_sec = now.tv_sec; + e->event.tv_usec = now.tv_usec; + drm_vblank_put(dev, e->pipe); + list_add_tail(&e->base.link, &e->base.file_priv->event_list); + wake_up_interruptible(&e->base.file_priv->event_wait); + } else { + list_add_tail(&e->base.link, &dev->vblank_event_list); + } + + spin_unlock_irqrestore(&dev->event_lock, flags); + + return 0; +} + /** * Wait for VBLANK. 
* @@ -609,6 +665,9 @@ int drm_wait_vblank(struct drm_device *dev, void *data, goto done; } + if (flags & _DRM_VBLANK_EVENT) + return drm_queue_vblank_event(dev, crtc, vblwait, file_priv); + if ((flags & _DRM_VBLANK_NEXTONMISS) && (seq - vblwait->request.sequence) <= (1<<23)) { vblwait->request.sequence = seq + 1; @@ -641,6 +700,38 @@ done: return ret; } +void drm_handle_vblank_events(struct drm_device *dev, int crtc) +{ + struct drm_pending_vblank_event *e, *t; + struct timeval now; + unsigned long flags; + unsigned int seq; + + do_gettimeofday(&now); + seq = drm_vblank_count(dev, crtc); + + spin_lock_irqsave(&dev->event_lock, flags); + + list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) { + if (e->pipe != crtc) + continue; + if ((seq - e->event.sequence) > (1<<23)) + continue; + + DRM_DEBUG("vblank event on %d, current %d\n", + e->event.sequence, seq); + + e->event.sequence = seq; + e->event.tv_sec = now.tv_sec; + e->event.tv_usec = now.tv_usec; + drm_vblank_put(dev, e->pipe); + list_move_tail(&e->base.link, &e->base.file_priv->event_list); + wake_up_interruptible(&e->base.file_priv->event_wait); + } + + spin_unlock_irqrestore(&dev->event_lock, flags); +} + /** * drm_handle_vblank - handle a vblank event * @dev: DRM device @@ -651,7 +742,11 @@ done: */ void drm_handle_vblank(struct drm_device *dev, int crtc) { + if (!dev->num_crtcs) + return; + atomic_inc(&dev->_vblank_count[crtc]); DRM_WAKEUP(&dev->vbl_queue[crtc]); + drm_handle_vblank_events(dev, crtc); } EXPORT_SYMBOL(drm_handle_vblank); diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index 55bb8a82d612..adb864dfef3e 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c @@ -220,9 +220,11 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, INIT_LIST_HEAD(&dev->ctxlist); INIT_LIST_HEAD(&dev->vmalist); INIT_LIST_HEAD(&dev->maplist); + INIT_LIST_HEAD(&dev->vblank_event_list); spin_lock_init(&dev->count_lock); spin_lock_init(&dev->drw_lock); + spin_lock_init(&dev->event_lock); init_timer(&dev->timer); mutex_init(&dev->struct_mutex); mutex_init(&dev->ctxlist_mutex); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 7f436ec075f6..2fa217862058 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -333,6 +333,7 @@ static struct drm_driver driver = { .mmap = drm_gem_mmap, .poll = drm_poll, .fasync = drm_fasync, + .read = drm_read, #ifdef CONFIG_COMPAT .compat_ioctl = i915_compat_ioctl, #endif diff --git a/include/drm/drm.h b/include/drm/drm.h index 7cb50bdde46d..fa6d9155873d 100644 --- a/include/drm/drm.h +++ b/include/drm/drm.h @@ -454,6 +454,7 @@ struct drm_irq_busid { enum drm_vblank_seq_type { _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */ _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */ + _DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */ _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */ _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */ _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */ @@ -461,8 +462,8 @@ enum drm_vblank_seq_type { }; #define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE) -#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \ - _DRM_VBLANK_NEXTONMISS) +#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \ + _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS) struct 
drm_wait_vblank_request { enum drm_vblank_seq_type type; @@ -698,6 +699,34 @@ struct drm_gem_open { #define DRM_COMMAND_BASE 0x40 #define DRM_COMMAND_END 0xA0 +/** + * Header for events written back to userspace on the drm fd. The + * type defines the type of event, the length specifies the total + * length of the event (including the header), and user_data is + * typically a 64 bit value passed with the ioctl that triggered the + * event. A read on the drm fd will always only return complete + * events, that is, if for example the read buffer is 100 bytes, and + * there are two 64 byte events pending, only one will be returned. + * + * Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and + * up are chipset specific. + */ +struct drm_event { + __u32 type; + __u32 length; +}; + +#define DRM_EVENT_VBLANK 0x01 + +struct drm_event_vblank { + struct drm_event base; + __u64 user_data; + __u32 tv_sec; + __u32 tv_usec; + __u32 sequence; + __u32 reserved; +}; + /* typedef area */ #ifndef __KERNEL__ typedef struct drm_clip_rect drm_clip_rect_t; diff --git a/include/drm/drmP.h b/include/drm/drmP.h index c8e64bbadbcf..b0b36838ab11 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -426,6 +426,14 @@ struct drm_buf_entry { struct drm_freelist freelist; }; +/* Event queued up for userspace to read */ +struct drm_pending_event { + struct drm_event *event; + struct list_head link; + struct drm_file *file_priv; + void (*destroy)(struct drm_pending_event *event); +}; + /** File private data */ struct drm_file { int authenticated; @@ -449,6 +457,10 @@ struct drm_file { struct drm_master *master; /* master this node is currently associated with N.B. not always minor->master */ struct list_head fbs; + + wait_queue_head_t event_wait; + struct list_head event_list; + int event_space; }; /** Wait queue */ @@ -900,6 +912,12 @@ struct drm_minor { struct drm_mode_group mode_group; }; +struct drm_pending_vblank_event { + struct drm_pending_event base; + int pipe; + struct drm_event_vblank event; +}; + /** * DRM device structure. This structure represent a complete card that * may contain multiple heads. @@ -999,6 +1017,12 @@ struct drm_device { u32 max_vblank_count; /**< size of vblank counter register */ + /** + * List of events + */ + struct list_head vblank_event_list; + spinlock_t event_lock; + /*@} */ cycles_t ctx_start; cycles_t lck_start; @@ -1135,6 +1159,8 @@ extern int drm_lastclose(struct drm_device *dev); extern int drm_open(struct inode *inode, struct file *filp); extern int drm_stub_open(struct inode *inode, struct file *filp); extern int drm_fasync(int fd, struct file *filp, int on); +extern ssize_t drm_read(struct file *filp, char __user *buffer, + size_t count, loff_t *offset); extern int drm_release(struct inode *inode, struct file *filp); /* Mapping support (drm_vm.h) */ -- cgit v1.2.3 From 420a457088669e055e767dfb8468909cd1799cf9 Mon Sep 17 00:00:00 2001 From: Andres Salomon Date: Tue, 17 Nov 2009 14:41:23 -0800 Subject: drm: kill some unused DRM_PROC macros from drmP.h i915_gem_proc.c appears to have been the last user of the DRM_PROC_* macros, and it has gone away. The macros should die as well. 
Signed-off-by: Andres Salomon Signed-off-by: Andrew Morton Signed-off-by: Dave Airlie --- include/drm/drmP.h | 10 ---------- 1 file changed, 10 deletions(-) (limited to 'include') diff --git a/include/drm/drmP.h b/include/drm/drmP.h index b0b36838ab11..4977fa9038d9 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -245,16 +245,6 @@ extern void drm_ut_debug_printk(unsigned int request_level, #endif -#define DRM_PROC_LIMIT (PAGE_SIZE-80) - -#define DRM_PROC_PRINT(fmt, arg...) \ - len += sprintf(&buf[len], fmt , ##arg); \ - if (len > DRM_PROC_LIMIT) { *eof = 1; return len - offset; } - -#define DRM_PROC_PRINT_RET(ret, fmt, arg...) \ - len += sprintf(&buf[len], fmt , ##arg); \ - if (len > DRM_PROC_LIMIT) { ret; *eof = 1; return len - offset; } - /*@}*/ /***********************************************************************/ -- cgit v1.2.3 From 156822f7175d9ceb9d7e808502d3c5de8841e047 Mon Sep 17 00:00:00 2001 From: Andres Salomon Date: Tue, 17 Nov 2009 14:41:23 -0800 Subject: drm: kill more unused DRM macros There are a few more macros in drmP.h that are unused; DRM_GET_PRIV_SAREA, DRM_ARRAY_SIZE, and DRM_WAITCOUNT can go away completely. Unfortunately, DRM_COPY is still used in one place, but we can at least move it to where it's used. It's an awful looking macro.. [akpm: fix overeagerness] Signed-off-by: Andres Salomon Cc: Dave Airlie Signed-off-by: Andrew Morton Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_drv.c | 12 ++++++++++++ include/drm/drmP.h | 23 ----------------------- 2 files changed, 12 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index a75ca63deea6..43297ca45f3e 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -365,6 +365,18 @@ static void __exit drm_core_exit(void) module_init(drm_core_init); module_exit(drm_core_exit); +/** + * Copy and IOCTL return string to user space + */ +#define DRM_COPY(name, value) \ + len = strlen(value); \ + if (len > name##_len) len = name##_len; \ + name##_len = strlen(value); \ + if (len && name) { \ + if (copy_to_user(name, value, len)) \ + return -EFAULT; \ + } + /** * Get version information * diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 4977fa9038d9..1b72a526ba64 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -255,19 +255,8 @@ extern void drm_ut_debug_printk(unsigned int request_level, #define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1)) #define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x)) -#define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist) #define DRM_IF_VERSION(maj, min) (maj << 16 | min) -/** - * Get the private SAREA mapping. - * - * \param _dev DRM device. - * \param _ctx context number. - * \param _map output mapping. - */ -#define DRM_GET_PRIV_SAREA(_dev, _ctx, _map) do { \ - (_map) = (_dev)->context_sareas[_ctx]; \ -} while(0) /** * Test that the hardware lock is held by the caller, returning otherwise. @@ -286,18 +275,6 @@ do { \ } \ } while (0) -/** - * Copy and IOCTL return string to user space - */ -#define DRM_COPY( name, value ) \ - len = strlen( value ); \ - if ( len > name##_len ) len = name##_len; \ - name##_len = strlen( value ); \ - if ( len && name ) { \ - if ( copy_to_user( name, value, len ) ) \ - return -EFAULT; \ - } - /** * Ioctl function type. 
* -- cgit v1.2.3 From d91d8a3f88059d93e34ac70d059153ec69a9ffc7 Mon Sep 17 00:00:00 2001 From: Kristian Høgsberg Date: Tue, 17 Nov 2009 12:43:55 -0500 Subject: drm/kms: add page flipping ioctl MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds a page flipping ioctl to the KMS API. The ioctl takes an fb ID and a ctrc ID and flips the crtc to the given fb at the next vblank. The ioctl returns immediately but the flip doesn't happen until after any rendering that's currently queued up against the new framebuffer is done. After submitting a page flip, any execbuffer involving the old front buffer will block until the flip is completed. Optionally, a vblank event can be generated when the swap eventually happens. Signed-off-by: Kristian Høgsberg Reviewed-by: Chris Wilson Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_crtc.c | 69 ++++++++++++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/drm_drv.c | 1 + include/drm/drm.h | 1 + include/drm/drm_crtc.h | 16 +++++++++++ include/drm/drm_mode.h | 33 ++++++++++++++++++++++ 5 files changed, 120 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 5cae0b3eee9b..32756e67dd56 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -2478,3 +2478,72 @@ out: mutex_unlock(&dev->mode_config.mutex); return ret; } + +int drm_mode_page_flip_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct drm_mode_crtc_page_flip *page_flip = data; + struct drm_mode_object *obj; + struct drm_crtc *crtc; + struct drm_framebuffer *fb; + struct drm_pending_vblank_event *e = NULL; + unsigned long flags; + int ret = -EINVAL; + + if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS || + page_flip->reserved != 0) + return -EINVAL; + + mutex_lock(&dev->mode_config.mutex); + obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC); + if (!obj) + goto out; + crtc = obj_to_crtc(obj); + + if (crtc->funcs->page_flip == NULL) + goto out; + + obj = drm_mode_object_find(dev, page_flip->fb_id, DRM_MODE_OBJECT_FB); + if (!obj) + goto out; + fb = obj_to_fb(obj); + + if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) { + ret = -ENOMEM; + spin_lock_irqsave(&dev->event_lock, flags); + if (file_priv->event_space < sizeof e->event) { + spin_unlock_irqrestore(&dev->event_lock, flags); + goto out; + } + file_priv->event_space -= sizeof e->event; + spin_unlock_irqrestore(&dev->event_lock, flags); + + e = kzalloc(sizeof *e, GFP_KERNEL); + if (e == NULL) { + spin_lock_irqsave(&dev->event_lock, flags); + file_priv->event_space += sizeof e->event; + spin_unlock_irqrestore(&dev->event_lock, flags); + goto out; + } + + e->event.base.type = DRM_EVENT_VBLANK; + e->event.base.length = sizeof e->event; + e->event.user_data = page_flip->user_data; + e->base.event = &e->event.base; + e->base.file_priv = file_priv; + e->base.destroy = + (void (*) (struct drm_pending_event *)) kfree; + } + + ret = crtc->funcs->page_flip(crtc, fb, e); + if (ret) { + spin_lock_irqsave(&dev->event_lock, flags); + file_priv->event_space += sizeof e->event; + spin_unlock_irqrestore(&dev->event_lock, flags); + kfree(e); + } + +out: + mutex_unlock(&dev->mode_config.mutex); + return ret; +} diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 5bd3f9461e2d..bfaf59b02bda 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -145,6 +145,7 @@ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, 
drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), }; #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) diff --git a/include/drm/drm.h b/include/drm/drm.h index fa6d9155873d..3919a4f792ae 100644 --- a/include/drm/drm.h +++ b/include/drm/drm.h @@ -687,6 +687,7 @@ struct drm_gem_open { #define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd) #define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd) #define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int) +#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip) /** * Device specific ioctls should only be in their respective headers diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index b69347b8904f..4cc8a32dc4cf 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -290,6 +290,7 @@ struct drm_property { struct drm_crtc; struct drm_connector; struct drm_encoder; +struct drm_pending_vblank_event; /** * drm_crtc_funcs - control CRTCs for a given device @@ -333,6 +334,19 @@ struct drm_crtc_funcs { void (*destroy)(struct drm_crtc *crtc); int (*set_config)(struct drm_mode_set *set); + + /* + * Flip to the given framebuffer. This implements the page + * flip ioctl descibed in drm_mode.h, specifically, the + * implementation must return immediately and block all + * rendering to the current fb until the flip has completed. + * If userspace set the event flag in the ioctl, the event + * argument will point to an event to send back when the flip + * completes, otherwise it will be NULL. + */ + int (*page_flip)(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event); }; /** @@ -756,6 +770,8 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev, extern int drm_mode_gamma_set_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern bool drm_detect_hdmi_monitor(struct edid *edid); +extern int drm_mode_page_flip_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh, bool reduced, bool interlaced, bool margins); diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h index 1f908416aedb..68ddc6175ae7 100644 --- a/include/drm/drm_mode.h +++ b/include/drm/drm_mode.h @@ -268,4 +268,37 @@ struct drm_mode_crtc_lut { __u64 blue; }; +#define DRM_MODE_PAGE_FLIP_EVENT 0x01 +#define DRM_MODE_PAGE_FLIP_FLAGS DRM_MODE_PAGE_FLIP_EVENT + +/* + * Request a page flip on the specified crtc. + * + * This ioctl will ask KMS to schedule a page flip for the specified + * crtc. Once any pending rendering targeting the specified fb (as of + * ioctl time) has completed, the crtc will be reprogrammed to display + * that fb after the next vertical refresh. The ioctl returns + * immediately, but subsequent rendering to the current fb will block + * in the execbuffer ioctl until the page flip happens. If a page + * flip is already pending as the ioctl is called, EBUSY will be + * returned. + * + * The ioctl supports one flag, DRM_MODE_PAGE_FLIP_EVENT, which will + * request that drm sends back a vblank event (see drm.h: struct + * drm_event_vblank) when the page flip is done. 
The user_data field + * passed in with this ioctl will be returned as the user_data field + * in the vblank event struct. + * + * The reserved field must be zero until we figure out something + * clever to use it for. + */ + +struct drm_mode_crtc_page_flip { + __u32 crtc_id; + __u32 fb_id; + __u32 flags; + __u32 reserved; + __u64 user_data; +}; + #endif -- cgit v1.2.3 From 7a654158bdf9dc318fd451fbf606ed100d6ba25f Mon Sep 17 00:00:00 2001 From: Clemens Ladisch Date: Wed, 4 Nov 2009 09:42:56 +0100 Subject: drm: set the type of the drm_framebuffer::fbdev field The fbdev field of the drm_framebuffer structure is always used to store a pointer to a fb_info, so there is no reason for it to be void*. Signed-off-by: Clemens Ladisch Signed-off-by: Dave Airlie --- include/drm/drm_crtc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 4cc8a32dc4cf..d84fba15c9d8 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -256,7 +256,7 @@ struct drm_framebuffer { unsigned int depth; int bits_per_pixel; int flags; - void *fbdev; + struct fb_info *fbdev; u32 pseudo_palette[17]; struct list_head filp_head; /* if you are using the helper */ -- cgit v1.2.3 From 6266ed6e4164466177238b11ecb825a3a108a3e4 Mon Sep 17 00:00:00 2001 From: Sean Hefty Date: Thu, 19 Nov 2009 12:55:22 -0800 Subject: RDMA/cma: Replace net_device pointer with index Provide the device interface when resolving route information to ensure that the correct outbound device is used. This will also simplify processing of sin6_scope_id for IPv6 support. Based on work from: David Wilder Jason Gunthorpe Signed-off-by: Sean Hefty Signed-off-by: Roland Dreier --- drivers/infiniband/core/addr.c | 14 +++++++++++++- drivers/infiniband/core/cma.c | 2 +- include/rdma/ib_addr.h | 2 +- 3 files changed, 15 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 373f1118d57b..788a02ef01dd 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -107,7 +107,7 @@ int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev, memcpy(dev_addr->broadcast, dev->broadcast, MAX_ADDR_LEN); if (dst_dev_addr) memcpy(dev_addr->dst_dev_addr, dst_dev_addr, MAX_ADDR_LEN); - dev_addr->src_dev = dev; + dev_addr->bound_dev_if = dev->ifindex; return 0; } EXPORT_SYMBOL(rdma_copy_addr); @@ -117,6 +117,15 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr) struct net_device *dev; int ret = -EADDRNOTAVAIL; + if (dev_addr->bound_dev_if) { + dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if); + if (!dev) + return -ENODEV; + ret = rdma_copy_addr(dev_addr, dev, NULL); + dev_put(dev); + return ret; + } + switch (addr->sa_family) { case AF_INET: dev = ip_dev_find(&init_net, @@ -231,6 +240,8 @@ static int addr4_resolve_remote(struct sockaddr_in *src_in, memset(&fl, 0, sizeof fl); fl.nl_u.ip4_u.daddr = dst_ip; fl.nl_u.ip4_u.saddr = src_ip; + fl.oif = addr->bound_dev_if; + ret = ip_route_output_key(&init_net, &rt, &fl); if (ret) goto out; @@ -279,6 +290,7 @@ static int addr6_resolve_remote(struct sockaddr_in6 *src_in, memset(&fl, 0, sizeof fl); fl.nl_u.ip6_u.daddr = dst_in->sin6_addr; fl.nl_u.ip6_u.saddr = src_in->sin6_addr; + fl.oif = addr->bound_dev_if; dst = ip6_route_output(&init_net, NULL, &fl); if (!dst) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 052b4c01745d..699ad12b3a2f 100644 --- 
a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -2820,7 +2820,7 @@ static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id dev_addr = &id_priv->id.route.addr.dev_addr; - if ((dev_addr->src_dev == ndev) && + if ((dev_addr->bound_dev_if == ndev->ifindex) && memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) { printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n", ndev->name, &id_priv->id); diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h index 483057b2f4b4..27f17cc2c919 100644 --- a/include/rdma/ib_addr.h +++ b/include/rdma/ib_addr.h @@ -61,7 +61,7 @@ struct rdma_dev_addr { unsigned char dst_dev_addr[MAX_ADDR_LEN]; unsigned char broadcast[MAX_ADDR_LEN]; enum rdma_node_type dev_type; - struct net_device *src_dev; + int bound_dev_if; }; /** -- cgit v1.2.3 From c4315d85f9b76834289fd503796c01b8311c4b84 Mon Sep 17 00:00:00 2001 From: Sean Hefty Date: Thu, 19 Nov 2009 12:57:18 -0800 Subject: IB/addr: Store net_device type instead of translating to RDMA transport The struct rdma_dev_addr stores net_device address information: the source device address, destination hardware address, and broadcast address. For consistency, store the net_device type rather than converting it to the rdma_node_type. The type indicates the format of the various hardware addresses, which is what we're concerned with, and not the RDMA node type that the address may map to. Signed-off-by: Sean Hefty Signed-off-by: Roland Dreier --- drivers/infiniband/core/addr.c | 13 +------------ drivers/infiniband/core/cma.c | 6 +++--- include/rdma/ib_addr.h | 3 ++- 3 files changed, 6 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index b59ba7ccef0e..de5fe161a1b9 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -36,7 +36,6 @@ #include #include #include -#include #include #include #include @@ -92,17 +91,7 @@ EXPORT_SYMBOL(rdma_addr_unregister_client); int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev, const unsigned char *dst_dev_addr) { - switch (dev->type) { - case ARPHRD_INFINIBAND: - dev_addr->dev_type = RDMA_NODE_IB_CA; - break; - case ARPHRD_ETHER: - dev_addr->dev_type = RDMA_NODE_RNIC; - break; - default: - return -EADDRNOTAVAIL; - } - + dev_addr->dev_type = dev->type; memcpy(dev_addr->src_dev_addr, dev->dev_addr, MAX_ADDR_LEN); memcpy(dev_addr->broadcast, dev->broadcast, MAX_ADDR_LEN); if (dst_dev_addr) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 699ad12b3a2f..b305b5c17f8d 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -330,11 +330,11 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv) union ib_gid gid; int ret = -ENODEV; - switch (rdma_node_get_transport(dev_addr->dev_type)) { - case RDMA_TRANSPORT_IB: + switch (dev_addr->dev_type) { + case ARPHRD_INFINIBAND: ib_addr_get_sgid(dev_addr, &gid); break; - case RDMA_TRANSPORT_IWARP: + case ARPHRD_ETHER: iw_addr_get_sgid(dev_addr, &gid); break; default: diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h index 27f17cc2c919..3a39c55d2b9a 100644 --- a/include/rdma/ib_addr.h +++ b/include/rdma/ib_addr.h @@ -36,6 +36,7 @@ #include #include +#include #include #include #include @@ -60,7 +61,7 @@ struct rdma_dev_addr { unsigned char src_dev_addr[MAX_ADDR_LEN]; unsigned char dst_dev_addr[MAX_ADDR_LEN]; unsigned char broadcast[MAX_ADDR_LEN]; - enum rdma_node_type dev_type; + 
unsigned short dev_type; int bound_dev_if; }; -- cgit v1.2.3 From 6f8372b69c3198e06cecb1df2cb9682d0c55e657 Mon Sep 17 00:00:00 2001 From: Sean Hefty Date: Thu, 19 Nov 2009 13:26:06 -0800 Subject: RDMA/cm: fix loopback address support The RDMA CM is intended to support the use of a loopback address when establishing a connection; however, the behavior of the CM when loopback addresses are used is confusing and does not always work, depending on whether loopback was specified by the server, the client, or both. The defined behavior of rdma_bind_addr is to associate an RDMA device with an rdma_cm_id, as long as the user specified a non- zero address. (ie they weren't just trying to reserve a port) Currently, if the loopback address is passed to rdam_bind_addr, no device is associated with the rdma_cm_id. Fix this. If a loopback address is specified by the client as the destination address for a connection, it will fail to establish a connection. This is true even if the server is listing across all addresses or on the loopback address itself. The issue is that the server tries to translate the IP address carried in the REQ message to a local net_device address, which fails. The translation is not needed in this case, since the REQ carries the actual HW address that should be used. Finally, cleanup loopback support to be more transport neutral. Replace separate calls to get/set the sgid and dgid from the device address to a single call that behaves correctly depending on the format of the device address. And support both IPv4 and IPv6 address formats. Signed-off-by: Sean Hefty [ Fixed RDS build by s/ib_addr_get/rdma_addr_get/ - Roland ] Signed-off-by: Roland Dreier --- drivers/infiniband/core/cma.c | 77 +++++++++++++++++++++++------------------- drivers/infiniband/core/ucma.c | 8 ++--- include/rdma/ib_addr.h | 31 ++++++----------- net/rds/ib.c | 4 +-- net/rds/iw.c | 4 +-- 5 files changed, 61 insertions(+), 63 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index b305b5c17f8d..38867a46d39e 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -330,17 +330,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv) union ib_gid gid; int ret = -ENODEV; - switch (dev_addr->dev_type) { - case ARPHRD_INFINIBAND: - ib_addr_get_sgid(dev_addr, &gid); - break; - case ARPHRD_ETHER: - iw_addr_get_sgid(dev_addr, &gid); - break; - default: - return -ENODEV; - } - + rdma_addr_get_sgid(dev_addr, &gid); list_for_each_entry(cma_dev, &dev_list, list) { ret = ib_find_cached_gid(cma_dev->device, &gid, &id_priv->id.port_num, NULL); @@ -1032,11 +1022,17 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id, if (rt->num_paths == 2) rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path; - ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); - ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr, - &id->route.addr.dev_addr); - if (ret) - goto destroy_id; + if (cma_any_addr((struct sockaddr *) &rt->addr.src_addr)) { + rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND; + rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid); + ib_addr_set_pkey(&rt->addr.dev_addr, rt->path_rec[0].pkey); + } else { + ret = rdma_translate_ip((struct sockaddr *) &rt->addr.src_addr, + &rt->addr.dev_addr); + if (ret) + goto destroy_id; + } + rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); id_priv = container_of(id, struct rdma_id_private, id); id_priv->state = CMA_CONNECT; @@ 
-1071,10 +1067,12 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id, cma_save_net_info(&id->route.addr, &listen_id->route.addr, ip_ver, port, src, dst); - ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr, - &id->route.addr.dev_addr); - if (ret) - goto err; + if (!cma_any_addr((struct sockaddr *) &id->route.addr.src_addr)) { + ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr, + &id->route.addr.dev_addr); + if (ret) + goto err; + } id_priv = container_of(id, struct rdma_id_private, id); id_priv->state = CMA_CONNECT; @@ -1565,8 +1563,8 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms, struct sockaddr_in6 *sin6; memset(&path_rec, 0, sizeof path_rec); - ib_addr_get_sgid(&addr->dev_addr, &path_rec.sgid); - ib_addr_get_dgid(&addr->dev_addr, &path_rec.dgid); + rdma_addr_get_sgid(&addr->dev_addr, &path_rec.sgid); + rdma_addr_get_dgid(&addr->dev_addr, &path_rec.dgid); path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr)); path_rec.numb_path = 1; path_rec.reversible = 1; @@ -1781,7 +1779,11 @@ port_found: if (ret) goto out; - ib_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid); + id_priv->id.route.addr.dev_addr.dev_type = + (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB) ? + ARPHRD_INFINIBAND : ARPHRD_ETHER; + + rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid); ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey); id_priv->id.port_num = p; cma_attach_to_dev(id_priv, cma_dev); @@ -1839,7 +1841,7 @@ out: static int cma_resolve_loopback(struct rdma_id_private *id_priv) { struct cma_work *work; - struct sockaddr_in *src_in, *dst_in; + struct sockaddr *src, *dst; union ib_gid gid; int ret; @@ -1853,14 +1855,19 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv) goto err; } - ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); - ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid); + rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); + rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid); - if (cma_zero_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)) { - src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr; - dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr; - src_in->sin_family = dst_in->sin_family; - src_in->sin_addr.s_addr = dst_in->sin_addr.s_addr; + src = (struct sockaddr *) &id_priv->id.route.addr.src_addr; + if (cma_zero_addr(src)) { + dst = (struct sockaddr *) &id_priv->id.route.addr.dst_addr; + if ((src->sa_family = dst->sa_family) == AF_INET) { + ((struct sockaddr_in *) src)->sin_addr.s_addr = + ((struct sockaddr_in *) dst)->sin_addr.s_addr; + } else { + ipv6_addr_copy(&((struct sockaddr_in6 *) src)->sin6_addr, + &((struct sockaddr_in6 *) dst)->sin6_addr); + } } work->id = id_priv; @@ -2089,7 +2096,9 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND)) return -EINVAL; - if (!cma_any_addr(addr)) { + if (cma_loopback_addr(addr)) { + ret = cma_bind_loopback(id_priv); + } else if (!cma_zero_addr(addr)) { ret = rdma_translate_ip(addr, &id->route.addr.dev_addr); if (ret) goto err1; @@ -2108,7 +2117,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) return 0; err2: - if (!cma_any_addr(addr)) { + if (id_priv->cma_dev) { mutex_lock(&lock); cma_detach_from_dev(id_priv); mutex_unlock(&lock); @@ -2721,7 +2730,7 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv, 
cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid); if (id_priv->id.ps == RDMA_PS_UDP) rec.qkey = cpu_to_be32(RDMA_UDP_QKEY); - ib_addr_get_sgid(dev_addr, &rec.port_gid); + rdma_addr_get_sgid(dev_addr, &rec.port_gid); rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); rec.join_state = 1; diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index f1cbd26a9de0..b2e16c332d5b 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -563,10 +563,10 @@ static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp, switch (route->num_paths) { case 0: dev_addr = &route->addr.dev_addr; - ib_addr_get_dgid(dev_addr, - (union ib_gid *) &resp->ib_route[0].dgid); - ib_addr_get_sgid(dev_addr, - (union ib_gid *) &resp->ib_route[0].sgid); + rdma_addr_get_dgid(dev_addr, + (union ib_gid *) &resp->ib_route[0].dgid); + rdma_addr_get_sgid(dev_addr, + (union ib_gid *) &resp->ib_route[0].sgid); resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); break; case 2: diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h index 3a39c55d2b9a..fa0d52b8e622 100644 --- a/include/rdma/ib_addr.h +++ b/include/rdma/ib_addr.h @@ -122,40 +122,29 @@ static inline void ib_addr_get_mgid(struct rdma_dev_addr *dev_addr, memcpy(gid, dev_addr->broadcast + 4, sizeof *gid); } -static inline void ib_addr_get_sgid(struct rdma_dev_addr *dev_addr, - union ib_gid *gid) +static inline int rdma_addr_gid_offset(struct rdma_dev_addr *dev_addr) { - memcpy(gid, dev_addr->src_dev_addr + 4, sizeof *gid); + return dev_addr->dev_type == ARPHRD_INFINIBAND ? 4 : 0; } -static inline void ib_addr_set_sgid(struct rdma_dev_addr *dev_addr, - union ib_gid *gid) +static inline void rdma_addr_get_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid) { - memcpy(dev_addr->src_dev_addr + 4, gid, sizeof *gid); + memcpy(gid, dev_addr->src_dev_addr + rdma_addr_gid_offset(dev_addr), sizeof *gid); } -static inline void ib_addr_get_dgid(struct rdma_dev_addr *dev_addr, - union ib_gid *gid) +static inline void rdma_addr_set_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid) { - memcpy(gid, dev_addr->dst_dev_addr + 4, sizeof *gid); + memcpy(dev_addr->src_dev_addr + rdma_addr_gid_offset(dev_addr), gid, sizeof *gid); } -static inline void ib_addr_set_dgid(struct rdma_dev_addr *dev_addr, - union ib_gid *gid) +static inline void rdma_addr_get_dgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid) { - memcpy(dev_addr->dst_dev_addr + 4, gid, sizeof *gid); + memcpy(gid, dev_addr->dst_dev_addr + rdma_addr_gid_offset(dev_addr), sizeof *gid); } -static inline void iw_addr_get_sgid(struct rdma_dev_addr *dev_addr, - union ib_gid *gid) -{ - memcpy(gid, dev_addr->src_dev_addr, sizeof *gid); -} - -static inline void iw_addr_get_dgid(struct rdma_dev_addr *dev_addr, - union ib_gid *gid) +static inline void rdma_addr_set_dgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid) { - memcpy(gid, dev_addr->dst_dev_addr, sizeof *gid); + memcpy(dev_addr->dst_dev_addr + rdma_addr_gid_offset(dev_addr), gid, sizeof *gid); } #endif /* IB_ADDR_H */ diff --git a/net/rds/ib.c b/net/rds/ib.c index 536ebe5d3f6b..3b8992361042 100644 --- a/net/rds/ib.c +++ b/net/rds/ib.c @@ -182,8 +182,8 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn, ic = conn->c_transport_data; dev_addr = &ic->i_cm_id->route.addr.dev_addr; - ib_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid); - ib_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid); + rdma_addr_get_sgid(dev_addr, (union ib_gid *) 
&iinfo->src_gid); + rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid); rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client); iinfo->max_send_wr = ic->i_send_ring.w_nr; diff --git a/net/rds/iw.c b/net/rds/iw.c index db224f7c2937..b28fa8525b24 100644 --- a/net/rds/iw.c +++ b/net/rds/iw.c @@ -184,8 +184,8 @@ static int rds_iw_conn_info_visitor(struct rds_connection *conn, ic = conn->c_transport_data; dev_addr = &ic->i_cm_id->route.addr.dev_addr; - ib_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid); - ib_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid); + rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid); + rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid); rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client); iinfo->max_send_wr = ic->i_send_ring.w_nr; -- cgit v1.2.3 From 6600b9dd8e0d4a60c610f216b78d992a598bc52a Mon Sep 17 00:00:00 2001 From: Jiro SEKIBA Date: Mon, 9 Nov 2009 19:10:11 +0900 Subject: nilfs2: move definition of struct nilfs_btree_node This is a trivial patch to expose struct nilfs_fs_btree_node. The struct should be exposed outside of kernel, for it is disk format. Signed-off-by: Jiro SEKIBA Signed-off-by: Ryusuke Konishi --- fs/nilfs2/btree.h | 22 ---------------------- include/linux/nilfs2_fs.h | 22 ++++++++++++++++++++++ 2 files changed, 22 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/fs/nilfs2/btree.h b/fs/nilfs2/btree.h index 0e72bbbc6b64..4b82d84ade75 100644 --- a/fs/nilfs2/btree.h +++ b/fs/nilfs2/btree.h @@ -33,28 +33,6 @@ struct nilfs_btree; struct nilfs_btree_path; -/** - * struct nilfs_btree_node - B-tree node - * @bn_flags: flags - * @bn_level: level - * @bn_nchildren: number of children - * @bn_pad: padding - */ -struct nilfs_btree_node { - __u8 bn_flags; - __u8 bn_level; - __le16 bn_nchildren; - __le32 bn_pad; -}; - -/* flags */ -#define NILFS_BTREE_NODE_ROOT 0x01 - -/* level */ -#define NILFS_BTREE_LEVEL_DATA 0 -#define NILFS_BTREE_LEVEL_NODE_MIN (NILFS_BTREE_LEVEL_DATA + 1) -#define NILFS_BTREE_LEVEL_MAX 14 - /** * struct nilfs_btree - B-tree structure * @bt_bmap: bmap base structure diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h index ce520402e840..72289d2bb341 100644 --- a/include/linux/nilfs2_fs.h +++ b/include/linux/nilfs2_fs.h @@ -402,6 +402,28 @@ struct nilfs_segment_summary { #define NILFS_SS_SYNDT 0x0008 /* includes data only updates */ #define NILFS_SS_GC 0x0010 /* segment written for cleaner operation */ +/** + * struct nilfs_btree_node - B-tree node + * @bn_flags: flags + * @bn_level: level + * @bn_nchildren: number of children + * @bn_pad: padding + */ +struct nilfs_btree_node { + __u8 bn_flags; + __u8 bn_level; + __le16 bn_nchildren; + __le32 bn_pad; +}; + +/* flags */ +#define NILFS_BTREE_NODE_ROOT 0x01 + +/* level */ +#define NILFS_BTREE_LEVEL_DATA 0 +#define NILFS_BTREE_LEVEL_NODE_MIN (NILFS_BTREE_LEVEL_DATA + 1) +#define NILFS_BTREE_LEVEL_MAX 14 + /** * struct nilfs_palloc_group_desc - block group descriptor * @pg_nfrees: number of free entries in block group -- cgit v1.2.3 From 0234576d041b9b2cc7043691ea61d2c2ca597aaa Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Fri, 20 Nov 2009 03:28:01 +0900 Subject: nilfs2: add norecovery mount option This adds "norecovery" mount option which disables temporal write access to read-only mounts or snapshots during mount/recovery. 
Without this option, write access will be even performed for those types of mounts; the temporal write access is needed to mount root file system read-only after an unclean shutdown. This option will be helpful when user wants to prevent any write access to the device. Signed-off-by: Ryusuke Konishi Cc: Eric Sandeen --- Documentation/filesystems/nilfs2.txt | 4 ++++ fs/nilfs2/super.c | 16 +++++++++++++++- fs/nilfs2/the_nilfs.c | 20 ++++++++++++++++++-- include/linux/nilfs2_fs.h | 2 ++ 4 files changed, 39 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/Documentation/filesystems/nilfs2.txt b/Documentation/filesystems/nilfs2.txt index cbd877978c80..4949fcaa6b6a 100644 --- a/Documentation/filesystems/nilfs2.txt +++ b/Documentation/filesystems/nilfs2.txt @@ -70,6 +70,10 @@ order=strict Apply strict in-order semantics that preserves sequence blocks. That means, it is guaranteed that no overtaking of events occurs in the recovered file system after a crash. +norecovery Disable recovery of the filesystem on mount. + This disables every write access on the device for + read-only mounts or snapshots. This option will fail + for r/w mounts on an unclean volume. NILFS2 usage ============ diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 990ead43a833..5403b3ef3a42 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -479,6 +479,8 @@ static int nilfs_show_options(struct seq_file *seq, struct vfsmount *vfs) seq_printf(seq, ",errors=panic"); if (nilfs_test_opt(sbi, STRICT_ORDER)) seq_printf(seq, ",order=strict"); + if (nilfs_test_opt(sbi, NORECOVERY)) + seq_printf(seq, ",norecovery"); return 0; } @@ -547,7 +549,7 @@ static const struct export_operations nilfs_export_ops = { enum { Opt_err_cont, Opt_err_panic, Opt_err_ro, - Opt_nobarrier, Opt_snapshot, Opt_order, + Opt_nobarrier, Opt_snapshot, Opt_order, Opt_norecovery, Opt_err, }; @@ -558,6 +560,7 @@ static match_table_t tokens = { {Opt_nobarrier, "nobarrier"}, {Opt_snapshot, "cp=%u"}, {Opt_order, "order=%s"}, + {Opt_norecovery, "norecovery"}, {Opt_err, NULL} }; @@ -608,6 +611,9 @@ static int parse_options(char *options, struct super_block *sb) sbi->s_snapshot_cno = option; nilfs_set_opt(sbi, SNAPSHOT); break; + case Opt_norecovery: + nilfs_set_opt(sbi, NORECOVERY); + break; default: printk(KERN_ERR "NILFS: Unrecognized mount option \"%s\"\n", p); @@ -863,6 +869,14 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data) goto restore_opts; } + if (!nilfs_valid_fs(nilfs)) { + printk(KERN_WARNING "NILFS (device %s): couldn't " + "remount because the filesystem is in an " + "incomplete recovery state.\n", sb->s_id); + err = -EINVAL; + goto restore_opts; + } + if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) goto out; if (*flags & MS_RDONLY) { diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index 890a8d3886cf..6241e1722efc 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c @@ -264,8 +264,14 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi) int valid_fs = nilfs_valid_fs(nilfs); int err; - if (nilfs_loaded(nilfs)) - return 0; + if (nilfs_loaded(nilfs)) { + if (valid_fs || + ((s_flags & MS_RDONLY) && nilfs_test_opt(sbi, NORECOVERY))) + return 0; + printk(KERN_ERR "NILFS: the filesystem is in an incomplete " + "recovery state.\n"); + return -EINVAL; + } if (!valid_fs) { printk(KERN_WARNING "NILFS warning: mounting unchecked fs\n"); @@ -295,6 +301,11 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi) goto skip_recovery; if (s_flags & MS_RDONLY) { + 
if (nilfs_test_opt(sbi, NORECOVERY)) { + printk(KERN_INFO "NILFS: norecovery option specified. " + "skipping roll-forward recovery\n"); + goto skip_recovery; + } if (really_read_only) { printk(KERN_ERR "NILFS: write access " "unavailable, cannot proceed.\n"); @@ -302,6 +313,11 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi) goto failed_unload; } sbi->s_super->s_flags &= ~MS_RDONLY; + } else if (nilfs_test_opt(sbi, NORECOVERY)) { + printk(KERN_ERR "NILFS: recovery cancelled because norecovery " + "option was specified for a read/write mount\n"); + err = -EINVAL; + goto failed_unload; } err = nilfs_recover_logical_segments(nilfs, sbi, &ri); diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h index 72289d2bb341..3fe02cf8b65a 100644 --- a/include/linux/nilfs2_fs.h +++ b/include/linux/nilfs2_fs.h @@ -151,6 +151,8 @@ struct nilfs_super_root { #define NILFS_MOUNT_BARRIER 0x1000 /* Use block barriers */ #define NILFS_MOUNT_STRICT_ORDER 0x2000 /* Apply strict in-order semantics also for data */ +#define NILFS_MOUNT_NORECOVERY 0x4000 /* Disable write access during + mount-time recovery */ /** -- cgit v1.2.3 From a3f62bd2b20c769ddc989b242ddd274179e19ee6 Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Sun, 18 Oct 2009 13:55:55 -0500 Subject: powerpc/fsl: Add PCI device ids for new QoirQ chips Signed-off-by: Kumar Gala --- arch/powerpc/sysdev/fsl_pci.c | 14 ++++++++++++++ include/linux/pci_ids.h | 14 ++++++++++++++ 2 files changed, 28 insertions(+) (limited to 'include') diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c index cf57a42c72af..4e3a3e345ab3 100644 --- a/arch/powerpc/sysdev/fsl_pci.c +++ b/arch/powerpc/sysdev/fsl_pci.c @@ -392,8 +392,22 @@ DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8536, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8641, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8641D, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8610, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1011E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1011, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2020E, quirk_fsl_pcie_header); DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2020, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4040E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4040, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4080E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4080, quirk_fsl_pcie_header); #endif /* CONFIG_FSL_SOC_BOOKE || CONFIG_PPC_86xx */ #if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x) diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 9ceb392cb984..f988a0d68636 100644 --- 
a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2288,6 +2288,20 @@ #define PCI_DEVICE_ID_MPC8536 0x0051 #define PCI_DEVICE_ID_P2020E 0x0070 #define PCI_DEVICE_ID_P2020 0x0071 +#define PCI_DEVICE_ID_P2010E 0x0078 +#define PCI_DEVICE_ID_P2010 0x0079 +#define PCI_DEVICE_ID_P1020E 0x0100 +#define PCI_DEVICE_ID_P1020 0x0101 +#define PCI_DEVICE_ID_P1011E 0x0108 +#define PCI_DEVICE_ID_P1011 0x0109 +#define PCI_DEVICE_ID_P1022E 0x0110 +#define PCI_DEVICE_ID_P1022 0x0111 +#define PCI_DEVICE_ID_P1013E 0x0118 +#define PCI_DEVICE_ID_P1013 0x0119 +#define PCI_DEVICE_ID_P4080E 0x0400 +#define PCI_DEVICE_ID_P4080 0x0401 +#define PCI_DEVICE_ID_P4040E 0x0408 +#define PCI_DEVICE_ID_P4040 0x0409 #define PCI_DEVICE_ID_MPC8641 0x7010 #define PCI_DEVICE_ID_MPC8641D 0x7011 #define PCI_DEVICE_ID_MPC8610 0x7018 -- cgit v1.2.3 From 78c210efdefe07131f91ed512a3308b15bb14e2f Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Thu, 6 Aug 2009 15:41:34 -0400 Subject: Revert "knfsd: avoid overloading the CPU scheduler with enormous load averages" This reverts commit 59a252ff8c0f2fa32c896f69d56ae33e641ce7ad. This helps in an entirely cached workload but not necessarily in workloads that require waiting on disk. Conflicts: include/linux/sunrpc/svc.h net/sunrpc/svc_xprt.c Reported-by: Simon Kirby Tested-by: Jesper Krogh Signed-off-by: J. Bruce Fields --- include/linux/sunrpc/svc.h | 3 --- net/sunrpc/svc_xprt.c | 31 +++++++++---------------------- 2 files changed, 9 insertions(+), 25 deletions(-) (limited to 'include') diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 52e8cb0a7569..d1567d627557 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -29,7 +29,6 @@ struct svc_pool_stats { unsigned long packets; unsigned long sockets_queued; unsigned long threads_woken; - unsigned long overloads_avoided; unsigned long threads_timedout; }; @@ -50,7 +49,6 @@ struct svc_pool { struct list_head sp_sockets; /* pending sockets */ unsigned int sp_nrthreads; /* # of threads in pool */ struct list_head sp_all_threads; /* all server threads */ - int sp_nwaking; /* number of threads woken but not yet active */ struct svc_pool_stats sp_stats; /* statistics on pool operation */ } ____cacheline_aligned_in_smp; @@ -284,7 +282,6 @@ struct svc_rqst { * cache pages */ wait_queue_head_t rq_wait; /* synchronization */ struct task_struct *rq_task; /* service thread */ - int rq_waking; /* 1 if thread is being woken */ }; /* diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index df124f78ee48..2c58b75a236f 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -16,8 +16,6 @@ #define RPCDBG_FACILITY RPCDBG_SVCXPRT -#define SVC_MAX_WAKING 5 - static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt); static int svc_deferred_recv(struct svc_rqst *rqstp); static struct cache_deferred_req *svc_defer(struct cache_req *req); @@ -306,7 +304,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) struct svc_pool *pool; struct svc_rqst *rqstp; int cpu; - int thread_avail; if (!(xprt->xpt_flags & ((1<sp_lock); + if (!list_empty(&pool->sp_threads) && + !list_empty(&pool->sp_sockets)) + printk(KERN_ERR + "svc_xprt_enqueue: " + "threads and transports both waiting??\n"); + if (test_bit(XPT_DEAD, &xprt->xpt_flags)) { /* Don't enqueue dead transports */ dprintk("svc: transport %p is dead, not enqueued\n", xprt); @@ -358,15 +361,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) } process: - /* Work out whether threads are available */ - thread_avail = 
!list_empty(&pool->sp_threads); /* threads are asleep */ - if (pool->sp_nwaking >= SVC_MAX_WAKING) { - /* too many threads are runnable and trying to wake up */ - thread_avail = 0; - pool->sp_stats.overloads_avoided++; - } - - if (thread_avail) { + if (!list_empty(&pool->sp_threads)) { rqstp = list_entry(pool->sp_threads.next, struct svc_rqst, rq_list); @@ -381,8 +376,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) svc_xprt_get(xprt); rqstp->rq_reserved = serv->sv_max_mesg; atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); - rqstp->rq_waking = 1; - pool->sp_nwaking++; pool->sp_stats.threads_woken++; BUG_ON(xprt->xpt_pool != pool); wake_up(&rqstp->rq_wait); @@ -651,11 +644,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) return -EINTR; spin_lock_bh(&pool->sp_lock); - if (rqstp->rq_waking) { - rqstp->rq_waking = 0; - pool->sp_nwaking--; - BUG_ON(pool->sp_nwaking < 0); - } xprt = svc_xprt_dequeue(pool); if (xprt) { rqstp->rq_xprt = xprt; @@ -1204,16 +1192,15 @@ static int svc_pool_stats_show(struct seq_file *m, void *p) struct svc_pool *pool = p; if (p == SEQ_START_TOKEN) { - seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken overloads-avoided threads-timedout\n"); + seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n"); return 0; } - seq_printf(m, "%u %lu %lu %lu %lu %lu\n", + seq_printf(m, "%u %lu %lu %lu %lu\n", pool->sp_id, pool->sp_stats.packets, pool->sp_stats.sockets_queued, pool->sp_stats.threads_woken, - pool->sp_stats.overloads_avoided, pool->sp_stats.threads_timedout); return 0; -- cgit v1.2.3 From 7d6709a20866a885916214590b7c394a21be9e25 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 24 Nov 2009 16:06:48 +1100 Subject: powerpc: Fix build of some FSL platforms Commit 87ec0e98cfdd8b68da6a7f9e70142ffc0e404fbb in kumar's next branch broke one of my test configs since it looks like Anton forgot about that mpc832x_rdb platform which still uses the old style probing for the SPI stuff. I'll let them do a cleaner fix that probably involves changing the probing method and getting rid of the platform device but for now this will do to fix it. 
Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/platforms/83xx/mpc832x_rdb.c | 2 +- drivers/spi/spi_mpc8xxx.c | 5 ----- include/linux/fsl_devices.h | 5 +++++ 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c index 567ded7c3b9b..17f99745f0e4 100644 --- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c +++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c @@ -74,7 +74,7 @@ static int __init of_fsl_spi_probe(char *type, char *compatible, u32 sysclk, prop = of_get_property(np, "mode", NULL); if (prop && !strcmp(prop, "cpu-qe")) - pdata.qe_mode = 1; + pdata.flags = SPI_QE_CPU_MODE; for (j = 0; j < num_board_infos; j++) { if (board_infos[j].bus_num == pdata.bus_num) diff --git a/drivers/spi/spi_mpc8xxx.c b/drivers/spi/spi_mpc8xxx.c index 394b6581e17f..930135dc73ba 100644 --- a/drivers/spi/spi_mpc8xxx.c +++ b/drivers/spi/spi_mpc8xxx.c @@ -163,11 +163,6 @@ struct mpc8xxx_spi { u32 tx_shift; /* TX data reg shift when in qe mode */ unsigned int flags; -#define SPI_QE_CPU_MODE (1 << 0) /* QE CPU ("PIO") mode */ -#define SPI_CPM_MODE (1 << 1) /* CPM/QE ("DMA") mode */ -#define SPI_CPM1 (1 << 2) /* SPI unit is in CPM1 block */ -#define SPI_CPM2 (1 << 3) /* SPI unit is in CPM2 block */ -#define SPI_QE (1 << 4) /* SPI unit is in QE block */ struct workqueue_struct *workqueue; struct work_struct work; diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h index 47188d512b8f..28e33fea5107 100644 --- a/include/linux/fsl_devices.h +++ b/include/linux/fsl_devices.h @@ -75,6 +75,11 @@ struct fsl_spi_platform_data { u32 initial_spmode; /* initial SPMODE value */ s16 bus_num; unsigned int flags; +#define SPI_QE_CPU_MODE (1 << 0) /* QE CPU ("PIO") mode */ +#define SPI_CPM_MODE (1 << 1) /* CPM/QE ("DMA") mode */ +#define SPI_CPM1 (1 << 2) /* SPI unit is in CPM1 block */ +#define SPI_CPM2 (1 << 3) /* SPI unit is in CPM2 block */ +#define SPI_QE (1 << 4) /* SPI unit is in QE block */ /* board specific information */ u16 max_chipselect; void (*cs_control)(struct spi_device *spi, bool on); -- cgit v1.2.3 From e2f74f355e9e2914483db10c05d70e69e0b7ae04 Mon Sep 17 00:00:00 2001 From: Thomas Renninger Date: Thu, 19 Nov 2009 12:31:01 +0100 Subject: [ACPI/CPUFREQ] Introduce bios_limit per cpu cpufreq sysfs interface This interface is mainly intended (and implemented) for ACPI _PPC BIOS frequency limitations, but other cpufreq drivers can also use it for similar use-cases. Why is this needed: Currently it's not obvious why cpufreq got limited. People see cpufreq/scaling_max_freq reduced, but this could have happened by: - any userspace prog writing to scaling_max_freq - thermal limitations - hardware (_PPC in ACPI case) limitiations Therefore export bios_limit (in kHz) to: - Point the user that it's the BIOS (broken or intended) which limits frequency - Export it as a sysfs interface for userspace progs. While this was a rarely used feature on laptops, there will appear more and more server implemenations providing "Green IT" features like allowing the service processor to limit the frequency. People want to know about HW/BIOS frequency limitations. 
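For illustration only (not part of the patch), a minimal userspace sketch of how the new file could be consumed: it reads the per-cpu bios_limit value and compares it with cpuinfo_max_freq to tell whether the BIOS/HW is currently limiting the frequency. The sysfs paths assume the standard cpufreq layout and cpu0.

#include <stdio.h>

static int read_khz(const char *path, unsigned int *khz)
{
	FILE *f = fopen(path, "r");

	if (!f)
		return -1;
	if (fscanf(f, "%u", khz) != 1) {
		fclose(f);
		return -1;
	}
	fclose(f);
	return 0;
}

int main(void)
{
	unsigned int bios_limit, max_freq;

	if (read_khz("/sys/devices/system/cpu/cpu0/cpufreq/bios_limit", &bios_limit) ||
	    read_khz("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq", &max_freq))
		return 1;

	/* Both values are in kHz; a bios_limit below the hardware maximum
	 * suggests a BIOS/service-processor imposed cap. */
	printf("cpu0: bios_limit=%u kHz, cpuinfo_max_freq=%u kHz%s\n",
	       bios_limit, max_freq,
	       bios_limit < max_freq ? " (limited by BIOS/HW)" : "");
	return 0;
}
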
All ACPI P-state driven cpufreq drivers are covered with this patch: - powernow-k8 - powernow-k7 - acpi-cpufreq Tested with a patched DSDT which limits the first two cores (_PPC returns 1) via _PPC, exposed by bios_limit: # echo 2200000 >cpu2/cpufreq/scaling_max_freq # cat cpu*/cpufreq/scaling_max_freq 2600000 2600000 2200000 2200000 # #scaling_max_freq shows general user/thermal/BIOS limitations # cat cpu*/cpufreq/bios_limit 2600000 2600000 2800000 2800000 # #bios_limit only shows the HW/BIOS limitation CC: Pallipadi Venkatesh CC: Len Brown CC: davej@codemonkey.org.uk CC: linux@dominikbrodowski.net Signed-off-by: Thomas Renninger Signed-off-by: Dave Jones --- Documentation/cpu-freq/user-guide.txt | 11 +++++++++++ arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 17 +++++++++-------- arch/x86/kernel/cpu/cpufreq/powernow-k7.c | 19 +++++++++++-------- arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 17 +++++++++-------- drivers/acpi/processor_perflib.c | 13 +++++++++++++ drivers/cpufreq/cpufreq.c | 21 +++++++++++++++++++++ include/acpi/processor.h | 6 ++++++ include/linux/cpufreq.h | 1 + 8 files changed, 81 insertions(+), 24 deletions(-) (limited to 'include') diff --git a/Documentation/cpu-freq/user-guide.txt b/Documentation/cpu-freq/user-guide.txt index 2a5b850847c0..04f6b32993e6 100644 --- a/Documentation/cpu-freq/user-guide.txt +++ b/Documentation/cpu-freq/user-guide.txt @@ -203,6 +203,17 @@ scaling_cur_freq : Current frequency of the CPU as determined by the frequency the kernel thinks the CPU runs at. +bios_limit : If the BIOS tells the OS to limit a CPU to + lower frequencies, the user can read out the + maximum available frequency from this file. + This typically can happen through (often not + intended) BIOS settings, restrictions + triggered through a service processor or other + BIOS/HW based implementations. + This does not cover thermal ACPI limitations + which can be detected through the generic + thermal driver. 
+ If you have selected the "userspace" governor which allows you to set the CPU operating frequency to a specific value, you can read out the current frequency in diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index 8b581d3905cb..d2e7c77c1ea4 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -764,14 +764,15 @@ static struct freq_attr *acpi_cpufreq_attr[] = { }; static struct cpufreq_driver acpi_cpufreq_driver = { - .verify = acpi_cpufreq_verify, - .target = acpi_cpufreq_target, - .init = acpi_cpufreq_cpu_init, - .exit = acpi_cpufreq_cpu_exit, - .resume = acpi_cpufreq_resume, - .name = "acpi-cpufreq", - .owner = THIS_MODULE, - .attr = acpi_cpufreq_attr, + .verify = acpi_cpufreq_verify, + .target = acpi_cpufreq_target, + .bios_limit = acpi_processor_get_bios_limit, + .init = acpi_cpufreq_cpu_init, + .exit = acpi_cpufreq_cpu_exit, + .resume = acpi_cpufreq_resume, + .name = "acpi-cpufreq", + .owner = THIS_MODULE, + .attr = acpi_cpufreq_attr, }; static int __init acpi_cpufreq_init(void) diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c index d47c775eb0ab..9a97116f89e5 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c @@ -714,14 +714,17 @@ static struct freq_attr *powernow_table_attr[] = { }; static struct cpufreq_driver powernow_driver = { - .verify = powernow_verify, - .target = powernow_target, - .get = powernow_get, - .init = powernow_cpu_init, - .exit = powernow_cpu_exit, - .name = "powernow-k7", - .owner = THIS_MODULE, - .attr = powernow_table_attr, + .verify = powernow_verify, + .target = powernow_target, + .get = powernow_get, +#ifdef CONFIG_X86_POWERNOW_K7_ACPI + .bios_limit = acpi_processor_get_bios_limit, +#endif + .init = powernow_cpu_init, + .exit = powernow_cpu_exit, + .name = "powernow-k7", + .owner = THIS_MODULE, + .attr = powernow_table_attr, }; static int __init powernow_init(void) diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index f30d25383940..a9df9441a9a2 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c @@ -1398,14 +1398,15 @@ static struct freq_attr *powernow_k8_attr[] = { }; static struct cpufreq_driver cpufreq_amd64_driver = { - .verify = powernowk8_verify, - .target = powernowk8_target, - .init = powernowk8_cpu_init, - .exit = __devexit_p(powernowk8_cpu_exit), - .get = powernowk8_get, - .name = "powernow-k8", - .owner = THIS_MODULE, - .attr = powernow_k8_attr, + .verify = powernowk8_verify, + .target = powernowk8_target, + .bios_limit = acpi_processor_get_bios_limit, + .init = powernowk8_cpu_init, + .exit = __devexit_p(powernowk8_cpu_exit), + .get = powernowk8_get, + .name = "powernow-k8", + .owner = THIS_MODULE, + .attr = powernow_k8_attr, }; /* driver entry point for init */ diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index 8ba0ed0b9ddb..01e366d2b6fb 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -167,6 +167,19 @@ int acpi_processor_ppc_has_changed(struct acpi_processor *pr) return cpufreq_update_policy(pr->id); } +int acpi_processor_get_bios_limit(int cpu, unsigned int *limit) +{ + struct acpi_processor *pr; + + pr = per_cpu(processors, cpu); + if (!pr || !pr->performance || !pr->performance->state_count) + return -ENODEV; + *limit = 
pr->performance->states[pr->performance_platform_limit]. + core_frequency * 1000; + return 0; +} +EXPORT_SYMBOL(acpi_processor_get_bios_limit); + void acpi_processor_ppc_init(void) { if (!cpufreq_register_notifier diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 5b9b1c8c4950..f20668c09ce0 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -647,6 +647,21 @@ static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf) return policy->governor->show_setspeed(policy, buf); } +/** + * show_scaling_driver - show the current cpufreq HW/BIOS limitation + */ +static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf) +{ + unsigned int limit; + int ret; + if (cpufreq_driver->bios_limit) { + ret = cpufreq_driver->bios_limit(policy->cpu, &limit); + if (!ret) + return sprintf(buf, "%u\n", limit); + } + return sprintf(buf, "%u\n", policy->cpuinfo.max_freq); +} + #define define_one_ro(_name) \ static struct freq_attr _name = \ __ATTR(_name, 0444, show_##_name, NULL) @@ -666,6 +681,7 @@ define_one_ro(cpuinfo_transition_latency); define_one_ro(scaling_available_governors); define_one_ro(scaling_driver); define_one_ro(scaling_cur_freq); +define_one_ro(bios_limit); define_one_ro(related_cpus); define_one_ro(affected_cpus); define_one_rw(scaling_min_freq); @@ -905,6 +921,11 @@ static int cpufreq_add_dev_interface(unsigned int cpu, if (ret) goto err_out_kobj_put; } + if (cpufreq_driver->bios_limit) { + ret = sysfs_create_file(&policy->kobj, &bios_limit.attr); + if (ret) + goto err_out_kobj_put; + } spin_lock_irqsave(&cpufreq_driver_lock, flags); for_each_cpu(j, policy->cpus) { diff --git a/include/acpi/processor.h b/include/acpi/processor.h index 740ac3ad8fd0..8b668ead6d6e 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h @@ -295,6 +295,7 @@ static inline void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx void acpi_processor_ppc_init(void); void acpi_processor_ppc_exit(void); int acpi_processor_ppc_has_changed(struct acpi_processor *pr); +extern int acpi_processor_get_bios_limit(int cpu, unsigned int *limit); #else static inline void acpi_processor_ppc_init(void) { @@ -316,6 +317,11 @@ static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr) } return 0; } +static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit) +{ + return -ENODEV; +} + #endif /* CONFIG_CPU_FREQ */ /* in processor_throttling.c */ diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 79a2340d83cd..4de02b10007f 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -232,6 +232,7 @@ struct cpufreq_driver { /* optional */ unsigned int (*getavg) (struct cpufreq_policy *policy, unsigned int cpu); + int (*bios_limit) (int cpu, unsigned int *limit); int (*exit) (struct cpufreq_policy *policy); int (*suspend) (struct cpufreq_policy *policy, pm_message_t pmsg); -- cgit v1.2.3 From d7b7e60526d54da4c94afe5f137714cee7d05c41 Mon Sep 17 00:00:00 2001 From: Kenji Kaneshige Date: Wed, 11 Nov 2009 14:29:54 +0900 Subject: PCI: introduce pci_pcie_cap() Introduce pci_pcie_cap() API that returns saved PCIe capability offset (currently it is saved in 'pcie_cap' field in the struct PCI dev). Using pci_pcie_cap() instead of pci_find_capability() avoids unnecessary search in PCI configuration space. 
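For illustration only (not part of the patch), a hypothetical driver snippet showing the intended usage pattern: the saved capability offset replaces the configuration-space search. The helper name and the relaxed-ordering tweak are invented for the example; pci_pcie_cap() and the standard config accessors are the real interfaces.

#include <linux/pci.h>

/* Hypothetical helper: enable relaxed ordering on a PCI Express function. */
static int example_enable_relaxed_ordering(struct pci_dev *pdev)
{
	int pos = pci_pcie_cap(pdev);	/* instead of pci_find_capability(pdev, PCI_CAP_ID_EXP) */
	u16 devctl;

	if (!pos)			/* device has no PCIe capability */
		return -ENODEV;

	pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &devctl);
	devctl |= PCI_EXP_DEVCTL_RELAX_EN;
	pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, devctl);
	return 0;
}
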
Signed-off-by: Kenji Kaneshige Signed-off-by: Jesse Barnes --- include/linux/pci.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'include') diff --git a/include/linux/pci.h b/include/linux/pci.h index 233b3a092035..15f37f102dd3 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1301,5 +1301,21 @@ extern void pci_hp_create_module_link(struct pci_slot *pci_slot); extern void pci_hp_remove_module_link(struct pci_slot *pci_slot); #endif +/** + * pci_pcie_cap - get the saved PCIe capability offset + * @dev: PCI device + * + * PCIe capability offset is calculated at PCI device initialization + * time and saved in the data structure. This function returns saved + * PCIe capability offset. Using this instead of pci_find_capability() + * reduces unnecessary search in the PCI configuration space. If you + * need to calculate PCIe capability offset from raw device for some + * reasons, please use pci_find_capability() instead. + */ +static inline int pci_pcie_cap(struct pci_dev *dev) +{ + return dev->pcie_cap; +} + #endif /* __KERNEL__ */ #endif /* LINUX_PCI_H */ -- cgit v1.2.3 From 7eb776c42e75d17bd8107a1359068d8c742639d1 Mon Sep 17 00:00:00 2001 From: Kenji Kaneshige Date: Wed, 11 Nov 2009 14:35:22 +0900 Subject: PCI: introduce pci_is_pcie() Introduce pci_is_pcie() which returns true if the specified PCI device is PCI Express capable, false otherwise. The purpose of pci_is_pcie() is removing 'is_pcie' flag in the struct pci_dev, which is not needed because we can check it using 'pcie_cap' field. To remove 'is_pcie', we need to update user of 'is_pcie' to use pci_is_pcie() instead first. Signed-off-by: Kenji Kaneshige Signed-off-by: Jesse Barnes --- include/linux/pci.h | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'include') diff --git a/include/linux/pci.h b/include/linux/pci.h index 15f37f102dd3..2891c3d3e51a 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1317,5 +1317,16 @@ static inline int pci_pcie_cap(struct pci_dev *dev) return dev->pcie_cap; } +/** + * pci_is_pcie - check if the PCI device is PCI Express capable + * @dev: PCI device + * + * Retrun true if the PCI device is PCI Express capable, false otherwise. + */ +static inline bool pci_is_pcie(struct pci_dev *dev) +{ + return !!pci_pcie_cap(dev); +} + #endif /* __KERNEL__ */ #endif /* LINUX_PCI_H */ -- cgit v1.2.3 From 3b034b0d084221596bf35c8d893e1d4d5477b9cc Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Tue, 24 Nov 2009 15:50:03 +0900 Subject: percpu: Fix kdump failure if booted with percpu_alloc=page o kdump functionality reserves a per cpu area at boot time and exports the physical address of that area to user space through sys interface. This area stores some dump related information like cpu register states etc at the time of crash. o We were assuming that per cpu area always come from linearly mapped meory region and using __pa() to determine physical address. With percpu_alloc=page, per cpu area can come from vmalloc region also and __pa() breaks. o This patch implments a new function to convert per cpu address to physical address. Before the patch, crash_notes addresses looked as follows. cpu0 60fffff49800 cpu1 60fffff60800 cpu2 60fffff77800 These are bogus phsyical addresses. After the patch, address are following. cpu0 13eb44000 cpu1 13eb43000 cpu2 13eb42000 cpu3 13eb41000 These look fine. I got 4G of memory and /proc/iomem tell me following. 
100000000-13fffffff : System RAM tj: * added missing asm/io.h include reported by Stephen Rothwell * repositioned per_cpu_ptr_phys() in percpu.c and added comment. Signed-off-by: Vivek Goyal Signed-off-by: Tejun Heo Cc: Stephen Rothwell --- drivers/base/cpu.c | 2 +- include/linux/percpu.h | 1 + mm/percpu.c | 22 ++++++++++++++++++++++ 3 files changed, 24 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index e62a4ccea54d..69ee5b7517ec 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -97,7 +97,7 @@ static ssize_t show_crash_notes(struct sys_device *dev, struct sysdev_attribute * boot up and this data does not change there after. Hence this * operation should be safe. No locking required. */ - addr = __pa(per_cpu_ptr(crash_notes, cpunum)); + addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum)); rc = sprintf(buf, "%Lx\n", addr); return rc; } diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 878836ca999c..6ac984fa34f8 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -154,6 +154,7 @@ struct percpu_data { extern void *__alloc_percpu(size_t size, size_t align); extern void free_percpu(void *__pdata); +extern phys_addr_t per_cpu_ptr_to_phys(void *addr); #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA extern void __init setup_per_cpu_areas(void); diff --git a/mm/percpu.c b/mm/percpu.c index 5adfc268b408..008fbd9e6fa4 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -74,6 +74,7 @@ #include #include #include +#include #define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */ #define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */ @@ -1302,6 +1303,27 @@ void free_percpu(void *ptr) } EXPORT_SYMBOL_GPL(free_percpu); +/** + * per_cpu_ptr_to_phys - convert translated percpu address to physical address + * @addr: the address to be converted to physical address + * + * Given @addr which is dereferenceable address obtained via one of + * percpu access macros, this function translates it into its physical + * address. The caller is responsible for ensuring @addr stays valid + * until this function finishes. + * + * RETURNS: + * The physical address for @addr. + */ +phys_addr_t per_cpu_ptr_to_phys(void *addr) +{ + if ((unsigned long)addr < VMALLOC_START || + (unsigned long)addr >= VMALLOC_END) + return __pa(addr); + else + return page_to_phys(vmalloc_to_page(addr)); +} + static inline size_t pcpu_calc_fc_sizes(size_t static_size, size_t reserved_size, ssize_t *dyn_sizep) -- cgit v1.2.3 From c8e0f93a381d1d76135e567f13a4418fce66fd95 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Sun, 22 Nov 2009 03:49:37 +0100 Subject: drm/i915: Replace a calloc followed by copying data over it with malloc. Execbufs involve quite a bit of payload, to the extent that cache misses show up in the profiles here, and a suspicion that some of those cachelines may get evicted and then reloaded in the subsequent copy. This is still abstracted like drm_calloc_large since we want to check for size overflow, and because we want to choose between kmalloc and vmalloc on the fly. cairo's interface for malloc-with-calloc's-args was used as the model. 
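For illustration only (not from the patch), a hypothetical caller sketch of the malloc-ab/copy/free pattern this change enables: size-checked allocation without the redundant zeroing, with drm_free_large() hiding whether kmalloc or vmalloc backed the buffer. The entry struct and function are invented for the example.

#include "drmP.h"

/* Hypothetical entry type; only the allocation pattern matters here. */
struct example_entry {
	u32 handle;
	u64 offset;
};

static int example_copy_list(const void __user *user_list, int count)
{
	struct example_entry *list;
	int ret = 0;

	/* Overflow-checked allocation, no zeroing; kmalloc or vmalloc underneath. */
	list = drm_malloc_ab(sizeof(*list), count);
	if (list == NULL)
		return -ENOMEM;

	/* The caller's data overwrites the buffer anyway, so zeroing it first
	 * would have been wasted work. */
	if (copy_from_user(list, user_list, sizeof(*list) * count))
		ret = -EFAULT;

	/* ... validate and use the entries here ... */

	drm_free_large(list);
	return ret;
}
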
Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_gem.c | 4 ++-- include/drm/drmP.h | 15 ++++++++++++++- 2 files changed, 16 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 2065b8f7e875..481c0ab888c8 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3563,8 +3563,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, return -EINVAL; } /* Copy in the exec list from userland */ - exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count); - object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count); + exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count); + object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count); if (exec_list == NULL || object_list == NULL) { DRM_ERROR("Failed to allocate exec or object list " "for %d buffers\n", diff --git a/include/drm/drmP.h b/include/drm/drmP.h index b0b36838ab11..1b807d0f6cdb 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1545,14 +1545,27 @@ static __inline__ void drm_core_dropmap(struct drm_local_map *map) static __inline__ void *drm_calloc_large(size_t nmemb, size_t size) { + if (size != 0 && nmemb > ULONG_MAX / size) + return NULL; + if (size * nmemb <= PAGE_SIZE) return kcalloc(nmemb, size, GFP_KERNEL); + return __vmalloc(size * nmemb, + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL); +} + +/* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */ +static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size) +{ if (size != 0 && nmemb > ULONG_MAX / size) return NULL; + if (size * nmemb <= PAGE_SIZE) + return kmalloc(nmemb * size, GFP_KERNEL); + return __vmalloc(size * nmemb, - GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL); + GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL); } static __inline void drm_free_large(void *ptr) -- cgit v1.2.3 From 3cf602532c535ec655725e9833378e04c9fd7783 Mon Sep 17 00:00:00 2001 From: Amul Kumar Saha Date: Wed, 21 Oct 2009 17:00:05 +0530 Subject: mtd: OneNAND OTP support rework What is OTP in OneNAND? The device includes, 1. one block-sized OTP (One Time Programmable) area and 2. user-controlled 1st block OTP(Block 0) that can be used to increase system security or to provide identification capabilities. What is done? In OneNAND, one block of the NAND Array is set aside as an OTP memory area, and 1st Block (Block 0) can be used as OTP area. This area, available to the user, can be configured and locked with secured user information. The OTP block can be read, programmed and locked using the same operations as any other NAND Flash Array memory block. After issuing an OTP-Lock, OTP block cannot be erased. OTP block is fully-guaranteed to be a good block. Why it is done? Locking the 1st Block OTP has the effect of a 'Write-protect' to guard against accidental re-programming of data stored in the 1st block and OTP Block. Which problem it solves? OTP support is provided in the existing implementation of OneNAND/Flex-OneNAND driver, but it is not working with OneNAND devices. Have observed the following in current OTP OneNAND Implmentation, 1. DataSheet specific sequence to lock the OTP Area is not followed. 2. Certain functions are quiet generic to cope with OTP specific activity. This patch re-implements OTP support for OneNAND device. How it is done? For all blocks, 8th word is available to the user. 
However, in case of OTP Block, 8th word of sector 0, page 0 is reserved as OTP Locking Bit area. Therefore, in case of OTP Block, user usage on this area is prohibited. Condition specific values are entered in the 8th word, sector0, page 0 of the OTP block during the process of issuing an OTP-Lock. The possible conditions are: 1. Only 1st Block Lock 2. Only OTP Block Lock 3. Lock both the 1st Block and the OTP Block What Other feature additions have been done in this patch? This patch adds feature for: 1. Only 1st Block Lock 2. Lock both the 1st Block and the OTP Blocks Re-implemented OTP support for OneNAND Added following features to OneNAND 1. Lock only 1st Block in OneNAND 2. Lock BOTH 1st Block and OTP Block in OneNAND [comments were slightly tweaked by Artem] Signed-off-by: Amul Kumar Saha Reviewed-by: Adrian Hunter Signed-off-by: Artem Bityutskiy Signed-off-by: David Woodhouse --- drivers/mtd/onenand/onenand_base.c | 292 +++++++++++++++++++++++++++++++++---- include/linux/mtd/onenand.h | 4 +- 2 files changed, 264 insertions(+), 32 deletions(-) (limited to 'include') diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c index 6e250f3a4a16..7bd6ad3ff30a 100644 --- a/drivers/mtd/onenand/onenand_base.c +++ b/drivers/mtd/onenand/onenand_base.c @@ -1,17 +1,19 @@ /* * linux/drivers/mtd/onenand/onenand_base.c * - * Copyright (C) 2005-2007 Samsung Electronics + * Copyright © 2005-2009 Samsung Electronics + * Copyright © 2007 Nokia Corporation + * * Kyungmin Park * * Credits: * Adrian Hunter : * auto-placement support, read-while load support, various fixes - * Copyright (C) Nokia Corporation, 2007 * * Vishak G , Rohit Hagargundgi * Flex-OneNAND support - * Copyright (C) Samsung Electronics, 2008 + * Amul Kumar Saha + * OTP support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -43,6 +45,18 @@ MODULE_PARM_DESC(flex_bdry, "SLC Boundary information for Flex-OneNAND" " : 0->Set boundary in unlocked status" " : 1->Set boundary in locked status"); +/* Default OneNAND/Flex-OneNAND OTP options*/ +static int otp; + +module_param(otp, int, 0400); +MODULE_PARM_DESC(otp, "Corresponding behaviour of OneNAND in OTP" + "Syntax : otp=LOCK_TYPE" + "LOCK_TYPE : Keys issued, for specific OTP Lock type" + " : 0 -> Default (No Blocks Locked)" + " : 1 -> OTP Block lock" + " : 2 -> 1st Block lock" + " : 3 -> BOTH OTP Block and 1st Block lock"); + /** * onenand_oob_128 - oob info for Flex-Onenand with 4KB page * For now, we expose only 64 out of 80 ecc bytes @@ -2591,6 +2605,208 @@ static void onenand_unlock_all(struct mtd_info *mtd) #ifdef CONFIG_MTD_ONENAND_OTP +/** + * onenand_otp_command - Send OTP specific command to OneNAND device + * @param mtd MTD device structure + * @param cmd the command to be sent + * @param addr offset to read from or write to + * @param len number of bytes to read or write + */ +static int onenand_otp_command(struct mtd_info *mtd, int cmd, loff_t addr, + size_t len) +{ + struct onenand_chip *this = mtd->priv; + int value, block, page; + + /* Address translation */ + switch (cmd) { + case ONENAND_CMD_OTP_ACCESS: + block = (int) (addr >> this->erase_shift); + page = -1; + break; + + default: + block = (int) (addr >> this->erase_shift); + page = (int) (addr >> this->page_shift); + + if (ONENAND_IS_2PLANE(this)) { + /* Make the even block number */ + block &= ~1; + /* Is it the odd plane? 
*/ + if (addr & this->writesize) + block++; + page >>= 1; + } + page &= this->page_mask; + break; + } + + if (block != -1) { + /* Write 'DFS, FBA' of Flash */ + value = onenand_block_address(this, block); + this->write_word(value, this->base + + ONENAND_REG_START_ADDRESS1); + } + + if (page != -1) { + /* Now we use page size operation */ + int sectors = 4, count = 4; + int dataram; + + switch (cmd) { + default: + if (ONENAND_IS_2PLANE(this) && cmd == ONENAND_CMD_PROG) + cmd = ONENAND_CMD_2X_PROG; + dataram = ONENAND_CURRENT_BUFFERRAM(this); + break; + } + + /* Write 'FPA, FSA' of Flash */ + value = onenand_page_address(page, sectors); + this->write_word(value, this->base + + ONENAND_REG_START_ADDRESS8); + + /* Write 'BSA, BSC' of DataRAM */ + value = onenand_buffer_address(dataram, sectors, count); + this->write_word(value, this->base + ONENAND_REG_START_BUFFER); + } + + /* Interrupt clear */ + this->write_word(ONENAND_INT_CLEAR, this->base + ONENAND_REG_INTERRUPT); + + /* Write command */ + this->write_word(cmd, this->base + ONENAND_REG_COMMAND); + + return 0; +} + +/** + * onenand_otp_write_oob_nolock - [Internal] OneNAND write out-of-band, specific to OTP + * @param mtd MTD device structure + * @param to offset to write to + * @param len number of bytes to write + * @param retlen pointer to variable to store the number of written bytes + * @param buf the data to write + * + * OneNAND write out-of-band only for OTP + */ +static int onenand_otp_write_oob_nolock(struct mtd_info *mtd, loff_t to, + struct mtd_oob_ops *ops) +{ + struct onenand_chip *this = mtd->priv; + int column, ret = 0, oobsize; + int written = 0; + u_char *oobbuf; + size_t len = ops->ooblen; + const u_char *buf = ops->oobbuf; + int block, value, status; + + to += ops->ooboffs; + + /* Initialize retlen, in case of early exit */ + ops->oobretlen = 0; + + oobsize = mtd->oobsize; + + column = to & (mtd->oobsize - 1); + + oobbuf = this->oob_buf; + + /* Loop until all data write */ + while (written < len) { + int thislen = min_t(int, oobsize, len - written); + + cond_resched(); + + block = (int) (to >> this->erase_shift); + /* + * Write 'DFS, FBA' of Flash + * Add: F100h DQ=DFS, FBA + */ + + value = onenand_block_address(this, block); + this->write_word(value, this->base + + ONENAND_REG_START_ADDRESS1); + + /* + * Select DataRAM for DDP + * Add: F101h DQ=DBS + */ + + value = onenand_bufferram_address(this, block); + this->write_word(value, this->base + + ONENAND_REG_START_ADDRESS2); + ONENAND_SET_NEXT_BUFFERRAM(this); + + /* + * Enter OTP access mode + */ + this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0); + this->wait(mtd, FL_OTPING); + + /* We send data to spare ram with oobsize + * to prevent byte access */ + memcpy(oobbuf + column, buf, thislen); + + /* + * Write Data into DataRAM + * Add: 8th Word + * in sector0/spare/page0 + * DQ=XXFCh + */ + this->write_bufferram(mtd, ONENAND_SPARERAM, + oobbuf, 0, mtd->oobsize); + + onenand_otp_command(mtd, ONENAND_CMD_PROGOOB, to, mtd->oobsize); + onenand_update_bufferram(mtd, to, 0); + if (ONENAND_IS_2PLANE(this)) { + ONENAND_SET_BUFFERRAM1(this); + onenand_update_bufferram(mtd, to + this->writesize, 0); + } + + ret = this->wait(mtd, FL_WRITING); + if (ret) { + printk(KERN_ERR "%s: write failed %d\n", __func__, ret); + break; + } + + /* Exit OTP access mode */ + this->command(mtd, ONENAND_CMD_RESET, 0, 0); + this->wait(mtd, FL_RESETING); + + status = this->read_word(this->base + ONENAND_REG_CTRL_STATUS); + status &= 0x60; + + if (status == 0x60) { + printk(KERN_DEBUG 
"\nBLOCK\tSTATUS\n"); + printk(KERN_DEBUG "1st Block\tLOCKED\n"); + printk(KERN_DEBUG "OTP Block\tLOCKED\n"); + } else if (status == 0x20) { + printk(KERN_DEBUG "\nBLOCK\tSTATUS\n"); + printk(KERN_DEBUG "1st Block\tLOCKED\n"); + printk(KERN_DEBUG "OTP Block\tUN-LOCKED\n"); + } else if (status == 0x40) { + printk(KERN_DEBUG "\nBLOCK\tSTATUS\n"); + printk(KERN_DEBUG "1st Block\tUN-LOCKED\n"); + printk(KERN_DEBUG "OTP Block\tLOCKED\n"); + } else { + printk(KERN_DEBUG "Reboot to check\n"); + } + + written += thislen; + if (written == len) + break; + + to += mtd->writesize; + buf += thislen; + column = 0; + } + + ops->oobretlen = written; + + return ret; +} + /* Internal OTP operation */ typedef int (*otp_op_t)(struct mtd_info *mtd, loff_t form, size_t len, size_t *retlen, u_char *buf); @@ -2693,11 +2909,11 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len, struct mtd_oob_ops ops; int ret; - /* Enter OTP access mode */ - this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0); - this->wait(mtd, FL_OTPING); - if (FLEXONENAND(this)) { + + /* Enter OTP access mode */ + this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0); + this->wait(mtd, FL_OTPING); /* * For Flex-OneNAND, we write lock mark to 1st word of sector 4 of * main area of page 49. @@ -2708,19 +2924,19 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len, ops.oobbuf = NULL; ret = onenand_write_ops_nolock(mtd, mtd->writesize * 49, &ops); *retlen = ops.retlen; + + /* Exit OTP access mode */ + this->command(mtd, ONENAND_CMD_RESET, 0, 0); + this->wait(mtd, FL_RESETING); } else { ops.mode = MTD_OOB_PLACE; ops.ooblen = len; ops.oobbuf = buf; ops.ooboffs = 0; - ret = onenand_write_oob_nolock(mtd, from, &ops); + ret = onenand_otp_write_oob_nolock(mtd, from, &ops); *retlen = ops.oobretlen; } - /* Exit OTP access mode */ - this->command(mtd, ONENAND_CMD_RESET, 0, 0); - this->wait(mtd, FL_RESETING); - return ret; } @@ -2751,16 +2967,21 @@ static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len, if (density < ONENAND_DEVICE_DENSITY_512Mb) otp_pages = 20; else - otp_pages = 10; + otp_pages = 50; if (mode == MTD_OTP_FACTORY) { from += mtd->writesize * otp_pages; - otp_pages = 64 - otp_pages; + otp_pages = ONENAND_PAGES_PER_BLOCK - otp_pages; } /* Check User/Factory boundary */ - if (((mtd->writesize * otp_pages) - (from + len)) < 0) - return 0; + if (mode == MTD_OTP_USER) { + if (((mtd->writesize * otp_pages) - (from + len)) < 0) + return 0; + } else { + if (((mtd->writesize * otp_pages) - len) < 0) + return 0; + } onenand_get_device(mtd, FL_OTPING); while (len > 0 && otp_pages > 0) { @@ -2783,13 +3004,12 @@ static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len, *retlen += sizeof(struct otp_info); } else { size_t tmp_retlen; - int size = len; ret = action(mtd, from, len, &tmp_retlen, buf); - buf += size; - len -= size; - *retlen += size; + buf += tmp_retlen; + len -= tmp_retlen; + *retlen += tmp_retlen; if (ret) break; @@ -2902,20 +3122,10 @@ static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, u_char *buf = FLEXONENAND(this) ? this->page_buf : this->oob_buf; size_t retlen; int ret; + unsigned int otp_lock_offset = ONENAND_OTP_LOCK_OFFSET; memset(buf, 0xff, FLEXONENAND(this) ? 
this->writesize : mtd->oobsize); - /* - * Note: OTP lock operation - * OTP block : 0xXXFC - * 1st block : 0xXXF3 (If chip support) - * Both : 0xXXF0 (If chip support) - */ - if (FLEXONENAND(this)) - buf[FLEXONENAND_OTP_LOCK_OFFSET] = 0xFC; - else - buf[ONENAND_OTP_LOCK_OFFSET] = 0xFC; - /* * Write lock mark to 8th word of sector0 of page0 of the spare0. * We write 16 bytes spare area instead of 2 bytes. @@ -2926,10 +3136,30 @@ static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, from = 0; len = FLEXONENAND(this) ? mtd->writesize : 16; + /* + * Note: OTP lock operation + * OTP block : 0xXXFC XX 1111 1100 + * 1st block : 0xXXF3 (If chip support) XX 1111 0011 + * Both : 0xXXF0 (If chip support) XX 1111 0000 + */ + if (FLEXONENAND(this)) + otp_lock_offset = FLEXONENAND_OTP_LOCK_OFFSET; + + /* ONENAND_OTP_AREA | ONENAND_OTP_BLOCK0 | ONENAND_OTP_AREA_BLOCK0 */ + if (otp == 1) + buf[otp_lock_offset] = 0xFC; + else if (otp == 2) + buf[otp_lock_offset] = 0xF3; + else if (otp == 3) + buf[otp_lock_offset] = 0xF0; + else if (otp != 0) + printk(KERN_DEBUG "[OneNAND] Invalid option selected for OTP\n"); + ret = onenand_otp_walk(mtd, from, len, &retlen, buf, do_otp_lock, MTD_OTP_USER); return ret ? : retlen; } + #endif /* CONFIG_MTD_ONENAND_OTP */ /** diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h index f57e29e17bb0..5509eb06b326 100644 --- a/include/linux/mtd/onenand.h +++ b/include/linux/mtd/onenand.h @@ -1,7 +1,7 @@ /* * linux/include/linux/mtd/onenand.h * - * Copyright (C) 2005-2007 Samsung Electronics + * Copyright © 2005-2009 Samsung Electronics * Kyungmin Park * * This program is free software; you can redistribute it and/or modify @@ -137,6 +137,8 @@ struct onenand_chip { /* * Helper macros */ +#define ONENAND_PAGES_PER_BLOCK (1<<6) + #define ONENAND_CURRENT_BUFFERRAM(this) (this->bufferram_index) #define ONENAND_NEXT_BUFFERRAM(this) (this->bufferram_index ^ 1) #define ONENAND_SET_NEXT_BUFFERRAM(this) (this->bufferram_index ^= 1) -- cgit v1.2.3 From 1c63aca32903efc219fb9df72bae5344f3e54ed5 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Thu, 22 Oct 2009 16:53:32 +0900 Subject: mtd: Add __nand_calculate_ecc() to NAND ECC functions Add __nand_calculate_ecc() which does not take struct mtd_info. The built-in 256/512 software ECC calculation and correction tester will use it. 
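As a rough sketch of how such a tester might exercise the new helper (the test function itself is hypothetical; the prototypes are the ones added or kept by this patch):

    #include <linux/types.h>
    #include <linux/mtd/nand_ecc.h>

    /* Round-trip test on a 256-byte buffer: compute the ECC, flip one
     * data bit, recompute, and let the correction code repair it. */
    static int ecc_roundtrip_256(u_char *data)
    {
            u_char ecc_stored[3], ecc_calc[3];

            __nand_calculate_ecc(data, 256, ecc_stored);    /* "on-flash" ECC */
            data[10] ^= 0x04;                               /* inject a 1-bit error */
            __nand_calculate_ecc(data, 256, ecc_calc);

            /* A non-negative return means the error was handled. */
            return __nand_correct_data(data, ecc_stored, ecc_calc, 256);
    }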
Signed-off-by: Akinobu Mita Acked-by: Vimal Singh Signed-off-by: Artem Bityutskiy Signed-off-by: David Woodhouse --- drivers/mtd/nand/nand_ecc.c | 25 ++++++++++++++++++++----- include/linux/mtd/nand_ecc.h | 10 ++++++++-- 2 files changed, 28 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c index db7ae9d6a296..809fb53304ae 100644 --- a/drivers/mtd/nand/nand_ecc.c +++ b/drivers/mtd/nand/nand_ecc.c @@ -150,20 +150,19 @@ static const char addressbits[256] = { }; /** - * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte + * __nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte * block - * @mtd: MTD block structure * @buf: input buffer with raw data + * @eccsize: data bytes per ecc step (256 or 512) * @code: output buffer with ECC */ -int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf, +void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize, unsigned char *code) { int i; const uint32_t *bp = (uint32_t *)buf; /* 256 or 512 bytes/ecc */ - const uint32_t eccsize_mult = - (((struct nand_chip *)mtd->priv)->ecc.size) >> 8; + const uint32_t eccsize_mult = eccsize >> 8; uint32_t cur; /* current value in buffer */ /* rp0..rp15..rp17 are the various accumulated parities (per byte) */ uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7; @@ -412,6 +411,22 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf, (invparity[par & 0x55] << 2) | (invparity[rp17] << 1) | (invparity[rp16] << 0); +} +EXPORT_SYMBOL(__nand_calculate_ecc); + +/** + * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte + * block + * @mtd: MTD block structure + * @buf: input buffer with raw data + * @code: output buffer with ECC + */ +int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf, + unsigned char *code) +{ + __nand_calculate_ecc(buf, + ((struct nand_chip *)mtd->priv)->ecc.size, code); + return 0; } EXPORT_SYMBOL(nand_calculate_ecc); diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h index 052ea8ca2434..41bc013571d0 100644 --- a/include/linux/mtd/nand_ecc.h +++ b/include/linux/mtd/nand_ecc.h @@ -16,7 +16,13 @@ struct mtd_info; /* - * Calculate 3 byte ECC code for 256 byte block + * Calculate 3 byte ECC code for eccsize byte block + */ +void __nand_calculate_ecc(const u_char *dat, unsigned int eccsize, + u_char *ecc_code); + +/* + * Calculate 3 byte ECC code for 256/512 byte block */ int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code); @@ -27,7 +33,7 @@ int __nand_correct_data(u_char *dat, u_char *read_ecc, u_char *calc_ecc, unsigned int eccsize); /* - * Detect and correct a 1 bit error for 256 byte block + * Detect and correct a 1 bit error for 256/512 byte block */ int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc); -- cgit v1.2.3 From 72073027ee95d059eb5a064da4a978efab36d4ab Mon Sep 17 00:00:00 2001 From: Mika Korhonen Date: Fri, 23 Oct 2009 07:50:43 +0200 Subject: mtd: OneNAND: multiblock erase support Add support for multiblock erase command. OneNANDs (excluding Flex-OneNAND) are capable of simultaneous erase of up to 64 eraseblocks which is much faster. This changes the erase requests for regions covering multiple eraseblocks to be performed using multiblock erase. 
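A condensed, stand-alone sketch of the batching this introduces (plain C, not driver code; the 64-block limit and the command codes match the definitions added below):

    #include <stdio.h>

    #define MB_ERASE_MAX_BLK_COUNT 64   /* hardware limit per batch */

    /* Split an erase covering n_blocks eraseblocks into batches: all but
     * the last block of a batch are queued with MULTIBLOCK_ERASE (0x95),
     * the last one is issued as a regular ERASE (0x94), and the whole
     * batch is then checked with ERASE_VERIFY (0x71). */
    static void plan_erase(int n_blocks)
    {
            while (n_blocks > 0) {
                    int batch = n_blocks < MB_ERASE_MAX_BLK_COUNT ?
                                n_blocks : MB_ERASE_MAX_BLK_COUNT;

                    printf("queue %d block(s) via multiblock erase\n", batch - 1);
                    printf("regular erase on last block, then verify %d block(s)\n", batch);
                    n_blocks -= batch;
            }
    }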
Signed-off-by: Mika Korhonen Reviewed-by: Adrian Hunter Signed-off-by: Artem Bityutskiy Signed-off-by: David Woodhouse --- drivers/mtd/onenand/omap2.c | 22 ++++- drivers/mtd/onenand/onenand_base.c | 173 ++++++++++++++++++++++++++++++++++++- include/linux/mtd/flashchip.h | 4 +- include/linux/mtd/onenand_regs.h | 2 + 4 files changed, 194 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c index 0108ed42e877..2dafd0949be5 100644 --- a/drivers/mtd/onenand/omap2.c +++ b/drivers/mtd/onenand/omap2.c @@ -112,10 +112,24 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state) unsigned long timeout; u32 syscfg; - if (state == FL_RESETING) { - int i; + if (state == FL_RESETING || state == FL_PREPARING_ERASE || + state == FL_VERIFYING_ERASE) { + int i = 21; + unsigned int intr_flags = ONENAND_INT_MASTER; + + switch (state) { + case FL_RESETING: + intr_flags |= ONENAND_INT_RESET; + break; + case FL_PREPARING_ERASE: + intr_flags |= ONENAND_INT_ERASE; + break; + case FL_VERIFYING_ERASE: + i = 101; + break; + } - for (i = 0; i < 20; i++) { + while (--i) { udelay(1); intr = read_reg(c, ONENAND_REG_INTERRUPT); if (intr & ONENAND_INT_MASTER) @@ -126,7 +140,7 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state) wait_err("controller error", state, ctrl, intr); return -EIO; } - if (!(intr & ONENAND_INT_RESET)) { + if ((intr & intr_flags) != intr_flags) { wait_err("timeout", state, ctrl, intr); return -EIO; } diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c index 894ebadc3e69..266f471901e5 100644 --- a/drivers/mtd/onenand/onenand_base.c +++ b/drivers/mtd/onenand/onenand_base.c @@ -34,6 +34,13 @@ #include +/* + * Multiblock erase if number of blocks to erase is 2 or more. + * Maximum number of blocks for simultaneous erase is 64. + */ +#define MB_ERASE_MIN_BLK_COUNT 2 +#define MB_ERASE_MAX_BLK_COUNT 64 + /* Default Flex-OneNAND boundary and lock respectively */ static int flex_bdry[MAX_DIES * 2] = { -1, 0, -1, 0 }; @@ -353,6 +360,8 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le break; case ONENAND_CMD_ERASE: + case ONENAND_CMD_MULTIBLOCK_ERASE: + case ONENAND_CMD_ERASE_VERIFY: case ONENAND_CMD_BUFFERRAM: case ONENAND_CMD_OTP_ACCESS: block = onenand_block(this, addr); @@ -497,7 +506,7 @@ static int onenand_wait(struct mtd_info *mtd, int state) if (interrupt & flags) break; - if (state != FL_READING) + if (state != FL_READING && state != FL_PREPARING_ERASE) cond_resched(); } /* To get correct interrupt status in timeout case */ @@ -530,6 +539,18 @@ static int onenand_wait(struct mtd_info *mtd, int state) return -EIO; } + if (state == FL_PREPARING_ERASE && !(interrupt & ONENAND_INT_ERASE)) { + printk(KERN_ERR "%s: mb erase timeout! ctrl=0x%04x intr=0x%04x\n", + __func__, ctrl, interrupt); + return -EIO; + } + + if (!(interrupt & ONENAND_INT_MASTER)) { + printk(KERN_ERR "%s: timeout! 
ctrl=0x%04x intr=0x%04x\n", + __func__, ctrl, interrupt); + return -EIO; + } + /* If there's controller error, it's a real error */ if (ctrl & ONENAND_CTRL_ERROR) { printk(KERN_ERR "%s: controller error = 0x%04x\n", @@ -2182,6 +2203,148 @@ static int onenand_block_isbad_nolock(struct mtd_info *mtd, loff_t ofs, int allo return bbm->isbad_bbt(mtd, ofs, allowbbt); } + +static int onenand_multiblock_erase_verify(struct mtd_info *mtd, + struct erase_info *instr) +{ + struct onenand_chip *this = mtd->priv; + loff_t addr = instr->addr; + int len = instr->len; + unsigned int block_size = (1 << this->erase_shift); + int ret = 0; + + while (len) { + this->command(mtd, ONENAND_CMD_ERASE_VERIFY, addr, block_size); + ret = this->wait(mtd, FL_VERIFYING_ERASE); + if (ret) { + printk(KERN_ERR "%s: Failed verify, block %d\n", + __func__, onenand_block(this, addr)); + instr->state = MTD_ERASE_FAILED; + instr->fail_addr = addr; + return -1; + } + len -= block_size; + addr += block_size; + } + return 0; +} + +/** + * onenand_multiblock_erase - [Internal] erase block(s) using multiblock erase + * @param mtd MTD device structure + * @param instr erase instruction + * @param region erase region + * + * Erase one or more blocks up to 64 block at a time + */ +static int onenand_multiblock_erase(struct mtd_info *mtd, + struct erase_info *instr, + unsigned int block_size) +{ + struct onenand_chip *this = mtd->priv; + loff_t addr = instr->addr; + int len = instr->len; + int eb_count = 0; + int ret = 0; + int bdry_block = 0; + + instr->state = MTD_ERASING; + + if (ONENAND_IS_DDP(this)) { + loff_t bdry_addr = this->chipsize >> 1; + if (addr < bdry_addr && (addr + len) > bdry_addr) + bdry_block = bdry_addr >> this->erase_shift; + } + + /* Pre-check bbs */ + while (len) { + /* Check if we have a bad block, we do not erase bad blocks */ + if (onenand_block_isbad_nolock(mtd, addr, 0)) { + printk(KERN_WARNING "%s: attempt to erase a bad block " + "at addr 0x%012llx\n", + __func__, (unsigned long long) addr); + instr->state = MTD_ERASE_FAILED; + return -EIO; + } + len -= block_size; + addr += block_size; + } + + len = instr->len; + addr = instr->addr; + + /* loop over 64 eb batches */ + while (len) { + struct erase_info verify_instr = *instr; + int max_eb_count = MB_ERASE_MAX_BLK_COUNT; + + verify_instr.addr = addr; + verify_instr.len = 0; + + /* do not cross chip boundary */ + if (bdry_block) { + int this_block = (addr >> this->erase_shift); + + if (this_block < bdry_block) { + max_eb_count = min(max_eb_count, + (bdry_block - this_block)); + } + } + + eb_count = 0; + + while (len > block_size && eb_count < (max_eb_count - 1)) { + this->command(mtd, ONENAND_CMD_MULTIBLOCK_ERASE, + addr, block_size); + onenand_invalidate_bufferram(mtd, addr, block_size); + + ret = this->wait(mtd, FL_PREPARING_ERASE); + if (ret) { + printk(KERN_ERR "%s: Failed multiblock erase, " + "block %d\n", __func__, + onenand_block(this, addr)); + instr->state = MTD_ERASE_FAILED; + instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN; + return -EIO; + } + + len -= block_size; + addr += block_size; + eb_count++; + } + + /* last block of 64-eb series */ + cond_resched(); + this->command(mtd, ONENAND_CMD_ERASE, addr, block_size); + onenand_invalidate_bufferram(mtd, addr, block_size); + + ret = this->wait(mtd, FL_ERASING); + /* Check if it is write protected */ + if (ret) { + printk(KERN_ERR "%s: Failed erase, block %d\n", + __func__, onenand_block(this, addr)); + instr->state = MTD_ERASE_FAILED; + instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN; + return -EIO; + } + + len -= 
block_size; + addr += block_size; + eb_count++; + + /* verify */ + verify_instr.len = eb_count * block_size; + if (onenand_multiblock_erase_verify(mtd, &verify_instr)) { + instr->state = verify_instr.state; + instr->fail_addr = verify_instr.fail_addr; + return -EIO; + } + + } + return 0; +} + + /** * onenand_block_by_block_erase - [Internal] erase block(s) using regular erase * @param mtd MTD device structure @@ -2315,7 +2478,13 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr) /* Grab the lock and see if the device is available */ onenand_get_device(mtd, FL_ERASING); - ret = onenand_block_by_block_erase(mtd, instr, region, block_size); + if (region || instr->len < MB_ERASE_MIN_BLK_COUNT * block_size) { + /* region is set for Flex-OneNAND (no mb erase) */ + ret = onenand_block_by_block_erase(mtd, instr, + region, block_size); + } else { + ret = onenand_multiblock_erase(mtd, instr, block_size); + } /* Deselect and wake up anyone waiting on the device */ onenand_release_device(mtd); diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h index f350a4879f75..d0bf422ae374 100644 --- a/include/linux/mtd/flashchip.h +++ b/include/linux/mtd/flashchip.h @@ -41,9 +41,11 @@ typedef enum { /* These 2 come from nand_state_t, which has been unified here */ FL_READING, FL_CACHEDPRG, - /* These 2 come from onenand_state_t, which has been unified here */ + /* These 4 come from onenand_state_t, which has been unified here */ FL_RESETING, FL_OTPING, + FL_PREPARING_ERASE, + FL_VERIFYING_ERASE, FL_UNKNOWN } flstate_t; diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h index acadbf53a69f..cd6f3b431195 100644 --- a/include/linux/mtd/onenand_regs.h +++ b/include/linux/mtd/onenand_regs.h @@ -131,6 +131,8 @@ #define ONENAND_CMD_LOCK_TIGHT (0x2C) #define ONENAND_CMD_UNLOCK_ALL (0x27) #define ONENAND_CMD_ERASE (0x94) +#define ONENAND_CMD_MULTIBLOCK_ERASE (0x95) +#define ONENAND_CMD_ERASE_VERIFY (0x71) #define ONENAND_CMD_RESET (0xF0) #define ONENAND_CMD_OTP_ACCESS (0x65) #define ONENAND_CMD_READID (0x90) -- cgit v1.2.3 From b1c6e6db5bb7acad82e1c64914c6a9404dae3ee1 Mon Sep 17 00:00:00 2001 From: Ben Dooks Date: Mon, 2 Nov 2009 18:12:33 +0000 Subject: mtd: nand: add option to quieten off the no device found messgae Add NAND_SCAN_SILENT_NODEV to chip->options to the user-worrying messages 'No NAND device found!!!'. This message often worries users (was three exclamation marks really necessary?) and especially in systems such as the Simtec Osiris where there may be optional NAND devices which are not known until probe time. Revised version of the original NAND_PROBE_SPECULATIVE patch after comments by Artem Bityutskiy about adding a whole new call. 
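A small sketch of the intended use from a board driver with an optional NAND socket (the probe function and its error handling are hypothetical; only the flag is new here):

    #include <linux/errno.h>
    #include <linux/mtd/nand.h>

    static int board_optional_nand_init(struct mtd_info *mtd,
                                        struct nand_chip *chip)
    {
            /* The part may simply not be fitted: probe quietly. */
            chip->options |= NAND_SCAN_SILENT_NODEV;

            if (nand_scan(mtd, 1))          /* non-zero: no device found */
                    return -ENODEV;         /* skip without alarming the user */

            return 0;
    }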
Signed-off-by: Ben Dooks Signed-off-by: Simtec Linux Team Signed-off-by: Artem Bityutskiy Signed-off-by: David Woodhouse --- drivers/mtd/nand/nand_base.c | 3 ++- include/linux/mtd/nand.h | 4 +++- 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index ba06473326d1..724cb2c9ad3f 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -2756,7 +2756,8 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips) type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id); if (IS_ERR(type)) { - printk(KERN_WARNING "No NAND device found!!!\n"); + if (!(chip->options & NAND_SCAN_SILENT_NODEV)) + printk(KERN_WARNING "No NAND device found.\n"); chip->select_chip(mtd, -1); return PTR_ERR(type); } diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 2476078a032f..ccab9dfc5217 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -170,7 +170,6 @@ typedef enum { /* Chip does not allow subpage writes */ #define NAND_NO_SUBPAGE_WRITE 0x00000200 - /* Options valid for Samsung large page devices */ #define NAND_SAMSUNG_LP_OPTIONS \ (NAND_NO_PADDING | NAND_CACHEPRG | NAND_COPYBACK) @@ -196,6 +195,9 @@ typedef enum { /* This option is defined if the board driver allocates its own buffers (e.g. because it needs them DMA-coherent */ #define NAND_OWN_BUFFERS 0x00040000 +/* Chip may not exist, so silence any errors in scan */ +#define NAND_SCAN_SILENT_NODEV 0x00080000 + /* Options set by nand scan */ /* Nand scan has allocated controller struct */ #define NAND_CONTROLLER_ALLOC 0x80000000 -- cgit v1.2.3 From b2ef1a2bb2eb49cd7c75b22f1ea40ead0bdfdb8a Mon Sep 17 00:00:00 2001 From: Hans-Christian Egtvedt Date: Thu, 5 Nov 2009 15:53:43 +0100 Subject: mtd: move manufacturer to the common cfi.h header file This patch moves the MANUFACTURER_ST and MANUFACTURER_INTEL to the include/linux/mtd/cfi.h header file and renames them to CFI_MFR_ST and CFI_MFR_INTEL. CFI_MFR_ST was already present there. All references in drivers/mtd/chips/cfi_cmdset_0001.c are updated to reflect this. 
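For illustration (a hypothetical user, not part of this patch), chip quirk tables can now key off the shared IDs from cfi.h instead of carrying private MANUFACTURER_* copies:

    #include <linux/mtd/cfi.h>

    /* The quirk function below is a placeholder. */
    static void example_fixup(struct mtd_info *mtd, void *param) { }

    static struct cfi_fixup example_fixups[] = {
            { CFI_MFR_INTEL, CFI_ID_ANY, example_fixup, NULL },
            { CFI_MFR_ST,    CFI_ID_ANY, example_fixup, NULL },
            { 0, 0, NULL, NULL }
    };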
Signed-off-by: Hans-Christian Egtvedt Acked-by: Nicolas Pitre Signed-off-by: Artem Bityutskiy Signed-off-by: David Woodhouse --- drivers/mtd/chips/cfi_cmdset_0001.c | 20 ++++++++++---------- include/linux/mtd/cfi.h | 9 +++++---- 2 files changed, 15 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c index 94be67e880a7..5fbf29e1e64f 100644 --- a/drivers/mtd/chips/cfi_cmdset_0001.c +++ b/drivers/mtd/chips/cfi_cmdset_0001.c @@ -43,11 +43,11 @@ // debugging, turns off buffer write mode if set to 1 #define FORCE_WORD_WRITE 0 -#define MANUFACTURER_INTEL 0x0089 +/* Intel chips */ #define I82802AB 0x00ad #define I82802AC 0x00ac #define PF38F4476 0x881c -#define MANUFACTURER_ST 0x0020 +/* STMicroelectronics chips */ #define M50LPW080 0x002F #define M50FLW080A 0x0080 #define M50FLW080B 0x0081 @@ -308,16 +308,16 @@ static struct cfi_fixup cfi_fixup_table[] = { #endif { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL }, { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL }, - { MANUFACTURER_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, }, + { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, }, { 0, 0, NULL, NULL } }; static struct cfi_fixup jedec_fixup_table[] = { - { MANUFACTURER_INTEL, I82802AB, fixup_use_fwh_lock, NULL, }, - { MANUFACTURER_INTEL, I82802AC, fixup_use_fwh_lock, NULL, }, - { MANUFACTURER_ST, M50LPW080, fixup_use_fwh_lock, NULL, }, - { MANUFACTURER_ST, M50FLW080A, fixup_use_fwh_lock, NULL, }, - { MANUFACTURER_ST, M50FLW080B, fixup_use_fwh_lock, NULL, }, + { CFI_MFR_INTEL, I82802AB, fixup_use_fwh_lock, NULL, }, + { CFI_MFR_INTEL, I82802AC, fixup_use_fwh_lock, NULL, }, + { CFI_MFR_ST, M50LPW080, fixup_use_fwh_lock, NULL, }, + { CFI_MFR_ST, M50FLW080A, fixup_use_fwh_lock, NULL, }, + { CFI_MFR_ST, M50FLW080B, fixup_use_fwh_lock, NULL, }, { 0, 0, NULL, NULL } }; static struct cfi_fixup fixup_table[] = { @@ -333,7 +333,7 @@ static struct cfi_fixup fixup_table[] = { static void cfi_fixup_major_minor(struct cfi_private *cfi, struct cfi_pri_intelext *extp) { - if (cfi->mfr == MANUFACTURER_INTEL && + if (cfi->mfr == CFI_MFR_INTEL && cfi->id == PF38F4476 && extp->MinorVersion == '3') extp->MinorVersion = '1'; } @@ -2249,7 +2249,7 @@ static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len, /* Some chips have OTP located in the _top_ partition only. 
For example: Intel 28F256L18T (T means top-parameter device) */ - if (cfi->mfr == MANUFACTURER_INTEL) { + if (cfi->mfr == CFI_MFR_INTEL) { switch (cfi->id) { case 0x880b: case 0x880c: diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h index 88d3d8fbf9f2..df89f4275232 100644 --- a/include/linux/mtd/cfi.h +++ b/include/linux/mtd/cfi.h @@ -518,10 +518,11 @@ struct cfi_fixup { #define CFI_MFR_ANY 0xffff #define CFI_ID_ANY 0xffff -#define CFI_MFR_AMD 0x0001 -#define CFI_MFR_ATMEL 0x001F -#define CFI_MFR_SAMSUNG 0x00EC -#define CFI_MFR_ST 0x0020 /* STMicroelectronics */ +#define CFI_MFR_AMD 0x0001 +#define CFI_MFR_INTEL 0x0089 +#define CFI_MFR_ATMEL 0x001F +#define CFI_MFR_SAMSUNG 0x00EC +#define CFI_MFR_ST 0x0020 /* STMicroelectronics */ void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups); -- cgit v1.2.3 From 456b565cc52fbcdaa2e19ffdf40d9dd3b726d603 Mon Sep 17 00:00:00 2001 From: Simon Kagstrom Date: Fri, 16 Oct 2009 14:09:18 +0200 Subject: core: Add kernel message dumper to call on oopses and panics The core functionality is implemented as per Linus suggestion from http://lists.infradead.org/pipermail/linux-mtd/2009-October/027620.html (with the kmsg_dump implementation by Linus). A struct kmsg_dumper has been added which contains a callback to dump the kernel log buffers on crashes. The kmsg_dump function gets called from oops_exit() and panic() and invokes this callbacks with the crash reason. [dwmw2: Fix log_end handling] Signed-off-by: Simon Kagstrom Reviewed-by: Anders Grafstrom Reviewed-by: Linus Torvalds Acked-by: Ingo Molnar Signed-off-by: Artem Bityutskiy Signed-off-by: David Woodhouse --- include/linux/kmsg_dump.h | 44 +++++++++++++++++ kernel/panic.c | 3 ++ kernel/printk.c | 119 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 166 insertions(+) create mode 100644 include/linux/kmsg_dump.h (limited to 'include') diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h new file mode 100644 index 000000000000..7f089ec5ef32 --- /dev/null +++ b/include/linux/kmsg_dump.h @@ -0,0 +1,44 @@ +/* + * linux/include/kmsg_dump.h + * + * Copyright (C) 2009 Net Insight AB + * + * Author: Simon Kagstrom + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. + */ +#ifndef _LINUX_KMSG_DUMP_H +#define _LINUX_KMSG_DUMP_H + +#include + +enum kmsg_dump_reason { + KMSG_DUMP_OOPS, + KMSG_DUMP_PANIC, +}; + +/** + * struct kmsg_dumper - kernel crash message dumper structure + * @dump: The callback which gets called on crashes. The buffer is passed + * as two sections, where s1 (length l1) contains the older + * messages and s2 (length l2) contains the newer. + * @list: Entry in the dumper list (private) + * @registered: Flag that specifies if this is already registered + */ +struct kmsg_dumper { + void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason, + const char *s1, unsigned long l1, + const char *s2, unsigned long l2); + struct list_head list; + int registered; +}; + +void kmsg_dump(enum kmsg_dump_reason reason); + +int kmsg_dump_register(struct kmsg_dumper *dumper); + +int kmsg_dump_unregister(struct kmsg_dumper *dumper); + +#endif /* _LINUX_KMSG_DUMP_H */ diff --git a/kernel/panic.c b/kernel/panic.c index bcdef26e3332..8c43226a544d 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -10,6 +10,7 @@ */ #include #include +#include #include #include #include @@ -74,6 +75,7 @@ NORET_TYPE void panic(const char * fmt, ...) 
dump_stack(); #endif + kmsg_dump(KMSG_DUMP_PANIC); /* * If we have crashed and we have a crash kernel loaded let it handle * everything else. @@ -338,6 +340,7 @@ void oops_exit(void) { do_oops_enter_exit(); print_oops_end_marker(); + kmsg_dump(KMSG_DUMP_OOPS); } #ifdef WANT_WARN_ON_SLOWPATH diff --git a/kernel/printk.c b/kernel/printk.c index f38b07f78a4e..051d1f50648f 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -33,6 +33,7 @@ #include #include #include +#include #include @@ -1405,3 +1406,121 @@ bool printk_timed_ratelimit(unsigned long *caller_jiffies, } EXPORT_SYMBOL(printk_timed_ratelimit); #endif + +static DEFINE_SPINLOCK(dump_list_lock); +static LIST_HEAD(dump_list); + +/** + * kmsg_dump_register - register a kernel log dumper. + * @dump: pointer to the kmsg_dumper structure + * + * Adds a kernel log dumper to the system. The dump callback in the + * structure will be called when the kernel oopses or panics and must be + * set. Returns zero on success and %-EINVAL or %-EBUSY otherwise. + */ +int kmsg_dump_register(struct kmsg_dumper *dumper) +{ + unsigned long flags; + int err = -EBUSY; + + /* The dump callback needs to be set */ + if (!dumper->dump) + return -EINVAL; + + spin_lock_irqsave(&dump_list_lock, flags); + /* Don't allow registering multiple times */ + if (!dumper->registered) { + dumper->registered = 1; + list_add_tail(&dumper->list, &dump_list); + err = 0; + } + spin_unlock_irqrestore(&dump_list_lock, flags); + + return err; +} +EXPORT_SYMBOL_GPL(kmsg_dump_register); + +/** + * kmsg_dump_unregister - unregister a kmsg dumper. + * @dump: pointer to the kmsg_dumper structure + * + * Removes a dump device from the system. Returns zero on success and + * %-EINVAL otherwise. + */ +int kmsg_dump_unregister(struct kmsg_dumper *dumper) +{ + unsigned long flags; + int err = -EINVAL; + + spin_lock_irqsave(&dump_list_lock, flags); + if (dumper->registered) { + dumper->registered = 0; + list_del(&dumper->list); + err = 0; + } + spin_unlock_irqrestore(&dump_list_lock, flags); + + return err; +} +EXPORT_SYMBOL_GPL(kmsg_dump_unregister); + +static const char const *kmsg_reasons[] = { + [KMSG_DUMP_OOPS] = "oops", + [KMSG_DUMP_PANIC] = "panic", +}; + +static const char *kmsg_to_str(enum kmsg_dump_reason reason) +{ + if (reason >= ARRAY_SIZE(kmsg_reasons) || reason < 0) + return "unknown"; + + return kmsg_reasons[reason]; +} + +/** + * kmsg_dump - dump kernel log to kernel message dumpers. + * @reason: the reason (oops, panic etc) for dumping + * + * Iterate through each of the dump devices and call the oops/panic + * callbacks with the log buffer. + */ +void kmsg_dump(enum kmsg_dump_reason reason) +{ + unsigned long end; + unsigned chars; + struct kmsg_dumper *dumper; + const char *s1, *s2; + unsigned long l1, l2; + unsigned long flags; + + /* Theoretically, the log could move on after we do this, but + there's not a lot we can do about that. The new messages + will overwrite the start of what we dump. 
*/ + spin_lock_irqsave(&logbuf_lock, flags); + end = log_end & LOG_BUF_MASK; + chars = logged_chars; + spin_unlock_irqrestore(&logbuf_lock, flags); + + if (logged_chars > end) { + s1 = log_buf + log_buf_len - logged_chars + end; + l1 = logged_chars - end; + + s2 = log_buf; + l2 = end; + } else { + s1 = ""; + l1 = 0; + + s2 = log_buf + end - logged_chars; + l2 = logged_chars; + } + + if (!spin_trylock_irqsave(&dump_list_lock, flags)) { + printk(KERN_ERR "dump_kmsg: dump list lock is held during %s, skipping dump\n", + kmsg_to_str(reason)); + return; + } + list_for_each_entry(dumper, &dump_list, list) + dumper->dump(dumper, reason, s1, l1, s2, l2); + spin_unlock_irqrestore(&dump_list_lock, flags); +} -- cgit v1.2.3 From 7bd4d7be5c8c35a401b1589201e5d43a64d3f05b Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 19 Nov 2009 10:50:22 -0800 Subject: drm: use page flip event to signal flip completion MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We don't actually know which frame number the flip will complete on, so userspace needs a specific flip notification to tell it when the last flip completed. Signed-off-by: Jesse Barnes Signed-off-by: Eric Anholt Acked-by: Kristian Høgsberg --- drivers/gpu/drm/drm_crtc.c | 2 +- include/drm/drm.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index ac2fa193072b..3bc870d38a97 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -2527,7 +2527,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, goto out; } - e->event.base.type = DRM_EVENT_VBLANK; + e->event.base.type = DRM_EVENT_FLIP_COMPLETE; e->event.base.length = sizeof e->event; e->event.user_data = page_flip->user_data; e->base.event = &e->event.base; diff --git a/include/drm/drm.h b/include/drm/drm.h index 3919a4f792ae..309d0a5ed68d 100644 --- a/include/drm/drm.h +++ b/include/drm/drm.h @@ -718,6 +718,7 @@ struct drm_event { }; #define DRM_EVENT_VBLANK 0x01 +#define DRM_EVENT_FLIP_COMPLETE 0x02 struct drm_event_vblank { struct drm_event base; -- cgit v1.2.3 From e9560f7cb20722e0e7db46bbb6f43c2194a238d5 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 19 Nov 2009 10:49:07 -0800 Subject: drm/i915: add GETPARAM request for page flipping Add a GETPARAM request for checking if page flipping is supported. Useful for the 2D driver to enable the flipping path. Signed-off-by: Jesse Barnes Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_dma.c | 3 +++ include/drm/i915_drm.h | 1 + 2 files changed, 4 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index a8efe78e5c57..fe89d0c723e6 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -810,6 +810,9 @@ static int i915_getparam(struct drm_device *dev, void *data, case I915_PARAM_HAS_OVERLAY: value = dev_priv->overlay ? 
1 : 0; break; + case I915_PARAM_HAS_PAGEFLIPPING: + value = 1; + break; default: DRM_DEBUG_DRIVER("Unknown parameter %d\n", param->param); diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index c900499f2f63..1b4f3a565d78 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -271,6 +271,7 @@ typedef struct drm_i915_irq_wait { #define I915_PARAM_HAS_GEM 5 #define I915_PARAM_NUM_FENCES_AVAIL 6 #define I915_PARAM_HAS_OVERLAY 7 +#define I915_PARAM_HAS_PAGEFLIPPING 8 typedef struct drm_i915_getparam { int param; -- cgit v1.2.3 From 778c902640530371a169ad1c03566e7c51b09874 Mon Sep 17 00:00:00 2001 From: Li Peng Date: Mon, 9 Nov 2009 12:51:22 +0800 Subject: drm/i915: Fix sync to vblank when VGA output is turned off In current vblank-wait implementation, if we turn off VGA output, drm_wait_vblank will still wait on the disabled pipe until timeout, because vblank on the pipe is assumed be enabled. This would cause slow system response on some system such as moblin. This patch resolve the issue by adding a drm helper function drm_vblank_off which explicitly clear vblank_enabled[crtc], wake up any waiting queue and save last vblank counter before turning off crtc. It also slightly change drm_vblank_get to ensure that we will will return immediately if trying to wait on a disabled pipe. Signed-off-by: Li Peng Reviewed-by: Jesse Barnes [anholt: hand-applied for conflicts with overlay changes] Signed-off-by: Eric Anholt --- drivers/gpu/drm/drm_irq.c | 34 ++++++++++++++++++++++++++-------- drivers/gpu/drm/i915/intel_display.c | 1 + include/drm/drmP.h | 1 + 3 files changed, 28 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 6b3ce6d38848..7998ee66b317 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -429,15 +429,21 @@ int drm_vblank_get(struct drm_device *dev, int crtc) spin_lock_irqsave(&dev->vbl_lock, irqflags); /* Going from 0->1 means we have to enable interrupts again */ - if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 && - !dev->vblank_enabled[crtc]) { - ret = dev->driver->enable_vblank(dev, crtc); - DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret); - if (ret) + if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) { + if (!dev->vblank_enabled[crtc]) { + ret = dev->driver->enable_vblank(dev, crtc); + DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret); + if (ret) + atomic_dec(&dev->vblank_refcount[crtc]); + else { + dev->vblank_enabled[crtc] = 1; + drm_update_vblank_count(dev, crtc); + } + } + } else { + if (!dev->vblank_enabled[crtc]) { atomic_dec(&dev->vblank_refcount[crtc]); - else { - dev->vblank_enabled[crtc] = 1; - drm_update_vblank_count(dev, crtc); + ret = -EINVAL; } } spin_unlock_irqrestore(&dev->vbl_lock, irqflags); @@ -464,6 +470,18 @@ void drm_vblank_put(struct drm_device *dev, int crtc) } EXPORT_SYMBOL(drm_vblank_put); +void drm_vblank_off(struct drm_device *dev, int crtc) +{ + unsigned long irqflags; + + spin_lock_irqsave(&dev->vbl_lock, irqflags); + DRM_WAKEUP(&dev->vbl_queue[crtc]); + dev->vblank_enabled[crtc] = 0; + dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc); + spin_unlock_irqrestore(&dev->vbl_lock, irqflags); +} +EXPORT_SYMBOL(drm_vblank_off); + /** * drm_vblank_pre_modeset - account for vblanks across mode sets * @dev: DRM device diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 267adc6fbfc1..65b76fffd9e3 100644 --- 
a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1924,6 +1924,7 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) /* Give the overlay scaler a chance to disable if it's on this pipe */ intel_crtc_dpms_overlay(intel_crtc, false); + drm_vblank_off(dev, pipe); if (dev_priv->cfb_plane == plane && dev_priv->display.disable_fbc) diff --git a/include/drm/drmP.h b/include/drm/drmP.h index febf6c530c66..fd919959d146 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1288,6 +1288,7 @@ extern u32 drm_vblank_count(struct drm_device *dev, int crtc); extern void drm_handle_vblank(struct drm_device *dev, int crtc); extern int drm_vblank_get(struct drm_device *dev, int crtc); extern void drm_vblank_put(struct drm_device *dev, int crtc); +extern void drm_vblank_off(struct drm_device *dev, int crtc); extern void drm_vblank_cleanup(struct drm_device *dev); /* Modesetting support */ extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc); -- cgit v1.2.3 From 04b2d218001bdddb41b84c9f78d6bb3cd3b5b31c Mon Sep 17 00:00:00 2001 From: Kristian Høgsberg Date: Fri, 6 Nov 2009 08:39:18 -0500 Subject: drm/i915: Fix typo in ioctl struct name. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Kristian Høgsberg Signed-off-by: Eric Anholt --- include/drm/i915_drm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index 1b4f3a565d78..02045ed08eda 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -223,7 +223,7 @@ typedef struct _drm_i915_sarea { #define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling) #define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling) #define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture) -#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_intel_get_pipe_from_crtc_id) +#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id) #define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise) #define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_IOCTL_I915_OVERLAY_ATTRS, struct drm_intel_overlay_put_image) #define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs) -- cgit v1.2.3 From ee0a6efc1897ef817e177e669f5c5d211194df24 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 2 Dec 2009 08:36:58 +0900 Subject: percpu: add missing per_cpu_ptr_to_phys() definition for UP Commit 3b034b0d084221596bf35c8d893e1d4d5477b9cc implemented per_cpu_ptr_to_phys() but forgot to add UP definition. Add UP definition which is simple wrapper around __pa(). 
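As a usage sketch (the wrapper below is hypothetical; it mirrors the crash_notes conversion earlier in this series), the same call now works on both SMP and UP builds:

    #include <linux/percpu.h>

    /* @pdata: base pointer returned by alloc_percpu() */
    static phys_addr_t percpu_obj_phys(void *pdata, int cpu)
    {
            /* per_cpu_ptr() yields the dereferenceable per-CPU address;
             * per_cpu_ptr_to_phys() translates it whether it lives in the
             * linear mapping (the UP case, handled by the new stub) or in
             * a vmalloc-backed percpu chunk. */
            return per_cpu_ptr_to_phys(per_cpu_ptr(pdata, cpu));
    }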
Signed-off-by: Tejun Heo Cc: Vivek Goyal Reported-by: Randy Dunlap --- include/linux/percpu.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include') diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 6ac984fa34f8..8e4ead6435fb 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -180,6 +180,11 @@ static inline void free_percpu(void *p) kfree(p); } +static inline phys_addr_t per_cpu_ptr_to_phys(void *addr) +{ + return __pa(addr); +} + static inline void __init setup_per_cpu_areas(void) { } static inline void *pcpu_lpage_remapped(void *kaddr) -- cgit v1.2.3 From 595dd3d8bf953254d8d2f30f99c54fe09c470040 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 1 Dec 2009 10:52:02 -0800 Subject: kmsg_dump: fix build for CONFIG_PRINTK=n kmsg_dump() fails to build when CONFIG_PRINTK=n; provide stubs for the kmsg_dump*() functions when CONFIG_PRINTK=n. kernel/printk.c: In function 'kmsg_dump': kernel/printk.c:1501: error: 'log_buf_len' undeclared (first use in this function) kernel/printk.c:1502: error: 'logged_chars' undeclared (first use in this function) kernel/printk.c:1506: error: 'log_buf' undeclared (first use in this function) Signed-off-by: Randy Dunlap Acked-by: Simon Kagstrom Signed-off-by: David Woodhouse --- include/linux/kmsg_dump.h | 16 ++++++++++++++++ kernel/printk.c | 2 +- 2 files changed, 17 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h index 7f089ec5ef32..e32aa268efac 100644 --- a/include/linux/kmsg_dump.h +++ b/include/linux/kmsg_dump.h @@ -35,10 +35,26 @@ struct kmsg_dumper { int registered; }; +#ifdef CONFIG_PRINTK void kmsg_dump(enum kmsg_dump_reason reason); int kmsg_dump_register(struct kmsg_dumper *dumper); int kmsg_dump_unregister(struct kmsg_dumper *dumper); +#else +static inline void kmsg_dump(enum kmsg_dump_reason reason) +{ +} + +static inline int kmsg_dump_register(struct kmsg_dumper *dumper) +{ + return -EINVAL; +} + +static inline int kmsg_dump_unregister(struct kmsg_dumper *dumper) +{ + return -EINVAL; +} +#endif #endif /* _LINUX_KMSG_DUMP_H */ diff --git a/kernel/printk.c b/kernel/printk.c index 051d1f50648f..2a564570f822 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -1405,7 +1405,6 @@ bool printk_timed_ratelimit(unsigned long *caller_jiffies, return false; } EXPORT_SYMBOL(printk_timed_ratelimit); -#endif static DEFINE_SPINLOCK(dump_list_lock); static LIST_HEAD(dump_list); @@ -1524,3 +1523,4 @@ void kmsg_dump(enum kmsg_dump_reason reason) dumper->dump(dumper, reason, s1, l1, s2, l2); spin_unlock_irqrestore(&dump_list_lock, flags); } +#endif -- cgit v1.2.3 From a01878aac57eac6eb4bf194788ab2cc440490d0f Mon Sep 17 00:00:00 2001 From: Richard Kennedy Date: Thu, 3 Dec 2009 15:58:56 -0500 Subject: NFS: reorder nfs4_sequence_regs to remove 8 bytes of padding on 64 bits reorder nfs4_sequence_args to remove 8 bytes of padding on 64 bit builds. The size of this structure drops to 24 bytes from 32 and reduces the text size of nfs.ko. 
On my x86_64 size reports text data bss 2.6.32-rc5 200996 8512 432 209940 33414 nfs.ko +patch 200884 8512 432 209828 333a4 nfs.ko Signed-off-by: Richard Kennedy Signed-off-by: Trond Myklebust --- include/linux/nfs_xdr.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 62f63fb0c4c8..8c880372536e 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -170,8 +170,8 @@ struct nfs4_sequence_args { struct nfs4_sequence_res { struct nfs4_session *sr_session; u8 sr_slotid; /* slot used to send request */ - unsigned long sr_renewal_time; int sr_status; /* sequence operation status */ + unsigned long sr_renewal_time; }; struct nfs4_get_lease_time_args { -- cgit v1.2.3 From 09a21c4102c8f7893368553273d39c0cadedf9af Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 3 Dec 2009 15:58:56 -0500 Subject: SUNRPC: Allow RPCs to fail quickly if the server is unreachable The kernel sometimes makes RPC calls to services that aren't running. Because the kernel's RPC client always assumes the hard retry semantic when reconnecting a connection-oriented RPC transport, the underlying reconnect logic takes a long while to time out, even though the remote may have responded immediately with ECONNREFUSED. In certain cases, like upcalls to our local rpcbind daemon, or for NFS mount requests, we'd like the kernel to fail immediately if the remote service isn't reachable. This allows another transport to be tried immediately, or the pending request can be abandoned quickly. Introduce a per-request flag which controls how call_transmit_status() behaves when request transmission fails because the server cannot be reached. We don't want soft connection semantics to apply to other errors. The default case of the switch statement in call_transmit_status() no longer falls through; the fall through code is copied to the default case, and a "break;" is added. The transport's connection re-establishment timeout is also ignored for such requests. We want the request to fail immediately, so the reconnect delay is skipped. Additionally, we don't want a connect failure here to further increase the reconnect timeout value, since this request will not be retried. 
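A minimal sketch of a caller opting in (the wrapper is hypothetical; rpc_call_sync() and RPC_TASK_SOFT are pre-existing, only RPC_TASK_SOFTCONN is added by this patch):

    #include <linux/sunrpc/clnt.h>
    #include <linux/sunrpc/sched.h>

    static int rpc_call_quick(struct rpc_clnt *clnt, struct rpc_message *msg)
    {
            /* RPC_TASK_SOFT bounds retransmission; RPC_TASK_SOFTCONN makes
             * an unreachable server (e.g. ECONNREFUSED) fail the call
             * immediately instead of waiting out the reconnect timeout. */
            return rpc_call_sync(clnt, msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN);
    }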
Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- include/linux/sunrpc/sched.h | 2 ++ net/sunrpc/clnt.c | 11 +++++++++-- net/sunrpc/xprtsock.c | 2 +- 3 files changed, 12 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 401097781fc0..1906782ec86b 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -130,12 +130,14 @@ struct rpc_task_setup { #define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */ #define RPC_TASK_KILLED 0x0100 /* task was killed */ #define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */ +#define RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */ #define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC) #define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER) #define RPC_DO_ROOTOVERRIDE(t) ((t)->tk_flags & RPC_TASK_ROOTCREDS) #define RPC_ASSASSINATED(t) ((t)->tk_flags & RPC_TASK_KILLED) #define RPC_IS_SOFT(t) ((t)->tk_flags & RPC_TASK_SOFT) +#define RPC_IS_SOFTCONN(t) ((t)->tk_flags & RPC_TASK_SOFTCONN) #define RPC_TASK_RUNNING 0 #define RPC_TASK_QUEUED 1 diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 7bcd931e06ee..68a23583f44c 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -1197,6 +1197,8 @@ call_transmit_status(struct rpc_task *task) default: dprint_status(task); xprt_end_transmit(task); + rpc_task_force_reencode(task); + break; /* * Special cases: if we've been waiting on the * socket's write_space() callback, or if the @@ -1204,11 +1206,16 @@ call_transmit_status(struct rpc_task *task) * then hold onto the transport lock. */ case -ECONNREFUSED: - case -ECONNRESET: - case -ENOTCONN: case -EHOSTDOWN: case -EHOSTUNREACH: case -ENETUNREACH: + if (RPC_IS_SOFTCONN(task)) { + xprt_end_transmit(task); + rpc_exit(task, task->tk_status); + break; + } + case -ECONNRESET: + case -ENOTCONN: case -EPIPE: rpc_task_force_reencode(task); } diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 37c5475ba258..ff312f8b018d 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -2033,7 +2033,7 @@ static void xs_connect(struct rpc_task *task) if (xprt_test_and_set_connecting(xprt)) return; - if (transport->sock != NULL) { + if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) { dprintk("RPC: xs_connect delayed xprt %p for %lu " "seconds\n", xprt, xprt->reestablish_timeout / HZ); -- cgit v1.2.3 From 7ac96a9cb4982140e206bf3b58236efb2498ab3f Mon Sep 17 00:00:00 2001 From: Adam Jackson Date: Thu, 3 Dec 2009 17:44:37 -0500 Subject: drm/modes: Add drm_mode_hsync() Signed-off-by: Adam Jackson Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_modes.c | 28 +++++++++++++++++++++++++++- include/drm/drm_crtc.h | 7 ++++--- 2 files changed, 31 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 51f677215f1d..6d81a02463a3 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -553,6 +553,32 @@ int drm_mode_height(struct drm_display_mode *mode) } EXPORT_SYMBOL(drm_mode_height); +/** drm_mode_hsync - get the hsync of a mode + * @mode: mode + * + * LOCKING: + * None. + * + * Return @modes's hsync rate in kHz, rounded to the nearest int. 
+ */ +int drm_mode_hsync(struct drm_display_mode *mode) +{ + unsigned int calc_val; + + if (mode->hsync) + return mode->hsync; + + if (mode->htotal < 0) + return 0; + + calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */ + calc_val += 500; /* round to 1000Hz */ + calc_val /= 1000; /* truncate to kHz */ + + return calc_val; +} +EXPORT_SYMBOL(drm_mode_hsync); + /** * drm_mode_vrefresh - get the vrefresh of a mode * @mode: mode @@ -560,7 +586,7 @@ EXPORT_SYMBOL(drm_mode_height); * LOCKING: * None. * - * Return @mode's vrefresh rate or calculate it if necessary. + * Return @mode's vrefresh rate in Hz or calculate it if necessary. * * FIXME: why is this needed? shouldn't vrefresh be set already? * diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index d84fba15c9d8..938f327a2a3b 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -123,7 +123,7 @@ struct drm_display_mode { int type; /* Proposed mode values */ - int clock; + int clock; /* in kHz */ int hdisplay; int hsync_start; int hsync_end; @@ -164,8 +164,8 @@ struct drm_display_mode { int *private; int private_flags; - int vrefresh; - float hsync; + int vrefresh; /* in Hz */ + int hsync; /* in kHz */ }; enum drm_connector_status { @@ -681,6 +681,7 @@ extern void drm_mode_validate_size(struct drm_device *dev, extern void drm_mode_prune_invalid(struct drm_device *dev, struct list_head *mode_list, bool verbose); extern void drm_mode_sort(struct list_head *mode_list); +extern int drm_mode_hsync(struct drm_display_mode *mode); extern int drm_mode_vrefresh(struct drm_display_mode *mode); extern void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags); -- cgit v1.2.3 From 2dbdc52c8162291aa7541b8ba6e1c1587f50c1dd Mon Sep 17 00:00:00 2001 From: Adam Jackson Date: Thu, 3 Dec 2009 17:44:39 -0500 Subject: drm/edid: Add new detailed block types from EDID 1.4 Signed-off-by: Adam Jackson Signed-off-by: Dave Airlie --- include/drm/drm_edid.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include') diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 7d6c9a2dfcbb..9087557fd83d 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -120,6 +120,9 @@ struct detailed_non_pixel { } data; } __attribute__((packed)); +#define EDID_DETAIL_EST_TIMINGS 0xf7 +#define EDID_DETAIL_CVT_3BYTE 0xf8 +#define EDID_DETAIL_COLOR_MGMT_DATA 0xf9 #define EDID_DETAIL_STD_MODES 0xfa #define EDID_DETAIL_MONITOR_CPDATA 0xfb #define EDID_DETAIL_MONITOR_NAME 0xfc -- cgit v1.2.3 From 9340d8cfeacd16cef1cbe94527f7baaed7640669 Mon Sep 17 00:00:00 2001 From: Adam Jackson Date: Thu, 3 Dec 2009 17:44:40 -0500 Subject: drm/edid: Decode 3-byte CVT codes from EDID 1.4 Signed-off-by: Adam Jackson Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_edid.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++ include/drm/drm_edid.h | 5 +++++ 2 files changed, 53 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index cc8e69682714..30af8f3e6427 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -897,6 +897,51 @@ static int drm_gtf_modes_for_range(struct drm_connector *connector, return modes; } +static int drm_cvt_modes(struct drm_connector *connector, + struct detailed_timing *timing) +{ + int i, j, modes = 0; + struct drm_display_mode *newmode; + struct drm_device *dev = connector->dev; + struct cvt_timing *cvt; + const int rates[] = { 60, 85, 75, 60, 50 }; + + for (i = 0; i < 4; i++) { + int width, height; + cvt = 
&(timing->data.other_data.data.cvt[i]); + + height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 8) + 1) * 2; + switch (cvt->code[1] & 0xc0) { + case 0x00: + width = height * 4 / 3; + break; + case 0x40: + width = height * 16 / 9; + break; + case 0x80: + width = height * 16 / 10; + break; + case 0xc0: + width = height * 15 / 9; + break; + } + + for (j = 1; j < 5; j++) { + if (cvt->code[2] & (1 << j)) { + newmode = drm_cvt_mode(dev, width, height, + rates[j], j == 0, + false, false); + if (newmode) { + drm_mode_probed_add(connector, newmode); + modes++; + } + } + } + } + + return modes; +} + static int add_detailed_modes(struct drm_connector *connector, struct detailed_timing *timing, struct edid *edid, u32 quirks, int preferred) @@ -941,6 +986,9 @@ static int add_detailed_modes(struct drm_connector *connector, } } break; + case EDID_DETAIL_CVT_3BYTE: + modes += drm_cvt_modes(connector, timing); + break; default: break; } diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 9087557fd83d..d33c3e038606 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -106,6 +106,10 @@ struct detailed_data_color_point { u8 wpindex2[3]; } __attribute__((packed)); +struct cvt_timing { + u8 code[3]; +} __attribute__((packed)); + struct detailed_non_pixel { u8 pad1; u8 type; /* ff=serial, fe=string, fd=monitor range, fc=monitor name @@ -117,6 +121,7 @@ struct detailed_non_pixel { struct detailed_data_monitor_range range; struct detailed_data_wpindex color; struct std_timing timings[5]; + struct cvt_timing cvt[4]; } data; } __attribute__((packed)); -- cgit v1.2.3 From 862302ffe422378a5213f558fc5cdf62c37050a9 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 2 Dec 2009 18:15:25 +0000 Subject: drm: Add support for drm master_[set|drop] callbacks. The vmwgfx driver has a per master rw lock around TTM, to guarantee mutual exclusion when needed. This is typically when all evictable buffers are evicted due to 1) vt switch 2) master switch 3) suspend / resume. In the multi-master case, on master switch the new master takes the previously active master lock in write mode, and then evicts all buffers. Any clients to previous masters will then block on that lock when trying to validate a buffer. fbdev also acts as a virtual master wrt this. 
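A driver wiring up the new hooks might look like the sketch below; the foo_* names are examples, only the master_set/master_drop prototypes and their error semantics come from this patch.

static int foo_master_set(struct drm_device *dev,
			  struct drm_file *file_priv, bool from_open)
{
	/* Driver-specific work for the incoming master, e.g. taking the
	 * per-master lock and revalidating buffers.  A nonzero return
	 * fails the open / setmaster path. */
	return 0;
}

static void foo_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv, bool from_release)
{
	/* Driver-specific work for the outgoing master, e.g. evicting
	 * evictable buffers before clients of other masters may run. */
}

static struct drm_driver foo_driver = {
	/* ... other hooks ... */
	.master_set  = foo_master_set,
	.master_drop = foo_master_drop,
};
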
Signed-off-by: Thomas Hellstrom Signed-off-by: Jakob Bornecrantz Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_fops.c | 14 ++++++++++++++ drivers/gpu/drm/drm_stub.c | 11 +++++++++++ include/drm/drmP.h | 9 +++++++++ 3 files changed, 34 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 8ac7fbf6b2b7..08d14df3bb42 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -300,6 +300,18 @@ static int drm_open_helper(struct inode *inode, struct file *filp, goto out_free; } } + mutex_lock(&dev->struct_mutex); + if (dev->driver->master_set) { + ret = dev->driver->master_set(dev, priv, true); + if (ret) { + /* drop both references if this fails */ + drm_master_put(&priv->minor->master); + drm_master_put(&priv->master); + mutex_unlock(&dev->struct_mutex); + goto out_free; + } + } + mutex_unlock(&dev->struct_mutex); } else { /* get a reference to the master */ priv->master = drm_master_get(priv->minor->master); @@ -533,6 +545,8 @@ int drm_release(struct inode *inode, struct file *filp) if (file_priv->minor->master == file_priv->master) { /* drop the reference held my the minor */ + if (dev->driver->master_drop) + dev->driver->master_drop(dev, file_priv, true); drm_master_put(&file_priv->minor->master); } } diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index adb864dfef3e..2c1b52847e9e 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c @@ -174,6 +174,8 @@ void drm_master_put(struct drm_master **master) int drm_setmaster_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { + int ret = 0; + if (file_priv->is_master) return 0; @@ -188,6 +190,13 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data, mutex_lock(&dev->struct_mutex); file_priv->minor->master = drm_master_get(file_priv->master); file_priv->is_master = 1; + if (dev->driver->master_set) { + ret = dev->driver->master_set(dev, file_priv, false); + if (unlikely(ret != 0)) { + file_priv->is_master = 0; + drm_master_put(&file_priv->minor->master); + } + } mutex_unlock(&dev->struct_mutex); } @@ -204,6 +213,8 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data, return -EINVAL; mutex_lock(&dev->struct_mutex); + if (dev->driver->master_drop) + dev->driver->master_drop(dev, file_priv, false); drm_master_put(&file_priv->minor->master); file_priv->is_master = 0; mutex_unlock(&dev->struct_mutex); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 1b72a526ba64..770772c014aa 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -774,6 +774,15 @@ struct drm_driver { /* Master routines */ int (*master_create)(struct drm_device *dev, struct drm_master *master); void (*master_destroy)(struct drm_device *dev, struct drm_master *master); + /** + * master_set is called whenever the minor master is set. + * master_drop is called whenever the minor master is dropped. 
+ */ + + int (*master_set)(struct drm_device *dev, struct drm_file *file_priv, + bool from_open); + void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv, + bool from_release); int (*proc_init)(struct drm_minor *minor); void (*proc_cleanup)(struct drm_minor *minor); -- cgit v1.2.3 From 1a95916f5465ad6c91398f17924949db7e0b5c36 Mon Sep 17 00:00:00 2001 From: Kristian Høgsberg Date: Wed, 2 Dec 2009 12:13:48 -0500 Subject: drm: Add compatibility #ifdefs for *BSD MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This let's use use the linux drm headers as the canonical source for libdrm on all platforms. Signed-off-by: Kristian Høgsberg Signed-off-by: Dave Airlie --- include/drm/drm.h | 29 +++++++++++++++++++---------- include/drm/drmP.h | 3 +++ include/drm/drm_mode.h | 3 --- include/drm/i915_drm.h | 4 ++-- include/drm/mga_drm.h | 2 +- include/drm/radeon_drm.h | 2 +- include/drm/via_drm.h | 2 +- 7 files changed, 27 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/include/drm/drm.h b/include/drm/drm.h index 3919a4f792ae..0114ac94f969 100644 --- a/include/drm/drm.h +++ b/include/drm/drm.h @@ -36,17 +36,27 @@ #ifndef _DRM_H_ #define _DRM_H_ +#if defined(__linux__) + #include -#include /* For _IO* macros */ -#define DRM_IOCTL_NR(n) _IOC_NR(n) -#define DRM_IOC_VOID _IOC_NONE -#define DRM_IOC_READ _IOC_READ -#define DRM_IOC_WRITE _IOC_WRITE -#define DRM_IOC_READWRITE _IOC_READ|_IOC_WRITE -#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size) +#include +typedef unsigned int drm_handle_t; + +#else /* One of the BSDs */ -#define DRM_MAJOR 226 -#define DRM_MAX_MINOR 15 +#include +#include +typedef int8_t __s8; +typedef uint8_t __u8; +typedef int16_t __s16; +typedef uint16_t __u16; +typedef int32_t __s32; +typedef uint32_t __u32; +typedef int64_t __s64; +typedef uint64_t __u64; +typedef unsigned long drm_handle_t; + +#endif #define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */ #define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */ @@ -59,7 +69,6 @@ #define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT) #define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT)) -typedef unsigned int drm_handle_t; typedef unsigned int drm_context_t; typedef unsigned int drm_drawable_t; typedef unsigned int drm_magic_t; diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 770772c014aa..db56a6add5de 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -289,6 +289,9 @@ typedef int drm_ioctl_t(struct drm_device *dev, void *data, typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, unsigned long arg); +#define DRM_IOCTL_NR(n) _IOC_NR(n) +#define DRM_MAJOR 226 + #define DRM_AUTH 0x1 #define DRM_MASTER 0x2 #define DRM_ROOT_ONLY 0x4 diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h index 68ddc6175ae7..09ca6adf4dd5 100644 --- a/include/drm/drm_mode.h +++ b/include/drm/drm_mode.h @@ -27,9 +27,6 @@ #ifndef _DRM_MODE_H #define _DRM_MODE_H -#include -#include - #define DRM_DISPLAY_INFO_LEN 32 #define DRM_CONNECTOR_NAME_LEN 32 #define DRM_DISPLAY_MODE_LEN 32 diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index 7e0cb1da92e6..a04c3ab1d726 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -27,11 +27,11 @@ #ifndef _I915_DRM_H_ #define _I915_DRM_H_ +#include "drm.h" + /* Please note that modifications to all structs defined here are * subject to backwards-compatibility constraints. 
*/ -#include -#include "drm.h" /* Each region is a minimum of 16k, and there are at most 255 of them. */ diff --git a/include/drm/mga_drm.h b/include/drm/mga_drm.h index 325fd6fb4a42..3ffbc4798afa 100644 --- a/include/drm/mga_drm.h +++ b/include/drm/mga_drm.h @@ -35,7 +35,7 @@ #ifndef __MGA_DRM_H__ #define __MGA_DRM_H__ -#include +#include "drm.h" /* WARNING: If you change any of these defines, make sure to change the * defines in the Xserver file (mga_sarea.h) diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h index 3b9932ab1756..39537f3cf98a 100644 --- a/include/drm/radeon_drm.h +++ b/include/drm/radeon_drm.h @@ -33,7 +33,7 @@ #ifndef __RADEON_DRM_H__ #define __RADEON_DRM_H__ -#include +#include "drm.h" /* WARNING: If you change any of these defines, make sure to change the * defines in the X server file (radeon_sarea.h) diff --git a/include/drm/via_drm.h b/include/drm/via_drm.h index 170786e5c2ff..fd11a5bd892d 100644 --- a/include/drm/via_drm.h +++ b/include/drm/via_drm.h @@ -24,7 +24,7 @@ #ifndef _VIA_DRM_H_ #define _VIA_DRM_H_ -#include +#include "drm.h" /* WARNING: These defines must be the same as what the Xserver uses. * if you change them, you must change the defines in the Xserver. -- cgit v1.2.3 From c3a73ba13bac7fd96030f39202b2d37fb19c46a6 Mon Sep 17 00:00:00 2001 From: Martin Michlmayr Date: Thu, 19 Nov 2009 16:29:45 +0000 Subject: drm/ttm: Fix build failure due to missing struct page drm/ttm fails to build on MIPS because "struct page" is not known: | In file included from drivers/gpu/drm/ttm/ttm_memory.c:28: | include/drm/ttm/ttm_memory.h:154: warning: 'struct page' declared inside parameter list | include/drm/ttm/ttm_memory.h:154: warning: its scope is only this definition or declaration, which is probably not what you want | include/drm/ttm/ttm_memory.h:156: warning: 'struct page' declared inside parameter list | drivers/gpu/drm/ttm/ttm_memory.c:540: error: conflicting types for 'ttm_mem_global_alloc_page' | include/drm/ttm/ttm_memory.h:154: error: previous declaration of 'ttm_mem_global_alloc_page' was here | drivers/gpu/drm/ttm/ttm_memory.c:561: error: conflicting types for 'ttm_mem_global_free_page' | include/drm/ttm/ttm_memory.h:156: error: previous declaration of 'ttm_mem_global_free_page' was here Signed-off-by: Martin Michlmayr Cc: stable@kernel.org Acked-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- include/drm/ttm/ttm_memory.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h index 6983a7cf4da4..b199170b3c2c 100644 --- a/include/drm/ttm/ttm_memory.h +++ b/include/drm/ttm/ttm_memory.h @@ -33,6 +33,7 @@ #include #include #include +#include /** * struct ttm_mem_shrink - callback to shrink TTM memory usage. -- cgit v1.2.3 From 884840aa3ce3214259e69557be5b4ce0d781ffa4 Mon Sep 17 00:00:00 2001 From: Jakob Bornecrantz Date: Thu, 3 Dec 2009 23:25:47 +0000 Subject: drm: Add dirty ioctl and property This commit adds a ioctl and property to allow userspace to notify the kernel that a framebuffer has changed. Instead of snooping the command stream this allows finer grained tracking of which areas have changed. The primary user for this functionality is virtual hardware like the vmware svga device, but also Xen hardware likes to be notify. There is also real hardware like DisplayLink and DisplayPort that might take advantage of this ioctl. 
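A userspace sketch of the new ioctl follows; fd is assumed to be a DRM master file descriptor, fb_id a framebuffer previously created with ADDFB, and the header paths are installation-dependent assumptions. Error handling is omitted.

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

static void flush_rect(int fd, uint32_t fb_id)
{
	struct drm_clip_rect clip = { .x1 = 0, .y1 = 0, .x2 = 64, .y2 = 64 };
	struct drm_mode_fb_dirty_cmd cmd = {
		.fb_id     = fb_id,
		.flags     = 0,		/* no copy/fill annotation */
		.color     = 0,
		.num_clips = 1,
		.clips_ptr = (__u64)(unsigned long)&clip,
	};

	/* Tell the kernel the 64x64 region at the origin has changed. */
	ioctl(fd, DRM_IOCTL_MODE_DIRTYFB, &cmd);
}
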
Signed-off-by: Jakob Bornecrantz Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_crtc.c | 104 +++++++++++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/drm_drv.c | 1 + include/drm/drm.h | 1 + include/drm/drm_crtc.h | 19 +++++++++ include/drm/drm_mode.h | 44 +++++++++++++++++++ 5 files changed, 169 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 32756e67dd56..4fe321dc900c 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -125,6 +125,15 @@ static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name, drm_tv_subconnector_enum_list) +static struct drm_prop_enum_list drm_dirty_info_enum_list[] = { + { DRM_MODE_DIRTY_OFF, "Off" }, + { DRM_MODE_DIRTY_ON, "On" }, + { DRM_MODE_DIRTY_ANNOTATE, "Annotate" }, +}; + +DRM_ENUM_NAME_FN(drm_get_dirty_info_name, + drm_dirty_info_enum_list) + struct drm_conn_prop_enum_list { int type; char *name; @@ -801,6 +810,36 @@ int drm_mode_create_dithering_property(struct drm_device *dev) } EXPORT_SYMBOL(drm_mode_create_dithering_property); +/** + * drm_mode_create_dirty_property - create dirty property + * @dev: DRM device + * + * Called by a driver the first time it's needed, must be attached to desired + * connectors. + */ +int drm_mode_create_dirty_info_property(struct drm_device *dev) +{ + struct drm_property *dirty_info; + int i; + + if (dev->mode_config.dirty_info_property) + return 0; + + dirty_info = + drm_property_create(dev, DRM_MODE_PROP_ENUM | + DRM_MODE_PROP_IMMUTABLE, + "dirty", + ARRAY_SIZE(drm_dirty_info_enum_list)); + for (i = 0; i < ARRAY_SIZE(drm_dirty_info_enum_list); i++) + drm_property_add_enum(dirty_info, i, + drm_dirty_info_enum_list[i].type, + drm_dirty_info_enum_list[i].name); + dev->mode_config.dirty_info_property = dirty_info; + + return 0; +} +EXPORT_SYMBOL(drm_mode_create_dirty_info_property); + /** * drm_mode_config_init - initialize DRM mode_configuration structure * @dev: DRM device @@ -1753,6 +1792,71 @@ out: return ret; } +int drm_mode_dirtyfb_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct drm_clip_rect __user *clips_ptr; + struct drm_clip_rect *clips = NULL; + struct drm_mode_fb_dirty_cmd *r = data; + struct drm_mode_object *obj; + struct drm_framebuffer *fb; + unsigned flags; + int num_clips; + int ret = 0; + + mutex_lock(&dev->mode_config.mutex); + obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB); + if (!obj) { + DRM_ERROR("invalid framebuffer id\n"); + ret = -EINVAL; + goto out_err1; + } + fb = obj_to_fb(obj); + + num_clips = r->num_clips; + clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr; + + if (!num_clips != !clips_ptr) { + ret = -EINVAL; + goto out_err1; + } + + flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags; + + /* If userspace annotates copy, clips must come in pairs */ + if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) { + ret = -EINVAL; + goto out_err1; + } + + if (num_clips && clips_ptr) { + clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL); + if (!clips) { + ret = -ENOMEM; + goto out_err1; + } + + ret = copy_from_user(clips, clips_ptr, + num_clips * sizeof(*clips)); + if (ret) + goto out_err2; + } + + if (fb->funcs->dirty) { + ret = fb->funcs->dirty(fb, flags, r->color, clips, num_clips); + } else { + ret = -ENOSYS; + goto out_err2; + } + +out_err2: + kfree(clips); +out_err1: + mutex_unlock(&dev->mode_config.mutex); + return ret; +} + + /** * drm_fb_release - remove and free the FBs on this 
file * @filp: file * from the ioctl diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index bfaf59b02bda..ff2f1042cb44 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -146,6 +146,7 @@ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW) }; #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) diff --git a/include/drm/drm.h b/include/drm/drm.h index 0114ac94f969..43a35b092f04 100644 --- a/include/drm/drm.h +++ b/include/drm/drm.h @@ -697,6 +697,7 @@ struct drm_gem_open { #define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd) #define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int) #define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip) +#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd) /** * Device specific ioctls should only be in their respective headers diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 938f327a2a3b..219f075d2733 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -242,6 +242,21 @@ struct drm_framebuffer_funcs { int (*create_handle)(struct drm_framebuffer *fb, struct drm_file *file_priv, unsigned int *handle); + /** + * Optinal callback for the dirty fb ioctl. + * + * Userspace can notify the driver via this callback + * that a area of the framebuffer has changed and should + * be flushed to the display hardware. + * + * See documentation in drm_mode.h for the struct + * drm_mode_fb_dirty_cmd for more information as all + * the semantics and arguments have a one to one mapping + * on this function. 
+ */ + int (*dirty)(struct drm_framebuffer *framebuffer, unsigned flags, + unsigned color, struct drm_clip_rect *clips, + unsigned num_clips); }; struct drm_framebuffer { @@ -610,6 +625,7 @@ struct drm_mode_config { /* Optional properties */ struct drm_property *scaling_mode_property; struct drm_property *dithering_mode_property; + struct drm_property *dirty_info_property; }; #define obj_to_crtc(x) container_of(x, struct drm_crtc, base) @@ -718,6 +734,7 @@ extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats char *formats[]); extern int drm_mode_create_scaling_mode_property(struct drm_device *dev); extern int drm_mode_create_dithering_property(struct drm_device *dev); +extern int drm_mode_create_dirty_info_property(struct drm_device *dev); extern char *drm_get_encoder_name(struct drm_encoder *encoder); extern int drm_mode_connector_attach_encoder(struct drm_connector *connector, @@ -745,6 +762,8 @@ extern int drm_mode_rmfb(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_getfb(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_mode_dirtyfb_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); extern int drm_mode_addmode_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_rmmode_ioctl(struct drm_device *dev, diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h index 09ca6adf4dd5..43009bc2e757 100644 --- a/include/drm/drm_mode.h +++ b/include/drm/drm_mode.h @@ -75,6 +75,11 @@ #define DRM_MODE_DITHERING_OFF 0 #define DRM_MODE_DITHERING_ON 1 +/* Dirty info options */ +#define DRM_MODE_DIRTY_OFF 0 +#define DRM_MODE_DIRTY_ON 1 +#define DRM_MODE_DIRTY_ANNOTATE 2 + struct drm_mode_modeinfo { __u32 clock; __u16 hdisplay, hsync_start, hsync_end, htotal, hskew; @@ -222,6 +227,45 @@ struct drm_mode_fb_cmd { __u32 handle; }; +#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01 +#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02 +#define DRM_MODE_FB_DIRTY_FLAGS 0x03 + +/* + * Mark a region of a framebuffer as dirty. + * + * Some hardware does not automatically update display contents + * as a hardware or software draw to a framebuffer. This ioctl + * allows userspace to tell the kernel and the hardware what + * regions of the framebuffer have changed. + * + * The kernel or hardware is free to update more then just the + * region specified by the clip rects. The kernel or hardware + * may also delay and/or coalesce several calls to dirty into a + * single update. + * + * Userspace may annotate the updates, the annotates are a + * promise made by the caller that the change is either a copy + * of pixels or a fill of a single color in the region specified. + * + * If the DRM_MODE_FB_DIRTY_ANNOTATE_COPY flag is given then + * the number of updated regions are half of num_clips given, + * where the clip rects are paired in src and dst. The width and + * height of each one of the pairs must match. + * + * If the DRM_MODE_FB_DIRTY_ANNOTATE_FILL flag is given the caller + * promises that the region specified of the clip rects is filled + * completely with a single color as given in the color argument. 
+ */ + +struct drm_mode_fb_dirty_cmd { + __u32 fb_id; + __u32 flags; + __u32 color; + __u32 num_clips; + __u64 clips_ptr; +}; + struct drm_mode_mode_cmd { __u32 connector_id; struct drm_mode_modeinfo mode; -- cgit v1.2.3 From ea028ac92541ac30bf202ed94cb53eec2ea0c9d6 Mon Sep 17 00:00:00 2001 From: Andy Adamson Date: Fri, 4 Dec 2009 15:55:38 -0500 Subject: nfs41: nfs41: fix state manager deadlock in session reset If the session is reset during state recovery, the state manager thread can sleep on the slot_tbl_waitq causing a deadlock. Add a completion framework to the session. Have the state manager thread set a new session state (NFS4CLNT_SESSION_DRAINING) and wait for the session slot table to drain. Signal the state manager thread in nfs41_sequence_free_slot when the NFS4CLNT_SESSION_DRAINING bit is set and the session is drained. Reported-by: Trond Myklebust Signed-off-by: Andy Adamson Signed-off-by: Trond Myklebust --- fs/nfs/nfs4_fs.h | 1 + fs/nfs/nfs4proc.c | 26 +++++++++++++++++--------- fs/nfs/nfs4state.c | 15 +++++++++++++++ include/linux/nfs_fs_sb.h | 1 + 4 files changed, 34 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index e9ecd6bf9257..5c7740178a08 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -45,6 +45,7 @@ enum nfs4_client_state { NFS4CLNT_RECLAIM_NOGRACE, NFS4CLNT_DELEGRETURN, NFS4CLNT_SESSION_RESET, + NFS4CLNT_SESSION_DRAINING, }; /* diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index fb62919f7f2c..c1bc9cad5e85 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -361,6 +361,16 @@ void nfs41_sequence_free_slot(const struct nfs_client *clp, } nfs4_free_slot(tbl, res->sr_slotid); res->sr_slotid = NFS4_MAX_SLOT_TABLE; + + /* Signal state manager thread if session is drained */ + if (test_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state)) { + spin_lock(&tbl->slot_tbl_lock); + if (tbl->highest_used_slotid == -1) { + dprintk("%s COMPLETE: Session Drained\n", __func__); + complete(&clp->cl_session->complete); + } + spin_unlock(&tbl->slot_tbl_lock); + } } static void nfs41_sequence_done(struct nfs_client *clp, @@ -457,15 +467,11 @@ static int nfs41_setup_sequence(struct nfs4_session *session, spin_lock(&tbl->slot_tbl_lock); if (test_bit(NFS4CLNT_SESSION_RESET, &session->clp->cl_state)) { - if (tbl->highest_used_slotid != -1) { - rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); - spin_unlock(&tbl->slot_tbl_lock); - dprintk("<-- %s: Session reset: draining\n", __func__); - return -EAGAIN; - } - - /* The slot table is empty; start the reset thread */ - dprintk("%s Session Reset\n", __func__); + /* + * The state manager will wait until the slot table is empty. 
+ * Schedule the reset thread + */ + dprintk("%s Schedule Session Reset\n", __func__); rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); nfs4_schedule_state_manager(session->clp); spin_unlock(&tbl->slot_tbl_lock); @@ -4506,6 +4512,7 @@ static int nfs4_reset_slot_tables(struct nfs4_session *session) 1); if (status) return status; + init_completion(&session->complete); status = nfs4_reset_slot_table(&session->bc_slot_table, session->bc_attrs.max_reqs, @@ -4608,6 +4615,7 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp) * nfs_client struct */ clp->cl_cons_state = NFS_CS_SESSION_INITING; + init_completion(&session->complete); tbl = &session->fc_slot_table; spin_lock_init(&tbl->slot_tbl_lock); diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index f56f6be5e314..3c1433598b60 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1181,8 +1181,23 @@ static void nfs4_session_recovery_handle_error(struct nfs_client *clp, int err) static int nfs4_reset_session(struct nfs_client *clp) { + struct nfs4_session *ses = clp->cl_session; + struct nfs4_slot_table *tbl = &ses->fc_slot_table; int status; + INIT_COMPLETION(ses->complete); + spin_lock(&tbl->slot_tbl_lock); + if (tbl->highest_used_slotid != -1) { + set_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state); + spin_unlock(&tbl->slot_tbl_lock); + status = wait_for_completion_interruptible(&ses->complete); + clear_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state); + if (status) /* -ERESTARTSYS */ + goto out; + } else { + spin_unlock(&tbl->slot_tbl_lock); + } + status = nfs4_proc_destroy_session(clp->cl_session); if (status && status != -NFS4ERR_BADSESSION && status != -NFS4ERR_DEADSESSION) { diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 320569eabe3b..34fc6be5bfcf 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -209,6 +209,7 @@ struct nfs4_session { unsigned long session_state; u32 hash_alg; u32 ssv_len; + struct completion complete; /* The fore and back channel */ struct nfs4_channel_attrs fc_attrs; -- cgit v1.2.3 From dc5351784eb36f1fec4efa88e01581be72c0b711 Mon Sep 17 00:00:00 2001 From: Kenji Kaneshige Date: Wed, 25 Nov 2009 21:04:00 +0900 Subject: PCI: portdrv: cleanup service irqs initialization This patch cleans up the service irqs initialization as follows: - Remove 'irq_mode' field in pcie_port_data and related definitions, which is not needed because we can get the same information from 'is_msix', 'is_msi' and 'pin' fields in struct pci_dev. - Change the name of 'vectors' argument of assign_interrupt_mode() to 'irqs' because it holds irq numbers actually. People might confuse it with CPU vector or MSI/MSI-X vector. - Change function name assign_interrupt_mode() to init_service_irqs() becasuse we no longer have 'irq_mode' data structure, and new name is more straightforward (IMO). 
Signed-off-by: Kenji Kaneshige Signed-off-by: Jesse Barnes --- drivers/pci/pcie/portdrv_core.c | 71 +++++++++++++++++------------------------ include/linux/pcieport_if.h | 7 ---- 2 files changed, 29 insertions(+), 49 deletions(-) (limited to 'include') diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index 52c4e6fd6fd4..d208dc2d62fd 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c @@ -177,37 +177,32 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask) } /** - * assign_interrupt_mode - choose interrupt mode for PCI Express port services - * (INTx, MSI-X, MSI) and set up vectors + * init_service_irqs - initialize irqs for PCI Express port services * @dev: PCI Express port to handle - * @vectors: Array of interrupt vectors to populate + * @irqs: Array of irqs to populate * @mask: Bitmask of port capabilities returned by get_port_device_capability() * * Return value: Interrupt mode associated with the port */ -static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask) +static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask) { - int irq, interrupt_mode = PCIE_PORT_NO_IRQ; - int i; + int i, irq; /* Try to use MSI-X if supported */ - if (!pcie_port_enable_msix(dev, vectors, mask)) - return PCIE_PORT_MSIX_MODE; - + if (!pcie_port_enable_msix(dev, irqs, mask)) + return 0; /* We're not going to use MSI-X, so try MSI and fall back to INTx */ - if (!pci_enable_msi(dev)) - interrupt_mode = PCIE_PORT_MSI_MODE; + irq = -1; + if (!pci_enable_msi(dev) || dev->pin) + irq = dev->irq; - if (interrupt_mode == PCIE_PORT_NO_IRQ && dev->pin) - interrupt_mode = PCIE_PORT_INTx_MODE; - - irq = interrupt_mode != PCIE_PORT_NO_IRQ ? dev->irq : -1; for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) - vectors[i] = irq; - - vectors[PCIE_PORT_SERVICE_VC_SHIFT] = -1; + irqs[i] = irq; + irqs[PCIE_PORT_SERVICE_VC_SHIFT] = -1; - return interrupt_mode; + if (irq < 0) + return -ENODEV; + return 0; } /** @@ -294,8 +289,8 @@ static int pcie_device_init(struct pci_dev *pdev, int service, int irq) int pcie_port_device_register(struct pci_dev *dev) { struct pcie_port_data *port_data; - int status, capabilities, irq_mode, i, nr_serv; - int vectors[PCIE_PORT_DEVICE_MAXSERVICES]; + int status, capabilities, i, nr_serv; + int irqs[PCIE_PORT_DEVICE_MAXSERVICES]; capabilities = get_port_device_capability(dev); if (!capabilities) @@ -304,23 +299,19 @@ int pcie_port_device_register(struct pci_dev *dev) port_data = kzalloc(sizeof(*port_data), GFP_KERNEL); if (!port_data) return -ENOMEM; - pci_set_drvdata(dev, port_data); - port_data->port_type = dev->pcie_type; + pci_set_drvdata(dev, port_data); - irq_mode = assign_interrupt_mode(dev, vectors, capabilities); - if (irq_mode == PCIE_PORT_NO_IRQ) { - /* - * Don't use service devices that require interrupts if there is - * no way to generate them. - */ - if (!(capabilities & PCIE_PORT_SERVICE_VC)) { - status = -ENODEV; + /* + * Initialize service irqs. Don't use service devices that + * require interrupts if there is no way to generate them. 
+ */ + status = init_service_irqs(dev, irqs, capabilities); + if (status) { + capabilities &= PCIE_PORT_SERVICE_VC; + if (!capabilities) goto Error; - } - capabilities = PCIE_PORT_SERVICE_VC; } - port_data->port_irq_mode = irq_mode; status = pci_enable_device(dev); if (status) @@ -334,7 +325,7 @@ int pcie_port_device_register(struct pci_dev *dev) if (!(capabilities & service)) continue; - status = pcie_device_init(dev, service, vectors[i]); + status = pcie_device_init(dev, service, irqs[i]); if (!status) nr_serv++; } @@ -418,17 +409,13 @@ void pcie_port_device_remove(struct pci_dev *dev) struct pcie_port_data *port_data = pci_get_drvdata(dev); device_for_each_child(&dev->dev, NULL, remove_iter); - pci_disable_device(dev); - switch (port_data->port_irq_mode) { - case PCIE_PORT_MSIX_MODE: + if (dev->msix_enabled) pci_disable_msix(dev); - break; - case PCIE_PORT_MSI_MODE: + else if (dev->msi_enabled) pci_disable_msi(dev); - break; - } + pci_disable_device(dev); kfree(port_data); } diff --git a/include/linux/pcieport_if.h b/include/linux/pcieport_if.h index b4c79545330b..ec2e1eb1bacc 100644 --- a/include/linux/pcieport_if.h +++ b/include/linux/pcieport_if.h @@ -25,15 +25,8 @@ #define PCIE_PORT_SERVICE_VC_SHIFT 3 /* Virtual Channel */ #define PCIE_PORT_SERVICE_VC (1 << PCIE_PORT_SERVICE_VC_SHIFT) -/* Root/Upstream/Downstream Port's Interrupt Mode */ -#define PCIE_PORT_NO_IRQ (-1) -#define PCIE_PORT_INTx_MODE 0 -#define PCIE_PORT_MSI_MODE 1 -#define PCIE_PORT_MSIX_MODE 2 - struct pcie_port_data { int port_type; /* Type of the port */ - int port_irq_mode; /* [0:INTx | 1:MSI | 2:MSI-X] */ }; struct pcie_device { -- cgit v1.2.3 From 694f88ef7ada0d99e304f687ba92e268a594358b Mon Sep 17 00:00:00 2001 From: Kenji Kaneshige Date: Wed, 25 Nov 2009 21:06:15 +0900 Subject: PCI: portdrv: remove unnecessary struct pcie_port_data Remove 'port_type' field in struct pcie_port_data(), because we can get port type information from struct pci_dev. With this change, this patch also does followings: - Remove struct pcie_port_data because it no longer has any field. - Remove portdrv private definitions about port type (PCIE_RC_PORT, PCIE_SW_UPSTREAM_PORT and PCIE_SW_DOWNSTREAM_PORT), and use generic definitions instead. 
Signed-off-by: Kenji Kaneshige Signed-off-by: Jesse Barnes --- drivers/pci/pcie/aer/aerdrv.c | 2 +- drivers/pci/pcie/aer/aerdrv_core.c | 11 +++++------ drivers/pci/pcie/portdrv_bus.c | 7 ++----- drivers/pci/pcie/portdrv_core.c | 15 +-------------- include/linux/pcieport_if.h | 9 +-------- 5 files changed, 10 insertions(+), 34 deletions(-) (limited to 'include') diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c index 6d30e795a10d..97a345927b55 100644 --- a/drivers/pci/pcie/aer/aerdrv.c +++ b/drivers/pci/pcie/aer/aerdrv.c @@ -53,7 +53,7 @@ static struct pci_error_handlers aer_error_handlers = { static struct pcie_port_service_driver aerdriver = { .name = "aer", - .port_type = PCIE_RC_PORT, + .port_type = PCI_EXP_TYPE_ROOT_PORT, .service = PCIE_PORT_SERVICE_AER, .probe = aer_probe, diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index 5391a9b412e5..2fbbcee033a7 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c @@ -123,9 +123,9 @@ static int set_device_error_reporting(struct pci_dev *dev, void *data) { bool enable = *((bool *)data); - if (dev->pcie_type == PCIE_RC_PORT || - dev->pcie_type == PCIE_SW_UPSTREAM_PORT || - dev->pcie_type == PCIE_SW_DOWNSTREAM_PORT) { + if ((dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) || + (dev->pcie_type == PCI_EXP_TYPE_UPSTREAM) || + (dev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)) { if (enable) pci_enable_pcie_error_reporting(dev); else @@ -437,10 +437,9 @@ static int find_aer_service_iter(struct device *device, void *data) result = (struct find_aer_service_data *) data; if (device->bus == &pcie_port_bus_type) { - struct pcie_port_data *port_data; + struct pcie_device *pcie = to_pcie_device(device); - port_data = pci_get_drvdata(to_pcie_device(device)->port); - if (port_data->port_type == PCIE_SW_DOWNSTREAM_PORT) + if (pcie->port->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) result->is_downstream = 1; driver = device->driver; diff --git a/drivers/pci/pcie/portdrv_bus.c b/drivers/pci/pcie/portdrv_bus.c index ef3a4eeaebb4..18bf90f748f6 100644 --- a/drivers/pci/pcie/portdrv_bus.c +++ b/drivers/pci/pcie/portdrv_bus.c @@ -26,7 +26,6 @@ EXPORT_SYMBOL_GPL(pcie_port_bus_type); static int pcie_port_bus_match(struct device *dev, struct device_driver *drv) { struct pcie_device *pciedev; - struct pcie_port_data *port_data; struct pcie_port_service_driver *driver; if (drv->bus != &pcie_port_bus_type || dev->bus != &pcie_port_bus_type) @@ -38,10 +37,8 @@ static int pcie_port_bus_match(struct device *dev, struct device_driver *drv) if (driver->service != pciedev->service) return 0; - port_data = pci_get_drvdata(pciedev->port); - - if (driver->port_type != PCIE_ANY_PORT - && driver->port_type != port_data->port_type) + if ((driver->port_type != PCIE_ANY_PORT) && + (driver->port_type != pciedev->port->pcie_type)) return 0; return 1; diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index 758e3d339287..9318b96bb255 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c @@ -296,7 +296,6 @@ static int pcie_device_init(struct pci_dev *pdev, int service, int irq) */ int pcie_port_device_register(struct pci_dev *dev) { - struct pcie_port_data *port_data; int status, capabilities, i, nr_service; int irqs[PCIE_PORT_DEVICE_MAXSERVICES]; @@ -305,17 +304,10 @@ int pcie_port_device_register(struct pci_dev *dev) if (!capabilities) return -ENODEV; - /* Allocate driver data for port device */ - port_data = kzalloc(sizeof(*port_data), GFP_KERNEL); - if 
(!port_data) - return -ENOMEM; - port_data->port_type = dev->pcie_type; - pci_set_drvdata(dev, port_data); - /* Enable PCI Express port device */ status = pci_enable_device(dev); if (status) - goto error_kfree; + return status; pci_set_master(dev); /* * Initialize service irqs. Don't use service devices that @@ -347,8 +339,6 @@ error_cleanup_irqs: cleanup_service_irqs(dev); error_disable: pci_disable_device(dev); -error_kfree: - kfree(port_data); return status; } @@ -416,12 +406,9 @@ static int remove_iter(struct device *dev, void *data) */ void pcie_port_device_remove(struct pci_dev *dev) { - struct pcie_port_data *port_data = pci_get_drvdata(dev); - device_for_each_child(&dev->dev, NULL, remove_iter); cleanup_service_irqs(dev); pci_disable_device(dev); - kfree(port_data); } /** diff --git a/include/linux/pcieport_if.h b/include/linux/pcieport_if.h index ec2e1eb1bacc..6775532b92a9 100644 --- a/include/linux/pcieport_if.h +++ b/include/linux/pcieport_if.h @@ -10,10 +10,7 @@ #define _PCIEPORT_IF_H_ /* Port Type */ -#define PCIE_RC_PORT 4 /* Root port of RC */ -#define PCIE_SW_UPSTREAM_PORT 5 /* Upstream port of Switch */ -#define PCIE_SW_DOWNSTREAM_PORT 6 /* Downstream port of Switch */ -#define PCIE_ANY_PORT 7 +#define PCIE_ANY_PORT (~0) /* Service Type */ #define PCIE_PORT_SERVICE_PME_SHIFT 0 /* Power Management Event */ @@ -25,10 +22,6 @@ #define PCIE_PORT_SERVICE_VC_SHIFT 3 /* Virtual Channel */ #define PCIE_PORT_SERVICE_VC (1 << PCIE_PORT_SERVICE_VC_SHIFT) -struct pcie_port_data { - int port_type; /* Type of the port */ -}; - struct pcie_device { int irq; /* Service IRQ/MSI/MSI-X Vector */ struct pci_dev *port; /* Root/Upstream/Downstream Port */ -- cgit v1.2.3 From 5d990b627537e59a3a2f039ff588a4750e9c1a6a Mon Sep 17 00:00:00 2001 From: Chris Wright Date: Fri, 4 Dec 2009 12:15:21 -0800 Subject: PCI: add pci_request_acs Commit ae21ee65e8bc228416bbcc8a1da01c56a847a60c "PCI: acs p2p upsteram forwarding enabling" doesn't actually enable ACS. Add a function to pci core to allow an IOMMU to request that ACS be enabled. The existing mechanism of using iommu_found() in the pci core to know when ACS should be enabled doesn't actually work due to initialization order; iommu has only been detected not initialized. Have Intel and AMD IOMMUs request ACS, and Xen does as well during early init of dom0. 
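An IOMMU driver opts in roughly as in the sketch below; foo_iommu_detect() and foo_iommu_present() are hypothetical, pci_request_acs() is the interface added here and must be called before the PCI core scans devices, as in the callers changed by this patch.

void __init foo_iommu_detect(void)
{
	if (!foo_iommu_present())	/* hypothetical hardware probe */
		return;

	/* Ask the PCI core to enable ACS on capable ports when devices
	 * are probed later; pci_enable_acs() checks this request. */
	pci_request_acs();
}
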
Cc: Allen Kay Cc: David Woodhouse Cc: Jeremy Fitzhardinge Cc: Joerg Roedel Signed-off-by: Chris Wright Signed-off-by: Jesse Barnes --- arch/x86/kernel/amd_iommu_init.c | 2 ++ arch/x86/xen/enlighten.c | 5 +++++ drivers/pci/dmar.c | 5 ++++- drivers/pci/pci.c | 13 +++++++++++++ drivers/pci/probe.c | 5 +---- include/linux/pci.h | 2 ++ 6 files changed, 27 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c index b4b61d462dcc..e60530a5f524 100644 --- a/arch/x86/kernel/amd_iommu_init.c +++ b/arch/x86/kernel/amd_iommu_init.c @@ -1330,6 +1330,8 @@ void __init amd_iommu_detect(void) gart_iommu_aperture_disabled = 1; gart_iommu_aperture = 0; #endif + /* Make sure ACS will be enabled */ + pci_request_acs(); } } diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 5bccd706232c..e2511bccbc8d 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -1170,7 +1171,11 @@ asmlinkage void __init xen_start_kernel(void) add_preferred_console("xenboot", 0, NULL); add_preferred_console("tty", 0, NULL); add_preferred_console("hvc", 0, NULL); + } else { + /* Make sure ACS will be enabled */ + pci_request_acs(); } + xen_raw_console_write("about to get started...\n"); diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c index e01ca4d6b3e6..0e98f6b6f515 100644 --- a/drivers/pci/dmar.c +++ b/drivers/pci/dmar.c @@ -614,8 +614,11 @@ void __init detect_intel_iommu(void) #endif #ifdef CONFIG_DMAR if (ret && !no_iommu && !iommu_detected && !swiotlb && - !dmar_disabled) + !dmar_disabled) { iommu_detected = 1; + /* Make sure ACS will be enabled */ + pci_request_acs(); + } #endif } early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 6af212c509c5..cd9b375f49d5 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1550,6 +1550,16 @@ void pci_enable_ari(struct pci_dev *dev) bridge->ari_enabled = 1; } +static int pci_acs_enable; + +/** + * pci_request_acs - ask for ACS to be enabled if supported + */ +void pci_request_acs(void) +{ + pci_acs_enable = 1; +} + /** * pci_enable_acs - enable ACS if hardware support it * @dev: the PCI device @@ -1560,6 +1570,9 @@ void pci_enable_acs(struct pci_dev *dev) u16 cap; u16 ctrl; + if (!pci_acs_enable) + return; + if (!pci_is_pcie(dev)) return; diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 2fdffc02a308..98ffb2de22e9 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -10,9 +10,7 @@ #include #include #include -#include #include -#include #include "pci.h" #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */ @@ -1029,8 +1027,7 @@ static void pci_init_capabilities(struct pci_dev *dev) pci_iov_init(dev); /* Enable ACS P2P upstream forwarding */ - if (iommu_found() || xen_initial_domain()) - pci_enable_acs(dev); + pci_enable_acs(dev); } void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) diff --git a/include/linux/pci.h b/include/linux/pci.h index 2891c3d3e51a..04771b9c3316 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1328,5 +1328,7 @@ static inline bool pci_is_pcie(struct pci_dev *dev) return !!pci_pcie_cap(dev); } +void pci_request_acs(void); + #endif /* __KERNEL__ */ #endif /* LINUX_PCI_H */ -- cgit v1.2.3 From 0629e370dd5819efa5cf8d418a8e6729efe388ef Mon Sep 17 00:00:00 2001 From: Alexandros Batsakis Date: Sat, 5 Dec 2009 13:46:14 -0500 Subject: nfs41: check SEQUENCE status flag the 
server can indicate a number of error conditions by setting the appropriate bits in the SEQUENCE operation. The client re-establishes state with the server when it receives one of those, with the action depending on the specific case. Signed-off-by: Alexandros Batsakis Signed-off-by: Trond Myklebust --- fs/nfs/nfs4_fs.h | 1 + fs/nfs/nfs4proc.c | 2 ++ fs/nfs/nfs4state.c | 22 ++++++++++++++++++++++ fs/nfs/nfs4xdr.c | 4 ++-- include/linux/nfs4.h | 2 ++ include/linux/nfs_xdr.h | 1 + 6 files changed, 30 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 0d25a82451d9..9a866368896a 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -270,6 +270,7 @@ extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t); extern void nfs4_schedule_state_recovery(struct nfs_client *); extern void nfs4_schedule_state_manager(struct nfs_client *); extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state); +extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags); extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp); extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl); extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index d897b9e34f12..71993f122214 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -405,6 +405,8 @@ static void nfs41_sequence_done(struct nfs_client *clp, if (time_before(clp->cl_last_renewal, timestamp)) clp->cl_last_renewal = timestamp; spin_unlock(&clp->cl_lock); + /* Check sequence flags */ + nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags); return; } out: diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 46c69e2248e3..a86f3acf3212 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1173,6 +1173,28 @@ static int nfs4_reclaim_lease(struct nfs_client *clp) } #ifdef CONFIG_NFS_V4_1 +void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags) +{ + if (!flags) + return; + else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED) { + set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); + nfs4_state_start_reclaim_reboot(clp); + nfs4_schedule_state_recovery(clp); + } else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED | + SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED | + SEQ4_STATUS_ADMIN_STATE_REVOKED | + SEQ4_STATUS_RECALLABLE_STATE_REVOKED | + SEQ4_STATUS_LEASE_MOVED)) { + set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); + nfs4_state_start_reclaim_nograce(clp); + nfs4_schedule_state_recovery(clp); + } else if (flags & (SEQ4_STATUS_CB_PATH_DOWN | + SEQ4_STATUS_BACKCHANNEL_FAULT | + SEQ4_STATUS_CB_PATH_DOWN_SESSION)) + nfs_expire_all_delegations(clp); +} + static void nfs4_session_recovery_handle_error(struct nfs_client *clp, int err) { switch (err) { diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 4ddd04a1113e..f740472370aa 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -4614,8 +4614,8 @@ static int decode_sequence(struct xdr_stream *xdr, dummy = be32_to_cpup(p++); /* target highest slot id - currently not processed */ dummy = be32_to_cpup(p++); - /* result flags - currently not processed */ - dummy = be32_to_cpup(p); + /* result flags */ + res->sr_status_flags = be32_to_cpup(p); status = 0; out_err: res->sr_status = status; diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index c4c060208109..89404bfb72b6 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -128,6 +128,8 @@ #define 
SEQ4_STATUS_RECALLABLE_STATE_REVOKED 0x00000040 #define SEQ4_STATUS_LEASE_MOVED 0x00000080 #define SEQ4_STATUS_RESTART_RECLAIM_NEEDED 0x00000100 +#define SEQ4_STATUS_CB_PATH_DOWN_SESSION 0x00000200 +#define SEQ4_STATUS_BACKCHANNEL_FAULT 0x00000400 #define NFS4_MAX_UINT64 (~(u64)0) diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 8c880372536e..ea32db30c289 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -172,6 +172,7 @@ struct nfs4_sequence_res { u8 sr_slotid; /* slot used to send request */ int sr_status; /* sequence operation status */ unsigned long sr_renewal_time; + u32 sr_status_flags; }; struct nfs4_get_lease_time_args { -- cgit v1.2.3 From 180197536b15d5862b389ce90b46ec8d004056f6 Mon Sep 17 00:00:00 2001 From: Ricardo Labiaga Date: Sat, 5 Dec 2009 16:08:40 -0500 Subject: nfs41: RECLAIM_COMPLETE XDR functionality XDR encoding and decoding for RECLAIM_COMPLETE. Implements the necessary encoding to indicate reclaim complete for the entire client. In the future, it can be extended to provide reclaim complete functionality for a single file system after migration. Signed-off-by: Ricardo Labiaga Signed-off-by: Trond Myklebust --- fs/nfs/nfs4xdr.c | 66 +++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/nfs4.h | 1 + include/linux/nfs_xdr.h | 10 ++++++++ 3 files changed, 77 insertions(+) (limited to 'include') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index bc8711d30296..e437fd6a819f 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -301,6 +301,8 @@ static int nfs4_stat_to_errno(int); XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 4) #define decode_sequence_maxsz (op_decode_hdr_maxsz + \ XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 5) +#define encode_reclaim_complete_maxsz (op_encode_hdr_maxsz + 4) +#define decode_reclaim_complete_maxsz (op_decode_hdr_maxsz + 4) #else /* CONFIG_NFS_V4_1 */ #define encode_sequence_maxsz 0 #define decode_sequence_maxsz 0 @@ -678,6 +680,12 @@ static int nfs4_stat_to_errno(int); decode_sequence_maxsz + \ decode_putrootfh_maxsz + \ decode_fsinfo_maxsz) +#define NFS4_enc_reclaim_complete_sz (compound_encode_hdr_maxsz + \ + encode_sequence_maxsz + \ + encode_reclaim_complete_maxsz) +#define NFS4_dec_reclaim_complete_sz (compound_decode_hdr_maxsz + \ + decode_sequence_maxsz + \ + decode_reclaim_complete_maxsz) const u32 nfs41_maxwrite_overhead = ((RPC_MAX_HEADER_WITH_AUTH + compound_encode_hdr_maxsz + @@ -1623,6 +1631,19 @@ static void encode_destroy_session(struct xdr_stream *xdr, hdr->nops++; hdr->replen += decode_destroy_session_maxsz; } + +static void encode_reclaim_complete(struct xdr_stream *xdr, + struct nfs41_reclaim_complete_args *args, + struct compound_hdr *hdr) +{ + __be32 *p; + + p = reserve_space(xdr, 8); + *p++ = cpu_to_be32(OP_RECLAIM_COMPLETE); + *p++ = cpu_to_be32(args->one_fs); + hdr->nops++; + hdr->replen += decode_reclaim_complete_maxsz; +} #endif /* CONFIG_NFS_V4_1 */ static void encode_sequence(struct xdr_stream *xdr, @@ -2451,6 +2472,26 @@ static int nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req, uint32_t *p, encode_nops(&hdr); return 0; } + +/* + * a RECLAIM_COMPLETE request + */ +static int nfs4_xdr_enc_reclaim_complete(struct rpc_rqst *req, uint32_t *p, + struct nfs41_reclaim_complete_args *args) +{ + struct xdr_stream xdr; + struct compound_hdr hdr = { + .minorversion = nfs4_xdr_minorversion(&args->seq_args) + }; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_compound_hdr(&xdr, req, &hdr); + encode_sequence(&xdr, &args->seq_args, &hdr); + encode_reclaim_complete(&xdr, args, 
&hdr); + encode_nops(&hdr); + return 0; +} + #endif /* CONFIG_NFS_V4_1 */ static void print_overflow_msg(const char *func, const struct xdr_stream *xdr) @@ -4559,6 +4600,11 @@ static int decode_destroy_session(struct xdr_stream *xdr, void *dummy) { return decode_op_hdr(xdr, OP_DESTROY_SESSION); } + +static int decode_reclaim_complete(struct xdr_stream *xdr, void *dummy) +{ + return decode_op_hdr(xdr, OP_RECLAIM_COMPLETE); +} #endif /* CONFIG_NFS_V4_1 */ static int decode_sequence(struct xdr_stream *xdr, @@ -5622,6 +5668,25 @@ static int nfs4_xdr_dec_get_lease_time(struct rpc_rqst *rqstp, uint32_t *p, status = decode_fsinfo(&xdr, res->lr_fsinfo); return status; } + +/* + * Decode RECLAIM_COMPLETE response + */ +static int nfs4_xdr_dec_reclaim_complete(struct rpc_rqst *rqstp, uint32_t *p, + struct nfs41_reclaim_complete_res *res) +{ + struct xdr_stream xdr; + struct compound_hdr hdr; + int status; + + xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); + status = decode_compound_hdr(&xdr, &hdr); + if (!status) + status = decode_sequence(&xdr, &res->seq_res, rqstp); + if (!status) + status = decode_reclaim_complete(&xdr, (void *)NULL); + return status; +} #endif /* CONFIG_NFS_V4_1 */ __be32 *nfs4_decode_dirent(__be32 *p, struct nfs_entry *entry, int plus) @@ -5798,6 +5863,7 @@ struct rpc_procinfo nfs4_procedures[] = { PROC(DESTROY_SESSION, enc_destroy_session, dec_destroy_session), PROC(SEQUENCE, enc_sequence, dec_sequence), PROC(GET_LEASE_TIME, enc_get_lease_time, dec_get_lease_time), + PROC(RECLAIM_COMPLETE, enc_reclaim_complete, dec_reclaim_complete), #endif /* CONFIG_NFS_V4_1 */ }; diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 89404bfb72b6..9b8299af3741 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -530,6 +530,7 @@ enum { NFSPROC4_CLNT_DESTROY_SESSION, NFSPROC4_CLNT_SEQUENCE, NFSPROC4_CLNT_GET_LEASE_TIME, + NFSPROC4_CLNT_RECLAIM_COMPLETE, }; /* nfs41 types */ diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index ea32db30c289..51071b335751 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -939,6 +939,16 @@ struct nfs41_create_session_args { struct nfs41_create_session_res { struct nfs_client *client; }; + +struct nfs41_reclaim_complete_args { + /* In the future extend to include curr_fh for use with migration */ + unsigned char one_fs:1; + struct nfs4_sequence_args seq_args; +}; + +struct nfs41_reclaim_complete_res { + struct nfs4_sequence_res seq_res; +}; #endif /* CONFIG_NFS_V4_1 */ struct nfs_page; -- cgit v1.2.3 From ed54d0f98000ee03310150aa396e9ff8bcb394ce Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 3 Dec 2009 22:08:26 +0100 Subject: hw-breakpoints: Add two reserved fields for future extensions Add two reserved fields for future extensions in the hardware breakpoints interface. Further needs may arise. Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Benjamin Herrenschmidt Cc: "K. 
Prasad" --- include/linux/perf_event.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 43adbd7f0010..a61e4de3448b 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -220,6 +220,8 @@ struct perf_event_attr { __u64 bp_addr; __u32 bp_type; __u32 bp_len; + __u64 __bp_reserved_1; + __u64 __bp_reserved_2; }; }; -- cgit v1.2.3 From 189f202ed197dc25d627e8660de27ece325e9f68 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 3 Dec 2009 23:16:07 +0100 Subject: perf: Remove pointless union that wraps the hw breakpoint fields It stands to anonymize a structure, but structures can already anonymize by themselves. Reported-by: Peter Zijlstra Signed-off-by: Frederic Weisbecker Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: "K. Prasad" --- include/linux/perf_event.h | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index a61e4de3448b..53230e99e9ea 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -215,14 +215,12 @@ struct perf_event_attr { __u32 wakeup_watermark; /* bytes before wakeup */ }; - union { - struct { /* Hardware breakpoint info */ - __u64 bp_addr; - __u32 bp_type; - __u32 bp_len; - __u64 __bp_reserved_1; - __u64 __bp_reserved_2; - }; + struct { /* Hardware breakpoint info */ + __u64 bp_addr; + __u32 bp_type; + __u32 bp_len; + __u64 __bp_reserved_1; + __u64 __bp_reserved_2; }; __u32 __reserved_2; -- cgit v1.2.3 From 9cef30815b0f5b76e94a58d7674fcbf824d95579 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 3 Dec 2009 23:59:31 +0100 Subject: perf: Remove unused struct perf_event::event_callback This field might result from an older manual rebasing mistake. We don't use it. Reported-by: Peter Zijlstra Signed-off-by: Frederic Weisbecker Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: "K. Prasad" --- include/linux/perf_event.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 53230e99e9ea..84bd28a0ffab 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -670,8 +670,6 @@ struct perf_event { perf_callback_t callback; - perf_callback_t event_callback; - #endif /* CONFIG_PERF_EVENTS */ }; -- cgit v1.2.3 From 2f0993e0fb663c49e4d1e02654f6203246be4817 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sat, 5 Dec 2009 07:06:10 +0100 Subject: hw-breakpoints: Drop callback and task parameters from modify helper Drop the callback and task parameters from modify_user_hw_breakpoint(). For now we have no user that need to modify a breakpoint to the point of changing its handler or its task context. Signed-off-by: Frederic Weisbecker Cc: "K. 
Prasad" --- arch/x86/kernel/ptrace.c | 4 ++-- include/linux/hw_breakpoint.h | 9 ++------- kernel/hw_breakpoint.c | 7 +++---- 3 files changed, 7 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 04d182a7cfdb..dbb395572ae2 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -618,7 +618,7 @@ ptrace_modify_breakpoint(struct perf_event *bp, int len, int type, attr.bp_type = gen_type; attr.disabled = disabled; - return modify_user_hw_breakpoint(bp, &attr, bp->callback, tsk); + return modify_user_hw_breakpoint(bp, &attr); } /* @@ -740,7 +740,7 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, attr = bp->attr; attr.bp_addr = addr; - bp = modify_user_hw_breakpoint(bp, &attr, bp->callback, tsk); + bp = modify_user_hw_breakpoint(bp, &attr); } /* * CHECKME: the previous code returned -EIO if the addr wasn't a diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h index a03daed08c59..d33096e0dbd4 100644 --- a/include/linux/hw_breakpoint.h +++ b/include/linux/hw_breakpoint.h @@ -57,10 +57,7 @@ register_user_hw_breakpoint(struct perf_event_attr *attr, /* FIXME: only change from the attr, and don't unregister */ extern struct perf_event * -modify_user_hw_breakpoint(struct perf_event *bp, - struct perf_event_attr *attr, - perf_callback_t triggered, - struct task_struct *tsk); +modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr); /* * Kernel breakpoints are not associated with any particular thread. @@ -97,9 +94,7 @@ register_user_hw_breakpoint(struct perf_event_attr *attr, struct task_struct *tsk) { return NULL; } static inline struct perf_event * modify_user_hw_breakpoint(struct perf_event *bp, - struct perf_event_attr *attr, - perf_callback_t triggered, - struct task_struct *tsk) { return NULL; } + struct perf_event_attr *attr) { return NULL; } static inline struct perf_event * register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr, perf_callback_t triggered, diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c index cf5ee1628411..2d10b012828f 100644 --- a/kernel/hw_breakpoint.c +++ b/kernel/hw_breakpoint.c @@ -312,9 +312,7 @@ EXPORT_SYMBOL_GPL(register_user_hw_breakpoint); * @tsk: pointer to 'task_struct' of the process to which the address belongs */ struct perf_event * -modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr, - perf_callback_t triggered, - struct task_struct *tsk) +modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr) { /* * FIXME: do it without unregistering @@ -323,7 +321,8 @@ modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr, */ unregister_hw_breakpoint(bp); - return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered); + return perf_event_create_kernel_counter(attr, -1, bp->ctx->task->pid, + bp->callback); } EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint); -- cgit v1.2.3 From b326e9560a28fc3e950637ef51847ed8f05c1335 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sat, 5 Dec 2009 09:44:31 +0100 Subject: hw-breakpoints: Use overflow handler instead of the event callback struct perf_event::event callback was called when a breakpoint triggers. But this is a rather opaque callback, pretty tied-only to the breakpoint API and not really integrated into perf as it triggers even when we don't overflow. We prefer to use overflow_handler() as it fits into the perf events rules, being called only when we overflow. 
Reported-by: Peter Zijlstra Signed-off-by: Frederic Weisbecker Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: "K. Prasad" --- arch/x86/kernel/hw_breakpoint.c | 5 ++--- arch/x86/kernel/ptrace.c | 9 ++++++--- include/linux/hw_breakpoint.h | 25 +++++++++++-------------- include/linux/perf_event.h | 13 +++++++------ kernel/hw_breakpoint.c | 17 +++++------------ kernel/perf_event.c | 24 +++++++++--------------- kernel/trace/trace_ksym.c | 5 +++-- samples/hw_breakpoint/data_breakpoint.c | 7 +++++-- 8 files changed, 48 insertions(+), 57 deletions(-) (limited to 'include') diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index d42f65ac4927..05d5fec64a94 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -362,8 +362,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp, return ret; } - if (bp->callback) - ret = arch_store_info(bp); + ret = arch_store_info(bp); if (ret < 0) return ret; @@ -519,7 +518,7 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args) break; } - (bp->callback)(bp, args->regs); + perf_bp_event(bp, args->regs); rcu_read_unlock(); } diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index dbb395572ae2..b361d28061d0 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -555,7 +555,9 @@ static int genregs_set(struct task_struct *target, return ret; } -static void ptrace_triggered(struct perf_event *bp, void *data) +static void ptrace_triggered(struct perf_event *bp, int nmi, + struct perf_sample_data *data, + struct pt_regs *regs) { int i; struct thread_struct *thread = &(current->thread); @@ -599,7 +601,7 @@ ptrace_modify_breakpoint(struct perf_event *bp, int len, int type, { int err; int gen_len, gen_type; - DEFINE_BREAKPOINT_ATTR(attr); + struct perf_event_attr attr; /* * We shoud have at least an inactive breakpoint at this @@ -721,9 +723,10 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, { struct perf_event *bp; struct thread_struct *t = &tsk->thread; - DEFINE_BREAKPOINT_ATTR(attr); + struct perf_event_attr attr; if (!t->ptrace_bps[nr]) { + hw_breakpoint_init(&attr); /* * Put stub len and type to register (reserve) an inactive but * correct bp diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h index d33096e0dbd4..4d14a384a01e 100644 --- a/include/linux/hw_breakpoint.h +++ b/include/linux/hw_breakpoint.h @@ -20,19 +20,16 @@ enum { #ifdef CONFIG_HAVE_HW_BREAKPOINT -/* As it's for in-kernel or ptrace use, we want it to be pinned */ -#define DEFINE_BREAKPOINT_ATTR(name) \ -struct perf_event_attr name = { \ - .type = PERF_TYPE_BREAKPOINT, \ - .size = sizeof(name), \ - .pinned = 1, \ -}; - static inline void hw_breakpoint_init(struct perf_event_attr *attr) { attr->type = PERF_TYPE_BREAKPOINT; attr->size = sizeof(*attr); + /* + * As it's for in-kernel or ptrace use, we want it to be pinned + * and to call its callback every hits. 
+ */ attr->pinned = 1; + attr->sample_period = 1; } static inline unsigned long hw_breakpoint_addr(struct perf_event *bp) @@ -52,7 +49,7 @@ static inline int hw_breakpoint_len(struct perf_event *bp) extern struct perf_event * register_user_hw_breakpoint(struct perf_event_attr *attr, - perf_callback_t triggered, + perf_overflow_handler_t triggered, struct task_struct *tsk); /* FIXME: only change from the attr, and don't unregister */ @@ -64,12 +61,12 @@ modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr); */ extern struct perf_event * register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr, - perf_callback_t triggered, + perf_overflow_handler_t triggered, int cpu); extern struct perf_event ** register_wide_hw_breakpoint(struct perf_event_attr *attr, - perf_callback_t triggered); + perf_overflow_handler_t triggered); extern int register_perf_hw_breakpoint(struct perf_event *bp); extern int __register_perf_hw_breakpoint(struct perf_event *bp); @@ -90,18 +87,18 @@ static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp) static inline struct perf_event * register_user_hw_breakpoint(struct perf_event_attr *attr, - perf_callback_t triggered, + perf_overflow_handler_t triggered, struct task_struct *tsk) { return NULL; } static inline struct perf_event * modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr) { return NULL; } static inline struct perf_event * register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr, - perf_callback_t triggered, + perf_overflow_handler_t triggered, int cpu) { return NULL; } static inline struct perf_event ** register_wide_hw_breakpoint(struct perf_event_attr *attr, - perf_callback_t triggered) { return NULL; } + perf_overflow_handler_t triggered) { return NULL; } static inline int register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; } static inline int diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 84bd28a0ffab..d2f2667430da 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -565,10 +565,13 @@ struct perf_pending_entry { void (*func)(struct perf_pending_entry *); }; -typedef void (*perf_callback_t)(struct perf_event *, void *); - struct perf_sample_data; +typedef void (*perf_callback_t)(struct perf_event *, void *); +typedef void (*perf_overflow_handler_t)(struct perf_event *, int, + struct perf_sample_data *, + struct pt_regs *regs); + /** * struct perf_event - performance event kernel representation: */ @@ -660,9 +663,7 @@ struct perf_event { struct pid_namespace *ns; u64 id; - void (*overflow_handler)(struct perf_event *event, - int nmi, struct perf_sample_data *data, - struct pt_regs *regs); + perf_overflow_handler_t overflow_handler; #ifdef CONFIG_EVENT_PROFILE struct event_filter *filter; @@ -779,7 +780,7 @@ extern struct perf_event * perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, pid_t pid, - perf_callback_t callback); + perf_overflow_handler_t callback); extern u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running); diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c index 2d10b012828f..b600fc27f161 100644 --- a/kernel/hw_breakpoint.c +++ b/kernel/hw_breakpoint.c @@ -259,7 +259,7 @@ void release_bp_slot(struct perf_event *bp) } -int __register_perf_hw_breakpoint(struct perf_event *bp) +int register_perf_hw_breakpoint(struct perf_event *bp) { int ret; @@ -276,19 +276,12 @@ int __register_perf_hw_breakpoint(struct perf_event *bp) * This is a quick hack 
that will be removed soon, once we remove * the tmp breakpoints from ptrace */ - if (!bp->attr.disabled || bp->callback == perf_bp_event) + if (!bp->attr.disabled || !bp->overflow_handler) ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task); return ret; } -int register_perf_hw_breakpoint(struct perf_event *bp) -{ - bp->callback = perf_bp_event; - - return __register_perf_hw_breakpoint(bp); -} - /** * register_user_hw_breakpoint - register a hardware breakpoint for user space * @attr: breakpoint attributes @@ -297,7 +290,7 @@ int register_perf_hw_breakpoint(struct perf_event *bp) */ struct perf_event * register_user_hw_breakpoint(struct perf_event_attr *attr, - perf_callback_t triggered, + perf_overflow_handler_t triggered, struct task_struct *tsk) { return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered); @@ -322,7 +315,7 @@ modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr) unregister_hw_breakpoint(bp); return perf_event_create_kernel_counter(attr, -1, bp->ctx->task->pid, - bp->callback); + bp->overflow_handler); } EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint); @@ -347,7 +340,7 @@ EXPORT_SYMBOL_GPL(unregister_hw_breakpoint); */ struct perf_event ** register_wide_hw_breakpoint(struct perf_event_attr *attr, - perf_callback_t triggered) + perf_overflow_handler_t triggered) { struct perf_event **cpu_events, **pevent, *bp; long err; diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 6b7ddba1dd64..fd43ff4ac860 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -4286,15 +4286,8 @@ static void bp_perf_event_destroy(struct perf_event *event) static const struct pmu *bp_perf_event_init(struct perf_event *bp) { int err; - /* - * The breakpoint is already filled if we haven't created the counter - * through perf syscall - * FIXME: manage to get trigerred to NULL if it comes from syscalls - */ - if (!bp->callback) - err = register_perf_hw_breakpoint(bp); - else - err = __register_perf_hw_breakpoint(bp); + + err = register_perf_hw_breakpoint(bp); if (err) return ERR_PTR(err); @@ -4390,7 +4383,7 @@ perf_event_alloc(struct perf_event_attr *attr, struct perf_event_context *ctx, struct perf_event *group_leader, struct perf_event *parent_event, - perf_callback_t callback, + perf_overflow_handler_t overflow_handler, gfp_t gfpflags) { const struct pmu *pmu; @@ -4433,10 +4426,10 @@ perf_event_alloc(struct perf_event_attr *attr, event->state = PERF_EVENT_STATE_INACTIVE; - if (!callback && parent_event) - callback = parent_event->callback; + if (!overflow_handler && parent_event) + overflow_handler = parent_event->overflow_handler; - event->callback = callback; + event->overflow_handler = overflow_handler; if (attr->disabled) event->state = PERF_EVENT_STATE_OFF; @@ -4776,7 +4769,8 @@ err_put_context: */ struct perf_event * perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, - pid_t pid, perf_callback_t callback) + pid_t pid, + perf_overflow_handler_t overflow_handler) { struct perf_event *event; struct perf_event_context *ctx; @@ -4793,7 +4787,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, } event = perf_event_alloc(attr, cpu, ctx, NULL, - NULL, callback, GFP_KERNEL); + NULL, overflow_handler, GFP_KERNEL); if (IS_ERR(event)) { err = PTR_ERR(event); goto err_put_context; diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c index ddfa0fd43bc0..acb87d4a4ac1 100644 --- a/kernel/trace/trace_ksym.c +++ b/kernel/trace/trace_ksym.c @@ -79,11 +79,12 @@ void ksym_collect_stats(unsigned long 
hbp_hit_addr) } #endif /* CONFIG_PROFILE_KSYM_TRACER */ -void ksym_hbp_handler(struct perf_event *hbp, void *data) +void ksym_hbp_handler(struct perf_event *hbp, int nmi, + struct perf_sample_data *data, + struct pt_regs *regs) { struct ring_buffer_event *event; struct ksym_trace_entry *entry; - struct pt_regs *regs = data; struct ring_buffer *buffer; int pc; diff --git a/samples/hw_breakpoint/data_breakpoint.c b/samples/hw_breakpoint/data_breakpoint.c index 29525500df00..c69cbe9b2426 100644 --- a/samples/hw_breakpoint/data_breakpoint.c +++ b/samples/hw_breakpoint/data_breakpoint.c @@ -41,7 +41,9 @@ module_param_string(ksym, ksym_name, KSYM_NAME_LEN, S_IRUGO); MODULE_PARM_DESC(ksym, "Kernel symbol to monitor; this module will report any" " write operations on the kernel symbol"); -static void sample_hbp_handler(struct perf_event *temp, void *data) +static void sample_hbp_handler(struct perf_event *bp, int nmi, + struct perf_sample_data *data, + struct pt_regs *regs) { printk(KERN_INFO "%s value is changed\n", ksym_name); dump_stack(); @@ -51,8 +53,9 @@ static void sample_hbp_handler(struct perf_event *temp, void *data) static int __init hw_break_module_init(void) { int ret; - DEFINE_BREAKPOINT_ATTR(attr); + struct perf_event_attr attr; + hw_breakpoint_init(&attr); attr.bp_addr = kallsyms_lookup_name(ksym_name); attr.bp_len = HW_BREAKPOINT_LEN_4; attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R; -- cgit v1.2.3 From c0dfb2feb632537cf0a9d2ce3c29bcf5778fec59 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sat, 5 Dec 2009 09:53:28 +0100 Subject: perf: Remove the "event" callback from perf events As it is not used anymore and has been superseded by overflow_handler. Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: "K. Prasad" --- include/linux/perf_event.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'include') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index d2f2667430da..89098e35a036 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -567,7 +567,6 @@ struct perf_pending_entry { struct perf_sample_data; -typedef void (*perf_callback_t)(struct perf_event *, void *); typedef void (*perf_overflow_handler_t)(struct perf_event *, int, struct perf_sample_data *, struct pt_regs *regs); @@ -669,8 +668,6 @@ struct perf_event { struct event_filter *filter; #endif - perf_callback_t callback; - #endif /* CONFIG_PERF_EVENTS */ }; -- cgit v1.2.3 From 6ad4c18884e864cf4c77f9074d3d1816063f99cd Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 25 Nov 2009 13:31:39 +0100 Subject: sched: Fix balance vs hotplug race Since (e761b77: cpu hotplug, sched: Introduce cpu_active_map and redo sched domain managment) we have cpu_active_mask which is suppose to rule scheduler migration and load-balancing, except it never (fully) did. The particular problem being solved here is a crash in try_to_wake_up() where select_task_rq() ends up selecting an offline cpu because select_task_rq_fair() trusts the sched_domain tree to reflect the current state of affairs, similarly select_task_rq_rt() trusts the root_domain. However, the sched_domains are updated from CPU_DEAD, which is after the cpu is taken offline and after stop_machine is done. Therefore it can race perfectly well with code assuming the domains are right. Cure this by building the domains from cpu_active_mask on CPU_DOWN_PREPARE. 
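To illustrate the convention this introduces (the callback and rebuild helper below are made up; the switch mirrors what this patch does to update_sched_domains() and cpuset_track_online_cpus()): hotplug-aware code now rebuilds its view from cpu_active_mask on CPU_DOWN_PREPARE / CPU_DOWN_FAILED instead of waiting for CPU_DEAD.

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/notifier.h>

/* Hypothetical stand-in for partition_sched_domains() and friends. */
static void rebuild_from_active_mask(void)
{
	/* recompute domains/masks from cpu_active_mask here */
}

static int my_cpu_notifier(struct notifier_block *nfb,
			   unsigned long action, void *hcpu)
{
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
	case CPU_DOWN_PREPARE:		/* cpu is on its way out */
	case CPU_DOWN_PREPARE_FROZEN:
	case CPU_DOWN_FAILED:		/* cpu stays after all */
	case CPU_DOWN_FAILED_FROZEN:
		rebuild_from_active_mask();
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

/* Registered the same way as the notifiers patched above: */
/* hotcpu_notifier(my_cpu_notifier, 0); */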
Signed-off-by: Peter Zijlstra LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/cpumask.h | 2 ++ kernel/cpu.c | 18 +++++++++++++----- kernel/cpuset.c | 16 +++++++++------- kernel/sched.c | 32 +++++++++++++++++--------------- 4 files changed, 41 insertions(+), 27 deletions(-) (limited to 'include') diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 789cf5f920ce..d77b54733c5b 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -84,6 +84,7 @@ extern const struct cpumask *const cpu_active_mask; #define num_online_cpus() cpumask_weight(cpu_online_mask) #define num_possible_cpus() cpumask_weight(cpu_possible_mask) #define num_present_cpus() cpumask_weight(cpu_present_mask) +#define num_active_cpus() cpumask_weight(cpu_active_mask) #define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask) #define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask) #define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask) @@ -92,6 +93,7 @@ extern const struct cpumask *const cpu_active_mask; #define num_online_cpus() 1 #define num_possible_cpus() 1 #define num_present_cpus() 1 +#define num_active_cpus() 1 #define cpu_online(cpu) ((cpu) == 0) #define cpu_possible(cpu) ((cpu) == 0) #define cpu_present(cpu) ((cpu) == 0) diff --git a/kernel/cpu.c b/kernel/cpu.c index 6ba0f1ecb212..b21688640377 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -212,6 +212,8 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls); if (err == NOTIFY_BAD) { + set_cpu_active(cpu, true); + nr_calls--; __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL); @@ -223,11 +225,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) /* Ensure that we are not runnable on dying cpu */ cpumask_copy(old_allowed, ¤t->cpus_allowed); - set_cpus_allowed_ptr(current, - cpumask_of(cpumask_any_but(cpu_online_mask, cpu))); + set_cpus_allowed_ptr(current, cpu_active_mask); err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu)); if (err) { + set_cpu_active(cpu, true); /* CPU didn't die: tell everyone. Can't complain. 
*/ if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod, hcpu) == NOTIFY_BAD) @@ -292,9 +294,6 @@ int __ref cpu_down(unsigned int cpu) err = _cpu_down(cpu, 0); - if (cpu_online(cpu)) - set_cpu_active(cpu, true); - out: cpu_maps_update_done(); stop_machine_destroy(); @@ -387,6 +386,15 @@ int disable_nonboot_cpus(void) * with the userspace trying to use the CPU hotplug at the same time */ cpumask_clear(frozen_cpus); + + for_each_online_cpu(cpu) { + if (cpu == first_cpu) + continue; + set_cpu_active(cpu, false); + } + + synchronize_sched(); + printk("Disabling non-boot CPUs ...\n"); for_each_online_cpu(cpu) { if (cpu == first_cpu) diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 43fb7e800028..ba401fab459f 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -872,7 +872,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, if (retval < 0) return retval; - if (!cpumask_subset(trialcs->cpus_allowed, cpu_online_mask)) + if (!cpumask_subset(trialcs->cpus_allowed, cpu_active_mask)) return -EINVAL; } retval = validate_change(cs, trialcs); @@ -2010,7 +2010,7 @@ static void scan_for_empty_cpusets(struct cpuset *root) } /* Continue past cpusets with all cpus, mems online */ - if (cpumask_subset(cp->cpus_allowed, cpu_online_mask) && + if (cpumask_subset(cp->cpus_allowed, cpu_active_mask) && nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY])) continue; @@ -2019,7 +2019,7 @@ static void scan_for_empty_cpusets(struct cpuset *root) /* Remove offline cpus and mems from this cpuset. */ mutex_lock(&callback_mutex); cpumask_and(cp->cpus_allowed, cp->cpus_allowed, - cpu_online_mask); + cpu_active_mask); nodes_and(cp->mems_allowed, cp->mems_allowed, node_states[N_HIGH_MEMORY]); mutex_unlock(&callback_mutex); @@ -2057,8 +2057,10 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb, switch (phase) { case CPU_ONLINE: case CPU_ONLINE_FROZEN: - case CPU_DEAD: - case CPU_DEAD_FROZEN: + case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: + case CPU_DOWN_FAILED: + case CPU_DOWN_FAILED_FROZEN: break; default: @@ -2067,7 +2069,7 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb, cgroup_lock(); mutex_lock(&callback_mutex); - cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask); + cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask); mutex_unlock(&callback_mutex); scan_for_empty_cpusets(&top_cpuset); ndoms = generate_sched_domains(&doms, &attr); @@ -2114,7 +2116,7 @@ static int cpuset_track_online_nodes(struct notifier_block *self, void __init cpuset_init_smp(void) { - cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask); + cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask); top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; hotcpu_notifier(cpuset_track_online_cpus, 0); diff --git a/kernel/sched.c b/kernel/sched.c index aa31244caa9f..281da29d0801 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4134,7 +4134,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, unsigned long flags; struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask); - cpumask_copy(cpus, cpu_online_mask); + cpumask_copy(cpus, cpu_active_mask); /* * When power savings policy is enabled for the parent domain, idle @@ -4297,7 +4297,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd) int all_pinned = 0; struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask); - cpumask_copy(cpus, cpu_online_mask); + cpumask_copy(cpus, cpu_active_mask); /* * When power savings policy is enabled for the parent domain, idle @@ 
-4694,7 +4694,7 @@ int select_nohz_load_balancer(int stop_tick) cpumask_set_cpu(cpu, nohz.cpu_mask); /* time for ilb owner also to sleep */ - if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) { + if (cpumask_weight(nohz.cpu_mask) == num_active_cpus()) { if (atomic_read(&nohz.load_balancer) == cpu) atomic_set(&nohz.load_balancer, -1); return 0; @@ -7093,7 +7093,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) int ret = 0; rq = task_rq_lock(p, &flags); - if (!cpumask_intersects(new_mask, cpu_online_mask)) { + if (!cpumask_intersects(new_mask, cpu_active_mask)) { ret = -EINVAL; goto out; } @@ -7115,7 +7115,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) if (cpumask_test_cpu(task_cpu(p), new_mask)) goto out; - if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) { + if (migrate_task(p, cpumask_any_and(cpu_active_mask, new_mask), &req)) { /* Need help from migration thread: drop lock and wait. */ struct task_struct *mt = rq->migration_thread; @@ -7269,19 +7269,19 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) again: /* Look for allowed, online CPU in same node. */ - for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask) + for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask) if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) goto move; /* Any allowed, online CPU? */ - dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask); + dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask); if (dest_cpu < nr_cpu_ids) goto move; /* No more Mr. Nice Guy. */ if (dest_cpu >= nr_cpu_ids) { cpuset_cpus_allowed_locked(p, &p->cpus_allowed); - dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed); + dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed); /* * Don't tell them about moving exiting tasks or @@ -7310,7 +7310,7 @@ move: */ static void migrate_nr_uninterruptible(struct rq *rq_src) { - struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask)); + struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask)); unsigned long flags; local_irq_save(flags); @@ -7564,7 +7564,7 @@ static ctl_table *sd_alloc_ctl_cpu_table(int cpu) static struct ctl_table_header *sd_sysctl_header; static void register_sched_domain_sysctl(void) { - int i, cpu_num = num_online_cpus(); + int i, cpu_num = num_possible_cpus(); struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1); char buf[32]; @@ -7574,7 +7574,7 @@ static void register_sched_domain_sysctl(void) if (entry == NULL) return; - for_each_online_cpu(i) { + for_each_possible_cpu(i) { snprintf(buf, 32, "cpu%d", i); entry->procname = kstrdup(buf, GFP_KERNEL); entry->mode = 0555; @@ -9100,7 +9100,7 @@ match1: if (doms_new == NULL) { ndoms_cur = 0; doms_new = &fallback_doms; - cpumask_andnot(doms_new[0], cpu_online_mask, cpu_isolated_map); + cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map); WARN_ON_ONCE(dattr_new); } @@ -9231,8 +9231,10 @@ static int update_sched_domains(struct notifier_block *nfb, switch (action) { case CPU_ONLINE: case CPU_ONLINE_FROZEN: - case CPU_DEAD: - case CPU_DEAD_FROZEN: + case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: + case CPU_DOWN_FAILED: + case CPU_DOWN_FAILED_FROZEN: partition_sched_domains(1, NULL, NULL); return NOTIFY_OK; @@ -9279,7 +9281,7 @@ void __init sched_init_smp(void) #endif get_online_cpus(); mutex_lock(&sched_domains_mutex); - arch_init_sched_domains(cpu_online_mask); + arch_init_sched_domains(cpu_active_mask); cpumask_andnot(non_isolated_cpus, cpu_possible_mask, 
cpu_isolated_map); if (cpumask_empty(non_isolated_cpus)) cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); -- cgit v1.2.3 From 88071539a3f5195f9e9dae38a3e35b3ce4b9f9fc Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Sun, 6 Dec 2009 21:46:24 +0100 Subject: drm/ttm: Add user-space objects. Add objects needed for user-space to maintain reference counts on ttm objects. This is used by the vmwgfx driver which allows user-space to maintain map-counts on dma buffers, lock-counts on the ttm lock and ref-counts on gpu surfaces, gpu contexts and dma buffer. Signed-off-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- drivers/gpu/drm/ttm/Makefile | 3 +- drivers/gpu/drm/ttm/ttm_object.c | 452 +++++++++++++++++++++++++++++++++++++++ include/drm/ttm/ttm_object.h | 267 +++++++++++++++++++++++ 3 files changed, 721 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/ttm/ttm_object.c create mode 100644 include/drm/ttm/ttm_object.h (limited to 'include') diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile index b0a9de7a57c2..a9cc9f895360 100644 --- a/drivers/gpu/drm/ttm/Makefile +++ b/drivers/gpu/drm/ttm/Makefile @@ -3,6 +3,7 @@ ccflags-y := -Iinclude/drm ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \ - ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o + ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \ + ttm_object.o obj-$(CONFIG_DRM_TTM) += ttm.o diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c new file mode 100644 index 000000000000..1099abac824b --- /dev/null +++ b/drivers/gpu/drm/ttm/ttm_object.c @@ -0,0 +1,452 @@ +/************************************************************************** + * + * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ +/** @file ttm_ref_object.c + * + * Base- and reference object implementation for the various + * ttm objects. Implements reference counting, minimal security checks + * and release on file close. + */ + +/** + * struct ttm_object_file + * + * @tdev: Pointer to the ttm_object_device. + * + * @lock: Lock that protects the ref_list list and the + * ref_hash hash tables. + * + * @ref_list: List of ttm_ref_objects to be destroyed at + * file release. 
+ * + * @ref_hash: Hash tables of ref objects, one per ttm_ref_type, + * for fast lookup of ref objects given a base object. + */ + +#include "ttm/ttm_object.h" +#include "ttm/ttm_module.h" +#include +#include +#include +#include +#include + +struct ttm_object_file { + struct ttm_object_device *tdev; + rwlock_t lock; + struct list_head ref_list; + struct drm_open_hash ref_hash[TTM_REF_NUM]; + struct kref refcount; +}; + +/** + * struct ttm_object_device + * + * @object_lock: lock that protects the object_hash hash table. + * + * @object_hash: hash table for fast lookup of object global names. + * + * @object_count: Per device object count. + * + * This is the per-device data structure needed for ttm object management. + */ + +struct ttm_object_device { + rwlock_t object_lock; + struct drm_open_hash object_hash; + atomic_t object_count; + struct ttm_mem_global *mem_glob; +}; + +/** + * struct ttm_ref_object + * + * @hash: Hash entry for the per-file object reference hash. + * + * @head: List entry for the per-file list of ref-objects. + * + * @kref: Ref count. + * + * @obj: Base object this ref object is referencing. + * + * @ref_type: Type of ref object. + * + * This is similar to an idr object, but it also has a hash table entry + * that allows lookup with a pointer to the referenced object as a key. In + * that way, one can easily detect whether a base object is referenced by + * a particular ttm_object_file. It also carries a ref count to avoid creating + * multiple ref objects if a ttm_object_file references the same base + * object more than once. + */ + +struct ttm_ref_object { + struct drm_hash_item hash; + struct list_head head; + struct kref kref; + struct ttm_base_object *obj; + enum ttm_ref_type ref_type; + struct ttm_object_file *tfile; +}; + +static inline struct ttm_object_file * +ttm_object_file_ref(struct ttm_object_file *tfile) +{ + kref_get(&tfile->refcount); + return tfile; +} + +static void ttm_object_file_destroy(struct kref *kref) +{ + struct ttm_object_file *tfile = + container_of(kref, struct ttm_object_file, refcount); + + kfree(tfile); +} + + +static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile) +{ + struct ttm_object_file *tfile = *p_tfile; + + *p_tfile = NULL; + kref_put(&tfile->refcount, ttm_object_file_destroy); +} + + +int ttm_base_object_init(struct ttm_object_file *tfile, + struct ttm_base_object *base, + bool shareable, + enum ttm_object_type object_type, + void (*refcount_release) (struct ttm_base_object **), + void (*ref_obj_release) (struct ttm_base_object *, + enum ttm_ref_type ref_type)) +{ + struct ttm_object_device *tdev = tfile->tdev; + int ret; + + base->shareable = shareable; + base->tfile = ttm_object_file_ref(tfile); + base->refcount_release = refcount_release; + base->ref_obj_release = ref_obj_release; + base->object_type = object_type; + write_lock(&tdev->object_lock); + kref_init(&base->refcount); + ret = drm_ht_just_insert_please(&tdev->object_hash, + &base->hash, + (unsigned long)base, 31, 0, 0); + write_unlock(&tdev->object_lock); + if (unlikely(ret != 0)) + goto out_err0; + + ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); + if (unlikely(ret != 0)) + goto out_err1; + + ttm_base_object_unref(&base); + + return 0; +out_err1: + (void)drm_ht_remove_item(&tdev->object_hash, &base->hash); +out_err0: + return ret; +} +EXPORT_SYMBOL(ttm_base_object_init); + +static void ttm_release_base(struct kref *kref) +{ + struct ttm_base_object *base = + container_of(kref, struct ttm_base_object, refcount); + struct 
ttm_object_device *tdev = base->tfile->tdev; + + (void)drm_ht_remove_item(&tdev->object_hash, &base->hash); + write_unlock(&tdev->object_lock); + if (base->refcount_release) { + ttm_object_file_unref(&base->tfile); + base->refcount_release(&base); + } + write_lock(&tdev->object_lock); +} + +void ttm_base_object_unref(struct ttm_base_object **p_base) +{ + struct ttm_base_object *base = *p_base; + struct ttm_object_device *tdev = base->tfile->tdev; + + *p_base = NULL; + + /* + * Need to take the lock here to avoid racing with + * users trying to look up the object. + */ + + write_lock(&tdev->object_lock); + (void)kref_put(&base->refcount, &ttm_release_base); + write_unlock(&tdev->object_lock); +} +EXPORT_SYMBOL(ttm_base_object_unref); + +struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile, + uint32_t key) +{ + struct ttm_object_device *tdev = tfile->tdev; + struct ttm_base_object *base; + struct drm_hash_item *hash; + int ret; + + read_lock(&tdev->object_lock); + ret = drm_ht_find_item(&tdev->object_hash, key, &hash); + + if (likely(ret == 0)) { + base = drm_hash_entry(hash, struct ttm_base_object, hash); + kref_get(&base->refcount); + } + read_unlock(&tdev->object_lock); + + if (unlikely(ret != 0)) + return NULL; + + if (tfile != base->tfile && !base->shareable) { + printk(KERN_ERR TTM_PFX + "Attempted access of non-shareable object.\n"); + ttm_base_object_unref(&base); + return NULL; + } + + return base; +} +EXPORT_SYMBOL(ttm_base_object_lookup); + +int ttm_ref_object_add(struct ttm_object_file *tfile, + struct ttm_base_object *base, + enum ttm_ref_type ref_type, bool *existed) +{ + struct drm_open_hash *ht = &tfile->ref_hash[ref_type]; + struct ttm_ref_object *ref; + struct drm_hash_item *hash; + struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob; + int ret = -EINVAL; + + if (existed != NULL) + *existed = true; + + while (ret == -EINVAL) { + read_lock(&tfile->lock); + ret = drm_ht_find_item(ht, base->hash.key, &hash); + + if (ret == 0) { + ref = drm_hash_entry(hash, struct ttm_ref_object, hash); + kref_get(&ref->kref); + read_unlock(&tfile->lock); + break; + } + + read_unlock(&tfile->lock); + ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref), + false, false); + if (unlikely(ret != 0)) + return ret; + ref = kmalloc(sizeof(*ref), GFP_KERNEL); + if (unlikely(ref == NULL)) { + ttm_mem_global_free(mem_glob, sizeof(*ref)); + return -ENOMEM; + } + + ref->hash.key = base->hash.key; + ref->obj = base; + ref->tfile = tfile; + ref->ref_type = ref_type; + kref_init(&ref->kref); + + write_lock(&tfile->lock); + ret = drm_ht_insert_item(ht, &ref->hash); + + if (likely(ret == 0)) { + list_add_tail(&ref->head, &tfile->ref_list); + kref_get(&base->refcount); + write_unlock(&tfile->lock); + if (existed != NULL) + *existed = false; + break; + } + + write_unlock(&tfile->lock); + BUG_ON(ret != -EINVAL); + + ttm_mem_global_free(mem_glob, sizeof(*ref)); + kfree(ref); + } + + return ret; +} +EXPORT_SYMBOL(ttm_ref_object_add); + +static void ttm_ref_object_release(struct kref *kref) +{ + struct ttm_ref_object *ref = + container_of(kref, struct ttm_ref_object, kref); + struct ttm_base_object *base = ref->obj; + struct ttm_object_file *tfile = ref->tfile; + struct drm_open_hash *ht; + struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob; + + ht = &tfile->ref_hash[ref->ref_type]; + (void)drm_ht_remove_item(ht, &ref->hash); + list_del(&ref->head); + write_unlock(&tfile->lock); + + if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release) + base->ref_obj_release(base, ref->ref_type); 
+ + ttm_base_object_unref(&ref->obj); + ttm_mem_global_free(mem_glob, sizeof(*ref)); + kfree(ref); + write_lock(&tfile->lock); +} + +int ttm_ref_object_base_unref(struct ttm_object_file *tfile, + unsigned long key, enum ttm_ref_type ref_type) +{ + struct drm_open_hash *ht = &tfile->ref_hash[ref_type]; + struct ttm_ref_object *ref; + struct drm_hash_item *hash; + int ret; + + write_lock(&tfile->lock); + ret = drm_ht_find_item(ht, key, &hash); + if (unlikely(ret != 0)) { + write_unlock(&tfile->lock); + return -EINVAL; + } + ref = drm_hash_entry(hash, struct ttm_ref_object, hash); + kref_put(&ref->kref, ttm_ref_object_release); + write_unlock(&tfile->lock); + return 0; +} +EXPORT_SYMBOL(ttm_ref_object_base_unref); + +void ttm_object_file_release(struct ttm_object_file **p_tfile) +{ + struct ttm_ref_object *ref; + struct list_head *list; + unsigned int i; + struct ttm_object_file *tfile = *p_tfile; + + *p_tfile = NULL; + write_lock(&tfile->lock); + + /* + * Since we release the lock within the loop, we have to + * restart it from the beginning each time. + */ + + while (!list_empty(&tfile->ref_list)) { + list = tfile->ref_list.next; + ref = list_entry(list, struct ttm_ref_object, head); + ttm_ref_object_release(&ref->kref); + } + + for (i = 0; i < TTM_REF_NUM; ++i) + drm_ht_remove(&tfile->ref_hash[i]); + + write_unlock(&tfile->lock); + ttm_object_file_unref(&tfile); +} +EXPORT_SYMBOL(ttm_object_file_release); + +struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev, + unsigned int hash_order) +{ + struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL); + unsigned int i; + unsigned int j = 0; + int ret; + + if (unlikely(tfile == NULL)) + return NULL; + + rwlock_init(&tfile->lock); + tfile->tdev = tdev; + kref_init(&tfile->refcount); + INIT_LIST_HEAD(&tfile->ref_list); + + for (i = 0; i < TTM_REF_NUM; ++i) { + ret = drm_ht_create(&tfile->ref_hash[i], hash_order); + if (ret) { + j = i; + goto out_err; + } + } + + return tfile; +out_err: + for (i = 0; i < j; ++i) + drm_ht_remove(&tfile->ref_hash[i]); + + kfree(tfile); + + return NULL; +} +EXPORT_SYMBOL(ttm_object_file_init); + +struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global + *mem_glob, + unsigned int hash_order) +{ + struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL); + int ret; + + if (unlikely(tdev == NULL)) + return NULL; + + tdev->mem_glob = mem_glob; + rwlock_init(&tdev->object_lock); + atomic_set(&tdev->object_count, 0); + ret = drm_ht_create(&tdev->object_hash, hash_order); + + if (likely(ret == 0)) + return tdev; + + kfree(tdev); + return NULL; +} +EXPORT_SYMBOL(ttm_object_device_init); + +void ttm_object_device_release(struct ttm_object_device **p_tdev) +{ + struct ttm_object_device *tdev = *p_tdev; + + *p_tdev = NULL; + + write_lock(&tdev->object_lock); + drm_ht_remove(&tdev->object_hash); + write_unlock(&tdev->object_lock); + + kfree(tdev); +} +EXPORT_SYMBOL(ttm_object_device_release); diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h new file mode 100644 index 000000000000..703ca4db0a29 --- /dev/null +++ b/include/drm/ttm/ttm_object.h @@ -0,0 +1,267 @@ +/************************************************************************** + * + * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ +/** @file ttm_object.h + * + * Base- and reference object implementation for the various + * ttm objects. Implements reference counting, minimal security checks + * and release on file close. + */ + +#ifndef _TTM_OBJECT_H_ +#define _TTM_OBJECT_H_ + +#include +#include "drm_hashtab.h" +#include +#include + +/** + * enum ttm_ref_type + * + * Describes what type of reference a ref object holds. + * + * TTM_REF_USAGE is a simple refcount on a base object. + * + * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a + * buffer object. + * + * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a + * buffer object. + * + */ + +enum ttm_ref_type { + TTM_REF_USAGE, + TTM_REF_SYNCCPU_READ, + TTM_REF_SYNCCPU_WRITE, + TTM_REF_NUM +}; + +/** + * enum ttm_object_type + * + * One entry per ttm object type. + * Device-specific types should use the + * ttm_driver_typex types. + */ + +enum ttm_object_type { + ttm_fence_type, + ttm_buffer_type, + ttm_lock_type, + ttm_driver_type0 = 256, + ttm_driver_type1 +}; + +struct ttm_object_file; +struct ttm_object_device; + +/** + * struct ttm_base_object + * + * @hash: hash entry for the per-device object hash. + * @type: derived type this object is base class for. + * @shareable: Other ttm_object_files can access this object. + * + * @tfile: Pointer to ttm_object_file of the creator. + * NULL if the object was not created by a user request. + * (kernel object). + * + * @refcount: Number of references to this object, not + * including the hash entry. A reference to a base object can + * only be held by a ref object. + * + * @refcount_release: A function to be called when there are + * no more references to this object. This function should + * destroy the object (or make sure destruction eventually happens), + * and when it is called, the object has + * already been taken out of the per-device hash. The parameter + * "base" should be set to NULL by the function. + * + * @ref_obj_release: A function to be called when a reference object + * with another ttm_ref_type than TTM_REF_USAGE is deleted. + * this function may, for example, release a lock held by a user-space + * process. + * + * This struct is intended to be used as a base struct for objects that + * are visible to user-space. 
It provides a global name, race-safe + * access and refcounting, minimal access contol and hooks for unref actions. + */ + +struct ttm_base_object { + struct drm_hash_item hash; + enum ttm_object_type object_type; + bool shareable; + struct ttm_object_file *tfile; + struct kref refcount; + void (*refcount_release) (struct ttm_base_object **base); + void (*ref_obj_release) (struct ttm_base_object *base, + enum ttm_ref_type ref_type); +}; + +/** + * ttm_base_object_init + * + * @tfile: Pointer to a struct ttm_object_file. + * @base: The struct ttm_base_object to initialize. + * @shareable: This object is shareable with other applcations. + * (different @tfile pointers.) + * @type: The object type. + * @refcount_release: See the struct ttm_base_object description. + * @ref_obj_release: See the struct ttm_base_object description. + * + * Initializes a struct ttm_base_object. + */ + +extern int ttm_base_object_init(struct ttm_object_file *tfile, + struct ttm_base_object *base, + bool shareable, + enum ttm_object_type type, + void (*refcount_release) (struct ttm_base_object + **), + void (*ref_obj_release) (struct ttm_base_object + *, + enum ttm_ref_type + ref_type)); + +/** + * ttm_base_object_lookup + * + * @tfile: Pointer to a struct ttm_object_file. + * @key: Hash key + * + * Looks up a struct ttm_base_object with the key @key. + * Also verifies that the object is visible to the application, by + * comparing the @tfile argument and checking the object shareable flag. + */ + +extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file + *tfile, uint32_t key); + +/** + * ttm_base_object_unref + * + * @p_base: Pointer to a pointer referncing a struct ttm_base_object. + * + * Decrements the base object refcount and clears the pointer pointed to by + * p_base. + */ + +extern void ttm_base_object_unref(struct ttm_base_object **p_base); + +/** + * ttm_ref_object_add. + * + * @tfile: A struct ttm_object_file representing the application owning the + * ref_object. + * @base: The base object to reference. + * @ref_type: The type of reference. + * @existed: Upon completion, indicates that an identical reference object + * already existed, and the refcount was upped on that object instead. + * + * Adding a ref object to a base object is basically like referencing the + * base object, but a user-space application holds the reference. When the + * file corresponding to @tfile is closed, all its reference objects are + * deleted. A reference object can have different types depending on what + * it's intended for. It can be refcounting to prevent object destruction, + * When user-space takes a lock, it can add a ref object to that lock to + * make sure the lock is released if the application dies. A ref object + * will hold a single reference on a base object. + */ +extern int ttm_ref_object_add(struct ttm_object_file *tfile, + struct ttm_base_object *base, + enum ttm_ref_type ref_type, bool *existed); +/** + * ttm_ref_object_base_unref + * + * @key: Key representing the base object. + * @ref_type: Ref type of the ref object to be dereferenced. + * + * Unreference a ref object with type @ref_type + * on the base object identified by @key. If there are no duplicate + * references, the ref object will be destroyed and the base object + * will be unreferenced. 
+ */ +extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile, + unsigned long key, + enum ttm_ref_type ref_type); + +/** + * ttm_object_file_init - initialize a struct ttm_object file + * + * @tdev: A struct ttm_object device this file is initialized on. + * @hash_order: Order of the hash table used to hold the reference objects. + * + * This is typically called by the file_ops::open function. + */ + +extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device + *tdev, + unsigned int hash_order); + +/** + * ttm_object_file_release - release data held by a ttm_object_file + * + * @p_tfile: Pointer to pointer to the ttm_object_file object to release. + * *p_tfile will be set to NULL by this function. + * + * Releases all data associated by a ttm_object_file. + * Typically called from file_ops::release. The caller must + * ensure that there are no concurrent users of tfile. + */ + +extern void ttm_object_file_release(struct ttm_object_file **p_tfile); + +/** + * ttm_object device init - initialize a struct ttm_object_device + * + * @hash_order: Order of hash table used to hash the base objects. + * + * This function is typically called on device initialization to prepare + * data structures needed for ttm base and ref objects. + */ + +extern struct ttm_object_device *ttm_object_device_init + (struct ttm_mem_global *mem_glob, unsigned int hash_order); + +/** + * ttm_object_device_release - release data held by a ttm_object_device + * + * @p_tdev: Pointer to pointer to the ttm_object_device object to release. + * *p_tdev will be set to NULL by this function. + * + * Releases all data associated by a ttm_object_device. + * Typically called from driver::unload before the destruction of the + * device private data structure. + */ + +extern void ttm_object_device_release(struct ttm_object_device **p_tdev); + +#endif -- cgit v1.2.3 From 4aff1013f5e4ae08a24155c029a2c5e1a7929de6 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Sun, 6 Dec 2009 21:46:25 +0100 Subject: drm/ttm: Add ttm lock functionality. This is intended to be used by ttm-aware drivers to 1) Block clients to inactive masters when they try to validate buffers for GPU use. 2) Optionally block clients to the current master when there is thrashing due to GPU memory shortage. Used by the vmwgfx driver. Signed-off-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- drivers/gpu/drm/ttm/Makefile | 2 +- drivers/gpu/drm/ttm/ttm_lock.c | 311 +++++++++++++++++++++++++++++++++++++++++ include/drm/ttm/ttm_lock.h | 247 ++++++++++++++++++++++++++++++++ 3 files changed, 559 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/ttm/ttm_lock.c create mode 100644 include/drm/ttm/ttm_lock.h (limited to 'include') diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile index a9cc9f895360..4473549bc5d2 100644 --- a/drivers/gpu/drm/ttm/Makefile +++ b/drivers/gpu/drm/ttm/Makefile @@ -4,6 +4,6 @@ ccflags-y := -Iinclude/drm ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \ ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \ - ttm_object.o + ttm_object.o ttm_lock.o obj-$(CONFIG_DRM_TTM) += ttm.o diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c new file mode 100644 index 000000000000..f619ebcaa4ec --- /dev/null +++ b/drivers/gpu/drm/ttm/ttm_lock.c @@ -0,0 +1,311 @@ +/************************************************************************** + * + * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ + +#include "ttm/ttm_lock.h" +#include "ttm/ttm_module.h" +#include +#include +#include +#include +#include + +#define TTM_WRITE_LOCK_PENDING (1 << 0) +#define TTM_VT_LOCK_PENDING (1 << 1) +#define TTM_SUSPEND_LOCK_PENDING (1 << 2) +#define TTM_VT_LOCK (1 << 3) +#define TTM_SUSPEND_LOCK (1 << 4) + +void ttm_lock_init(struct ttm_lock *lock) +{ + spin_lock_init(&lock->lock); + init_waitqueue_head(&lock->queue); + lock->rw = 0; + lock->flags = 0; + lock->kill_takers = false; + lock->signal = SIGKILL; +} +EXPORT_SYMBOL(ttm_lock_init); + +void ttm_read_unlock(struct ttm_lock *lock) +{ + spin_lock(&lock->lock); + if (--lock->rw == 0) + wake_up_all(&lock->queue); + spin_unlock(&lock->lock); +} +EXPORT_SYMBOL(ttm_read_unlock); + +static bool __ttm_read_lock(struct ttm_lock *lock) +{ + bool locked = false; + + spin_lock(&lock->lock); + if (unlikely(lock->kill_takers)) { + send_sig(lock->signal, current, 0); + spin_unlock(&lock->lock); + return false; + } + if (lock->rw >= 0 && lock->flags == 0) { + ++lock->rw; + locked = true; + } + spin_unlock(&lock->lock); + return locked; +} + +int ttm_read_lock(struct ttm_lock *lock, bool interruptible) +{ + int ret = 0; + + if (interruptible) + ret = wait_event_interruptible(lock->queue, + __ttm_read_lock(lock)); + else + wait_event(lock->queue, __ttm_read_lock(lock)); + return ret; +} +EXPORT_SYMBOL(ttm_read_lock); + +static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked) +{ + bool block = true; + + *locked = false; + + spin_lock(&lock->lock); + if (unlikely(lock->kill_takers)) { + send_sig(lock->signal, current, 0); + spin_unlock(&lock->lock); + return false; + } + if (lock->rw >= 0 && lock->flags == 0) { + ++lock->rw; + block = false; + *locked = true; + } else if (lock->flags == 0) { + block = false; + } + spin_unlock(&lock->lock); + + return !block; +} + +int ttm_read_trylock(struct ttm_lock *lock, bool interruptible) +{ + int ret = 0; + bool locked; + + if (interruptible) + ret = wait_event_interruptible + (lock->queue, __ttm_read_trylock(lock, &locked)); + else + wait_event(lock->queue, __ttm_read_trylock(lock, &locked)); + + if (unlikely(ret != 0)) { + BUG_ON(locked); + return ret; + } + + return (locked) ? 
0 : -EBUSY; +} + +void ttm_write_unlock(struct ttm_lock *lock) +{ + spin_lock(&lock->lock); + lock->rw = 0; + wake_up_all(&lock->queue); + spin_unlock(&lock->lock); +} +EXPORT_SYMBOL(ttm_write_unlock); + +static bool __ttm_write_lock(struct ttm_lock *lock) +{ + bool locked = false; + + spin_lock(&lock->lock); + if (unlikely(lock->kill_takers)) { + send_sig(lock->signal, current, 0); + spin_unlock(&lock->lock); + return false; + } + if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) { + lock->rw = -1; + lock->flags &= ~TTM_WRITE_LOCK_PENDING; + locked = true; + } else { + lock->flags |= TTM_WRITE_LOCK_PENDING; + } + spin_unlock(&lock->lock); + return locked; +} + +int ttm_write_lock(struct ttm_lock *lock, bool interruptible) +{ + int ret = 0; + + if (interruptible) { + ret = wait_event_interruptible(lock->queue, + __ttm_write_lock(lock)); + if (unlikely(ret != 0)) { + spin_lock(&lock->lock); + lock->flags &= ~TTM_WRITE_LOCK_PENDING; + wake_up_all(&lock->queue); + spin_unlock(&lock->lock); + } + } else + wait_event(lock->queue, __ttm_read_lock(lock)); + + return ret; +} +EXPORT_SYMBOL(ttm_write_lock); + +void ttm_write_lock_downgrade(struct ttm_lock *lock) +{ + spin_lock(&lock->lock); + lock->rw = 1; + wake_up_all(&lock->queue); + spin_unlock(&lock->lock); +} + +static int __ttm_vt_unlock(struct ttm_lock *lock) +{ + int ret = 0; + + spin_lock(&lock->lock); + if (unlikely(!(lock->flags & TTM_VT_LOCK))) + ret = -EINVAL; + lock->flags &= ~TTM_VT_LOCK; + wake_up_all(&lock->queue); + spin_unlock(&lock->lock); + printk(KERN_INFO TTM_PFX "vt unlock.\n"); + + return ret; +} + +static void ttm_vt_lock_remove(struct ttm_base_object **p_base) +{ + struct ttm_base_object *base = *p_base; + struct ttm_lock *lock = container_of(base, struct ttm_lock, base); + int ret; + + *p_base = NULL; + ret = __ttm_vt_unlock(lock); + BUG_ON(ret != 0); +} + +static bool __ttm_vt_lock(struct ttm_lock *lock) +{ + bool locked = false; + + spin_lock(&lock->lock); + if (lock->rw == 0) { + lock->flags &= ~TTM_VT_LOCK_PENDING; + lock->flags |= TTM_VT_LOCK; + locked = true; + } else { + lock->flags |= TTM_VT_LOCK_PENDING; + } + spin_unlock(&lock->lock); + return locked; +} + +int ttm_vt_lock(struct ttm_lock *lock, + bool interruptible, + struct ttm_object_file *tfile) +{ + int ret = 0; + + if (interruptible) { + ret = wait_event_interruptible(lock->queue, + __ttm_vt_lock(lock)); + if (unlikely(ret != 0)) { + spin_lock(&lock->lock); + lock->flags &= ~TTM_VT_LOCK_PENDING; + wake_up_all(&lock->queue); + spin_unlock(&lock->lock); + return ret; + } + } else + wait_event(lock->queue, __ttm_vt_lock(lock)); + + /* + * Add a base-object, the destructor of which will + * make sure the lock is released if the client dies + * while holding it. 
+ */ + + ret = ttm_base_object_init(tfile, &lock->base, false, + ttm_lock_type, &ttm_vt_lock_remove, NULL); + if (ret) + (void)__ttm_vt_unlock(lock); + else { + lock->vt_holder = tfile; + printk(KERN_INFO TTM_PFX "vt lock.\n"); + } + + return ret; +} +EXPORT_SYMBOL(ttm_vt_lock); + +int ttm_vt_unlock(struct ttm_lock *lock) +{ + return ttm_ref_object_base_unref(lock->vt_holder, + lock->base.hash.key, TTM_REF_USAGE); +} +EXPORT_SYMBOL(ttm_vt_unlock); + +void ttm_suspend_unlock(struct ttm_lock *lock) +{ + spin_lock(&lock->lock); + lock->flags &= ~TTM_SUSPEND_LOCK; + wake_up_all(&lock->queue); + spin_unlock(&lock->lock); +} + +static bool __ttm_suspend_lock(struct ttm_lock *lock) +{ + bool locked = false; + + spin_lock(&lock->lock); + if (lock->rw == 0) { + lock->flags &= ~TTM_SUSPEND_LOCK_PENDING; + lock->flags |= TTM_SUSPEND_LOCK; + locked = true; + } else { + lock->flags |= TTM_SUSPEND_LOCK_PENDING; + } + spin_unlock(&lock->lock); + return locked; +} + +void ttm_suspend_lock(struct ttm_lock *lock) +{ + wait_event(lock->queue, __ttm_suspend_lock(lock)); +} diff --git a/include/drm/ttm/ttm_lock.h b/include/drm/ttm/ttm_lock.h new file mode 100644 index 000000000000..81ba0b0b891a --- /dev/null +++ b/include/drm/ttm/ttm_lock.h @@ -0,0 +1,247 @@ +/************************************************************************** + * + * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ + +/** @file ttm_lock.h + * This file implements a simple replacement for the buffer manager use + * of the DRM heavyweight hardware lock. + * The lock is a read-write lock. Taking it in read mode and write mode + * is relatively fast, and intended for in-kernel use only. + * + * The vt mode is used only when there is a need to block all + * user-space processes from validating buffers. + * It's allowed to leave kernel space with the vt lock held. + * If a user-space process dies while having the vt-lock, + * it will be released during the file descriptor release. The vt lock + * excludes write lock and read lock. + * + * The suspend mode is used to lock out all TTM users when preparing for + * and executing suspend operations. 
+ * + */ + +#ifndef _TTM_LOCK_H_ +#define _TTM_LOCK_H_ + +#include "ttm/ttm_object.h" +#include +#include + +/** + * struct ttm_lock + * + * @base: ttm base object used solely to release the lock if the client + * holding the lock dies. + * @queue: Queue for processes waiting for lock change-of-status. + * @lock: Spinlock protecting some lock members. + * @rw: Read-write lock counter. Protected by @lock. + * @flags: Lock state. Protected by @lock. + * @kill_takers: Boolean whether to kill takers of the lock. + * @signal: Signal to send when kill_takers is true. + */ + +struct ttm_lock { + struct ttm_base_object base; + wait_queue_head_t queue; + spinlock_t lock; + int32_t rw; + uint32_t flags; + bool kill_takers; + int signal; + struct ttm_object_file *vt_holder; +}; + + +/** + * ttm_lock_init + * + * @lock: Pointer to a struct ttm_lock + * Initializes the lock. + */ +extern void ttm_lock_init(struct ttm_lock *lock); + +/** + * ttm_read_unlock + * + * @lock: Pointer to a struct ttm_lock + * + * Releases a read lock. + */ +extern void ttm_read_unlock(struct ttm_lock *lock); + +/** + * ttm_read_lock + * + * @lock: Pointer to a struct ttm_lock + * @interruptible: Interruptible sleeping while waiting for a lock. + * + * Takes the lock in read mode. + * Returns: + * -ERESTARTSYS If interrupted by a signal and interruptible is true. + */ +extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible); + +/** + * ttm_read_trylock + * + * @lock: Pointer to a struct ttm_lock + * @interruptible: Interruptible sleeping while waiting for a lock. + * + * Tries to take the lock in read mode. If the lock is already held + * in write mode, the function will return -EBUSY. If the lock is held + * in vt or suspend mode, the function will sleep until these modes + * are unlocked. + * + * Returns: + * -EBUSY The lock was already held in write mode. + * -ERESTARTSYS If interrupted by a signal and interruptible is true. + */ +extern int ttm_read_trylock(struct ttm_lock *lock, bool interruptible); + +/** + * ttm_write_unlock + * + * @lock: Pointer to a struct ttm_lock + * + * Releases a write lock. + */ +extern void ttm_write_unlock(struct ttm_lock *lock); + +/** + * ttm_write_lock + * + * @lock: Pointer to a struct ttm_lock + * @interruptible: Interruptible sleeping while waiting for a lock. + * + * Takes the lock in write mode. + * Returns: + * -ERESTARTSYS If interrupted by a signal and interruptible is true. + */ +extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible); + +/** + * ttm_lock_downgrade + * + * @lock: Pointer to a struct ttm_lock + * + * Downgrades a write lock to a read lock. + */ +extern void ttm_lock_downgrade(struct ttm_lock *lock); + +/** + * ttm_suspend_lock + * + * @lock: Pointer to a struct ttm_lock + * + * Takes the lock in suspend mode. Excludes read and write mode. + */ +extern void ttm_suspend_lock(struct ttm_lock *lock); + +/** + * ttm_suspend_unlock + * + * @lock: Pointer to a struct ttm_lock + * + * Releases a suspend lock + */ +extern void ttm_suspend_unlock(struct ttm_lock *lock); + +/** + * ttm_vt_lock + * + * @lock: Pointer to a struct ttm_lock + * @interruptible: Interruptible sleeping while waiting for a lock. + * @tfile: Pointer to a struct ttm_object_file to register the lock with. + * + * Takes the lock in vt mode. + * Returns: + * -ERESTARTSYS If interrupted by a signal and interruptible is true. + * -ENOMEM: Out of memory when locking. 
+ */ +extern int ttm_vt_lock(struct ttm_lock *lock, bool interruptible, + struct ttm_object_file *tfile); + +/** + * ttm_vt_unlock + * + * @lock: Pointer to a struct ttm_lock + * + * Releases a vt lock. + * Returns: + * -EINVAL If the lock was not held. + */ +extern int ttm_vt_unlock(struct ttm_lock *lock); + +/** + * ttm_write_unlock + * + * @lock: Pointer to a struct ttm_lock + * + * Releases a write lock. + */ +extern void ttm_write_unlock(struct ttm_lock *lock); + +/** + * ttm_write_lock + * + * @lock: Pointer to a struct ttm_lock + * @interruptible: Interruptible sleeping while waiting for a lock. + * + * Takes the lock in write mode. + * Returns: + * -ERESTARTSYS If interrupted by a signal and interruptible is true. + */ +extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible); + +/** + * ttm_lock_set_kill + * + * @lock: Pointer to a struct ttm_lock + * @val: Boolean whether to kill processes taking the lock. + * @signal: Signal to send to the process taking the lock. + * + * The kill-when-taking-lock functionality is used to kill processes that keep + * on using the TTM functionality when its resources have been taken down, for + * example when the X server exits. A typical sequence would look like this: + * - X server takes lock in write mode. + * - ttm_lock_set_kill() is called with @val set to true. + * - As part of X server exit, TTM resources are taken down. + * - X server releases the lock on file release. + * - Another dri client wants to render, takes the lock and is killed. + * + */ +static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val, + int signal) +{ + lock->kill_takers = val; + if (val) + lock->signal = signal; +} + +#endif -- cgit v1.2.3 From c078aa2fc4d8e022c3b611e07b25ff77afdf9b73 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Sun, 6 Dec 2009 21:46:26 +0100 Subject: drm/ttm: Add TTM execbuf utilities. Utilities to reserve, unreserve and fence a list of TTM buffer objects in a deadlock-safe manner. Used by the vmwgfx driver. Signed-off-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- drivers/gpu/drm/ttm/Makefile | 2 +- drivers/gpu/drm/ttm/ttm_execbuf_util.c | 117 +++++++++++++++++++++++++++++++++ include/drm/ttm/ttm_execbuf_util.h | 107 ++++++++++++++++++++++++++++++ 3 files changed, 225 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/ttm/ttm_execbuf_util.c create mode 100644 include/drm/ttm/ttm_execbuf_util.h (limited to 'include') diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile index 4473549bc5d2..1e138f5bae09 100644 --- a/drivers/gpu/drm/ttm/Makefile +++ b/drivers/gpu/drm/ttm/Makefile @@ -4,6 +4,6 @@ ccflags-y := -Iinclude/drm ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \ ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \ - ttm_object.o ttm_lock.o + ttm_object.o ttm_lock.o ttm_execbuf_util.o obj-$(CONFIG_DRM_TTM) += ttm.o diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c new file mode 100644 index 000000000000..c285c2902d15 --- /dev/null +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c @@ -0,0 +1,117 @@ +/************************************************************************** + * + * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +#include "ttm/ttm_execbuf_util.h" +#include "ttm/ttm_bo_driver.h" +#include "ttm/ttm_placement.h" +#include +#include +#include + +void ttm_eu_backoff_reservation(struct list_head *list) +{ + struct ttm_validate_buffer *entry; + + list_for_each_entry(entry, list, head) { + struct ttm_buffer_object *bo = entry->bo; + if (!entry->reserved) + continue; + + entry->reserved = false; + ttm_bo_unreserve(bo); + } +} +EXPORT_SYMBOL(ttm_eu_backoff_reservation); + +/* + * Reserve buffers for validation. + * + * If a buffer in the list is marked for CPU access, we back off and + * wait for that buffer to become free for GPU access. + * + * If a buffer is reserved for another validation, the validator with + * the highest validation sequence backs off and waits for that buffer + * to become unreserved. This prevents deadlocks when validating multiple + * buffers in different orders. 
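As a concrete illustration of the reservation scheme described in the comment above, a driver submission path would roughly follow the sketch below; the validate list is assumed to already hold struct ttm_validate_buffer entries, and the function name and command-submission step are illustrative, not taken from the patch:

    /* Hedged sketch of typical use of the execbuf utilities. */
    static int example_submit(struct list_head *val_list, uint32_t val_seq,
                              void *sync_obj)
    {
            int ret;

            ret = ttm_eu_reserve_buffers(val_list, val_seq);
            if (unlikely(ret != 0))
                    return ret;     /* nothing remains reserved on failure */

            /* ... build and fire the command stream here (driver specific);
             * if submission fails before fencing,
             * ttm_eu_backoff_reservation(val_list) undoes the reservations. */

            ttm_eu_fence_buffer_objects(val_list, sync_obj);
            return 0;
    }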
+ */ + +int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq) +{ + struct ttm_validate_buffer *entry; + int ret; + +retry: + list_for_each_entry(entry, list, head) { + struct ttm_buffer_object *bo = entry->bo; + + entry->reserved = false; + ret = ttm_bo_reserve(bo, true, false, true, val_seq); + if (ret != 0) { + ttm_eu_backoff_reservation(list); + if (ret == -EAGAIN) { + ret = ttm_bo_wait_unreserved(bo, true); + if (unlikely(ret != 0)) + return ret; + goto retry; + } else + return ret; + } + + entry->reserved = true; + if (unlikely(atomic_read(&bo->cpu_writers) > 0)) { + ttm_eu_backoff_reservation(list); + ret = ttm_bo_wait_cpu(bo, false); + if (ret) + return ret; + goto retry; + } + } + return 0; +} +EXPORT_SYMBOL(ttm_eu_reserve_buffers); + +void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj) +{ + struct ttm_validate_buffer *entry; + + list_for_each_entry(entry, list, head) { + struct ttm_buffer_object *bo = entry->bo; + struct ttm_bo_driver *driver = bo->bdev->driver; + void *old_sync_obj; + + spin_lock(&bo->lock); + old_sync_obj = bo->sync_obj; + bo->sync_obj = driver->sync_obj_ref(sync_obj); + bo->sync_obj_arg = entry->new_sync_obj_arg; + spin_unlock(&bo->lock); + ttm_bo_unreserve(bo); + entry->reserved = false; + if (old_sync_obj) + driver->sync_obj_unref(&old_sync_obj); + } +} +EXPORT_SYMBOL(ttm_eu_fence_buffer_objects); diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h new file mode 100644 index 000000000000..cd2c475da9ea --- /dev/null +++ b/include/drm/ttm/ttm_execbuf_util.h @@ -0,0 +1,107 @@ +/************************************************************************** + * + * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ + +#ifndef _TTM_EXECBUF_UTIL_H_ +#define _TTM_EXECBUF_UTIL_H_ + +#include "ttm/ttm_bo_api.h" +#include + +/** + * struct ttm_validate_buffer + * + * @head: list head for thread-private list. + * @bo: refcounted buffer object pointer. + * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once + * adding a new sync object. + * @reserved: Indicates whether @bo has been reserved for validation.
+ */ + +struct ttm_validate_buffer { + struct list_head head; + struct ttm_buffer_object *bo; + void *new_sync_obj_arg; + bool reserved; +}; + +/** + * function ttm_eu_backoff_reservation + * + * @list: thread private list of ttm_validate_buffer structs. + * + * Undoes all buffer validation reservations for bos pointed to by + * the list entries. + */ + +extern void ttm_eu_backoff_reservation(struct list_head *list); + +/** + * function ttm_eu_reserve_buffers + * + * @list: thread private list of ttm_validate_buffer structs. + * @val_seq: A unique sequence number. + * + * Tries to reserve bos pointed to by the list entries for validation. + * If the function returns 0, all buffers are marked as "unfenced", + * taken off the lru lists and are not synced for write CPU usage. + * + * If the function detects a deadlock due to multiple threads trying to + * reserve the same buffers in reverse order, all threads except one will + * back off and retry. This function may sleep while waiting for + * CPU write reservations to be cleared, and for other threads to + * unreserve their buffers. + * + * This function may return -ERESTART or -EAGAIN if the calling process + * receives a signal while waiting. In that case, no buffers on the list + * will be reserved upon return. + * + * Buffers reserved by this function should be unreserved by + * a call to either ttm_eu_backoff_reservation() or + * ttm_eu_fence_buffer_objects() when command submission is complete or + * has failed. + */ + +extern int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq); + +/** + * function ttm_eu_fence_buffer_objects. + * + * @list: thread private list of ttm_validate_buffer structs. + * @sync_obj: The new sync object for the buffers. + * + * This function should be called when command submission is complete, and + * it will add a new sync object to bos pointed to by entries on @list. + * It also unreserves all buffers, putting them on lru lists. + * + */ + +extern void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj); + +#endif -- cgit v1.2.3 From 4bfd75cb08a362cb1df35dc6a5032d12843c6d87 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Sun, 6 Dec 2009 21:46:27 +0100 Subject: drm/ttm: Export symbols needed for the vmwgfx driver. 
Signed-off-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- drivers/gpu/drm/ttm/ttm_bo_util.c | 1 + drivers/gpu/drm/ttm/ttm_memory.c | 3 +++ drivers/gpu/drm/ttm/ttm_tt.c | 1 + include/drm/ttm/ttm_bo_driver.h | 9 +++++++++ 4 files changed, 14 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index c70927ecda21..ceae52f45c39 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -369,6 +369,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp) #endif return tmp; } +EXPORT_SYMBOL(ttm_io_prot); static int ttm_bo_ioremap(struct ttm_buffer_object *bo, unsigned long bus_base, diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c index 336976e8652d..8bfde5f40841 100644 --- a/drivers/gpu/drm/ttm/ttm_memory.c +++ b/drivers/gpu/drm/ttm/ttm_memory.c @@ -461,6 +461,7 @@ void ttm_mem_global_free(struct ttm_mem_global *glob, { return ttm_mem_global_free_zone(glob, NULL, amount); } +EXPORT_SYMBOL(ttm_mem_global_free); static int ttm_mem_global_reserve(struct ttm_mem_global *glob, struct ttm_mem_zone *single_zone, @@ -534,6 +535,7 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait, interruptible); } +EXPORT_SYMBOL(ttm_mem_global_alloc); int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, struct page *page, @@ -589,3 +591,4 @@ size_t ttm_round_pot(size_t size) } return 0; } +EXPORT_SYMBOL(ttm_round_pot); diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index a55ee1a56c16..90ea46aed050 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -192,6 +192,7 @@ int ttm_tt_populate(struct ttm_tt *ttm) ttm->state = tt_unbound; return 0; } +EXPORT_SYMBOL(ttm_tt_populate); #ifdef CONFIG_X86 static inline int ttm_tt_set_page_caching(struct page *p, diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index e8cd6d20aed2..7a39ab9aa1d1 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -544,6 +544,15 @@ extern int ttm_tt_set_user(struct ttm_tt *ttm, */ extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem); +/** + * ttm_tt_populate: + * + * @ttm: The struct ttm_tt to contain the backing pages. + * + * Add backing pages to all of @ttm + */ +extern int ttm_tt_populate(struct ttm_tt *ttm); + /** * ttm_ttm_destroy: * -- cgit v1.2.3 From ed872d09effd54aa8ecb4ceedbc4dbab9592f337 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Mon, 7 Dec 2009 03:14:17 +0100 Subject: hw-breakpoints: Zero the breakpoint attrs on initialization The perf attrs used to set up breakpoint parameters are often allocated on the stack and not zeroed out before calling hw_breakpoint_init(). Handle it from this helper to avoid random attributes set by the stack.
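As an illustration of the problem being fixed, consider a caller that builds its breakpoint attributes on the stack; the watched address and the registration call named below are only indicative and not part of this patch:

    /* Sketch only: without the added memset(), any field not set here
     * would keep whatever garbage the stack happened to contain. */
    static void example_setup_data_breakpoint(void *addr)
    {
            struct perf_event_attr attr;    /* stack allocation, contents undefined */

            hw_breakpoint_init(&attr);      /* now zeroes, then sets type and size */
            attr.bp_addr = (unsigned long)addr;
            attr.bp_len  = HW_BREAKPOINT_LEN_4;
            attr.bp_type = HW_BREAKPOINT_W;

            /* attr is then handed to the breakpoint registration API,
             * e.g. register_wide_hw_breakpoint() in kernels of this era. */
    }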
Signed-off-by: Frederic Weisbecker Cc: Prasad --- include/linux/hw_breakpoint.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h index 4d14a384a01e..42da1ce19ec0 100644 --- a/include/linux/hw_breakpoint.h +++ b/include/linux/hw_breakpoint.h @@ -22,6 +22,8 @@ enum { static inline void hw_breakpoint_init(struct perf_event_attr *attr) { + memset(attr, 0, sizeof(*attr)); + attr->type = PERF_TYPE_BREAKPOINT; attr->size = sizeof(*attr); /* -- cgit v1.2.3 From e15c1c1f3f903f679c9782b540f9d52c80c99610 Mon Sep 17 00:00:00 2001 From: Dominik Brodowski Date: Sat, 28 Nov 2009 18:12:06 +0100 Subject: pcmcia: remove unused IRQ_FIRST_SHARED Komuro pointed out that IRQ_FIRST_SHARED is not used at all in the PCMCIA subsystem, so remove it. Also, remove two bogus assignments. CC: Karsten Keil CC: netdev@vger.kernel.org CC: alsa-devel@alsa-project.org CC: Komuro Signed-off-by: Dominik Brodowski --- drivers/isdn/hardware/avm/avm_cs.c | 3 +-- drivers/isdn/hisax/avma1_cs.c | 3 +-- drivers/isdn/hisax/elsa_cs.c | 2 +- drivers/isdn/hisax/sedlbauer_cs.c | 2 +- drivers/isdn/hisax/teles_cs.c | 2 +- drivers/net/pcmcia/axnet_cs.c | 2 +- drivers/net/pcmcia/fmvj18x_cs.c | 2 +- drivers/net/pcmcia/pcnet_cs.c | 2 +- drivers/net/pcmcia/smc91c92_cs.c | 2 +- drivers/net/pcmcia/xirc2ps_cs.c | 2 +- include/pcmcia/cs.h | 4 ++-- sound/pcmcia/pdaudiocf/pdaudiocf.c | 3 ++- 12 files changed, 14 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/drivers/isdn/hardware/avm/avm_cs.c b/drivers/isdn/hardware/avm/avm_cs.c index 5a6ae646a636..94b796d84053 100644 --- a/drivers/isdn/hardware/avm/avm_cs.c +++ b/drivers/isdn/hardware/avm/avm_cs.c @@ -108,8 +108,7 @@ static int avmcs_probe(struct pcmcia_device *p_dev) p_dev->io.NumPorts2 = 0; /* Interrupt setup */ - p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE; - p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; + p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; /* General socket configuration */ p_dev->conf.Attributes = CONF_ENABLE_IRQ; diff --git a/drivers/isdn/hisax/avma1_cs.c b/drivers/isdn/hisax/avma1_cs.c index f9bdff39cf4a..e5deb15cf40c 100644 --- a/drivers/isdn/hisax/avma1_cs.c +++ b/drivers/isdn/hisax/avma1_cs.c @@ -120,8 +120,7 @@ static int avma1cs_probe(struct pcmcia_device *p_dev) p_dev->io.IOAddrLines = 5; /* Interrupt setup */ - p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE; - p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; + p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; /* General socket configuration */ p_dev->conf.Attributes = CONF_ENABLE_IRQ; diff --git a/drivers/isdn/hisax/elsa_cs.c b/drivers/isdn/hisax/elsa_cs.c index a2f709f53974..c9a30b1c9237 100644 --- a/drivers/isdn/hisax/elsa_cs.c +++ b/drivers/isdn/hisax/elsa_cs.c @@ -137,7 +137,7 @@ static int elsa_cs_probe(struct pcmcia_device *link) local->cardnr = -1; /* Interrupt setup */ - link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; + link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; link->irq.Handler = NULL; /* diff --git a/drivers/isdn/hisax/sedlbauer_cs.c b/drivers/isdn/hisax/sedlbauer_cs.c index af5d393cc2d0..7836ec3c7f86 100644 --- a/drivers/isdn/hisax/sedlbauer_cs.c +++ b/drivers/isdn/hisax/sedlbauer_cs.c @@ -144,7 +144,7 @@ static int sedlbauer_probe(struct pcmcia_device *link) link->priv = local; /* Interrupt setup */ - link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; + link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; link->irq.Handler = NULL; /* 
diff --git a/drivers/isdn/hisax/teles_cs.c b/drivers/isdn/hisax/teles_cs.c index ea705394ce2b..b0c5976cbdb3 100644 --- a/drivers/isdn/hisax/teles_cs.c +++ b/drivers/isdn/hisax/teles_cs.c @@ -127,7 +127,7 @@ static int teles_probe(struct pcmcia_device *link) link->priv = local; /* Interrupt setup */ - link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; + link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; link->irq.Handler = NULL; /* diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c index 800597b82d18..cb2ce03bd681 100644 --- a/drivers/net/pcmcia/axnet_cs.c +++ b/drivers/net/pcmcia/axnet_cs.c @@ -270,7 +270,7 @@ static int try_io_port(struct pcmcia_device *link) /* for master/slave multifunction cards */ link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; link->irq.Attributes = - IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; + IRQ_TYPE_DYNAMIC_SHARING; } } else { /* This should be two 16-port windows */ diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c index 6e3e1ced6db4..fc5a890ba0a9 100644 --- a/drivers/net/pcmcia/fmvj18x_cs.c +++ b/drivers/net/pcmcia/fmvj18x_cs.c @@ -426,7 +426,7 @@ static int fmvj18x_config(struct pcmcia_device *link) if (link->io.NumPorts2 != 0) { link->irq.Attributes = - IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; + IRQ_TYPE_DYNAMIC_SHARING; ret = mfc_try_io_port(link); if (ret != 0) goto failed; } else if (cardtype == UNGERMANN) { diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index cbe462ed221f..f41fbcc34327 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c @@ -490,7 +490,7 @@ static int try_io_port(struct pcmcia_device *link) /* for master/slave multifunction cards */ link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; link->irq.Attributes = - IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; + IRQ_TYPE_DYNAMIC_SHARING; } } else { /* This should be two 16-port windows */ diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c index 9e0da370912e..49185c3c1201 100644 --- a/drivers/net/pcmcia/smc91c92_cs.c +++ b/drivers/net/pcmcia/smc91c92_cs.c @@ -454,7 +454,7 @@ static int mhz_mfc_config(struct pcmcia_device *link) link->conf.Attributes |= CONF_ENABLE_SPKR; link->conf.Status = CCSR_AUDIO_ENA; link->irq.Attributes = - IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; + IRQ_TYPE_DYNAMIC_SHARING; link->io.IOAddrLines = 16; link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; link->io.NumPorts2 = 8; diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c index fe504b7f369f..55fa5ca245d1 100644 --- a/drivers/net/pcmcia/xirc2ps_cs.c +++ b/drivers/net/pcmcia/xirc2ps_cs.c @@ -841,7 +841,7 @@ xirc2ps_config(struct pcmcia_device * link) link->conf.Attributes |= CONF_ENABLE_SPKR; link->conf.Status |= CCSR_AUDIO_ENA; } - link->irq.Attributes |= IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED ; + link->irq.Attributes |= IRQ_TYPE_DYNAMIC_SHARING; link->io.NumPorts2 = 8; link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; if (local->dingo) { diff --git a/include/pcmcia/cs.h b/include/pcmcia/cs.h index afc2bfb9e917..75fa3530345b 100644 --- a/include/pcmcia/cs.h +++ b/include/pcmcia/cs.h @@ -126,8 +126,8 @@ typedef struct irq_req_t { #define IRQ_TYPE_TIME 0x01 #define IRQ_TYPE_DYNAMIC_SHARING 0x02 #define IRQ_FORCED_PULSE 0x04 -#define IRQ_FIRST_SHARED 0x08 -//#define IRQ_HANDLE_PRESENT 0x10 +#define IRQ_FIRST_SHARED 0x08 /* unused */ +#define IRQ_HANDLE_PRESENT 0x10 /* unused */ #define IRQ_PULSE_ALLOCATED 0x100 /* Bits in IRQInfo1 field */ diff --git 
a/sound/pcmcia/pdaudiocf/pdaudiocf.c b/sound/pcmcia/pdaudiocf/pdaudiocf.c index 7717e01fc071..edaa729126bb 100644 --- a/sound/pcmcia/pdaudiocf/pdaudiocf.c +++ b/sound/pcmcia/pdaudiocf/pdaudiocf.c @@ -143,7 +143,8 @@ static int snd_pdacf_probe(struct pcmcia_device *link) link->io.NumPorts1 = 16; link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_FORCED_PULSE; - // link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; + /* FIXME: This driver should be updated to allow for dynamic IRQ sharing */ + /* link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_FORCED_PULSE; */ link->irq.Handler = pdacf_interrupt; link->conf.Attributes = CONF_ENABLE_IRQ; -- cgit v1.2.3 From 9fea84f46a821aa1ff2d034ffda8ad33bff48015 Mon Sep 17 00:00:00 2001 From: Dominik Brodowski Date: Mon, 7 Dec 2009 22:11:45 +0100 Subject: pcmcia: CodingStyle fixes Fix several CodingStyle issues in drivers/pcmcia/ . checkpatch.pl no longer reports errors in the PCMCIA core. The remaining warnings mostly relate to wrong indent -- PCMCIA historically used 4 spaces --, to lines over 80 characters and to hundreds of typedefs. The cleanup of those will follow in the future. Signed-off-by: Dominik Brodowski --- drivers/pcmcia/Kconfig | 6 +- drivers/pcmcia/cardbus.c | 23 +-- drivers/pcmcia/cistpl.c | 181 +++++++++++++--------- drivers/pcmcia/cs.c | 12 +- drivers/pcmcia/ds.c | 66 ++++---- drivers/pcmcia/pcmcia_ioctl.c | 45 +++--- drivers/pcmcia/pcmcia_resource.c | 37 ++--- drivers/pcmcia/rsrc_mgr.c | 14 +- drivers/pcmcia/rsrc_nonstatic.c | 315 ++++++++++++++++++++------------------- drivers/pcmcia/socket_sysfs.c | 6 +- drivers/pcmcia/yenta_socket.c | 147 +++++++++++------- include/pcmcia/ds.h | 6 +- include/pcmcia/mem_op.h | 2 +- include/pcmcia/ss.h | 12 +- 14 files changed, 485 insertions(+), 387 deletions(-) (limited to 'include') diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig index f3ccbccf5f21..6dc22770d8bc 100644 --- a/drivers/pcmcia/Kconfig +++ b/drivers/pcmcia/Kconfig @@ -64,7 +64,7 @@ config PCMCIA_IOCTL If unsure, say Y. config CARDBUS - bool "32-bit CardBus support" + bool "32-bit CardBus support" depends on PCI default y ---help--- @@ -87,8 +87,8 @@ config YENTA select PCCARD_NONSTATIC ---help--- This option enables support for CardBus host bridges. Virtually - all modern PCMCIA bridges are CardBus compatible. A "bridge" is - the hardware inside your computer that PCMCIA cards are plugged + all modern PCMCIA bridges are CardBus compatible. A "bridge" is + the hardware inside your computer that PCMCIA cards are plugged into. To compile this driver as modules, choose M here: the diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c index 4cd70d056810..5e9b636cbf71 100644 --- a/drivers/pcmcia/cardbus.c +++ b/drivers/pcmcia/cardbus.c @@ -27,8 +27,8 @@ #include #include #include +#include #include -#include #include #include @@ -58,7 +58,7 @@ image number and an offset within that image. xlate_rom_addr() converts an image/offset address to an absolute offset from the ROM's base address. - + =====================================================================*/ static u_int xlate_rom_addr(void __iomem *b, u_int addr) @@ -85,10 +85,10 @@ static u_int xlate_rom_addr(void __iomem *b, u_int addr) These are similar to setup_cis_mem and release_cis_mem for 16-bit cards. The "result" that is used externally is the cb_cis_virt pointer in the struct pcmcia_socket structure. 
- + =====================================================================*/ -static void cb_release_cis_mem(struct pcmcia_socket * s) +static void cb_release_cis_mem(struct pcmcia_socket *s) { if (s->cb_cis_virt) { dev_dbg(&s->dev, "cb_release_cis_mem()\n"); @@ -98,7 +98,7 @@ static void cb_release_cis_mem(struct pcmcia_socket * s) } } -static int cb_setup_cis_mem(struct pcmcia_socket * s, struct resource *res) +static int cb_setup_cis_mem(struct pcmcia_socket *s, struct resource *res) { unsigned int start, size; @@ -124,10 +124,11 @@ static int cb_setup_cis_mem(struct pcmcia_socket * s, struct resource *res) This is used by the CIS processing code to read CIS information from a CardBus device. - + =====================================================================*/ -int read_cb_mem(struct pcmcia_socket * s, int space, u_int addr, u_int len, void *ptr) +int read_cb_mem(struct pcmcia_socket *s, int space, u_int addr, u_int len, + void *ptr) { struct pci_dev *dev; struct resource *res; @@ -181,7 +182,7 @@ fail: cb_alloc() and cb_free() allocate and free the kernel data structures for a Cardbus device, and handle the lowest level PCI device setup issues. - + =====================================================================*/ /* @@ -207,14 +208,14 @@ static void cardbus_assign_irqs(struct pci_bus *bus, int irq) } } -int __ref cb_alloc(struct pcmcia_socket * s) +int __ref cb_alloc(struct pcmcia_socket *s) { struct pci_bus *bus = s->cb_dev->subordinate; struct pci_dev *dev; unsigned int max, pass; s->functions = pci_scan_slot(bus, PCI_DEVFN(0, 0)); -// pcibios_fixup_bus(bus); +/* pcibios_fixup_bus(bus); */ max = bus->secondary; for (pass = 0; pass < 2; pass++) @@ -241,7 +242,7 @@ int __ref cb_alloc(struct pcmcia_socket * s) return 0; } -void cb_free(struct pcmcia_socket * s) +void cb_free(struct pcmcia_socket *s) { struct pci_dev *bridge = s->cb_dev; diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c index 8c1b73cf021b..25b1cd219e37 100644 --- a/drivers/pcmcia/cistpl.c +++ b/drivers/pcmcia/cistpl.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include @@ -125,7 +125,7 @@ set_cis_map(struct pcmcia_socket *s, unsigned int card_offset, unsigned int flag Low-level functions to read and write CIS memory. I think the write routine is only useful for writing one-byte registers. - + ======================================================================*/ /* Bits in attr field */ @@ -137,7 +137,7 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr, { void __iomem *sys, *end; unsigned char *buf = ptr; - + dev_dbg(&s->dev, "pcmcia_read_cis_mem(%d, %#x, %u)\n", attr, addr, len); if (attr & IS_INDIRECT) { @@ -203,7 +203,7 @@ void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr, { void __iomem *sys, *end; unsigned char *buf = ptr; - + dev_dbg(&s->dev, "pcmcia_write_cis_mem(%d, %#x, %u)\n", attr, addr, len); if (attr & IS_INDIRECT) { @@ -262,7 +262,7 @@ EXPORT_SYMBOL(pcmcia_write_cis_mem); This is a wrapper around read_cis_mem, with the same interface, but which caches information, for cards whose CIS may not be readable all the time. - + ======================================================================*/ static void read_cis_cache(struct pcmcia_socket *s, int attr, u_int addr, @@ -342,7 +342,7 @@ EXPORT_SYMBOL(destroy_cis_cache); This verifies if the CIS of a card matches what is in the CIS cache. 
- + ======================================================================*/ int verify_cis_cache(struct pcmcia_socket *s) @@ -381,7 +381,7 @@ int verify_cis_cache(struct pcmcia_socket *s) For really bad cards, we provide a facility for uploading a replacement CIS. - + ======================================================================*/ int pcmcia_replace_cis(struct pcmcia_socket *s, @@ -406,7 +406,7 @@ EXPORT_SYMBOL(pcmcia_replace_cis); /*====================================================================== The high-level CIS tuple services - + ======================================================================*/ typedef struct tuple_flags { @@ -421,8 +421,6 @@ typedef struct tuple_flags { #define MFC_FN(f) (((tuple_flags *)(&(f)))->mfc_fn) #define SPACE(f) (((tuple_flags *)(&(f)))->space) -int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int func, tuple_t *tuple); - int pccard_get_first_tuple(struct pcmcia_socket *s, unsigned int function, tuple_t *tuple) { if (!s) @@ -523,10 +521,11 @@ int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function, tuple_ ofs++; continue; } } - + /* End of chain? Follow long link if possible */ if (link[0] == CISTPL_END) { - if ((ofs = follow_link(s, tuple)) < 0) + ofs = follow_link(s, tuple); + if (ofs < 0) return -ENOSPC; attr = SPACE(tuple->Flags); read_cis_cache(s, attr, ofs, 2, link); @@ -578,7 +577,7 @@ int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function, tuple_ } else if (tuple->DesiredTuple == RETURN_FIRST_TUPLE) break; - + if (link[0] == tuple->DesiredTuple) break; ofs += link[1] + 2; @@ -587,7 +586,7 @@ int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function, tuple_ dev_dbg(&s->dev, "cs: overrun in pcmcia_get_next_tuple\n"); return -ENOSPC; } - + tuple->TupleCode = link[0]; tuple->TupleLink = link[1]; tuple->CISOffset = ofs + 2; @@ -623,7 +622,7 @@ EXPORT_SYMBOL(pccard_get_tuple_data); /*====================================================================== Parsing routines for individual tuples - + ======================================================================*/ static int parse_device(tuple_t *tuple, cistpl_device_t *device) @@ -637,26 +636,37 @@ static int parse_device(tuple_t *tuple, cistpl_device_t *device) device->ndev = 0; for (i = 0; i < CISTPL_MAX_DEVICES; i++) { - - if (*p == 0xff) break; + + if (*p == 0xff) + break; device->dev[i].type = (*p >> 4); device->dev[i].wp = (*p & 0x08) ? 
1 : 0; switch (*p & 0x07) { - case 0: device->dev[i].speed = 0; break; - case 1: device->dev[i].speed = 250; break; - case 2: device->dev[i].speed = 200; break; - case 3: device->dev[i].speed = 150; break; - case 4: device->dev[i].speed = 100; break; + case 0: + device->dev[i].speed = 0; + break; + case 1: + device->dev[i].speed = 250; + break; + case 2: + device->dev[i].speed = 200; + break; + case 3: + device->dev[i].speed = 150; + break; + case 4: + device->dev[i].speed = 100; + break; case 7: - if (++p == q) - return -EINVAL; - device->dev[i].speed = SPEED_CVT(*p); - while (*p & 0x80) if (++p == q) return -EINVAL; - break; + device->dev[i].speed = SPEED_CVT(*p); + while (*p & 0x80) + if (++p == q) + return -EINVAL; + break; default: - return -EINVAL; + return -EINVAL; } if (++p == q) @@ -671,7 +681,7 @@ static int parse_device(tuple_t *tuple, cistpl_device_t *device) if (++p == q) break; } - + return 0; } @@ -706,9 +716,9 @@ static int parse_longlink_mfc(tuple_t *tuple, { u_char *p; int i; - + p = (u_char *)tuple->TupleData; - + link->nfn = *p; p++; if (tuple->TupleDataLen <= link->nfn*5) return -EINVAL; @@ -737,11 +747,13 @@ static int parse_strings(u_char *p, u_char *q, int max, ns++; for (;;) { s[j++] = (*p == 0xff) ? '\0' : *p; - if ((*p == '\0') || (*p == 0xff)) break; + if ((*p == '\0') || (*p == 0xff)) + break; if (++p == q) return -EINVAL; } - if ((*p == 0xff) || (++p == q)) break; + if ((*p == 0xff) || (++p == q)) + break; } if (found) { *found = ns; @@ -756,10 +768,10 @@ static int parse_strings(u_char *p, u_char *q, int max, static int parse_vers_1(tuple_t *tuple, cistpl_vers_1_t *vers_1) { u_char *p, *q; - + p = (u_char *)tuple->TupleData; q = p + tuple->TupleDataLen; - + vers_1->major = *p; p++; vers_1->minor = *p; p++; if (p >= q) @@ -774,10 +786,10 @@ static int parse_vers_1(tuple_t *tuple, cistpl_vers_1_t *vers_1) static int parse_altstr(tuple_t *tuple, cistpl_altstr_t *altstr) { u_char *p, *q; - + p = (u_char *)tuple->TupleData; q = p + tuple->TupleDataLen; - + return parse_strings(p, q, CISTPL_MAX_ALTSTR_STRINGS, altstr->str, altstr->ofs, &altstr->ns); } @@ -793,7 +805,8 @@ static int parse_jedec(tuple_t *tuple, cistpl_jedec_t *jedec) q = p + tuple->TupleDataLen; for (nid = 0; nid < CISTPL_MAX_DEVICES; nid++) { - if (p > q-2) break; + if (p > q-2) + break; jedec->id[nid].mfr = p[0]; jedec->id[nid].info = p[1]; p += 2; @@ -871,7 +884,7 @@ static int parse_config(tuple_t *tuple, cistpl_config_t *config) The following routines are all used to parse the nightmarish config table entries. 
- + ======================================================================*/ static u_char *parse_power(u_char *p, u_char *q, @@ -880,17 +893,20 @@ static u_char *parse_power(u_char *p, u_char *q, int i; u_int scale; - if (p == q) return NULL; + if (p == q) + return NULL; pwr->present = *p; pwr->flags = 0; p++; for (i = 0; i < 7; i++) if (pwr->present & (1<param[i] = POWER_CVT(*p); scale = POWER_SCALE(*p); while (*p & 0x80) { - if (++p == q) return NULL; + if (++p == q) + return NULL; if ((*p & 0x7f) < 100) pwr->param[i] += (*p & 0x7f) * scale / 100; else if (*p == 0x7d) @@ -914,24 +930,28 @@ static u_char *parse_timing(u_char *p, u_char *q, { u_char scale; - if (p == q) return NULL; + if (p == q) + return NULL; scale = *p; if ((scale & 3) != 3) { - if (++p == q) return NULL; + if (++p == q) + return NULL; timing->wait = SPEED_CVT(*p); timing->waitscale = exponent[scale & 3]; } else timing->wait = 0; scale >>= 2; if ((scale & 7) != 7) { - if (++p == q) return NULL; + if (++p == q) + return NULL; timing->ready = SPEED_CVT(*p); timing->rdyscale = exponent[scale & 7]; } else timing->ready = 0; scale >>= 3; if (scale != 7) { - if (++p == q) return NULL; + if (++p == q) + return NULL; timing->reserved = SPEED_CVT(*p); timing->rsvscale = exponent[scale]; } else @@ -946,7 +966,8 @@ static u_char *parse_io(u_char *p, u_char *q, cistpl_io_t *io) { int i, j, bsz, lsz; - if (p == q) return NULL; + if (p == q) + return NULL; io->flags = *p; if (!(*p & 0x80)) { @@ -955,24 +976,29 @@ static u_char *parse_io(u_char *p, u_char *q, cistpl_io_t *io) io->win[0].len = (1 << (io->flags & CISTPL_IO_LINES_MASK)); return p+1; } - - if (++p == q) return NULL; + + if (++p == q) + return NULL; io->nwin = (*p & 0x0f) + 1; bsz = (*p & 0x30) >> 4; - if (bsz == 3) bsz++; + if (bsz == 3) + bsz++; lsz = (*p & 0xc0) >> 6; - if (lsz == 3) lsz++; + if (lsz == 3) + lsz++; p++; - + for (i = 0; i < io->nwin; i++) { io->win[i].base = 0; io->win[i].len = 1; for (j = 0; j < bsz; j++, p++) { - if (p == q) return NULL; + if (p == q) + return NULL; io->win[i].base += *p << (j*8); } for (j = 0; j < lsz; j++, p++) { - if (p == q) return NULL; + if (p == q) + return NULL; io->win[i].len += *p << (j*8); } } @@ -986,27 +1012,32 @@ static u_char *parse_mem(u_char *p, u_char *q, cistpl_mem_t *mem) int i, j, asz, lsz, has_ha; u_int len, ca, ha; - if (p == q) return NULL; + if (p == q) + return NULL; mem->nwin = (*p & 0x07) + 1; lsz = (*p & 0x18) >> 3; asz = (*p & 0x60) >> 5; has_ha = (*p & 0x80); - if (++p == q) return NULL; - + if (++p == q) + return NULL; + for (i = 0; i < mem->nwin; i++) { len = ca = ha = 0; for (j = 0; j < lsz; j++, p++) { - if (p == q) return NULL; + if (p == q) + return NULL; len += *p << (j*8); } for (j = 0; j < asz; j++, p++) { - if (p == q) return NULL; + if (p == q) + return NULL; ca += *p << (j*8); } if (has_ha) for (j = 0; j < asz; j++, p++) { - if (p == q) return NULL; + if (p == q) + return NULL; ha += *p << (j*8); } mem->win[i].len = len << 8; @@ -1095,7 +1126,7 @@ static int parse_cftable_entry(tuple_t *tuple, entry->timing.ready = 0; entry->timing.reserved = 0; } - + /* I/O window options */ if (features & 0x08) { p = parse_io(p, q, &entry->io); @@ -1103,7 +1134,7 @@ static int parse_cftable_entry(tuple_t *tuple, return -EINVAL; } else entry->io.nwin = 0; - + /* Interrupt options */ if (features & 0x10) { p = parse_irq(p, q, &entry->irq); @@ -1153,7 +1184,7 @@ static int parse_cftable_entry(tuple_t *tuple, } entry->subtuples = q-p; - + return 0; } @@ -1176,7 +1207,7 @@ static int parse_bar(tuple_t *tuple, 
cistpl_bar_t *bar) static int parse_config_cb(tuple_t *tuple, cistpl_config_t *config) { u_char *p; - + p = (u_char *)tuple->TupleData; if ((*p != 3) || (tuple->TupleDataLen < 6)) return -EINVAL; @@ -1231,7 +1262,7 @@ static int parse_cftable_entry_cb(tuple_t *tuple, entry->io = *p; p++; } else entry->io = 0; - + /* Interrupt options */ if (features & 0x10) { p = parse_irq(p, q, &entry->irq); @@ -1264,7 +1295,7 @@ static int parse_cftable_entry_cb(tuple_t *tuple, } entry->subtuples = q-p; - + return 0; } @@ -1281,7 +1312,8 @@ static int parse_device_geo(tuple_t *tuple, cistpl_device_geo_t *geo) q = p + tuple->TupleDataLen; for (n = 0; n < CISTPL_MAX_DEVICES; n++) { - if (p > q-6) break; + if (p > q-6) + break; geo->geo[n].buswidth = p[0]; geo->geo[n].erase_block = 1 << (p[1]-1); geo->geo[n].read_block = 1 << (p[2]-1); @@ -1302,13 +1334,13 @@ static int parse_vers_2(tuple_t *tuple, cistpl_vers_2_t *v2) if (tuple->TupleDataLen < 10) return -EINVAL; - + p = tuple->TupleData; q = p + tuple->TupleDataLen; v2->vers = p[0]; v2->comply = p[1]; - v2->dindex = get_unaligned_le16(p +2 ); + v2->dindex = get_unaligned_le16(p + 2); v2->vspec8 = p[6]; v2->vspec9 = p[7]; v2->nhdr = p[8]; @@ -1322,7 +1354,7 @@ static int parse_org(tuple_t *tuple, cistpl_org_t *org) { u_char *p, *q; int i; - + p = tuple->TupleData; q = p + tuple->TupleDataLen; if (p == q) @@ -1332,7 +1364,8 @@ static int parse_org(tuple_t *tuple, cistpl_org_t *org) return -EINVAL; for (i = 0; i < 30; i++) { org->desc[i] = *p; - if (*p == '\0') break; + if (*p == '\0') + break; if (++p == q) return -EINVAL; } @@ -1363,7 +1396,7 @@ static int parse_format(tuple_t *tuple, cistpl_format_t *fmt) int pcmcia_parse_tuple(tuple_t *tuple, cisparse_t *parse) { int ret = 0; - + if (tuple->TupleDataLen > tuple->TupleDataMax) return -EINVAL; switch (tuple->TupleCode) { @@ -1448,7 +1481,7 @@ EXPORT_SYMBOL(pcmcia_parse_tuple); /*====================================================================== This is used internally by Card Services to look up CIS stuff. - + ======================================================================*/ int pccard_read_tuple(struct pcmcia_socket *s, unsigned int function, cisdata_t code, void *parse) @@ -1550,7 +1583,7 @@ EXPORT_SYMBOL(pccard_loop_tuple); checks include making sure several critical tuples are present and valid; seeing if the total number of tuples is reasonable; and looking for tuples that use reserved codes. 
- + ======================================================================*/ int pccard_validate_cis(struct pcmcia_socket *s, unsigned int *info) diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c index 790af87a922f..6d6f82b38a68 100644 --- a/drivers/pcmcia/cs.c +++ b/drivers/pcmcia/cs.c @@ -135,7 +135,7 @@ int pcmcia_socket_dev_resume(struct device *dev) EXPORT_SYMBOL(pcmcia_socket_dev_resume); -struct pcmcia_socket * pcmcia_get_socket(struct pcmcia_socket *skt) +struct pcmcia_socket *pcmcia_get_socket(struct pcmcia_socket *skt) { struct device *dev = get_device(&skt->dev); if (!dev) @@ -145,7 +145,7 @@ struct pcmcia_socket * pcmcia_get_socket(struct pcmcia_socket *skt) put_device(&skt->dev); return NULL; } - return (skt); + return skt; } EXPORT_SYMBOL(pcmcia_get_socket); @@ -297,7 +297,7 @@ void pcmcia_unregister_socket(struct pcmcia_socket *socket) EXPORT_SYMBOL(pcmcia_unregister_socket); -struct pcmcia_socket * pcmcia_get_socket_by_nr(unsigned int nr) +struct pcmcia_socket *pcmcia_get_socket_by_nr(unsigned int nr) { struct pcmcia_socket *s; @@ -736,7 +736,7 @@ EXPORT_SYMBOL(pcmcia_parse_events); /* register pcmcia_callback */ int pccard_register_pcmcia(struct pcmcia_socket *s, struct pcmcia_callback *c) { - int ret = 0; + int ret = 0; /* s->skt_mutex also protects s->callback */ mutex_lock(&s->skt_mutex); @@ -848,7 +848,7 @@ EXPORT_SYMBOL(pcmcia_suspend_card); int pcmcia_resume_card(struct pcmcia_socket *skt) { int ret; - + dev_dbg(&skt->dev, "waking up socket\n"); mutex_lock(&skt->skt_mutex); @@ -876,7 +876,7 @@ EXPORT_SYMBOL(pcmcia_resume_card); int pcmcia_eject_card(struct pcmcia_socket *skt) { int ret; - + dev_dbg(&skt->dev, "user eject request\n"); mutex_lock(&skt->skt_mutex); diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c index 05893d41dd41..1a4a3c49cc15 100644 --- a/drivers/pcmcia/ds.c +++ b/drivers/pcmcia/ds.c @@ -57,7 +57,7 @@ static void pcmcia_check_driver(struct pcmcia_driver *p_drv) "function\n", p_drv->drv.name); while (did && did->match_flags) { - for (i=0; i<4; i++) { + for (i = 0; i < 4; i++) { if (!did->prod_id[i]) continue; @@ -105,7 +105,7 @@ pcmcia_store_new_id(struct device_driver *driver, const char *buf, size_t count) __u16 match_flags, manf_id, card_id; __u8 func_id, function, device_no; __u32 prod_id_hash[4] = {0, 0, 0, 0}; - int fields=0; + int fields = 0; int retval = 0; fields = sscanf(buf, "%hx %hx %hx %hhx %hhx %hhx %x %x %x %x", @@ -214,7 +214,7 @@ EXPORT_SYMBOL(pcmcia_unregister_driver); /* pcmcia_device handling */ -struct pcmcia_device * pcmcia_get_dev(struct pcmcia_device *p_dev) +struct pcmcia_device *pcmcia_get_dev(struct pcmcia_device *p_dev) { struct device *tmp_dev; tmp_dev = get_device(&p_dev->dev); @@ -258,7 +258,7 @@ static void pcmcia_add_device_later(struct pcmcia_socket *s, int mfc) return; } -static int pcmcia_device_probe(struct device * dev) +static int pcmcia_device_probe(struct device *dev) { struct pcmcia_device *p_dev; struct pcmcia_driver *p_drv; @@ -325,7 +325,7 @@ put_module: put_dev: if (ret) put_device(dev); - return (ret); + return ret; } @@ -354,7 +354,7 @@ static void pcmcia_card_remove(struct pcmcia_socket *s, struct pcmcia_device *le spin_lock_irqsave(&pcmcia_dev_list_lock, flags); list_del(&p_dev->socket_device_list); - p_dev->_removed=1; + p_dev->_removed = 1; spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); dev_dbg(&p_dev->dev, "unregistering device\n"); @@ -364,7 +364,7 @@ static void pcmcia_card_remove(struct pcmcia_socket *s, struct pcmcia_device *le return; } -static int pcmcia_device_remove(struct 
device * dev) +static int pcmcia_device_remove(struct device *dev) { struct pcmcia_device *p_dev; struct pcmcia_driver *p_drv; @@ -391,7 +391,7 @@ static int pcmcia_device_remove(struct device * dev) return 0; if (p_drv->remove) - p_drv->remove(p_dev); + p_drv->remove(p_dev); p_dev->dev_node = NULL; @@ -499,7 +499,7 @@ static int pcmcia_device_query(struct pcmcia_device *p_dev) */ static DEFINE_MUTEX(device_add_lock); -struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int function) +struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, unsigned int function) { struct pcmcia_device *p_dev, *tmp_dev; unsigned long flags; @@ -545,8 +545,8 @@ struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int f * Note that this is serialized by the device_add_lock, so that * only one such struct will be created. */ - list_for_each_entry(tmp_dev, &s->devices_list, socket_device_list) - if (p_dev->func == tmp_dev->func) { + list_for_each_entry(tmp_dev, &s->devices_list, socket_device_list) + if (p_dev->func == tmp_dev->func) { p_dev->function_config = tmp_dev->function_config; p_dev->io = tmp_dev->io; p_dev->irq = tmp_dev->irq; @@ -627,10 +627,10 @@ static int pcmcia_card_add(struct pcmcia_socket *s) no_funcs = 1; s->functions = no_funcs; - for (i=0; i < no_funcs; i++) + for (i = 0; i < no_funcs; i++) pcmcia_device_add(s, i); - return (ret); + return ret; } @@ -756,7 +756,7 @@ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename) release: release_firmware(fw); - return (ret); + return ret; } #else /* !CONFIG_PCMCIA_LOAD_CIS */ @@ -852,7 +852,7 @@ static inline int pcmcia_devmatch(struct pcmcia_device *dev, if (did->match_flags & PCMCIA_DEV_ID_MATCH_ANONYMOUS) { int i; - for (i=0; i<4; i++) + for (i = 0; i < 4; i++) if (dev->prod_id[i]) return 0; if (dev->has_manf_id || dev->has_card_id || dev->has_func_id) @@ -865,9 +865,10 @@ static inline int pcmcia_devmatch(struct pcmcia_device *dev, } -static int pcmcia_bus_match(struct device * dev, struct device_driver * drv) { - struct pcmcia_device * p_dev = to_pcmcia_dev(dev); - struct pcmcia_driver * p_drv = to_pcmcia_drv(drv); +static int pcmcia_bus_match(struct device *dev, struct device_driver *drv) +{ + struct pcmcia_device *p_dev = to_pcmcia_dev(dev); + struct pcmcia_driver *p_drv = to_pcmcia_drv(drv); struct pcmcia_device_id *did = p_drv->id_table; struct pcmcia_dynid *dynid; @@ -917,7 +918,7 @@ static int pcmcia_bus_uevent(struct device *dev, struct kobj_uevent_env *env) p_dev = to_pcmcia_dev(dev); /* calculate hashes */ - for (i=0; i<4; i++) { + for (i = 0; i < 4; i++) { if (!p_dev->prod_id[i]) continue; hash[i] = crc32(0, p_dev->prod_id[i], strlen(p_dev->prod_id[i])); @@ -984,14 +985,14 @@ static void runtime_resume(struct device *dev) static ssize_t field##_show (struct device *dev, struct device_attribute *attr, char *buf) \ { \ struct pcmcia_device *p_dev = to_pcmcia_dev(dev); \ - return p_dev->test ? sprintf (buf, format, p_dev->field) : -ENODEV; \ + return p_dev->test ? sprintf(buf, format, p_dev->field) : -ENODEV; \ } #define pcmcia_device_stringattr(name, field) \ static ssize_t name##_show (struct device *dev, struct device_attribute *attr, char *buf) \ { \ struct pcmcia_device *p_dev = to_pcmcia_dev(dev); \ - return p_dev->field ? sprintf (buf, "%s\n", p_dev->field) : -ENODEV; \ + return p_dev->field ? 
sprintf(buf, "%s\n", p_dev->field) : -ENODEV; \ } pcmcia_device_attr(func, socket, "0x%02x\n"); @@ -1020,8 +1021,8 @@ static ssize_t pcmcia_store_pm_state(struct device *dev, struct device_attribute struct pcmcia_device *p_dev = to_pcmcia_dev(dev); int ret = 0; - if (!count) - return -EINVAL; + if (!count) + return -EINVAL; if ((!p_dev->suspended) && !strncmp(buf, "off", 3)) ret = runtime_suspend(dev); @@ -1039,10 +1040,11 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, u32 hash[4] = { 0, 0, 0, 0}; /* calculate hashes */ - for (i=0; i<4; i++) { + for (i = 0; i < 4; i++) { if (!p_dev->prod_id[i]) continue; - hash[i] = crc32(0,p_dev->prod_id[i],strlen(p_dev->prod_id[i])); + hash[i] = crc32(0, p_dev->prod_id[i], + strlen(p_dev->prod_id[i])); } return sprintf(buf, "pcmcia:m%04Xc%04Xf%02Xfn%02Xpfn%02X" "pa%08Xpb%08Xpc%08Xpd%08X\n", @@ -1091,7 +1093,7 @@ static struct device_attribute pcmcia_dev_attrs[] = { /* PM support, also needed for reset */ -static int pcmcia_dev_suspend(struct device * dev, pm_message_t state) +static int pcmcia_dev_suspend(struct device *dev, pm_message_t state) { struct pcmcia_device *p_dev = to_pcmcia_dev(dev); struct pcmcia_driver *p_drv = NULL; @@ -1131,10 +1133,10 @@ static int pcmcia_dev_suspend(struct device * dev, pm_message_t state) } -static int pcmcia_dev_resume(struct device * dev) +static int pcmcia_dev_resume(struct device *dev) { struct pcmcia_device *p_dev = to_pcmcia_dev(dev); - struct pcmcia_driver *p_drv = NULL; + struct pcmcia_driver *p_drv = NULL; int ret = 0; if (!p_dev->suspended) @@ -1211,7 +1213,7 @@ static int pcmcia_bus_suspend(struct pcmcia_socket *skt) /*====================================================================== The card status event handler. - + ======================================================================*/ /* Normally, the event is passed to individual drivers after @@ -1264,7 +1266,7 @@ static int ds_event(struct pcmcia_socket *skt, event_t event, int priority) } /* ds_event */ -struct pcmcia_device * pcmcia_dev_present(struct pcmcia_device *_p_dev) +struct pcmcia_device *pcmcia_dev_present(struct pcmcia_device *_p_dev) { struct pcmcia_device *p_dev; struct pcmcia_device *ret = NULL; @@ -1329,7 +1331,7 @@ static int __devinit pcmcia_bus_add_socket(struct device *dev, if (ret) { dev_printk(KERN_ERR, dev, "PCMCIA registration failed\n"); pcmcia_put_socket(socket); - return (ret); + return ret; } return 0; @@ -1400,7 +1402,7 @@ static int __init init_pcmcia_bus(void) return 0; } -fs_initcall(init_pcmcia_bus); /* one level after subsys_initcall so that +fs_initcall(init_pcmcia_bus); /* one level after subsys_initcall so that * pcmcia_socket_class is already registered */ diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c index c4d7908fa37f..f73fd5beaa37 100644 --- a/drivers/pcmcia/pcmcia_ioctl.c +++ b/drivers/pcmcia/pcmcia_ioctl.c @@ -88,12 +88,12 @@ static struct pcmcia_driver *get_pcmcia_driver(dev_info_t *dev_info) p_drv = container_of(drv, struct pcmcia_driver, drv); - return (p_drv); + return p_drv; } #ifdef CONFIG_PROC_FS -static struct proc_dir_entry *proc_pccard = NULL; +static struct proc_dir_entry *proc_pccard; static int proc_read_drivers_callback(struct device_driver *driver, void *_m) { @@ -158,7 +158,8 @@ static int adjust_irq(struct pcmcia_socket *s, adjust_t *adj) #else -static inline int adjust_irq(struct pcmcia_socket *s, adjust_t *adj) { +static inline int adjust_irq(struct pcmcia_socket *s, adjust_t *adj) +{ return 0; } @@ -195,7 +196,7 @@ 
static int pcmcia_adjust_resource_info(adjust_t *adj) begin = adj->resource.memory.Base; end = adj->resource.memory.Base + adj->resource.memory.Size - 1; if (s->resource_ops->add_mem) - ret =s->resource_ops->add_mem(s, adj->Action, begin, end); + ret = s->resource_ops->add_mem(s, adj->Action, begin, end); case RES_IO_RANGE: begin = adj->resource.io.BasePort; end = adj->resource.io.BasePort + adj->resource.io.NumPorts - 1; @@ -215,7 +216,7 @@ static int pcmcia_adjust_resource_info(adjust_t *adj) } up_read(&pcmcia_socket_list_rwsem); - return (ret); + return ret; } @@ -490,7 +491,7 @@ static int bind_request(struct pcmcia_socket *s, bind_info_t *bind_info) } spin_lock_irqsave(&pcmcia_dev_list_lock, flags); - list_for_each_entry(p_dev, &s->devices_list, socket_device_list) { + list_for_each_entry(p_dev, &s->devices_list, socket_device_list) { if (p_dev->func == bind_info->function) { if ((p_dev->dev.driver == &p_drv->drv)) { if (p_dev->cardmgr) { @@ -558,7 +559,7 @@ rescan: err_put: pcmcia_put_socket(s); - return (ret); + return ret; } /* bind_request */ #ifdef CONFIG_CARDBUS @@ -655,7 +656,7 @@ static int get_device_info(struct pcmcia_socket *s, bind_info_t *bind_info, int err_put: pcmcia_put_dev(p_dev); - return (ret); + return ret; } /* get_device_info */ @@ -664,7 +665,7 @@ static int ds_open(struct inode *inode, struct file *file) socket_t i = iminor(inode); struct pcmcia_socket *s; user_info_t *user; - static int warning_printed = 0; + static int warning_printed; int ret = 0; pr_debug("ds_open(socket %d)\n", i); @@ -738,12 +739,13 @@ static int ds_release(struct inode *inode, struct file *file) s = user->socket; /* Unlink user data structure */ - if ((file->f_flags & O_ACCMODE) != O_RDONLY) { + if ((file->f_flags & O_ACCMODE) != O_RDONLY) s->pcmcia_state.busy = 0; - } + file->private_data = NULL; for (link = &s->user; *link; link = &(*link)->next) - if (*link == user) break; + if (*link == user) + break; if (link == NULL) goto out; *link = user->next; @@ -774,7 +776,7 @@ static ssize_t ds_read(struct file *file, char __user *buf, s = user->socket; if (s->pcmcia_state.dead) - return -EIO; + return -EIO; ret = wait_event_interruptible(s->queue, !queue_empty(user)); if (ret == 0) @@ -824,7 +826,7 @@ static u_int ds_poll(struct file *file, poll_table *wait) /*====================================================================*/ -static int ds_ioctl(struct inode * inode, struct file * file, +static int ds_ioctl(struct inode *inode, struct file *file, u_int cmd, u_long arg) { struct pcmcia_socket *s; @@ -842,10 +844,11 @@ static int ds_ioctl(struct inode * inode, struct file * file, s = user->socket; if (s->pcmcia_state.dead) - return -EIO; + return -EIO; size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT; - if (size > sizeof(ds_ioctl_arg_t)) return -EINVAL; + if (size > sizeof(ds_ioctl_arg_t)) + return -EINVAL; /* Permission check */ if (!(cmd & IOC_OUT) && !capable(CAP_SYS_ADMIN)) @@ -1024,8 +1027,8 @@ static int ds_ioctl(struct inode * inode, struct file * file, } if (cmd & IOC_OUT) { - if (__copy_to_user(uarg, (char *)buf, size)) - err = -EFAULT; + if (__copy_to_user(uarg, (char *)buf, size)) + err = -EFAULT; } free_out: @@ -1045,7 +1048,8 @@ static const struct file_operations ds_fops = { .poll = ds_poll, }; -void __init pcmcia_setup_ioctl(void) { +void __init pcmcia_setup_ioctl(void) +{ int i; /* Set up character device for user mode clients */ @@ -1064,7 +1068,8 @@ void __init pcmcia_setup_ioctl(void) { } -void __exit pcmcia_cleanup_ioctl(void) { +void __exit pcmcia_cleanup_ioctl(void) +{ 
#ifdef CONFIG_PROC_FS if (proc_pccard) { remove_proc_entry("drivers", proc_pccard); diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c index a8bf8c1b45ed..d5db95644b64 100644 --- a/drivers/pcmcia/pcmcia_resource.c +++ b/drivers/pcmcia/pcmcia_resource.c @@ -33,7 +33,7 @@ /* Access speed for IO windows */ -static int io_speed = 0; +static int io_speed; module_param(io_speed, int, 0444); @@ -62,7 +62,8 @@ static int alloc_io_space(struct pcmcia_socket *s, u_int attr, num, align); align = 0; } else - while (align && (align < num)) align <<= 1; + while (align && (align < num)) + align <<= 1; } if (*base & ~(align-1)) { dev_dbg(&s->dev, "odd IO request: base %#x align %#x\n", @@ -338,7 +339,7 @@ static int pcmcia_release_io(struct pcmcia_device *p_dev, io_req_t *req) struct pcmcia_socket *s = p_dev->socket; config_t *c = p_dev->function_config; - if (!p_dev->_io ) + if (!p_dev->_io) return -EINVAL; p_dev->_io = 0; @@ -362,7 +363,7 @@ static int pcmcia_release_io(struct pcmcia_device *p_dev, io_req_t *req) static int pcmcia_release_irq(struct pcmcia_device *p_dev, irq_req_t *req) { struct pcmcia_socket *s = p_dev->socket; - config_t *c= p_dev->function_config; + config_t *c = p_dev->function_config; if (!p_dev->_irq) return -EINVAL; @@ -383,9 +384,8 @@ static int pcmcia_release_irq(struct pcmcia_device *p_dev, irq_req_t *req) s->irq.AssignedIRQ = 0; } - if (req->Handler) { + if (req->Handler) free_irq(req->AssignedIRQ, p_dev->priv); - } #ifdef CONFIG_PCMCIA_PROBE pcmcia_used_irq[req->AssignedIRQ]--; @@ -656,7 +656,8 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req) type = IRQF_SHARED; else if (req->Attributes & IRQ_TYPE_DYNAMIC_SHARING) type = IRQF_SHARED; - else printk(KERN_WARNING "pcmcia: Driver needs updating to support IRQ sharing.\n"); + else + printk(KERN_WARNING "pcmcia: Driver needs updating to support IRQ sharing.\n"); #ifdef CONFIG_PCMCIA_PROBE @@ -788,7 +789,8 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha /* Allocate system memory window */ for (w = 0; w < MAX_WIN; w++) - if (!(s->state & SOCKET_WIN_REQ(w))) break; + if (!(s->state & SOCKET_WIN_REQ(w))) + break; if (w == MAX_WIN) { dev_dbg(&s->dev, "all windows are used already\n"); return -EINVAL; @@ -826,18 +828,19 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha s->state |= SOCKET_WIN_REQ(w); /* Return window handle */ - if (s->features & SS_CAP_STATIC_MAP) { + if (s->features & SS_CAP_STATIC_MAP) req->Base = win->static_start; - } else { + else req->Base = win->res->start; - } + *wh = w + 1; return 0; } /* pcmcia_request_window */ EXPORT_SYMBOL(pcmcia_request_window); -void pcmcia_disable_device(struct pcmcia_device *p_dev) { +void pcmcia_disable_device(struct pcmcia_device *p_dev) +{ pcmcia_release_configuration(p_dev); pcmcia_release_io(p_dev, &p_dev->io); pcmcia_release_irq(p_dev, &p_dev->irq); @@ -970,7 +973,7 @@ int pcmcia_loop_tuple(struct pcmcia_device *p_dev, cisdata_t code, return pccard_loop_tuple(p_dev->socket, p_dev->func, code, NULL, &loop, pcmcia_do_loop_tuple); -}; +} EXPORT_SYMBOL(pcmcia_loop_tuple); @@ -1000,7 +1003,7 @@ static int pcmcia_do_get_tuple(struct pcmcia_device *p_dev, tuple_t *tuple, } else dev_dbg(&p_dev->dev, "do_get_tuple: out of memory\n"); return 0; -}; +} /** * pcmcia_get_tuple() - get first tuple from CIS @@ -1024,7 +1027,7 @@ size_t pcmcia_get_tuple(struct pcmcia_device *p_dev, cisdata_t code, pcmcia_loop_tuple(p_dev, code, pcmcia_do_get_tuple, &get); return get.len; -}; 
+} EXPORT_SYMBOL(pcmcia_get_tuple); @@ -1057,7 +1060,7 @@ static int pcmcia_do_get_mac(struct pcmcia_device *p_dev, tuple_t *tuple, for (i = 0; i < 6; i++) dev->dev_addr[i] = tuple->TupleData[i+2]; return 0; -}; +} /** * pcmcia_get_mac_from_cis() - read out MAC address from CISTPL_FUNCE @@ -1071,6 +1074,6 @@ static int pcmcia_do_get_mac(struct pcmcia_device *p_dev, tuple_t *tuple, int pcmcia_get_mac_from_cis(struct pcmcia_device *p_dev, struct net_device *dev) { return pcmcia_loop_tuple(p_dev, CISTPL_FUNCE, pcmcia_do_get_mac, dev); -}; +} EXPORT_SYMBOL(pcmcia_get_mac_from_cis); diff --git a/drivers/pcmcia/rsrc_mgr.c b/drivers/pcmcia/rsrc_mgr.c index de0e770ce6a3..52db17263d8b 100644 --- a/drivers/pcmcia/rsrc_mgr.c +++ b/drivers/pcmcia/rsrc_mgr.c @@ -126,16 +126,16 @@ static void pcmcia_align(void *align_data, struct resource *res, res->start = start; #ifdef CONFIG_X86 - if (res->flags & IORESOURCE_IO) { - if (start & 0x300) { - start = (start + 0x3ff) & ~0x3ff; - res->start = start; - } - } + if (res->flags & IORESOURCE_IO) { + if (start & 0x300) { + start = (start + 0x3ff) & ~0x3ff; + res->start = start; + } + } #endif #ifdef CONFIG_M68K - if (res->flags & IORESOURCE_IO) { + if (res->flags & IORESOURCE_IO) { if ((res->start + size - 1) >= 1024) res->start = res->end; } diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c index 7039f3cf5b77..9b0dc433a8c3 100644 --- a/drivers/pcmcia/rsrc_nonstatic.c +++ b/drivers/pcmcia/rsrc_nonstatic.c @@ -24,9 +24,9 @@ #include #include #include +#include #include -#include #include #include @@ -144,43 +144,44 @@ static int add_interval(struct resource_map *map, u_long base, u_long num) static int sub_interval(struct resource_map *map, u_long base, u_long num) { - struct resource_map *p, *q; - - for (p = map; ; p = q) { - q = p->next; - if (q == map) - break; - if ((q->base+q->num > base) && (base+num > q->base)) { - if (q->base >= base) { - if (q->base+q->num <= base+num) { - /* Delete whole block */ - p->next = q->next; - kfree(q); - /* don't advance the pointer yet */ - q = p; - } else { - /* Cut off bit from the front */ - q->num = q->base + q->num - base - num; - q->base = base + num; - } - } else if (q->base+q->num <= base+num) { - /* Cut off bit from the end */ - q->num = base - q->base; - } else { - /* Split the block into two pieces */ - p = kmalloc(sizeof(struct resource_map), GFP_KERNEL); - if (!p) { - printk(KERN_WARNING "out of memory to update resources\n"); - return -ENOMEM; + struct resource_map *p, *q; + + for (p = map; ; p = q) { + q = p->next; + if (q == map) + break; + if ((q->base+q->num > base) && (base+num > q->base)) { + if (q->base >= base) { + if (q->base+q->num <= base+num) { + /* Delete whole block */ + p->next = q->next; + kfree(q); + /* don't advance the pointer yet */ + q = p; + } else { + /* Cut off bit from the front */ + q->num = q->base + q->num - base - num; + q->base = base + num; + } + } else if (q->base+q->num <= base+num) { + /* Cut off bit from the end */ + q->num = base - q->base; + } else { + /* Split the block into two pieces */ + p = kmalloc(sizeof(struct resource_map), + GFP_KERNEL); + if (!p) { + printk(KERN_WARNING "out of memory to update resources\n"); + return -ENOMEM; + } + p->base = base+num; + p->num = q->base+q->num - p->base; + q->num = base - q->base; + p->next = q->next ; q->next = p; + } } - p->base = base+num; - p->num = q->base+q->num - p->base; - q->num = base - q->base; - p->next = q->next ; q->next = p; - } } - } - return 0; + return 0; } 
/*====================================================================== @@ -194,69 +195,72 @@ static int sub_interval(struct resource_map *map, u_long base, u_long num) static void do_io_probe(struct pcmcia_socket *s, unsigned int base, unsigned int num) { - struct resource *res; - struct socket_data *s_data = s->resource_data; - unsigned int i, j, bad; - int any; - u_char *b, hole, most; - - dev_printk(KERN_INFO, &s->dev, "cs: IO port probe %#x-%#x:", - base, base+num-1); - - /* First, what does a floating port look like? */ - b = kzalloc(256, GFP_KERNEL); - if (!b) { - printk("\n"); - dev_printk(KERN_ERR, &s->dev, - "do_io_probe: unable to kmalloc 256 bytes"); - return; - } - for (i = base, most = 0; i < base+num; i += 8) { - res = claim_region(NULL, i, 8, IORESOURCE_IO, "PCMCIA IO probe"); - if (!res) - continue; - hole = inb(i); - for (j = 1; j < 8; j++) - if (inb(i+j) != hole) break; - free_region(res); - if ((j == 8) && (++b[hole] > b[most])) - most = hole; - if (b[most] == 127) break; - } - kfree(b); - - bad = any = 0; - for (i = base; i < base+num; i += 8) { - res = claim_region(NULL, i, 8, IORESOURCE_IO, "PCMCIA IO probe"); - if (!res) - continue; - for (j = 0; j < 8; j++) - if (inb(i+j) != most) break; - free_region(res); - if (j < 8) { - if (!any) - printk(" excluding"); - if (!bad) - bad = any = i; - } else { - if (bad) { - sub_interval(&s_data->io_db, bad, i-bad); - printk(" %#x-%#x", bad, i-1); - bad = 0; - } + struct resource *res; + struct socket_data *s_data = s->resource_data; + unsigned int i, j, bad; + int any; + u_char *b, hole, most; + + dev_printk(KERN_INFO, &s->dev, "cs: IO port probe %#x-%#x:", + base, base+num-1); + + /* First, what does a floating port look like? */ + b = kzalloc(256, GFP_KERNEL); + if (!b) { + printk("\n"); + dev_printk(KERN_ERR, &s->dev, + "do_io_probe: unable to kmalloc 256 bytes"); + return; } - } - if (bad) { - if ((num > 16) && (bad == base) && (i == base+num)) { - printk(" nothing: probe failed.\n"); - return; - } else { - sub_interval(&s_data->io_db, bad, i-bad); - printk(" %#x-%#x", bad, i-1); + for (i = base, most = 0; i < base+num; i += 8) { + res = claim_region(NULL, i, 8, IORESOURCE_IO, "PCMCIA ioprobe"); + if (!res) + continue; + hole = inb(i); + for (j = 1; j < 8; j++) + if (inb(i+j) != hole) + break; + free_region(res); + if ((j == 8) && (++b[hole] > b[most])) + most = hole; + if (b[most] == 127) + break; } - } + kfree(b); - printk(any ? "\n" : " clean.\n"); + bad = any = 0; + for (i = base; i < base+num; i += 8) { + res = claim_region(NULL, i, 8, IORESOURCE_IO, "PCMCIA ioprobe"); + if (!res) + continue; + for (j = 0; j < 8; j++) + if (inb(i+j) != most) + break; + free_region(res); + if (j < 8) { + if (!any) + printk(" excluding"); + if (!bad) + bad = any = i; + } else { + if (bad) { + sub_interval(&s_data->io_db, bad, i-bad); + printk(" %#x-%#x", bad, i-1); + bad = 0; + } + } + } + if (bad) { + if ((num > 16) && (bad == base) && (i == base+num)) { + printk(" nothing: probe failed.\n"); + return; + } else { + sub_interval(&s_data->io_db, bad, i-bad); + printk(" %#x-%#x", bad, i-1); + } + } + + printk(any ? 
"\n" : " clean.\n"); } #endif @@ -327,8 +331,9 @@ cis_readable(struct pcmcia_socket *s, unsigned long base, unsigned long size) unsigned int info1, info2; int ret = 0; - res1 = claim_region(s, base, size/2, IORESOURCE_MEM, "cs memory probe"); - res2 = claim_region(s, base + size/2, size/2, IORESOURCE_MEM, "cs memory probe"); + res1 = claim_region(s, base, size/2, IORESOURCE_MEM, "PCMCIA memprobe"); + res2 = claim_region(s, base + size/2, size/2, IORESOURCE_MEM, + "PCMCIA memprobe"); if (res1 && res2) { ret = readable(s, res1, &info1); @@ -347,8 +352,9 @@ checksum_match(struct pcmcia_socket *s, unsigned long base, unsigned long size) struct resource *res1, *res2; int a = -1, b = -1; - res1 = claim_region(s, base, size/2, IORESOURCE_MEM, "cs memory probe"); - res2 = claim_region(s, base + size/2, size/2, IORESOURCE_MEM, "cs memory probe"); + res1 = claim_region(s, base, size/2, IORESOURCE_MEM, "PCMCIA memprobe"); + res2 = claim_region(s, base + size/2, size/2, IORESOURCE_MEM, + "PCMCIA memprobe"); if (res1 && res2) { a = checksum(s, res1); @@ -371,42 +377,43 @@ checksum_match(struct pcmcia_socket *s, unsigned long base, unsigned long size) static int do_mem_probe(u_long base, u_long num, struct pcmcia_socket *s) { - struct socket_data *s_data = s->resource_data; - u_long i, j, bad, fail, step; - - dev_printk(KERN_INFO, &s->dev, "cs: memory probe 0x%06lx-0x%06lx:", - base, base+num-1); - bad = fail = 0; - step = (num < 0x20000) ? 0x2000 : ((num>>4) & ~0x1fff); - /* don't allow too large steps */ - if (step > 0x800000) - step = 0x800000; - /* cis_readable wants to map 2x map_size */ - if (step < 2 * s->map_size) - step = 2 * s->map_size; - for (i = j = base; i < base+num; i = j + step) { - if (!fail) { - for (j = i; j < base+num; j += step) { - if (cis_readable(s, j, step)) - break; - } - fail = ((i == base) && (j == base+num)); - } - if (fail) { - for (j = i; j < base+num; j += 2*step) - if (checksum_match(s, j, step) && - checksum_match(s, j + step, step)) - break; - } - if (i != j) { - if (!bad) printk(" excluding"); - printk(" %#05lx-%#05lx", i, j-1); - sub_interval(&s_data->mem_db, i, j-i); - bad += j-i; + struct socket_data *s_data = s->resource_data; + u_long i, j, bad, fail, step; + + dev_printk(KERN_INFO, &s->dev, "cs: memory probe 0x%06lx-0x%06lx:", + base, base+num-1); + bad = fail = 0; + step = (num < 0x20000) ? 0x2000 : ((num>>4) & ~0x1fff); + /* don't allow too large steps */ + if (step > 0x800000) + step = 0x800000; + /* cis_readable wants to map 2x map_size */ + if (step < 2 * s->map_size) + step = 2 * s->map_size; + for (i = j = base; i < base+num; i = j + step) { + if (!fail) { + for (j = i; j < base+num; j += step) { + if (cis_readable(s, j, step)) + break; + } + fail = ((i == base) && (j == base+num)); + } + if (fail) { + for (j = i; j < base+num; j += 2*step) + if (checksum_match(s, j, step) && + checksum_match(s, j + step, step)) + break; + } + if (i != j) { + if (!bad) + printk(" excluding"); + printk(" %#05lx-%#05lx", i, j-1); + sub_interval(&s_data->mem_db, i, j-i); + bad += j-i; + } } - } - printk(bad ? "\n" : " clean.\n"); - return (num - bad); + printk(bad ? 
"\n" : " clean.\n"); + return num - bad; } #ifdef CONFIG_PCMCIA_PROBE @@ -656,7 +663,7 @@ static struct resource *nonstatic_find_io_region(unsigned long base, int num, return res; } -static struct resource * nonstatic_find_mem_region(u_long base, u_long num, +static struct resource *nonstatic_find_mem_region(u_long base, u_long num, u_long align, int low, struct pcmcia_socket *s) { struct resource *res = make_resource(0, num, IORESOURCE_MEM, dev_name(&s->dev)); @@ -794,7 +801,7 @@ static int nonstatic_autoadd_resources(struct pcmcia_socket *s) return -EINVAL; #endif - for (i=0; i < PCI_BUS_NUM_RESOURCES; i++) { + for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { res = s->cb_dev->bus->resource[i]; if (!res) continue; @@ -908,14 +915,14 @@ static ssize_t show_io_db(struct device *dev, for (p = data->io_db.next; p != &data->io_db; p = p->next) { if (ret > (PAGE_SIZE - 10)) continue; - ret += snprintf (&buf[ret], (PAGE_SIZE - ret - 1), - "0x%08lx - 0x%08lx\n", - ((unsigned long) p->base), - ((unsigned long) p->base + p->num - 1)); + ret += snprintf(&buf[ret], (PAGE_SIZE - ret - 1), + "0x%08lx - 0x%08lx\n", + ((unsigned long) p->base), + ((unsigned long) p->base + p->num - 1)); } mutex_unlock(&rsrc_mutex); - return (ret); + return ret; } static ssize_t store_io_db(struct device *dev, @@ -927,12 +934,13 @@ static ssize_t store_io_db(struct device *dev, unsigned int add = ADD_MANAGED_RESOURCE; ssize_t ret = 0; - ret = sscanf (buf, "+ 0x%lx - 0x%lx", &start_addr, &end_addr); + ret = sscanf(buf, "+ 0x%lx - 0x%lx", &start_addr, &end_addr); if (ret != 2) { - ret = sscanf (buf, "- 0x%lx - 0x%lx", &start_addr, &end_addr); + ret = sscanf(buf, "- 0x%lx - 0x%lx", &start_addr, &end_addr); add = REMOVE_MANAGED_RESOURCE; if (ret != 2) { - ret = sscanf (buf, "0x%lx - 0x%lx", &start_addr, &end_addr); + ret = sscanf(buf, "0x%lx - 0x%lx", &start_addr, + &end_addr); add = ADD_MANAGED_RESOURCE; if (ret != 2) return -EINVAL; @@ -963,14 +971,14 @@ static ssize_t show_mem_db(struct device *dev, for (p = data->mem_db.next; p != &data->mem_db; p = p->next) { if (ret > (PAGE_SIZE - 10)) continue; - ret += snprintf (&buf[ret], (PAGE_SIZE - ret - 1), - "0x%08lx - 0x%08lx\n", - ((unsigned long) p->base), - ((unsigned long) p->base + p->num - 1)); + ret += snprintf(&buf[ret], (PAGE_SIZE - ret - 1), + "0x%08lx - 0x%08lx\n", + ((unsigned long) p->base), + ((unsigned long) p->base + p->num - 1)); } mutex_unlock(&rsrc_mutex); - return (ret); + return ret; } static ssize_t store_mem_db(struct device *dev, @@ -982,12 +990,13 @@ static ssize_t store_mem_db(struct device *dev, unsigned int add = ADD_MANAGED_RESOURCE; ssize_t ret = 0; - ret = sscanf (buf, "+ 0x%lx - 0x%lx", &start_addr, &end_addr); + ret = sscanf(buf, "+ 0x%lx - 0x%lx", &start_addr, &end_addr); if (ret != 2) { - ret = sscanf (buf, "- 0x%lx - 0x%lx", &start_addr, &end_addr); + ret = sscanf(buf, "- 0x%lx - 0x%lx", &start_addr, &end_addr); add = REMOVE_MANAGED_RESOURCE; if (ret != 2) { - ret = sscanf (buf, "0x%lx - 0x%lx", &start_addr, &end_addr); + ret = sscanf(buf, "0x%lx - 0x%lx", &start_addr, + &end_addr); add = ADD_MANAGED_RESOURCE; if (ret != 2) return -EINVAL; diff --git a/drivers/pcmcia/socket_sysfs.c b/drivers/pcmcia/socket_sysfs.c index 78d5aab542f7..7a456000332a 100644 --- a/drivers/pcmcia/socket_sysfs.c +++ b/drivers/pcmcia/socket_sysfs.c @@ -164,7 +164,7 @@ static ssize_t pccard_store_irq_mask(struct device *dev, if (!count) return -EINVAL; - ret = sscanf (buf, "0x%x\n", &mask); + ret = sscanf(buf, "0x%x\n", &mask); if (ret == 1) { s->irq_mask &= mask; @@ 
-278,7 +278,7 @@ static ssize_t pccard_extract_cis(struct pcmcia_socket *s, char *buf, loff_t off free_tuple: kfree(tuplebuffer); - return (ret); + return ret; } static ssize_t pccard_show_cis(struct kobject *kobj, @@ -308,7 +308,7 @@ static ssize_t pccard_show_cis(struct kobject *kobj, count = pccard_extract_cis(s, buf, off, count); } - return (count); + return count; } static ssize_t pccard_store_cis(struct kobject *kobj, diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c index 8be4cc447a17..fe02cfd4b5e9 100644 --- a/drivers/pcmcia/yenta_socket.c +++ b/drivers/pcmcia/yenta_socket.c @@ -6,7 +6,7 @@ * Changelog: * Aug 2002: Manfred Spraul * Dynamically adjust the size of the bridge resource - * + * * May 2003: Dominik Brodowski * Merge pci_socket.c and yenta.c into one file */ @@ -16,13 +16,12 @@ #include #include #include +#include #include #include #include -#include - #include "yenta_socket.h" #include "i82365.h" @@ -55,7 +54,7 @@ static int yenta_probe_cb_irq(struct yenta_socket *socket); static unsigned int override_bios; module_param(override_bios, uint, 0000); -MODULE_PARM_DESC (override_bios, "yenta ignore bios resource allocation"); +MODULE_PARM_DESC(override_bios, "yenta ignore bios resource allocation"); /* * Generate easy-to-use ways of reading a cardbus sockets @@ -237,24 +236,42 @@ static void yenta_set_power(struct yenta_socket *socket, socket_state_t *state) /* i82365SL-DF style */ if (socket->flags & YENTA_16BIT_POWER_DF) { switch (state->Vcc) { - case 33: reg |= I365_VCC_3V; break; - case 50: reg |= I365_VCC_5V; break; - default: reg = 0; break; + case 33: + reg |= I365_VCC_3V; + break; + case 50: + reg |= I365_VCC_5V; + break; + default: + reg = 0; + break; } switch (state->Vpp) { case 33: - case 50: reg |= I365_VPP1_5V; break; - case 120: reg |= I365_VPP1_12V; break; + case 50: + reg |= I365_VPP1_5V; + break; + case 120: + reg |= I365_VPP1_12V; + break; } } else { /* i82365SL-B style */ switch (state->Vcc) { - case 50: reg |= I365_VCC_5V; break; - default: reg = 0; break; + case 50: + reg |= I365_VCC_5V; + break; + default: + reg = 0; + break; } switch (state->Vpp) { - case 50: reg |= I365_VPP1_5V | I365_VPP2_5V; break; - case 120: reg |= I365_VPP1_12V | I365_VPP2_12V; break; + case 50: + reg |= I365_VPP1_5V | I365_VPP2_5V; + break; + case 120: + reg |= I365_VPP1_12V | I365_VPP2_12V; + break; } } @@ -263,14 +280,26 @@ static void yenta_set_power(struct yenta_socket *socket, socket_state_t *state) } else { u32 reg = 0; /* CB_SC_STPCLK? 
*/ switch (state->Vcc) { - case 33: reg = CB_SC_VCC_3V; break; - case 50: reg = CB_SC_VCC_5V; break; - default: reg = 0; break; + case 33: + reg = CB_SC_VCC_3V; + break; + case 50: + reg = CB_SC_VCC_5V; + break; + default: + reg = 0; + break; } switch (state->Vpp) { - case 33: reg |= CB_SC_VPP_3V; break; - case 50: reg |= CB_SC_VPP_5V; break; - case 120: reg |= CB_SC_VPP_12V; break; + case 33: + reg |= CB_SC_VPP_3V; + break; + case 50: + reg |= CB_SC_VPP_5V; + break; + case 120: + reg |= CB_SC_VPP_12V; + break; } if (reg != cb_readl(socket, CB_SOCKET_CONTROL)) cb_writel(socket, CB_SOCKET_CONTROL, reg); @@ -314,23 +343,29 @@ static int yenta_set_socket(struct pcmcia_socket *sock, socket_state_t *state) reg = exca_readb(socket, I365_POWER) & (I365_VCC_MASK|I365_VPP1_MASK); reg |= I365_PWR_NORESET; - if (state->flags & SS_PWR_AUTO) reg |= I365_PWR_AUTO; - if (state->flags & SS_OUTPUT_ENA) reg |= I365_PWR_OUT; + if (state->flags & SS_PWR_AUTO) + reg |= I365_PWR_AUTO; + if (state->flags & SS_OUTPUT_ENA) + reg |= I365_PWR_OUT; if (exca_readb(socket, I365_POWER) != reg) exca_writeb(socket, I365_POWER, reg); /* CSC interrupt: no ISA irq for CSC */ reg = I365_CSC_DETECT; if (state->flags & SS_IOCARD) { - if (state->csc_mask & SS_STSCHG) reg |= I365_CSC_STSCHG; + if (state->csc_mask & SS_STSCHG) + reg |= I365_CSC_STSCHG; } else { - if (state->csc_mask & SS_BATDEAD) reg |= I365_CSC_BVD1; - if (state->csc_mask & SS_BATWARN) reg |= I365_CSC_BVD2; - if (state->csc_mask & SS_READY) reg |= I365_CSC_READY; + if (state->csc_mask & SS_BATDEAD) + reg |= I365_CSC_BVD1; + if (state->csc_mask & SS_BATWARN) + reg |= I365_CSC_BVD2; + if (state->csc_mask & SS_READY) + reg |= I365_CSC_READY; } exca_writeb(socket, I365_CSCINT, reg); exca_readb(socket, I365_CSC); - if(sock->zoom_video) + if (sock->zoom_video) sock->zoom_video(sock, state->flags & SS_ZVCARD); } config_writew(socket, CB_BRIDGE_CONTROL, bridge); @@ -368,9 +403,12 @@ static int yenta_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io exca_writew(socket, I365_IO(map)+I365_W_STOP, io->stop); ioctl = exca_readb(socket, I365_IOCTL) & ~I365_IOCTL_MASK(map); - if (io->flags & MAP_0WS) ioctl |= I365_IOCTL_0WS(map); - if (io->flags & MAP_16BIT) ioctl |= I365_IOCTL_16BIT(map); - if (io->flags & MAP_AUTOSZ) ioctl |= I365_IOCTL_IOCS16(map); + if (io->flags & MAP_0WS) + ioctl |= I365_IOCTL_0WS(map); + if (io->flags & MAP_16BIT) + ioctl |= I365_IOCTL_16BIT(map); + if (io->flags & MAP_AUTOSZ) + ioctl |= I365_IOCTL_IOCS16(map); exca_writeb(socket, I365_IOCTL, ioctl); if (io->flags & MAP_ACTIVE) @@ -416,10 +454,17 @@ static int yenta_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map * word = (stop >> 12) & 0x0fff; switch (to_cycles(mem->speed)) { - case 0: break; - case 1: word |= I365_MEM_WS0; break; - case 2: word |= I365_MEM_WS1; break; - default: word |= I365_MEM_WS1 | I365_MEM_WS0; break; + case 0: + break; + case 1: + word |= I365_MEM_WS0; + break; + case 2: + word |= I365_MEM_WS1; + break; + default: + word |= I365_MEM_WS1 | I365_MEM_WS0; + break; } exca_writew(socket, I365_MEM(map) + I365_W_STOP, word); @@ -547,9 +592,9 @@ static int yenta_sock_suspend(struct pcmcia_socket *sock) * max 4 MB, min 16 kB. We try very hard to not get below * the "ACC" values, though. 
*/ -#define BRIDGE_MEM_MAX 4*1024*1024 -#define BRIDGE_MEM_ACC 128*1024 -#define BRIDGE_MEM_MIN 16*1024 +#define BRIDGE_MEM_MAX (4*1024*1024) +#define BRIDGE_MEM_ACC (128*1024) +#define BRIDGE_MEM_MIN (16*1024) #define BRIDGE_IO_MAX 512 #define BRIDGE_IO_ACC 256 @@ -574,7 +619,7 @@ static int yenta_search_one_res(struct resource *root, struct resource *res, int i; size = BRIDGE_MEM_MAX; if (size > avail/8) { - size=(avail+1)/8; + size = (avail+1)/8; /* round size down to next power of 2 */ i = 0; while ((size /= 2) != 0) @@ -590,7 +635,7 @@ static int yenta_search_one_res(struct resource *root, struct resource *res, do { if (allocate_resource(root, res, size, start, end, align, - NULL, NULL)==0) { + NULL, NULL) == 0) { return 1; } size = size/2; @@ -605,8 +650,8 @@ static int yenta_search_res(struct yenta_socket *socket, struct resource *res, u32 min) { int i; - for (i=0; idev->bus->resource[i]; + for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { + struct resource *root = socket->dev->bus->resource[i]; if (!root) continue; @@ -704,7 +749,7 @@ static void yenta_allocate_resources(struct yenta_socket *socket) static void yenta_free_resources(struct yenta_socket *socket) { int i; - for (i=0;i<4;i++) { + for (i = 0; i < 4; i++) { struct resource *res; res = socket->dev->resource + PCI_BRIDGE_RESOURCES + i; if (res->start != 0 && res->end != 0) @@ -726,7 +771,7 @@ static void __devexit yenta_close(struct pci_dev *dev) /* we don't want a dying socket registered */ pcmcia_unregister_socket(&sock->socket); - + /* Disable all events so we don't die in an IRQ storm */ cb_writel(sock, CB_SOCKET_MASK, 0x0); exca_writeb(sock, I365_CSCINT, 0); @@ -898,7 +943,7 @@ static irqreturn_t yenta_probe_handler(int irq, void *dev_id) { struct yenta_socket *socket = (struct yenta_socket *) dev_id; u8 csc; - u32 cb_event; + u32 cb_event; /* Clear interrupt status for the event */ cb_event = cb_readl(socket, CB_SOCKET_EVENT); @@ -1019,7 +1064,7 @@ static void yenta_fixup_parent_bridge(struct pci_bus *cardbus_bridge) { struct list_head *tmp; unsigned char upper_limit; - /* + /* * We only check and fix the parent bridge: All systems which need * this fixup that have been reviewed are laptops and the only bridge * which needed fixing was the parent bridge of the CardBus bridge: @@ -1038,7 +1083,7 @@ static void yenta_fixup_parent_bridge(struct pci_bus *cardbus_bridge) /* check the bus ranges of all silbling bridges to prevent overlap */ list_for_each(tmp, &bridge_to_fix->parent->children) { - struct pci_bus * silbling = pci_bus_b(tmp); + struct pci_bus *silbling = pci_bus_b(tmp); /* * If the silbling has a higher secondary bus number * and it's secondary is equal or smaller than our @@ -1083,7 +1128,7 @@ static void yenta_fixup_parent_bridge(struct pci_bus *cardbus_bridge) * interrupt, and that we can map the cardbus area. Fill in the * socket information structure.. 
*/ -static int __devinit yenta_probe (struct pci_dev *dev, const struct pci_device_id *id) +static int __devinit yenta_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct yenta_socket *socket; int ret; @@ -1302,7 +1347,7 @@ static struct dev_pm_ops yenta_pm_ops = { #define YENTA_PM_OPS NULL #endif -#define CB_ID(vend,dev,type) \ +#define CB_ID(vend, dev, type) \ { \ .vendor = vend, \ .device = dev, \ @@ -1313,7 +1358,7 @@ static struct dev_pm_ops yenta_pm_ops = { .driver_data = CARDBUS_TYPE_##type, \ } -static struct pci_device_id yenta_table [] = { +static struct pci_device_id yenta_table[] = { CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1031, TI), /* @@ -1403,13 +1448,13 @@ static struct pci_driver yenta_cardbus_driver = { static int __init yenta_socket_init(void) { - return pci_register_driver (&yenta_cardbus_driver); + return pci_register_driver(&yenta_cardbus_driver); } -static void __exit yenta_socket_exit (void) +static void __exit yenta_socket_exit(void) { - pci_unregister_driver (&yenta_cardbus_driver); + pci_unregister_driver(&yenta_cardbus_driver); } diff --git a/include/pcmcia/ds.h b/include/pcmcia/ds.h index d403c12f7978..ee148573c114 100644 --- a/include/pcmcia/ds.h +++ b/include/pcmcia/ds.h @@ -82,7 +82,7 @@ struct pcmcia_device { /* the hardware "function" device; certain subdevices can * share one hardware "function" device. */ u8 func; - struct config_t* function_config; + struct config_t *function_config; struct list_head socket_device_list; @@ -121,14 +121,14 @@ struct pcmcia_device { u16 manf_id; u16 card_id; - char * prod_id[4]; + char *prod_id[4]; u64 dma_mask; struct device dev; #ifdef CONFIG_PCMCIA_IOCTL /* device driver wanted by cardmgr */ - struct pcmcia_driver * cardmgr; + struct pcmcia_driver *cardmgr; #endif /* data private to drivers */ diff --git a/include/pcmcia/mem_op.h b/include/pcmcia/mem_op.h index 8d19b9401a5b..0fa06e5d5376 100644 --- a/include/pcmcia/mem_op.h +++ b/include/pcmcia/mem_op.h @@ -15,8 +15,8 @@ #ifndef _LINUX_MEM_OP_H #define _LINUX_MEM_OP_H +#include #include -#include /* If UNSAFE_MEMCPY is defined, we use the (optimized) system routines diff --git a/include/pcmcia/ss.h b/include/pcmcia/ss.h index 7c23be706f12..cbfba885eb85 100644 --- a/include/pcmcia/ss.h +++ b/include/pcmcia/ss.h @@ -154,7 +154,7 @@ struct pcmcia_socket { struct list_head socket_list; struct completion socket_released; - /* deprecated */ + /* deprecated */ unsigned int sock; /* socket number */ @@ -164,7 +164,7 @@ struct pcmcia_socket { u_int map_size; u_int io_offset; u_int pci_irq; - struct pci_dev * cb_dev; + struct pci_dev *cb_dev; /* socket setup is done so resources should be able to be allocated. @@ -179,9 +179,9 @@ struct pcmcia_socket { u8 reserved:5; /* socket operations */ - struct pccard_operations * ops; - struct pccard_resource_ops * resource_ops; - void * resource_data; + struct pccard_operations *ops; + struct pccard_resource_ops *resource_ops; + void *resource_data; /* Zoom video behaviour is so chip specific its not worth adding this to _ops */ @@ -245,7 +245,7 @@ struct pcmcia_socket { /* cardbus (32-bit) */ #ifdef CONFIG_CARDBUS - struct resource * cb_cis_res; + struct resource *cb_cis_res; void __iomem *cb_cis_virt; #endif /* CONFIG_CARDBUS */ -- cgit v1.2.3 From ab2c0672984f7f7ebec6d5f615fd5a6ebad26f3d Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Fri, 4 Dec 2009 10:55:24 +1000 Subject: drm/intel: refactor DP i2c support and DP common header to drm helper Both radeon and nouveau can re-use this code so move it up a level so they can. 
However the hw interfaces for aux ch are different enough that the code to translate from mode, address, bytes to actual hw interfaces isn't generic, so move that code into the Intel driver. Signed-off-by: Dave Airlie --- drivers/gpu/drm/Makefile | 2 +- drivers/gpu/drm/drm_dp_i2c_helper.c | 209 +++++++++++++++++++++++++++ drivers/gpu/drm/i915/Makefile | 1 - drivers/gpu/drm/i915/intel_display.c | 2 +- drivers/gpu/drm/i915/intel_dp.c | 72 ++++++++- drivers/gpu/drm/i915/intel_dp.h | 144 ------------------ drivers/gpu/drm/i915/intel_dp_i2c.c | 273 ----------------------------------- include/drm/drm_dp_helper.h | 149 +++++++++++++++++++ 8 files changed, 426 insertions(+), 426 deletions(-) create mode 100644 drivers/gpu/drm/drm_dp_i2c_helper.c delete mode 100644 drivers/gpu/drm/i915/intel_dp.h delete mode 100644 drivers/gpu/drm/i915/intel_dp_i2c.c create mode 100644 include/drm/drm_dp_helper.h (limited to 'include') diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 3c8827a7aabd..91567ac806f1 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -15,7 +15,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \ drm-$(CONFIG_COMPAT) += drm_ioc32.o -drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o +drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o diff --git a/drivers/gpu/drm/drm_dp_i2c_helper.c b/drivers/gpu/drm/drm_dp_i2c_helper.c new file mode 100644 index 000000000000..f1c7c856e9db --- /dev/null +++ b/drivers/gpu/drm/drm_dp_i2c_helper.c @@ -0,0 +1,209 @@ +/* + * Copyright © 2009 Keith Packard + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "drm_dp_helper.h" +#include "drmP.h" + +/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */ +static int +i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode, + uint8_t write_byte, uint8_t *read_byte) +{ + struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; + int ret; + + ret = (*algo_data->aux_ch)(adapter, mode, + write_byte, read_byte); + return ret; +} + +/* + * I2C over AUX CH + */ + +/* + * Send the address. 
If the I2C link is running, this 'restarts' + * the connection with the new address, this is used for doing + * a write followed by a read (as needed for DDC) + */ +static int +i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading) +{ + struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; + int mode = MODE_I2C_START; + int ret; + + if (reading) + mode |= MODE_I2C_READ; + else + mode |= MODE_I2C_WRITE; + algo_data->address = address; + algo_data->running = true; + ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL); + return ret; +} + +/* + * Stop the I2C transaction. This closes out the link, sending + * a bare address packet with the MOT bit turned off + */ +static void +i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading) +{ + struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; + int mode = MODE_I2C_STOP; + + if (reading) + mode |= MODE_I2C_READ; + else + mode |= MODE_I2C_WRITE; + if (algo_data->running) { + (void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL); + algo_data->running = false; + } +} + +/* + * Write a single byte to the current I2C address, the + * the I2C link must be running or this returns -EIO + */ +static int +i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte) +{ + struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; + int ret; + + if (!algo_data->running) + return -EIO; + + ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL); + return ret; +} + +/* + * Read a single byte from the current I2C address, the + * I2C link must be running or this returns -EIO + */ +static int +i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret) +{ + struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; + int ret; + + if (!algo_data->running) + return -EIO; + + ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret); + return ret; +} + +static int +i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter, + struct i2c_msg *msgs, + int num) +{ + int ret = 0; + bool reading = false; + int m; + int b; + + for (m = 0; m < num; m++) { + u16 len = msgs[m].len; + u8 *buf = msgs[m].buf; + reading = (msgs[m].flags & I2C_M_RD) != 0; + ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading); + if (ret < 0) + break; + if (reading) { + for (b = 0; b < len; b++) { + ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]); + if (ret < 0) + break; + } + } else { + for (b = 0; b < len; b++) { + ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]); + if (ret < 0) + break; + } + } + if (ret < 0) + break; + } + if (ret >= 0) + ret = num; + i2c_algo_dp_aux_stop(adapter, reading); + DRM_DEBUG("dp_aux_xfer return %d\n", ret); + return ret; +} + +static u32 +i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | + I2C_FUNC_SMBUS_READ_BLOCK_DATA | + I2C_FUNC_SMBUS_BLOCK_PROC_CALL | + I2C_FUNC_10BIT_ADDR; +} + +static const struct i2c_algorithm i2c_dp_aux_algo = { + .master_xfer = i2c_algo_dp_aux_xfer, + .functionality = i2c_algo_dp_aux_functionality, +}; + +static void +i2c_dp_aux_reset_bus(struct i2c_adapter *adapter) +{ + (void) i2c_algo_dp_aux_address(adapter, 0, false); + (void) i2c_algo_dp_aux_stop(adapter, false); + +} + +static int +i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter) +{ + adapter->algo = &i2c_dp_aux_algo; + adapter->retries = 3; + i2c_dp_aux_reset_bus(adapter); + return 0; +} + +int +i2c_dp_aux_add_bus(struct i2c_adapter *adapter) +{ + int error; + + error = i2c_dp_aux_prepare_bus(adapter); + if (error) + return 
error; + error = i2c_add_adapter(adapter); + return error; +} +EXPORT_SYMBOL(i2c_dp_aux_add_bus); diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index fa7b9be096bc..e3d049229cdd 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -15,7 +15,6 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ intel_lvds.o \ intel_bios.o \ intel_dp.o \ - intel_dp_i2c.o \ intel_hdmi.o \ intel_sdvo.o \ intel_modes.o \ diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3ba6546b7c7f..ccd180dce4cc 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -32,7 +32,7 @@ #include "intel_drv.h" #include "i915_drm.h" #include "i915_drv.h" -#include "intel_dp.h" +#include "drm_dp_helper.h" #include "drm_crtc_helper.h" diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index d83447557f9b..63424d5db9c6 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -33,7 +33,7 @@ #include "intel_drv.h" #include "i915_drm.h" #include "i915_drv.h" -#include "intel_dp.h" +#include "drm_dp_helper.h" #define DP_LINK_STATUS_SIZE 6 #define DP_LINK_CHECK_TIMEOUT (10 * 1000) @@ -382,17 +382,77 @@ intel_dp_aux_native_read(struct intel_output *intel_output, } static int -intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, - uint8_t *send, int send_bytes, - uint8_t *recv, int recv_bytes) +intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, + uint8_t write_byte, uint8_t *read_byte) { + struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; struct intel_dp_priv *dp_priv = container_of(adapter, struct intel_dp_priv, adapter); struct intel_output *intel_output = dp_priv->intel_output; + uint16_t address = algo_data->address; + uint8_t msg[5]; + uint8_t reply[2]; + int msg_bytes; + int reply_bytes; + int ret; + + /* Set up the command byte */ + if (mode & MODE_I2C_READ) + msg[0] = AUX_I2C_READ << 4; + else + msg[0] = AUX_I2C_WRITE << 4; + + if (!(mode & MODE_I2C_STOP)) + msg[0] |= AUX_I2C_MOT << 4; + + msg[1] = address >> 8; + msg[2] = address; + + switch (mode) { + case MODE_I2C_WRITE: + msg[3] = 0; + msg[4] = write_byte; + msg_bytes = 5; + reply_bytes = 1; + break; + case MODE_I2C_READ: + msg[3] = 0; + msg_bytes = 4; + reply_bytes = 2; + break; + default: + msg_bytes = 3; + reply_bytes = 1; + break; + } - return intel_dp_aux_ch(intel_output, - send, send_bytes, recv, recv_bytes); + for (;;) { + ret = intel_dp_aux_ch(intel_output, + msg, msg_bytes, + reply, reply_bytes); + if (ret < 0) { + DRM_DEBUG("aux_ch failed %d\n", ret); + return ret; + } + switch (reply[0] & AUX_I2C_REPLY_MASK) { + case AUX_I2C_REPLY_ACK: + if (mode == MODE_I2C_READ) { + *read_byte = reply[1]; + } + return reply_bytes - 1; + case AUX_I2C_REPLY_NACK: + DRM_DEBUG("aux_ch nack\n"); + return -EREMOTEIO; + case AUX_I2C_REPLY_DEFER: + DRM_DEBUG("aux_ch defer\n"); + udelay(100); + break; + default: + DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]); + return -EREMOTEIO; + } + } } static int diff --git a/drivers/gpu/drm/i915/intel_dp.h b/drivers/gpu/drm/i915/intel_dp.h deleted file mode 100644 index 2b38054d3b6d..000000000000 --- a/drivers/gpu/drm/i915/intel_dp.h +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Copyright © 2008 Keith Packard - * - * Permission to use, copy, modify, distribute, and sell this software and its - * documentation for any purpose is hereby granted without fee, provided that - * the above copyright notice appear in all copies and that both that 
copyright - * notice and this permission notice appear in supporting documentation, and - * that the name of the copyright holders not be used in advertising or - * publicity pertaining to distribution of the software without specific, - * written prior permission. The copyright holders make no representations - * about the suitability of this software for any purpose. It is provided "as - * is" without express or implied warranty. - * - * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, - * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO - * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR - * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, - * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER - * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE - * OF THIS SOFTWARE. - */ - -#ifndef _INTEL_DP_H_ -#define _INTEL_DP_H_ - -/* From the VESA DisplayPort spec */ - -#define AUX_NATIVE_WRITE 0x8 -#define AUX_NATIVE_READ 0x9 -#define AUX_I2C_WRITE 0x0 -#define AUX_I2C_READ 0x1 -#define AUX_I2C_STATUS 0x2 -#define AUX_I2C_MOT 0x4 - -#define AUX_NATIVE_REPLY_ACK (0x0 << 4) -#define AUX_NATIVE_REPLY_NACK (0x1 << 4) -#define AUX_NATIVE_REPLY_DEFER (0x2 << 4) -#define AUX_NATIVE_REPLY_MASK (0x3 << 4) - -#define AUX_I2C_REPLY_ACK (0x0 << 6) -#define AUX_I2C_REPLY_NACK (0x1 << 6) -#define AUX_I2C_REPLY_DEFER (0x2 << 6) -#define AUX_I2C_REPLY_MASK (0x3 << 6) - -/* AUX CH addresses */ -#define DP_LINK_BW_SET 0x100 -# define DP_LINK_BW_1_62 0x06 -# define DP_LINK_BW_2_7 0x0a - -#define DP_LANE_COUNT_SET 0x101 -# define DP_LANE_COUNT_MASK 0x0f -# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7) - -#define DP_TRAINING_PATTERN_SET 0x102 - -# define DP_TRAINING_PATTERN_DISABLE 0 -# define DP_TRAINING_PATTERN_1 1 -# define DP_TRAINING_PATTERN_2 2 -# define DP_TRAINING_PATTERN_MASK 0x3 - -# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2) -# define DP_LINK_QUAL_PATTERN_D10_2 (1 << 2) -# define DP_LINK_QUAL_PATTERN_ERROR_RATE (2 << 2) -# define DP_LINK_QUAL_PATTERN_PRBS7 (3 << 2) -# define DP_LINK_QUAL_PATTERN_MASK (3 << 2) - -# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4) -# define DP_LINK_SCRAMBLING_DISABLE (1 << 5) - -# define DP_SYMBOL_ERROR_COUNT_BOTH (0 << 6) -# define DP_SYMBOL_ERROR_COUNT_DISPARITY (1 << 6) -# define DP_SYMBOL_ERROR_COUNT_SYMBOL (2 << 6) -# define DP_SYMBOL_ERROR_COUNT_MASK (3 << 6) - -#define DP_TRAINING_LANE0_SET 0x103 -#define DP_TRAINING_LANE1_SET 0x104 -#define DP_TRAINING_LANE2_SET 0x105 -#define DP_TRAINING_LANE3_SET 0x106 - -# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3 -# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0 -# define DP_TRAIN_MAX_SWING_REACHED (1 << 2) -# define DP_TRAIN_VOLTAGE_SWING_400 (0 << 0) -# define DP_TRAIN_VOLTAGE_SWING_600 (1 << 0) -# define DP_TRAIN_VOLTAGE_SWING_800 (2 << 0) -# define DP_TRAIN_VOLTAGE_SWING_1200 (3 << 0) - -# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3) -# define DP_TRAIN_PRE_EMPHASIS_0 (0 << 3) -# define DP_TRAIN_PRE_EMPHASIS_3_5 (1 << 3) -# define DP_TRAIN_PRE_EMPHASIS_6 (2 << 3) -# define DP_TRAIN_PRE_EMPHASIS_9_5 (3 << 3) - -# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3 -# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5) - -#define DP_DOWNSPREAD_CTRL 0x107 -# define DP_SPREAD_AMP_0_5 (1 << 4) - -#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108 -# define DP_SET_ANSI_8B10B (1 << 0) - -#define DP_LANE0_1_STATUS 0x202 -#define DP_LANE2_3_STATUS 0x203 - -# define DP_LANE_CR_DONE (1 << 0) -# define DP_LANE_CHANNEL_EQ_DONE (1 
<< 1) -# define DP_LANE_SYMBOL_LOCKED (1 << 2) - -#define DP_LANE_ALIGN_STATUS_UPDATED 0x204 - -#define DP_INTERLANE_ALIGN_DONE (1 << 0) -#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6) -#define DP_LINK_STATUS_UPDATED (1 << 7) - -#define DP_SINK_STATUS 0x205 - -#define DP_RECEIVE_PORT_0_STATUS (1 << 0) -#define DP_RECEIVE_PORT_1_STATUS (1 << 1) - -#define DP_ADJUST_REQUEST_LANE0_1 0x206 -#define DP_ADJUST_REQUEST_LANE2_3 0x207 - -#define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03 -#define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0 -#define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c -#define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2 -#define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30 -#define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4 -#define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0 -#define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6 - -struct i2c_algo_dp_aux_data { - bool running; - u16 address; - int (*aux_ch) (struct i2c_adapter *adapter, - uint8_t *send, int send_bytes, - uint8_t *recv, int recv_bytes); -}; - -int -i2c_dp_aux_add_bus(struct i2c_adapter *adapter); - -#endif /* _INTEL_DP_H_ */ diff --git a/drivers/gpu/drm/i915/intel_dp_i2c.c b/drivers/gpu/drm/i915/intel_dp_i2c.c deleted file mode 100644 index a63b6f57d2d4..000000000000 --- a/drivers/gpu/drm/i915/intel_dp_i2c.c +++ /dev/null @@ -1,273 +0,0 @@ -/* - * Copyright © 2009 Keith Packard - * - * Permission to use, copy, modify, distribute, and sell this software and its - * documentation for any purpose is hereby granted without fee, provided that - * the above copyright notice appear in all copies and that both that copyright - * notice and this permission notice appear in supporting documentation, and - * that the name of the copyright holders not be used in advertising or - * publicity pertaining to distribution of the software without specific, - * written prior permission. The copyright holders make no representations - * about the suitability of this software for any purpose. It is provided "as - * is" without express or implied warranty. - * - * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, - * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO - * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR - * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, - * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER - * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE - * OF THIS SOFTWARE. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include "intel_dp.h" -#include "drmP.h" - -/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */ - -#define MODE_I2C_START 1 -#define MODE_I2C_WRITE 2 -#define MODE_I2C_READ 4 -#define MODE_I2C_STOP 8 - -static int -i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode, - uint8_t write_byte, uint8_t *read_byte) -{ - struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; - uint16_t address = algo_data->address; - uint8_t msg[5]; - uint8_t reply[2]; - int msg_bytes; - int reply_bytes; - int ret; - - /* Set up the command byte */ - if (mode & MODE_I2C_READ) - msg[0] = AUX_I2C_READ << 4; - else - msg[0] = AUX_I2C_WRITE << 4; - - if (!(mode & MODE_I2C_STOP)) - msg[0] |= AUX_I2C_MOT << 4; - - msg[1] = address >> 8; - msg[2] = address; - - switch (mode) { - case MODE_I2C_WRITE: - msg[3] = 0; - msg[4] = write_byte; - msg_bytes = 5; - reply_bytes = 1; - break; - case MODE_I2C_READ: - msg[3] = 0; - msg_bytes = 4; - reply_bytes = 2; - break; - default: - msg_bytes = 3; - reply_bytes = 1; - break; - } - - for (;;) { - ret = (*algo_data->aux_ch)(adapter, - msg, msg_bytes, - reply, reply_bytes); - if (ret < 0) { - DRM_DEBUG("aux_ch failed %d\n", ret); - return ret; - } - switch (reply[0] & AUX_I2C_REPLY_MASK) { - case AUX_I2C_REPLY_ACK: - if (mode == MODE_I2C_READ) { - *read_byte = reply[1]; - } - return reply_bytes - 1; - case AUX_I2C_REPLY_NACK: - DRM_DEBUG("aux_ch nack\n"); - return -EREMOTEIO; - case AUX_I2C_REPLY_DEFER: - DRM_DEBUG("aux_ch defer\n"); - udelay(100); - break; - default: - DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]); - return -EREMOTEIO; - } - } -} - -/* - * I2C over AUX CH - */ - -/* - * Send the address. If the I2C link is running, this 'restarts' - * the connection with the new address, this is used for doing - * a write followed by a read (as needed for DDC) - */ -static int -i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading) -{ - struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; - int mode = MODE_I2C_START; - int ret; - - if (reading) - mode |= MODE_I2C_READ; - else - mode |= MODE_I2C_WRITE; - algo_data->address = address; - algo_data->running = true; - ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL); - return ret; -} - -/* - * Stop the I2C transaction. 
This closes out the link, sending - * a bare address packet with the MOT bit turned off - */ -static void -i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading) -{ - struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; - int mode = MODE_I2C_STOP; - - if (reading) - mode |= MODE_I2C_READ; - else - mode |= MODE_I2C_WRITE; - if (algo_data->running) { - (void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL); - algo_data->running = false; - } -} - -/* - * Write a single byte to the current I2C address, the - * the I2C link must be running or this returns -EIO - */ -static int -i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte) -{ - struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; - int ret; - - if (!algo_data->running) - return -EIO; - - ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL); - return ret; -} - -/* - * Read a single byte from the current I2C address, the - * I2C link must be running or this returns -EIO - */ -static int -i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret) -{ - struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; - int ret; - - if (!algo_data->running) - return -EIO; - - ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret); - return ret; -} - -static int -i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter, - struct i2c_msg *msgs, - int num) -{ - int ret = 0; - bool reading = false; - int m; - int b; - - for (m = 0; m < num; m++) { - u16 len = msgs[m].len; - u8 *buf = msgs[m].buf; - reading = (msgs[m].flags & I2C_M_RD) != 0; - ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading); - if (ret < 0) - break; - if (reading) { - for (b = 0; b < len; b++) { - ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]); - if (ret < 0) - break; - } - } else { - for (b = 0; b < len; b++) { - ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]); - if (ret < 0) - break; - } - } - if (ret < 0) - break; - } - if (ret >= 0) - ret = num; - i2c_algo_dp_aux_stop(adapter, reading); - DRM_DEBUG("dp_aux_xfer return %d\n", ret); - return ret; -} - -static u32 -i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter) -{ - return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | - I2C_FUNC_SMBUS_READ_BLOCK_DATA | - I2C_FUNC_SMBUS_BLOCK_PROC_CALL | - I2C_FUNC_10BIT_ADDR; -} - -static const struct i2c_algorithm i2c_dp_aux_algo = { - .master_xfer = i2c_algo_dp_aux_xfer, - .functionality = i2c_algo_dp_aux_functionality, -}; - -static void -i2c_dp_aux_reset_bus(struct i2c_adapter *adapter) -{ - (void) i2c_algo_dp_aux_address(adapter, 0, false); - (void) i2c_algo_dp_aux_stop(adapter, false); - -} - -static int -i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter) -{ - adapter->algo = &i2c_dp_aux_algo; - adapter->retries = 3; - i2c_dp_aux_reset_bus(adapter); - return 0; -} - -int -i2c_dp_aux_add_bus(struct i2c_adapter *adapter) -{ - int error; - - error = i2c_dp_aux_prepare_bus(adapter); - if (error) - return error; - error = i2c_add_adapter(adapter); - return error; -} -EXPORT_SYMBOL(i2c_dp_aux_add_bus); diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h new file mode 100644 index 000000000000..e49879ce95f9 --- /dev/null +++ b/include/drm/drm_dp_helper.h @@ -0,0 +1,149 @@ +/* + * Copyright © 2008 Keith Packard + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice 
appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#ifndef _DRM_DP_HELPER_H_ +#define _DRM_DP_HELPER_H_ + +/* From the VESA DisplayPort spec */ + +#define AUX_NATIVE_WRITE 0x8 +#define AUX_NATIVE_READ 0x9 +#define AUX_I2C_WRITE 0x0 +#define AUX_I2C_READ 0x1 +#define AUX_I2C_STATUS 0x2 +#define AUX_I2C_MOT 0x4 + +#define AUX_NATIVE_REPLY_ACK (0x0 << 4) +#define AUX_NATIVE_REPLY_NACK (0x1 << 4) +#define AUX_NATIVE_REPLY_DEFER (0x2 << 4) +#define AUX_NATIVE_REPLY_MASK (0x3 << 4) + +#define AUX_I2C_REPLY_ACK (0x0 << 6) +#define AUX_I2C_REPLY_NACK (0x1 << 6) +#define AUX_I2C_REPLY_DEFER (0x2 << 6) +#define AUX_I2C_REPLY_MASK (0x3 << 6) + +/* AUX CH addresses */ +#define DP_LINK_BW_SET 0x100 +# define DP_LINK_BW_1_62 0x06 +# define DP_LINK_BW_2_7 0x0a + +#define DP_LANE_COUNT_SET 0x101 +# define DP_LANE_COUNT_MASK 0x0f +# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7) + +#define DP_TRAINING_PATTERN_SET 0x102 + +# define DP_TRAINING_PATTERN_DISABLE 0 +# define DP_TRAINING_PATTERN_1 1 +# define DP_TRAINING_PATTERN_2 2 +# define DP_TRAINING_PATTERN_MASK 0x3 + +# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2) +# define DP_LINK_QUAL_PATTERN_D10_2 (1 << 2) +# define DP_LINK_QUAL_PATTERN_ERROR_RATE (2 << 2) +# define DP_LINK_QUAL_PATTERN_PRBS7 (3 << 2) +# define DP_LINK_QUAL_PATTERN_MASK (3 << 2) + +# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4) +# define DP_LINK_SCRAMBLING_DISABLE (1 << 5) + +# define DP_SYMBOL_ERROR_COUNT_BOTH (0 << 6) +# define DP_SYMBOL_ERROR_COUNT_DISPARITY (1 << 6) +# define DP_SYMBOL_ERROR_COUNT_SYMBOL (2 << 6) +# define DP_SYMBOL_ERROR_COUNT_MASK (3 << 6) + +#define DP_TRAINING_LANE0_SET 0x103 +#define DP_TRAINING_LANE1_SET 0x104 +#define DP_TRAINING_LANE2_SET 0x105 +#define DP_TRAINING_LANE3_SET 0x106 + +# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3 +# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0 +# define DP_TRAIN_MAX_SWING_REACHED (1 << 2) +# define DP_TRAIN_VOLTAGE_SWING_400 (0 << 0) +# define DP_TRAIN_VOLTAGE_SWING_600 (1 << 0) +# define DP_TRAIN_VOLTAGE_SWING_800 (2 << 0) +# define DP_TRAIN_VOLTAGE_SWING_1200 (3 << 0) + +# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3) +# define DP_TRAIN_PRE_EMPHASIS_0 (0 << 3) +# define DP_TRAIN_PRE_EMPHASIS_3_5 (1 << 3) +# define DP_TRAIN_PRE_EMPHASIS_6 (2 << 3) +# define DP_TRAIN_PRE_EMPHASIS_9_5 (3 << 3) + +# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3 +# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5) + +#define DP_DOWNSPREAD_CTRL 0x107 +# define DP_SPREAD_AMP_0_5 (1 << 4) + +#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108 +# define DP_SET_ANSI_8B10B (1 << 0) + +#define DP_LANE0_1_STATUS 0x202 +#define DP_LANE2_3_STATUS 0x203 + +# define DP_LANE_CR_DONE (1 << 0) +# define DP_LANE_CHANNEL_EQ_DONE (1 << 1) +# define DP_LANE_SYMBOL_LOCKED 
(1 << 2) + +#define DP_LANE_ALIGN_STATUS_UPDATED 0x204 + +#define DP_INTERLANE_ALIGN_DONE (1 << 0) +#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6) +#define DP_LINK_STATUS_UPDATED (1 << 7) + +#define DP_SINK_STATUS 0x205 + +#define DP_RECEIVE_PORT_0_STATUS (1 << 0) +#define DP_RECEIVE_PORT_1_STATUS (1 << 1) + +#define DP_ADJUST_REQUEST_LANE0_1 0x206 +#define DP_ADJUST_REQUEST_LANE2_3 0x207 + +#define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03 +#define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0 +#define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c +#define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2 +#define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30 +#define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4 +#define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0 +#define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6 + +#define MODE_I2C_START 1 +#define MODE_I2C_WRITE 2 +#define MODE_I2C_READ 4 +#define MODE_I2C_STOP 8 + +struct i2c_algo_dp_aux_data { + bool running; + u16 address; + int (*aux_ch) (struct i2c_adapter *adapter, + int mode, uint8_t write_byte, + uint8_t *read_byte); +}; + +int +i2c_dp_aux_add_bus(struct i2c_adapter *adapter); + +#endif /* _DRM_DP_HELPER_H_ */ -- cgit v1.2.3 From 746c1aa4d100f7441423050f34be79f401fbf7d4 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 8 Dec 2009 07:07:28 +1000 Subject: drm/radeon/kms: initial radeon displayport porting This is enough to retrieve EDID and DPCP. Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/Makefile | 2 +- drivers/gpu/drm/radeon/atombios_dp.c | 275 +++++++++++++++++++++++++++++ drivers/gpu/drm/radeon/radeon_atombios.c | 17 +- drivers/gpu/drm/radeon/radeon_connectors.c | 57 +++++- drivers/gpu/drm/radeon/radeon_display.c | 7 + drivers/gpu/drm/radeon/radeon_encoders.c | 93 ++++++++++ drivers/gpu/drm/radeon/radeon_i2c.c | 50 ++++-- drivers/gpu/drm/radeon/radeon_mode.h | 21 ++- include/drm/drm_dp_helper.h | 2 + 9 files changed, 500 insertions(+), 24 deletions(-) create mode 100644 drivers/gpu/drm/radeon/atombios_dp.c (limited to 'include') diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index b5713eedd6e1..feb52eee4314 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile @@ -49,7 +49,7 @@ radeon-y += radeon_device.o radeon_kms.o \ radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \ rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ - r600_blit_kms.o radeon_pm.o + r600_blit_kms.o radeon_pm.o atombios_dp.o radeon-$(CONFIG_COMPAT) += radeon_ioc32.o diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c new file mode 100644 index 000000000000..a4bc80113385 --- /dev/null +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -0,0 +1,275 @@ +/* + * Copyright 2007-8 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Dave Airlie + * Alex Deucher + */ +#include "drmP.h" +#include "radeon_drm.h" +#include "radeon.h" + +#include "atom.h" +#include "atom-bits.h" +#include "drm_dp_helper.h" + +#define DP_LINK_STATUS_SIZE 6 + +bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes, + int num_bytes, u8 *read_byte, + u8 read_buf_len, u8 delay) +{ + struct drm_device *dev = chan->dev; + struct radeon_device *rdev = dev->dev_private; + PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args; + int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); + unsigned char *base; + + memset(&args, 0, sizeof(args)); + + base = (unsigned char *)rdev->mode_info.atom_context->scratch; + + memcpy(base, req_bytes, num_bytes); + + args.lpAuxRequest = 0; + args.lpDataOut = 16; + args.ucDataOutLen = 0; + args.ucChannelID = chan->i2c_id; + args.ucDelay = delay; + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + + if (args.ucReplyStatus) { + DRM_ERROR("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n", + req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3], + chan->i2c_id, args.ucReplyStatus); + return false; + } + + if (args.ucDataOutLen && read_byte && read_buf_len) { + if (read_buf_len < args.ucDataOutLen) { + DRM_ERROR("Buffer to small for return answer %d %d\n", + read_buf_len, args.ucDataOutLen); + return false; + } + { + int len = min(read_buf_len, args.ucDataOutLen); + memcpy(read_byte, base + 16, len); + } + } + return true; +} + +int radeon_dp_encoder_service(struct radeon_device *rdev, int action, int dp_clock, + uint8_t ucconfig, uint8_t lane_num) +{ + DP_ENCODER_SERVICE_PARAMETERS args; + int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService); + + memset(&args, 0, sizeof(args)); + args.ucLinkClock = dp_clock / 10; + args.ucConfig = ucconfig; + args.ucAction = action; + args.ucLaneNum = lane_num; + args.ucStatus = 0; + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + return args.ucStatus; +} + +int radeon_dp_getsinktype(struct radeon_connector *radeon_connector) +{ + struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; + struct drm_device *dev = radeon_connector->base.dev; + struct radeon_device *rdev = dev->dev_private; + + return radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_GET_SINK_TYPE, 0, + radeon_dig_connector->uc_i2c_id, 0); +} + +union dig_transmitter_control { + DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1; + DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2; +}; + +bool radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, uint16_t address, + uint8_t send_bytes, uint8_t *send) +{ + struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; + struct drm_device *dev = radeon_connector->base.dev; + struct radeon_device *rdev = dev->dev_private; + u8 msg[20]; + u8 msg_len, dp_msg_len; + bool ret; + + dp_msg_len = 4; + msg[0] = address; + msg[1] = address >> 8; + msg[2] = AUX_NATIVE_WRITE << 4; + dp_msg_len += send_bytes; + msg[3] = (dp_msg_len 
<< 4) | (send_bytes - 1); + + if (send_bytes > 16) + return false; + + memcpy(&msg[4], send, send_bytes); + msg_len = 4 + send_bytes; + ret = radeon_process_aux_ch(radeon_dig_connector->dp_i2c_bus, msg, msg_len, NULL, 0, 0); + return ret; +} + +bool radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, uint16_t address, + uint8_t delay, uint8_t expected_bytes, + uint8_t *read_p) +{ + struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; + struct drm_device *dev = radeon_connector->base.dev; + struct radeon_device *rdev = dev->dev_private; + u8 msg[20]; + u8 msg_len, dp_msg_len; + bool ret = false; + msg_len = 4; + dp_msg_len = 4; + msg[0] = address; + msg[1] = address >> 8; + msg[2] = AUX_NATIVE_READ << 4; + msg[3] = (dp_msg_len) << 4; + msg[3] |= expected_bytes - 1; + + ret = radeon_process_aux_ch(radeon_dig_connector->dp_i2c_bus, msg, msg_len, read_p, expected_bytes, delay); + return ret; +} + +void radeon_dp_getdpcp(struct radeon_connector *radeon_connector) +{ + struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; + u8 msg[25]; + int ret; + + ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCP_REV, 0, 8, msg); + if (ret) { + memcpy(radeon_dig_connector->dpcp, msg, 8); + { + int i; + printk("DPCP: "); + for (i = 0; i < 8; i++) + printk("%02x ", msg[i]); + printk("\n"); + } + } + radeon_dig_connector->dpcp[0] = 0; + return; +} + +static bool atom_dp_get_link_status(struct radeon_connector *radeon_connector, + u8 link_status[DP_LINK_STATUS_SIZE]) +{ + int ret; + ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS, 100, + DP_LINK_STATUS_SIZE, link_status); + if (!ret) { + DRM_ERROR("displayport link status failed\n"); + return false; + } + + DRM_INFO("link status %02x %02x %02x %02x %02x %02x\n", + link_status[0], link_status[1], link_status[2], + link_status[3], link_status[4], link_status[5]); + return true; +} + +static void dp_set_power(struct radeon_connector *radeon_connector, u8 power_state) +{ + struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; + if (radeon_dig_connector->dpcp[0] >= 0x11) { + radeon_dp_aux_native_write(radeon_connector, 0x600, 1, + &power_state); + } +} + +static void dp_update_dpvs_emph(struct radeon_connector *radeon_connector, + u8 train_set[4]) +{ + struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; + +// radeon_dp_digtransmitter_setup_vsemph(); + radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_LANE0_SET, + 0/* lc */, train_set); +} + +static void dp_set_training(struct radeon_connector *radeon_connector, + u8 training) +{ + radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_PATTERN_SET, + 1, &training); +} + +int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, + uint8_t write_byte, uint8_t *read_byte) +{ + struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; + struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter; + int ret = 0; + uint16_t address = algo_data->address; + uint8_t msg[5]; + uint8_t reply[2]; + int msg_len, dp_msg_len; + int reply_bytes; + + /* Set up the command byte */ + if (mode & MODE_I2C_READ) + msg[2] = AUX_I2C_READ << 4; + else + msg[2] = AUX_I2C_WRITE << 4; + + if (!(mode & MODE_I2C_STOP)) + msg[2] |= AUX_I2C_MOT << 4; + + msg[0] = address; + msg[1] = address >> 8; + + reply_bytes = 1; + + msg_len = 4; + dp_msg_len = 3; + switch (mode) { + case MODE_I2C_WRITE: + msg[4] = write_byte; + msg_len++; + dp_msg_len += 2; + break; + 
case MODE_I2C_READ: + dp_msg_len += 1; + break; + default: + break; + } + + msg[3] = (dp_msg_len) << 4; + ret = radeon_process_aux_ch(auxch, msg, msg_len, reply, reply_bytes, 0); + + if (ret) { + if (read_byte) + *read_byte = reply[0]; + return reply_bytes; + } + return -EREMOTEIO; +} diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 5e414102c875..de05ac976472 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -47,7 +47,7 @@ radeon_add_atom_connector(struct drm_device *dev, int connector_type, struct radeon_i2c_bus_rec *i2c_bus, bool linkb, uint32_t igp_lane_info, - uint16_t connector_object_id); + uint16_t connector_object_id, uint8_t uc_i2c_id); /* from radeon_legacy_encoder.c */ extern void @@ -60,8 +60,8 @@ union atom_supported_devices { struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1; }; -static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device - *dev, uint8_t id) +static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device *dev, + uint8_t id) { struct radeon_device *rdev = dev->dev_private; struct atom_context *ctx = rdev->mode_info.atom_context; @@ -276,7 +276,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) uint16_t igp_lane_info, conn_id, connector_object_id; bool linkb; struct radeon_i2c_bus_rec ddc_bus; - + ATOM_I2C_ID_CONFIG_ACCESS i2c_id; atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset); if (data_offset == 0) @@ -302,7 +302,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) path = (ATOM_DISPLAY_OBJECT_PATH *) addr; path_size += le16_to_cpu(path->usSize); linkb = false; - + i2c_id.ucAccess = 0; if (device_support & le16_to_cpu(path->usDeviceTag)) { uint8_t con_obj_id, con_obj_num, con_obj_type; @@ -420,7 +420,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) asObjects[j]. usRecordOffset)); ATOM_I2C_RECORD *i2c_record; - + while (record->ucRecordType > 0 && record-> ucRecordType <= @@ -431,6 +431,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) i2c_record = (ATOM_I2C_RECORD *) record; + i2c_id.sbfAccess = i2c_record->sucI2cId; line_mux = i2c_record-> sucI2cId. 
@@ -473,7 +474,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) usDeviceTag), connector_type, &ddc_bus, linkb, igp_lane_info, - connector_object_id); + connector_object_id, i2c_id.ucAccess); } } @@ -692,7 +693,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct connector_type, &bios_connectors[i].ddc_bus, false, 0, - connector_object_id); + connector_object_id, 0); } } diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 7ab3c501b4dd..733427555ee1 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -896,6 +896,54 @@ struct drm_connector_funcs radeon_dvi_connector_funcs = { .force = radeon_dvi_force, }; +static int radeon_dp_get_modes(struct drm_connector *connector) +{ + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + int ret; + + ret = radeon_ddc_get_modes(radeon_connector); + return ret; +} + +static enum drm_connector_status radeon_dp_detect(struct drm_connector *connector) +{ + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + struct drm_encoder *encoder = NULL; + struct drm_encoder_helper_funcs *encoder_funcs; + struct drm_mode_object *obj; + int i; + enum drm_connector_status ret = connector_status_disconnected; + int sink_type; + bool dret; + + if (radeon_connector->edid) { + kfree(radeon_connector->edid); + radeon_connector->edid = NULL; + } + + sink_type = radeon_dp_getsinktype(radeon_connector); + if (sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { + radeon_dp_getdpcp(radeon_connector); + ret = connector_status_connected; + } + return ret; +} + +struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = { + .get_modes = radeon_dp_get_modes, + .mode_valid = radeon_dvi_mode_valid, + .best_encoder = radeon_dvi_encoder, +}; + +struct drm_connector_funcs radeon_dp_connector_funcs = { + .dpms = drm_helper_connector_dpms, + .detect = radeon_dp_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .set_property = radeon_connector_set_property, + .destroy = radeon_connector_destroy, + .force = radeon_dvi_force, +}; + void radeon_add_atom_connector(struct drm_device *dev, uint32_t connector_id, @@ -904,7 +952,7 @@ radeon_add_atom_connector(struct drm_device *dev, struct radeon_i2c_bus_rec *i2c_bus, bool linkb, uint32_t igp_lane_info, - uint16_t connector_object_id) + uint16_t connector_object_id, uint8_t uc_i2c_id) { struct radeon_device *rdev = dev->dev_private; struct drm_connector *connector; @@ -1030,10 +1078,13 @@ radeon_add_atom_connector(struct drm_device *dev, radeon_dig_connector->linkb = linkb; radeon_dig_connector->igp_lane_info = igp_lane_info; radeon_connector->con_priv = radeon_dig_connector; - drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); - ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); + drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); + ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); if (ret) goto failed; + /* add DP i2c bus */ + radeon_dig_connector->uc_i2c_id = uc_i2c_id; + radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, "DP-auxch", true, uc_i2c_id); if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP"); if (!radeon_connector->ddc_bus) diff --git a/drivers/gpu/drm/radeon/radeon_display.c 
b/drivers/gpu/drm/radeon/radeon_display.c index 62b02372cb09..a1c2804b694d 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -337,6 +337,13 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) { int ret = 0; + if (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) { + struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; + if (dig->dp_i2c_bus) { + radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter); + DRM_INFO("got edid %p from DP\n", radeon_connector->edid); + } + } if (!radeon_connector->ddc_bus) return -1; if (!radeon_connector->edid) { diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 291f6dd3683c..37f5ea1af969 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -850,6 +850,99 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action) } +static void +atombios_dig_transmitter_setup_vsemph(struct drm_encoder *encoder, u8 lane_num, + u8 lane_set) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + union dig_transmitter_control args; + int index = 0, num = 0; + uint8_t frev, crev; + struct radeon_encoder_atom_dig *dig; + struct drm_connector *connector; + struct radeon_connector *radeon_connector; + struct radeon_connector_atom_dig *dig_connector; + + connector = radeon_get_connector_for_encoder(encoder); + if (!connector) + return; + + radeon_connector = to_radeon_connector(connector); + + if (!radeon_encoder->enc_priv) + return; + + dig = radeon_encoder->enc_priv; + + if (!radeon_connector->con_priv) + return; + + dig_connector = radeon_connector->con_priv; + + memset(&args, 0, sizeof(args)); + + if (ASIC_IS_DCE32(rdev)) + index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl); + else { + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + index = GetIndexIntoMasterTable(COMMAND, DIG1TransmitterControl); + break; + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: + index = GetIndexIntoMasterTable(COMMAND, DIG2TransmitterControl); + break; + } + } + + atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); + + args.v1.ucAction = ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH; + args.v1.asMode.ucLaneSel = lane_num; + args.v1.asMode.ucLaneSet = lane_set; + + if (ASIC_IS_DCE32(rdev)) { + args.v2.acConfig.fDPConnector = 1; + + if (dig->dig_block) + args.v2.acConfig.ucEncoderSel = 1; + + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + args.v2.acConfig.ucTransmitterSel = 0; + num = 0; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + args.v2.acConfig.ucTransmitterSel = 1; + num = 1; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + args.v2.acConfig.ucTransmitterSel = 2; + num = 2; + break; + } + } else { + args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; + + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER; + if (dig_connector->linkb) + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3; + else + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3; + } + } + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + + if (ASIC_IS_DCE32(rdev)) + 
DRM_INFO("Output UNIPHY%d transmitter VSEMPH setup success\n", num); + else + DRM_INFO("Output DIG%d transmitter VSEMPH setup success\n", num); +} + static void atombios_yuv_setup(struct drm_encoder *encoder, bool enable) { diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index 6c645fb4dad8..f200312dd5df 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c @@ -172,20 +172,19 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, return NULL; i2c->adapter.owner = THIS_MODULE; - i2c->adapter.algo_data = &i2c->algo; i2c->dev = dev; - i2c->algo.setsda = set_data; - i2c->algo.setscl = set_clock; - i2c->algo.getsda = get_data; - i2c->algo.getscl = get_clock; - i2c->algo.udelay = 20; + i2c_set_adapdata(&i2c->adapter, i2c); + i2c->adapter.algo_data = &i2c->algo.bit; + i2c->algo.bit.setsda = set_data; + i2c->algo.bit.setscl = set_clock; + i2c->algo.bit.getsda = get_data; + i2c->algo.bit.getscl = get_clock; + i2c->algo.bit.udelay = 20; /* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always * make this, 2 jiffies is a lot more reliable */ - i2c->algo.timeout = 2; - i2c->algo.data = i2c; + i2c->algo.bit.timeout = 2; + i2c->algo.bit.data = i2c; i2c->rec = *rec; - i2c_set_adapdata(&i2c->adapter, i2c); - ret = i2c_bit_add_bus(&i2c->adapter); if (ret) { DRM_INFO("Failed to register i2c %s\n", name); @@ -199,6 +198,37 @@ out_free: } +struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev, + const char *name, bool dp, u8 i2c_id) +{ + struct radeon_i2c_chan *i2c; + int ret; + + i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL); + if (i2c == NULL) + return NULL; + + i2c->i2c_id = i2c_id; + i2c->adapter.owner = THIS_MODULE; + i2c->dev = dev; + i2c_set_adapdata(&i2c->adapter, i2c); + i2c->adapter.algo_data = &i2c->algo.dp; + i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch; + i2c->algo.dp.address = 0; + ret = i2c_dp_aux_add_bus(&i2c->adapter); + if (ret) { + DRM_INFO("Failed to register i2c %s\n", name); + goto out_free; + } + + return i2c; +out_free: + kfree(i2c); + return NULL; + +} + + void radeon_i2c_destroy(struct radeon_i2c_chan *i2c) { if (!i2c) diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 135693d5437e..ce1cdc748f1f 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -164,10 +165,14 @@ struct radeon_pll { }; struct radeon_i2c_chan { - struct drm_device *dev; struct i2c_adapter adapter; - struct i2c_algo_bit_data algo; + struct drm_device *dev; + union { + struct i2c_algo_dp_aux_data dp; + struct i2c_algo_bit_data bit; + } algo; struct radeon_i2c_bus_rec rec; + uint8_t i2c_id; }; /* mostly for macs, but really any system without connector tables */ @@ -328,6 +333,9 @@ struct radeon_encoder { struct radeon_connector_atom_dig { uint32_t igp_lane_info; bool linkb; + uint16_t uc_i2c_id; + struct radeon_i2c_chan *dp_i2c_bus; + u8 dpcp[8]; }; struct radeon_connector { @@ -344,6 +352,8 @@ struct radeon_connector { void *con_priv; bool dac_load_detect; uint16_t connector_object_id; + /* need to keep this for display port */ +// }; struct radeon_framebuffer { @@ -351,6 +361,13 @@ struct radeon_framebuffer { struct drm_gem_object *obj; }; +extern int radeon_dp_getsinktype(struct radeon_connector *radeon_connector); +extern void radeon_dp_getdpcp(struct radeon_connector *connector); +extern int radeon_dp_i2c_aux_ch(struct i2c_adapter 
*adapter, int mode, + uint8_t write_byte, uint8_t *read_byte); + +extern struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev, + const char *name, bool dp, u8 i2c_id); extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, struct radeon_i2c_bus_rec *rec, const char *name); diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index e49879ce95f9..376155f8f81f 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -43,6 +43,8 @@ #define AUX_I2C_REPLY_MASK (0x3 << 6) /* AUX CH addresses */ +#define DP_DPCP_REV 0x0 + #define DP_LINK_BW_SET 0x100 # define DP_LINK_BW_1_62 0x06 # define DP_LINK_BW_2_7 0x0a -- cgit v1.2.3 From 1a66c95a64c9ae0bc8382254f544b24b23f498ec Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 20 Nov 2009 19:40:13 -0500 Subject: drm/radeon/kms: DP fixes and cleanup from the ddx - dpcp -> dpcd - fix up dig encoder routing - aux transaction table takes delay in 10 usec units Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/atombios_dp.c | 24 ++-- drivers/gpu/drm/radeon/radeon_connectors.c | 2 +- drivers/gpu/drm/radeon/radeon_encoders.c | 221 ++++++++++++----------------- drivers/gpu/drm/radeon/radeon_mode.h | 4 +- include/drm/drm_dp_helper.h | 4 +- 5 files changed, 105 insertions(+), 150 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index a4bc80113385..d1c144be9734 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -34,7 +34,7 @@ #define DP_LINK_STATUS_SIZE 6 bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes, - int num_bytes, u8 *read_byte, + int num_bytes, u8 *read_byte, u8 read_buf_len, u8 delay) { struct drm_device *dev = chan->dev; @@ -42,9 +42,9 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes, PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); unsigned char *base; - + memset(&args, 0, sizeof(args)); - + base = (unsigned char *)rdev->mode_info.atom_context->scratch; memcpy(base, req_bytes, num_bytes); @@ -53,7 +53,7 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes, args.lpDataOut = 16; args.ucDataOutLen = 0; args.ucChannelID = chan->i2c_id; - args.ucDelay = delay; + args.ucDelay = delay / 10; atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); @@ -158,24 +158,24 @@ bool radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, uint16 return ret; } -void radeon_dp_getdpcp(struct radeon_connector *radeon_connector) +void radeon_dp_getdpcd(struct radeon_connector *radeon_connector) { struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; u8 msg[25]; int ret; - ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCP_REV, 0, 8, msg); + ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, 0, 8, msg); if (ret) { - memcpy(radeon_dig_connector->dpcp, msg, 8); - { + memcpy(radeon_dig_connector->dpcd, msg, 8); + { int i; - printk("DPCP: "); + printk("DPCD: "); for (i = 0; i < 8; i++) printk("%02x ", msg[i]); printk("\n"); } } - radeon_dig_connector->dpcp[0] = 0; + radeon_dig_connector->dpcd[0] = 0; return; } @@ -199,8 +199,8 @@ static bool atom_dp_get_link_status(struct radeon_connector *radeon_connector, static void dp_set_power(struct radeon_connector *radeon_connector, u8 power_state) { struct radeon_connector_atom_dig 
*radeon_dig_connector = radeon_connector->con_priv; - if (radeon_dig_connector->dpcp[0] >= 0x11) { - radeon_dp_aux_native_write(radeon_connector, 0x600, 1, + if (radeon_dig_connector->dpcd[0] >= 0x11) { + radeon_dp_aux_native_write(radeon_connector, DP_SET_POWER, 1, &power_state); } } diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 733427555ee1..4d457bc90141 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -923,7 +923,7 @@ static enum drm_connector_status radeon_dp_detect(struct drm_connector *connecto sink_type = radeon_dp_getsinktype(radeon_connector); if (sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { - radeon_dp_getdpcp(radeon_connector); + radeon_dp_getdpcd(radeon_connector); ret = connector_status_connected; } return ret; diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 37f5ea1af969..b4e7abadbfb2 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -605,6 +605,30 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) } } +/* + * DIG Encoder/Transmitter Setup + * + * DCE 3.0/3.1 + * - 2 DIG transmitter blocks. UNIPHY (links A and B) and LVTMA. + * Supports up to 3 digital outputs + * - 2 DIG encoder blocks. + * DIG1 can drive UNIPHY link A or link B + * DIG2 can drive UNIPHY link B or LVTMA + * + * DCE 3.2 + * - 3 DIG transmitter blocks. UNIPHY0/1/2 (links A and B). + * Supports up to 5 digital outputs + * - 2 DIG encoder blocks. + * DIG1/2 can drive UNIPHY0/1/2 link A or link B + * + * Routing + * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links) + * Examples: + * crtc0 -> dig2 -> LVTMA links A+B -> TMDS/HDMI + * crtc1 -> dig1 -> UNIPHY0 link B -> DP + * crtc0 -> dig1 -> UNIPHY2 link A -> LVDS + * crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI + */ static void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) { @@ -646,10 +670,17 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) } else { switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); + /* XXX doesn't really matter which dig encoder we pick as long as it's + * not already in use + */ + if (dig_connector->linkb) + index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl); + else + index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); num = 1; break; case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: + /* Only dig2 encoder can drive LVTMA */ index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl); num = 2; break; @@ -684,16 +715,15 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) } } - if (radeon_encoder->pixel_clock > 165000) { - args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA_B; + if (radeon_encoder->pixel_clock > 165000) args.ucLaneNum = 8; - } else { - if (dig_connector->linkb) - args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB; - else - args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA; + else args.ucLaneNum = 4; - } + + if (dig_connector->linkb) + args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB; + else + args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA; args.ucEncoderMode = atombios_get_encoder_mode(encoder); @@ -707,7 +737,7 @@ union dig_transmitter_control { }; static void -atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action) +atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set) { struct 
drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; @@ -756,6 +786,9 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action) args.v1.ucAction = action; if (action == ATOM_TRANSMITTER_ACTION_INIT) { args.v1.usInitInfo = radeon_connector->connector_object_id; + } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) { + args.v1.asMode.ucLaneSel = lane_num; + args.v1.asMode.ucLaneSet = lane_set; } else { if (radeon_encoder->pixel_clock > 165000) args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); @@ -767,6 +800,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action) args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); if (dig->dig_block) args.v2.acConfig.ucEncoderSel = 1; + if (dig_connector->linkb) + args.v2.acConfig.ucLinkSel = 1; switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: @@ -792,17 +827,20 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action) switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER; + /* XXX doesn't really matter which dig encoder we pick as long as it's + * not already in use + */ + if (dig_connector->linkb) + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER; + else + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER; if (rdev->flags & RADEON_IS_IGP) { if (radeon_encoder->pixel_clock > 165000) { - args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK | - ATOM_TRANSMITTER_CONFIG_LINKA_B); if (dig_connector->igp_lane_info & 0x3) args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7; else if (dig_connector->igp_lane_info & 0xc) args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15; } else { - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA; if (dig_connector->igp_lane_info & 0x1) args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3; else if (dig_connector->igp_lane_info & 0x2) @@ -812,34 +850,22 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action) else if (dig_connector->igp_lane_info & 0x8) args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15; } - } else { - if (radeon_encoder->pixel_clock > 165000) - args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK | - ATOM_TRANSMITTER_CONFIG_LINKA_B | - ATOM_TRANSMITTER_CONFIG_LANE_0_7); - else { - if (dig_connector->linkb) - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3; - else - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3; - } } break; case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: + /* Only dig2 encoder can drive LVTMA */ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER; - if (radeon_encoder->pixel_clock > 165000) - args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK | - ATOM_TRANSMITTER_CONFIG_LINKA_B | - ATOM_TRANSMITTER_CONFIG_LANE_0_7); - else { - if (dig_connector->linkb) - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3; - else - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3; - } break; } + if (radeon_encoder->pixel_clock > 165000) + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK; + + if (dig_connector->linkb) + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB; + else + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA; + if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { if (dig->coherent_mode) args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT; @@ 
-850,99 +876,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action) } -static void -atombios_dig_transmitter_setup_vsemph(struct drm_encoder *encoder, u8 lane_num, - u8 lane_set) -{ - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - union dig_transmitter_control args; - int index = 0, num = 0; - uint8_t frev, crev; - struct radeon_encoder_atom_dig *dig; - struct drm_connector *connector; - struct radeon_connector *radeon_connector; - struct radeon_connector_atom_dig *dig_connector; - - connector = radeon_get_connector_for_encoder(encoder); - if (!connector) - return; - - radeon_connector = to_radeon_connector(connector); - - if (!radeon_encoder->enc_priv) - return; - - dig = radeon_encoder->enc_priv; - - if (!radeon_connector->con_priv) - return; - - dig_connector = radeon_connector->con_priv; - - memset(&args, 0, sizeof(args)); - - if (ASIC_IS_DCE32(rdev)) - index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl); - else { - switch (radeon_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - index = GetIndexIntoMasterTable(COMMAND, DIG1TransmitterControl); - break; - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: - index = GetIndexIntoMasterTable(COMMAND, DIG2TransmitterControl); - break; - } - } - - atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); - - args.v1.ucAction = ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH; - args.v1.asMode.ucLaneSel = lane_num; - args.v1.asMode.ucLaneSet = lane_set; - - if (ASIC_IS_DCE32(rdev)) { - args.v2.acConfig.fDPConnector = 1; - - if (dig->dig_block) - args.v2.acConfig.ucEncoderSel = 1; - - switch (radeon_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - args.v2.acConfig.ucTransmitterSel = 0; - num = 0; - break; - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: - args.v2.acConfig.ucTransmitterSel = 1; - num = 1; - break; - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: - args.v2.acConfig.ucTransmitterSel = 2; - num = 2; - break; - } - } else { - args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; - - switch (radeon_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER; - if (dig_connector->linkb) - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3; - else - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3; - } - } - - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); - - if (ASIC_IS_DCE32(rdev)) - DRM_INFO("Output UNIPHY%d transmitter VSEMPH setup success\n", num); - else - DRM_INFO("Output DIG%d transmitter VSEMPH setup success\n", num); -} - static void atombios_yuv_setup(struct drm_encoder *encoder, bool enable) { @@ -1150,13 +1083,33 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder) args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; else args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID; - } else - args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID; + } else { + struct drm_connector *connector; + struct radeon_connector *radeon_connector; + struct radeon_connector_atom_dig *dig_connector; + + connector = radeon_get_connector_for_encoder(encoder); + if (!connector) + return; + radeon_connector = to_radeon_connector(connector); + if (!radeon_connector->con_priv) + return; + dig_connector = radeon_connector->con_priv; + + /* XXX doesn't really matter which dig encoder we pick as long as it's + * not already 
in use + */ + if (dig_connector->linkb) + args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; + else + args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID; + } break; case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID; break; case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: + /* Only dig2 encoder can drive LVTMA */ args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; break; case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: @@ -1259,14 +1212,14 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: /* disable the encoder and transmitter */ - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); atombios_dig_encoder_setup(encoder, ATOM_DISABLE); /* setup and enable the encoder and transmitter */ atombios_dig_encoder_setup(encoder, ATOM_ENABLE); - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT); - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP); - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); break; case ENCODER_OBJECT_ID_INTERNAL_DDI: atombios_ddia_setup(encoder, ATOM_ENABLE); diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index ce1cdc748f1f..166f75395f52 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -335,7 +335,7 @@ struct radeon_connector_atom_dig { bool linkb; uint16_t uc_i2c_id; struct radeon_i2c_chan *dp_i2c_bus; - u8 dpcp[8]; + u8 dpcd[8]; }; struct radeon_connector { @@ -362,7 +362,7 @@ struct radeon_framebuffer { }; extern int radeon_dp_getsinktype(struct radeon_connector *radeon_connector); -extern void radeon_dp_getdpcp(struct radeon_connector *connector); +extern void radeon_dp_getdpcd(struct radeon_connector *connector); extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, uint8_t write_byte, uint8_t *read_byte); diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 376155f8f81f..f09b0b2a99b7 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -43,7 +43,7 @@ #define AUX_I2C_REPLY_MASK (0x3 << 6) /* AUX CH addresses */ -#define DP_DPCP_REV 0x0 +#define DP_DPCD_REV 0x0 #define DP_LINK_BW_SET 0x100 # define DP_LINK_BW_1_62 0x06 @@ -132,6 +132,8 @@ #define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0 #define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6 +#define DP_SET_POWER 0x600 + #define MODE_I2C_START 1 #define MODE_I2C_WRITE 2 #define MODE_I2C_READ 4 -- cgit v1.2.3 From 5801ead6bd6bddf5505d6eab55f84d8ee8106cd8 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 24 Nov 2009 13:32:59 -0500 Subject: drm/radeon/kms: add support for DP modesetting Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/atom.c | 25 ++ drivers/gpu/drm/radeon/atombios_dp.c | 541 ++++++++++++++++++++++++++--- drivers/gpu/drm/radeon/radeon_connectors.c | 16 +- drivers/gpu/drm/radeon/radeon_encoders.c | 30 +- drivers/gpu/drm/radeon/radeon_mode.h | 13 +- include/drm/drm_dp_helper.h | 57 ++- 6 files changed, 600 insertions(+), 82 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/radeon/atom.c 
b/drivers/gpu/drm/radeon/atom.c index 85abc0850e7f..6578d19dff93 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c @@ -1214,3 +1214,28 @@ void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev, *crev = CU8(idx + 3); return; } + +int atom_allocate_fb_scratch(struct atom_context *ctx) +{ + int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware); + uint16_t data_offset; + int usage_bytes; + struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage; + + atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset); + + firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset); + + DRM_DEBUG("atom firmware requested %08x %dkb\n", + firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware, + firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb); + + usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024; + if (usage_bytes == 0) + usage_bytes = 20 * 1024; + /* allocate some scratch memory */ + ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL); + if (!ctx->scratch) + return -ENOMEM; + return 0; +} diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 76eb5c8a7016..ebaf3f8cd602 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -31,9 +31,20 @@ #include "atom-bits.h" #include "drm_dp_helper.h" -#define DP_LINK_STATUS_SIZE 6 - /* move these to drm_dp_helper.c/h */ +#define DP_LINK_CONFIGURATION_SIZE 9 +#define DP_LINK_STATUS_SIZE 6 +#define DP_DPCD_SIZE 8 + +static char *voltage_names[] = { + "0.4V", "0.6V", "0.8V", "1.2V" +}; +static char *pre_emph_names[] = { + "0dB", "3.5dB", "6dB", "9.5dB" +}; +static char *link_train_names[] = { + "pattern 1", "pattern 2", "idle", "off" +}; static const int dp_clocks[] = { 54000, // 1 lane, 1.62 Ghz @@ -46,9 +57,18 @@ static const int dp_clocks[] = { static const int num_dp_clocks = sizeof(dp_clocks) / sizeof(int); -int dp_lanes_for_mode_clock(int max_link_bw, int mode_clock) +/* common helper functions */ +static int dp_lanes_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock) { int i; + u8 max_link_bw; + u8 max_lane_count; + + if (!dpcd) + return 0; + + max_link_bw = dpcd[DP_MAX_LINK_RATE]; + max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; switch (max_link_bw) { case DP_LINK_BW_1_62: @@ -56,6 +76,19 @@ int dp_lanes_for_mode_clock(int max_link_bw, int mode_clock) for (i = 0; i < num_dp_clocks; i++) { if (i % 2) continue; + switch (max_lane_count) { + case 1: + if (i > 1) + return 0; + break; + case 2: + if (i > 3) + return 0; + break; + case 4: + default: + break; + } if (dp_clocks[i] > mode_clock) { if (i < 2) return 1; @@ -68,6 +101,19 @@ int dp_lanes_for_mode_clock(int max_link_bw, int mode_clock) break; case DP_LINK_BW_2_7: for (i = 0; i < num_dp_clocks; i++) { + switch (max_lane_count) { + case 1: + if (i > 1) + return 0; + break; + case 2: + if (i > 3) + return 0; + break; + case 4: + default: + break; + } if (dp_clocks[i] > mode_clock) { if (i < 2) return 1; @@ -83,17 +129,56 @@ int dp_lanes_for_mode_clock(int max_link_bw, int mode_clock) return 0; } -int dp_link_clock_for_mode_clock(int max_link_bw, int mode_clock) +static int dp_link_clock_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock) { int i; + u8 max_link_bw; + u8 max_lane_count; + + if (!dpcd) + return 0; + + max_link_bw = dpcd[DP_MAX_LINK_RATE]; + max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; switch (max_link_bw) { case 
DP_LINK_BW_1_62: default: - return 162000; + for (i = 0; i < num_dp_clocks; i++) { + if (i % 2) + continue; + switch (max_lane_count) { + case 1: + if (i > 1) + return 0; + break; + case 2: + if (i > 3) + return 0; + break; + case 4: + default: + break; + } + if (dp_clocks[i] > mode_clock) + return 162000; + } break; case DP_LINK_BW_2_7: for (i = 0; i < num_dp_clocks; i++) { + switch (max_lane_count) { + case 1: + if (i > 1) + return 0; + break; + case 2: + if (i > 3) + return 0; + break; + case 4: + default: + break; + } if (dp_clocks[i] > mode_clock) return (i % 2) ? 270000 : 162000; } @@ -102,6 +187,145 @@ int dp_link_clock_for_mode_clock(int max_link_bw, int mode_clock) return 0; } +int dp_mode_valid(u8 dpcd[DP_DPCD_SIZE], int mode_clock) +{ + int lanes = dp_lanes_for_mode_clock(dpcd, mode_clock); + int bw = dp_lanes_for_mode_clock(dpcd, mode_clock); + + if ((lanes == 0) || (bw == 0)) + return MODE_CLOCK_HIGH; + + return MODE_OK; +} + +static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r) +{ + return link_status[r - DP_LANE0_1_STATUS]; +} + +static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE], + int lane) +{ + int i = DP_LANE0_1_STATUS + (lane >> 1); + int s = (lane & 1) * 4; + u8 l = dp_link_status(link_status, i); + return (l >> s) & 0xf; +} + +static bool dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count) +{ + int lane; + u8 lane_status; + + for (lane = 0; lane < lane_count; lane++) { + lane_status = dp_get_lane_status(link_status, lane); + if ((lane_status & DP_LANE_CR_DONE) == 0) + return false; + } + return true; +} + +static bool dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count) +{ + u8 lane_align; + u8 lane_status; + int lane; + + lane_align = dp_link_status(link_status, + DP_LANE_ALIGN_STATUS_UPDATED); + if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) + return false; + for (lane = 0; lane < lane_count; lane++) { + lane_status = dp_get_lane_status(link_status, lane); + if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS) + return false; + } + return true; +} + +static u8 dp_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE], + int lane) + +{ + int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); + int s = ((lane & 1) ? + DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : + DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); + u8 l = dp_link_status(link_status, i); + + return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; +} + +static u8 dp_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE], + int lane) +{ + int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); + int s = ((lane & 1) ? 
+ DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : + DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); + u8 l = dp_link_status(link_status, i); + + return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; +} + +/* XXX fix me -- chip specific */ +#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200 +static u8 dp_pre_emphasis_max(u8 voltage_swing) +{ + switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_400: + return DP_TRAIN_PRE_EMPHASIS_6; + case DP_TRAIN_VOLTAGE_SWING_600: + return DP_TRAIN_PRE_EMPHASIS_6; + case DP_TRAIN_VOLTAGE_SWING_800: + return DP_TRAIN_PRE_EMPHASIS_3_5; + case DP_TRAIN_VOLTAGE_SWING_1200: + default: + return DP_TRAIN_PRE_EMPHASIS_0; + } +} + +static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count, + u8 train_set[4]) +{ + u8 v = 0; + u8 p = 0; + int lane; + + for (lane = 0; lane < lane_count; lane++) { + u8 this_v = dp_get_adjust_request_voltage(link_status, lane); + u8 this_p = dp_get_adjust_request_pre_emphasis(link_status, lane); + + DRM_INFO("requested signal parameters: lane %d voltage %s pre_emph %s\n", + lane, + voltage_names[this_v >> DP_TRAIN_VOLTAGE_SWING_SHIFT], + pre_emph_names[this_p >> DP_TRAIN_PRE_EMPHASIS_SHIFT]); + + if (this_v > v) + v = this_v; + if (this_p > p) + p = this_p; + } + + if (v >= DP_VOLTAGE_MAX) + v = DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED; + + if (p >= dp_pre_emphasis_max(v)) + p = dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; + + DRM_INFO("using signal parameters: voltage %s pre_emph %s\n", + voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT], + pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]); + + for (lane = 0; lane < 4; lane++) + train_set[lane] = v | p; +} + + +/* radeon aux chan functions */ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes, int num_bytes, u8 *read_byte, u8 read_buf_len, u8 delay) @@ -147,44 +371,10 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes, return true; } -static u8 radeon_dp_encoder_service(struct radeon_device *rdev, int action, int dp_clock, - uint8_t ucconfig, uint8_t lane_num) -{ - DP_ENCODER_SERVICE_PARAMETERS args; - int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService); - - memset(&args, 0, sizeof(args)); - args.ucLinkClock = dp_clock / 10; - args.ucConfig = ucconfig; - args.ucAction = action; - args.ucLaneNum = lane_num; - args.ucStatus = 0; - - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); - return args.ucStatus; -} - -u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector) -{ - struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; - struct drm_device *dev = radeon_connector->base.dev; - struct radeon_device *rdev = dev->dev_private; - - return radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_GET_SINK_TYPE, 0, - radeon_dig_connector->dp_i2c_bus->rec.i2c_id, 0); -} - -union dig_transmitter_control { - DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1; - DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2; -}; - bool radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, uint16_t address, uint8_t send_bytes, uint8_t *send) { - struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; - struct drm_device *dev = radeon_connector->base.dev; - struct radeon_device *rdev = dev->dev_private; + struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; u8 msg[20]; u8 msg_len, dp_msg_len; bool ret; @@ -201,7 +391,7 @@ bool 
radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, uint1 memcpy(&msg[4], send, send_bytes); msg_len = 4 + send_bytes; - ret = radeon_process_aux_ch(radeon_dig_connector->dp_i2c_bus, msg, msg_len, NULL, 0, 0); + ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, NULL, 0, 0); return ret; } @@ -209,9 +399,7 @@ bool radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, uint16 uint8_t delay, uint8_t expected_bytes, uint8_t *read_p) { - struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; - struct drm_device *dev = radeon_connector->base.dev; - struct radeon_device *rdev = dev->dev_private; + struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; u8 msg[20]; u8 msg_len, dp_msg_len; bool ret = false; @@ -223,19 +411,47 @@ bool radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, uint16 msg[3] = (dp_msg_len) << 4; msg[3] |= expected_bytes - 1; - ret = radeon_process_aux_ch(radeon_dig_connector->dp_i2c_bus, msg, msg_len, read_p, expected_bytes, delay); + ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, read_p, expected_bytes, delay); return ret; } +/* radeon dp functions */ +static u8 radeon_dp_encoder_service(struct radeon_device *rdev, int action, int dp_clock, + uint8_t ucconfig, uint8_t lane_num) +{ + DP_ENCODER_SERVICE_PARAMETERS args; + int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService); + + memset(&args, 0, sizeof(args)); + args.ucLinkClock = dp_clock / 10; + args.ucConfig = ucconfig; + args.ucAction = action; + args.ucLaneNum = lane_num; + args.ucStatus = 0; + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + return args.ucStatus; +} + +u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector) +{ + struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; + struct drm_device *dev = radeon_connector->base.dev; + struct radeon_device *rdev = dev->dev_private; + + return radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_GET_SINK_TYPE, 0, + dig_connector->dp_i2c_bus->rec.i2c_id, 0); +} + void radeon_dp_getdpcd(struct radeon_connector *radeon_connector) { - struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; + struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; u8 msg[25]; int ret; ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, 0, 8, msg); if (ret) { - memcpy(radeon_dig_connector->dpcd, msg, 8); + memcpy(dig_connector->dpcd, msg, 8); { int i; printk("DPCD: "); @@ -244,10 +460,38 @@ void radeon_dp_getdpcd(struct radeon_connector *radeon_connector) printk("\n"); } } - radeon_dig_connector->dpcd[0] = 0; + dig_connector->dpcd[0] = 0; return; } +void radeon_dp_set_link_config(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct radeon_connector *radeon_connector; + struct radeon_connector_atom_dig *dig_connector; + + if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) + return; + + radeon_connector = to_radeon_connector(connector); + if (!radeon_connector->con_priv) + return; + dig_connector = radeon_connector->con_priv; + + dig_connector->dp_clock = + dp_link_clock_for_mode_clock(dig_connector->dpcd, mode->clock); + dig_connector->dp_lane_count = + dp_lanes_for_mode_clock(dig_connector->dpcd, mode->clock); +} + +int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector, + struct drm_display_mode *mode) +{ + struct radeon_connector_atom_dig *dig_connector = 
radeon_connector->con_priv; + + return dp_mode_valid(dig_connector->dpcd, mode->clock); +} + static bool atom_dp_get_link_status(struct radeon_connector *radeon_connector, u8 link_status[DP_LINK_STATUS_SIZE]) { @@ -267,21 +511,41 @@ static bool atom_dp_get_link_status(struct radeon_connector *radeon_connector, static void dp_set_power(struct radeon_connector *radeon_connector, u8 power_state) { - struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; - if (radeon_dig_connector->dpcd[0] >= 0x11) { + struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; + + if (dig_connector->dpcd[0] >= 0x11) { radeon_dp_aux_native_write(radeon_connector, DP_SET_POWER, 1, &power_state); } } +static void dp_set_downspread(struct radeon_connector *radeon_connector, u8 downspread) +{ + radeon_dp_aux_native_write(radeon_connector, DP_DOWNSPREAD_CTRL, 1, + &downspread); +} + +static void dp_set_link_bw_lanes(struct radeon_connector *radeon_connector, + u8 link_configuration[DP_LINK_CONFIGURATION_SIZE]) +{ + radeon_dp_aux_native_write(radeon_connector, DP_LINK_BW_SET, 2, + link_configuration); +} + static void dp_update_dpvs_emph(struct radeon_connector *radeon_connector, + struct drm_encoder *encoder, u8 train_set[4]) { - struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; + struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; + int i; + + for (i = 0; i < dig_connector->dp_lane_count; i++) + atombios_dig_transmitter_setup(encoder, + ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH, + i, train_set[i]); -// radeon_dp_digtransmitter_setup_vsemph(); radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_LANE0_SET, - 0/* lc */, train_set); + dig_connector->dp_lane_count, train_set); } static void dp_set_training(struct radeon_connector *radeon_connector, @@ -291,6 +555,176 @@ static void dp_set_training(struct radeon_connector *radeon_connector, 1, &training); } +void dp_link_train(struct drm_encoder *encoder, + struct drm_connector *connector) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct radeon_encoder_atom_dig *dig; + struct radeon_connector *radeon_connector; + struct radeon_connector_atom_dig *dig_connector; + int enc_id = 0; + bool clock_recovery, channel_eq; + u8 link_status[DP_LINK_STATUS_SIZE]; + u8 link_configuration[DP_LINK_CONFIGURATION_SIZE]; + u8 tries, voltage; + u8 train_set[4]; + int i; + + if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) + return; + + if (!radeon_encoder->enc_priv) + return; + dig = radeon_encoder->enc_priv; + + radeon_connector = to_radeon_connector(connector); + if (!radeon_connector->con_priv) + return; + dig_connector = radeon_connector->con_priv; + + if (ASIC_IS_DCE32(rdev)) { + if (dig->dig_block) + enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER; + else + enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER; + if (dig_connector->linkb) + enc_id |= ATOM_DP_CONFIG_LINK_B; + else + enc_id |= ATOM_DP_CONFIG_LINK_A; + } else { + if (dig_connector->linkb) + enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER | ATOM_DP_CONFIG_LINK_B; + else + enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER | ATOM_DP_CONFIG_LINK_A; + } + + memset(link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); + if (dig_connector->dp_clock == 270000) + link_configuration[0] = DP_LINK_BW_2_7; + else + link_configuration[0] = DP_LINK_BW_1_62; + link_configuration[1] = dig_connector->dp_lane_count; + if 
(dig_connector->dpcd[0] >= 0x11) + link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + + /* power up the sink */ + dp_set_power(radeon_connector, DP_SET_POWER_D0); + /* disable the training pattern on the sink */ + dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE); + /* set link bw and lanes on the sink */ + dp_set_link_bw_lanes(radeon_connector, link_configuration); + /* disable downspread on the sink */ + dp_set_downspread(radeon_connector, 0); + /* start training on the source */ + radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START, + dig_connector->dp_clock, enc_id, 0); + /* set training pattern 1 on the source */ + radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL, + dig_connector->dp_clock, enc_id, 0); + + /* set initial vs/emph */ + memset(train_set, 0, 4); + dp_update_dpvs_emph(radeon_connector, encoder, train_set); + udelay(400); + /* set training pattern 1 on the sink */ + dp_set_training(radeon_connector, DP_TRAINING_PATTERN_1); + + /* clock recovery loop */ + clock_recovery = false; + tries = 0; + voltage = 0xff; + for (;;) { + udelay(100); + if (!atom_dp_get_link_status(radeon_connector, link_status)) + break; + + if (dp_clock_recovery_ok(link_status, dig_connector->dp_lane_count)) { + clock_recovery = true; + break; + } + + for (i = 0; i < dig_connector->dp_lane_count; i++) { + if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) + break; + } + if (i == dig_connector->dp_lane_count) { + DRM_ERROR("clock recovery reached max voltage\n"); + break; + } + + if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { + ++tries; + if (tries == 5) { + DRM_ERROR("clock recovery tried 5 times\n"); + break; + } + } else + tries = 0; + + voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; + + /* Compute new train_set as requested by sink */ + dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set); + dp_update_dpvs_emph(radeon_connector, encoder, train_set); + } + if (!clock_recovery) + DRM_ERROR("clock recovery failed\n"); + else + DRM_INFO("clock recovery at voltage %d pre-emphasis %d\n", + train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK, + (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >> + DP_TRAIN_PRE_EMPHASIS_SHIFT); + + + /* set training pattern 2 on the sink */ + dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2); + /* set training pattern 2 on the source */ + radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL, + dig_connector->dp_clock, enc_id, 1); + + /* channel equalization loop */ + tries = 0; + channel_eq = false; + for (;;) { + udelay(400); + if (!atom_dp_get_link_status(radeon_connector, link_status)) + break; + + if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count)) { + channel_eq = true; + break; + } + + /* Try 5 times */ + if (tries > 5) { + DRM_ERROR("channel eq failed: 5 tries\n"); + break; + } + + /* Compute new train_set as requested by sink */ + dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set); + dp_update_dpvs_emph(radeon_connector, encoder, train_set); + + tries++; + } + + if (!channel_eq) + DRM_ERROR("channel eq failed\n"); + else + DRM_INFO("channel eq at voltage %d pre-emphasis %d\n", + train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK, + (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) + >> DP_TRAIN_PRE_EMPHASIS_SHIFT); + + /* disable the training pattern on the sink */ + dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE); + + radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE, + dig_connector->dp_clock, enc_id, 0); +} + 
int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, uint8_t write_byte, uint8_t *read_byte) { @@ -342,3 +776,4 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, } return -EREMOTEIO; } + diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index b51e38386cc0..3837cc942617 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -934,9 +934,23 @@ static enum drm_connector_status radeon_dp_detect(struct drm_connector *connecto return ret; } +static int radeon_dp_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; + + /* XXX check mode bandwidth */ + + if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) + return radeon_dp_mode_valid_helper(radeon_connector, mode); + else + return MODE_OK; +} + struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = { .get_modes = radeon_dp_get_modes, - .mode_valid = radeon_dvi_mode_valid, + .mode_valid = radeon_dp_mode_valid, .best_encoder = radeon_dvi_encoder, }; diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 8f3d67b6032c..397c86f761cd 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -250,6 +250,12 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, } } + if (ASIC_IS_DCE3(rdev) && + (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT))) { + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); + radeon_dp_set_link_config(connector, mode); + } + return true; } @@ -719,11 +725,9 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) args.ucEncoderMode = atombios_get_encoder_mode(encoder); if (args.ucEncoderMode == ATOM_ENCODER_MODE_DP) { - if (dp_link_clock_for_mode_clock(dig_connector->dpcd[1], - radeon_encoder->pixel_clock) == 270000) + if (dig_connector->dp_clock == 270000) args.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; - args.ucLaneNum = dp_lanes_for_mode_clock(dig_connector->dpcd[1], - radeon_encoder->pixel_clock); + args.ucLaneNum = dig_connector->dp_lane_count; } else if (radeon_encoder->pixel_clock > 165000) args.ucLaneNum = 8; else @@ -743,7 +747,7 @@ union dig_transmitter_control { DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2; }; -static void +void atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set) { struct drm_device *dev = encoder->dev; @@ -803,8 +807,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t } else { if (is_dp) args.v1.usPixelClock = - cpu_to_le16(dp_link_clock_for_mode_clock(dig_connector->dpcd[1], - radeon_encoder->pixel_clock) / 10); + cpu_to_le16(dig_connector->dp_clock / 10); else if (radeon_encoder->pixel_clock > 165000) args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); else @@ -1198,12 +1201,16 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); - if (radeon_encoder->enc_priv) { - struct radeon_encoder_atom_dig *dig; + if 
(radeon_encoder->active_device & + (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) { + if (radeon_encoder->enc_priv) { + struct radeon_encoder_atom_dig *dig; - dig = radeon_encoder->enc_priv; - dig->dig_block = radeon_crtc->crtc_id; + dig = radeon_encoder->enc_priv; + dig->dig_block = radeon_crtc->crtc_id; + } } radeon_encoder->pixel_clock = adjusted_mode->clock; @@ -1237,6 +1244,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0); atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); + dp_link_train(encoder, connector); break; case ENCODER_OBJECT_ID_INTERNAL_DDI: atombios_ddia_setup(encoder, ATOM_ENABLE); diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index b516401c151a..7d03e3971498 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -343,6 +343,8 @@ struct radeon_connector_atom_dig { struct radeon_i2c_chan *dp_i2c_bus; u8 dpcd[8]; u8 dp_sink_type; + int dp_clock; + int dp_lane_count; }; struct radeon_connector { @@ -366,10 +368,17 @@ struct radeon_framebuffer { struct drm_gem_object *obj; }; -extern int dp_lanes_for_mode_clock(int max_link_bw, int mode_clock); -extern int dp_link_clock_for_mode_clock(int max_link_bw, int mode_clock); +extern int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector, + struct drm_display_mode *mode); +extern void radeon_dp_set_link_config(struct drm_connector *connector, + struct drm_display_mode *mode); +extern void dp_link_train(struct drm_encoder *encoder, + struct drm_connector *connector); extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector); extern void radeon_dp_getdpcd(struct radeon_connector *radeon_connector); +extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder, + int action, uint8_t lane_num, + uint8_t lane_set); extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, uint8_t write_byte, uint8_t *read_byte); diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index f09b0b2a99b7..a49e791db0b0 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -43,18 +43,41 @@ #define AUX_I2C_REPLY_MASK (0x3 << 6) /* AUX CH addresses */ -#define DP_DPCD_REV 0x0 +/* DPCD */ +#define DP_DPCD_REV 0x000 -#define DP_LINK_BW_SET 0x100 +#define DP_MAX_LINK_RATE 0x001 + +#define DP_MAX_LANE_COUNT 0x002 +# define DP_MAX_LANE_COUNT_MASK 0x1f +# define DP_ENHANCED_FRAME_CAP (1 << 7) + +#define DP_MAX_DOWNSPREAD 0x003 +# define DP_NO_AUX_HANDSHAKE_LINK_TRAINING (1 << 6) + +#define DP_NORP 0x004 + +#define DP_DOWNSTREAMPORT_PRESENT 0x005 +# define DP_DWN_STRM_PORT_PRESENT (1 << 0) +# define DP_DWN_STRM_PORT_TYPE_MASK 0x06 +/* 00b = DisplayPort */ +/* 01b = Analog */ +/* 10b = TMDS or HDMI */ +/* 11b = Other */ +# define DP_FORMAT_CONVERSION (1 << 3) + +#define DP_MAIN_LINK_CHANNEL_CODING 0x006 + +/* link configuration */ +#define DP_LINK_BW_SET 0x100 # define DP_LINK_BW_1_62 0x06 # define DP_LINK_BW_2_7 0x0a -#define DP_LANE_COUNT_SET 0x101 +#define DP_LANE_COUNT_SET 0x101 # define DP_LANE_COUNT_MASK 0x0f # define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7) -#define DP_TRAINING_PATTERN_SET 0x102 - +#define DP_TRAINING_PATTERN_SET 0x102 # define DP_TRAINING_PATTERN_DISABLE 0 # define DP_TRAINING_PATTERN_1 1 # define DP_TRAINING_PATTERN_2 2 @@ -104,11 +127,14 @@ #define DP_LANE0_1_STATUS 
0x202 #define DP_LANE2_3_STATUS 0x203 - # define DP_LANE_CR_DONE (1 << 0) # define DP_LANE_CHANNEL_EQ_DONE (1 << 1) # define DP_LANE_SYMBOL_LOCKED (1 << 2) +#define DP_CHANNEL_EQ_BITS (DP_LANE_CR_DONE | \ + DP_LANE_CHANNEL_EQ_DONE | \ + DP_LANE_SYMBOL_LOCKED) + #define DP_LANE_ALIGN_STATUS_UPDATED 0x204 #define DP_INTERLANE_ALIGN_DONE (1 << 0) @@ -122,17 +148,18 @@ #define DP_ADJUST_REQUEST_LANE0_1 0x206 #define DP_ADJUST_REQUEST_LANE2_3 0x207 - -#define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03 -#define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0 -#define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c -#define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2 -#define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30 -#define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4 -#define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0 -#define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6 +# define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03 +# define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0 +# define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c +# define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2 +# define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30 +# define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4 +# define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0 +# define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6 #define DP_SET_POWER 0x600 +# define DP_SET_POWER_D0 0x1 +# define DP_SET_POWER_D3 0x2 #define MODE_I2C_START 1 #define MODE_I2C_WRITE 2 -- cgit v1.2.3 From 2ff6cfd70720780234fdfea636218c2a62b31287 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 7 Dec 2009 17:12:58 +0100 Subject: perf events: hw_breakpoints: Don't include asm/hw_breakpoint.h in user space asm/hw_breakpoint.h is evidently a kernel internal file and should not be included globally, not even under an #ifdef. Reported-by: Geert Uytterhoeven Signed-off-by: Arnd Bergmann Cc: Frederic Weisbecker Cc: Alan Stern Cc: K.Prasad LKML-Reference: <200912071712.58650.arnd@arndb.de> Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 89098e35a036..bf3329413e18 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -18,10 +18,6 @@ #include #include -#ifdef CONFIG_HAVE_HW_BREAKPOINT -#include -#endif - /* * User-space ABI bits: */ @@ -451,6 +447,10 @@ enum perf_callchain_context { # include #endif +#ifdef CONFIG_HAVE_HW_BREAKPOINT +#include +#endif + #include #include #include -- cgit v1.2.3 From e15a113700324f7fdcee95589875daed2b98a2fe Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Mon, 30 Nov 2009 03:02:02 +0000 Subject: powerpc/kvm: Sync guest visible MMU state Currently userspace has no chance to find out which virtual address space we're in and resolve addresses. While that is a big problem for migration, it's also unpleasent when debugging, as gdb and the monitor don't work on virtual addresses. This patch exports enough of the MMU segment state to userspace to make debugging work and thus also includes the groundwork for migration. 
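As a rough userspace-side sketch (not part of this patch), a debugger or monitor front end could read the newly exported segment state of a book3s_64 guest through the existing KVM_GET_SREGS vcpu ioctl; the vcpu file descriptor and function name below are illustrative assumptions only:

    /* Illustrative only: vcpu_fd is assumed to be a vcpu file descriptor
     * obtained earlier via KVM_CREATE_VCPU; dump_guest_slbs() is a
     * hypothetical helper, not code from this patch. */
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static void dump_guest_slbs(int vcpu_fd)
    {
            struct kvm_sregs sregs;
            int i;

            if (ioctl(vcpu_fd, KVM_GET_SREGS, &sregs) < 0) {
                    perror("KVM_GET_SREGS");
                    return;
            }
            printf("sdr1 = 0x%016llx\n",
                   (unsigned long long)sregs.u.s.sdr1);
            for (i = 0; i < 64; i++)
                    printf("slb[%02d]: slbe=0x%016llx slbv=0x%016llx\n", i,
                           (unsigned long long)sregs.u.s.ppc64.slb[i].slbe,
                           (unsigned long long)sregs.u.s.ppc64.slb[i].slbv);
    }

A 32-bit (ppc32) guest would instead look at u.s.ppc32.sr[], ibat[] and dbat[] from the same structure.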
Signed-off-by: Alexander Graf Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/kvm.h | 18 ++++++++++++- arch/powerpc/include/asm/kvm_asm.h | 1 + arch/powerpc/include/asm/kvm_book3s.h | 3 +++ arch/powerpc/kvm/book3s.c | 49 +++++++++++++++++++++++++++++++++++ arch/powerpc/kvm/book3s_64_emulate.c | 38 ++++++++++++++++----------- arch/powerpc/kvm/book3s_64_mmu.c | 2 ++ arch/powerpc/kvm/powerpc.c | 3 +++ include/linux/kvm.h | 3 +++ 8 files changed, 101 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h index c9ca97f43bc1..81f3b0b5601e 100644 --- a/arch/powerpc/include/asm/kvm.h +++ b/arch/powerpc/include/asm/kvm.h @@ -47,7 +47,23 @@ struct kvm_regs { struct kvm_sregs { __u32 pvr; - char pad[1020]; + union { + struct { + __u64 sdr1; + struct { + struct { + __u64 slbe; + __u64 slbv; + } slb[64]; + } ppc64; + struct { + __u32 sr[16]; + __u64 ibat[8]; + __u64 dbat[8]; + } ppc32; + } s; + __u8 pad[1020]; + } u; }; struct kvm_fpu { diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h index 19ddb352fd0f..af2abe74f544 100644 --- a/arch/powerpc/include/asm/kvm_asm.h +++ b/arch/powerpc/include/asm/kvm_asm.h @@ -87,6 +87,7 @@ #define BOOK3S_IRQPRIO_MAX 16 #define BOOK3S_HFLAG_DCBZ32 0x1 +#define BOOK3S_HFLAG_SLB 0x2 #define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */ #define RESUME_FLAG_HOST (1<<1) /* Resume host? */ diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index c6011336371e..74b7369770d0 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -46,6 +46,7 @@ struct kvmppc_sr { }; struct kvmppc_bat { + u64 raw; u32 bepi; u32 bepi_mask; bool vs; @@ -113,6 +114,8 @@ extern struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, boo extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr, bool data); extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr); extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec); +extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, + bool upper, u32 val); extern u32 kvmppc_trampoline_lowmem; extern u32 kvmppc_trampoline_enter; diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 42037d46a416..3e294bd9b8c6 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -281,6 +281,7 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu) void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr) { + vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB; vcpu->arch.pvr = pvr; if ((pvr >= 0x330000) && (pvr < 0x70330000)) { kvmppc_mmu_book3s_64_init(vcpu); @@ -762,14 +763,62 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { + struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); + int i; + sregs->pvr = vcpu->arch.pvr; + + sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1; + if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { + for (i = 0; i < 64; i++) { + sregs->u.s.ppc64.slb[i].slbe = vcpu3s->slb[i].orige | i; + sregs->u.s.ppc64.slb[i].slbv = vcpu3s->slb[i].origv; + } + } else { + for (i = 0; i < 16; i++) { + sregs->u.s.ppc32.sr[i] = vcpu3s->sr[i].raw; + sregs->u.s.ppc32.sr[i] = vcpu3s->sr[i].raw; + } + for (i = 0; i < 8; i++) { + sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw; + sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw; + } + } 
return 0; } int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { + struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); + int i; + kvmppc_set_pvr(vcpu, sregs->pvr); + + vcpu3s->sdr1 = sregs->u.s.sdr1; + if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { + for (i = 0; i < 64; i++) { + vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv, + sregs->u.s.ppc64.slb[i].slbe); + } + } else { + for (i = 0; i < 16; i++) { + vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]); + } + for (i = 0; i < 8; i++) { + kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false, + (u32)sregs->u.s.ppc32.ibat[i]); + kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true, + (u32)(sregs->u.s.ppc32.ibat[i] >> 32)); + kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false, + (u32)sregs->u.s.ppc32.dbat[i]); + kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true, + (u32)(sregs->u.s.ppc32.dbat[i] >> 32)); + } + } + + /* Flush the MMU after messing with the segments */ + kvmppc_mmu_pte_flush(vcpu, 0, 0); return 0; } diff --git a/arch/powerpc/kvm/book3s_64_emulate.c b/arch/powerpc/kvm/book3s_64_emulate.c index c343e67306e0..1027eac6d474 100644 --- a/arch/powerpc/kvm/book3s_64_emulate.c +++ b/arch/powerpc/kvm/book3s_64_emulate.c @@ -185,7 +185,27 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, return emulated; } -static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u64 val) +void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper, + u32 val) +{ + if (upper) { + /* Upper BAT */ + u32 bl = (val >> 2) & 0x7ff; + bat->bepi_mask = (~bl << 17); + bat->bepi = val & 0xfffe0000; + bat->vs = (val & 2) ? 1 : 0; + bat->vp = (val & 1) ? 1 : 0; + bat->raw = (bat->raw & 0xffffffff00000000ULL) | val; + } else { + /* Lower BAT */ + bat->brpn = val & 0xfffe0000; + bat->wimg = (val >> 3) & 0xf; + bat->pp = val & 3; + bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32); + } +} + +static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val) { struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); struct kvmppc_bat *bat; @@ -207,19 +227,7 @@ static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u64 val) BUG(); } - if (!(sprn % 2)) { - /* Upper BAT */ - u32 bl = (val >> 2) & 0x7ff; - bat->bepi_mask = (~bl << 17); - bat->bepi = val & 0xfffe0000; - bat->vs = (val & 2) ? 1 : 0; - bat->vp = (val & 1) ? 1 : 0; - } else { - /* Lower BAT */ - bat->brpn = val & 0xfffe0000; - bat->wimg = (val >> 3) & 0xf; - bat->pp = val & 3; - } + kvmppc_set_bat(vcpu, bat, !(sprn % 2), val); } int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) @@ -243,7 +251,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) case SPRN_IBAT4U ... SPRN_IBAT7L: case SPRN_DBAT0U ... SPRN_DBAT3L: case SPRN_DBAT4U ... 
SPRN_DBAT7L: - kvmppc_write_bat(vcpu, sprn, vcpu->arch.gpr[rs]); + kvmppc_write_bat(vcpu, sprn, (u32)vcpu->arch.gpr[rs]); /* BAT writes happen so rarely that we're ok to flush * everything here */ kvmppc_mmu_pte_flush(vcpu, 0, 0); diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c index a31f9c677d23..5598f88f142e 100644 --- a/arch/powerpc/kvm/book3s_64_mmu.c +++ b/arch/powerpc/kvm/book3s_64_mmu.c @@ -473,4 +473,6 @@ void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu) mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid; mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp; mmu->is_dcbz32 = kvmppc_mmu_book3s_64_is_dcbz32; + + vcpu->arch.hflags |= BOOK3S_HFLAG_SLB; } diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 692c3709011e..d82551efbfbf 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -144,6 +144,9 @@ int kvm_dev_ioctl_check_extension(long ext) int r; switch (ext) { + case KVM_CAP_PPC_SEGSTATE: + r = 1; + break; case KVM_CAP_COALESCED_MMIO: r = KVM_COALESCED_MMIO_PAGE_OFFSET; break; diff --git a/include/linux/kvm.h b/include/linux/kvm.h index f8f8900fc5ec..caf6173bd2e8 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -436,6 +436,9 @@ struct kvm_ioeventfd { #endif #define KVM_CAP_IOEVENTFD 36 #define KVM_CAP_SET_IDENTITY_MAP_ADDR 37 +/* KVM upstream has more features, but we synched this number. + Linux, please remove this comment on rebase. */ +#define KVM_CAP_PPC_SEGSTATE 43 #ifdef KVM_CAP_IRQ_ROUTING -- cgit v1.2.3 From d5af91a1faca68e9a8cc493b85aa7b194b6128aa Mon Sep 17 00:00:00 2001 From: Richard Röjfors Date: Fri, 13 Nov 2009 12:28:39 +0100 Subject: xilinx_spi: Split into of driver and generic part. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch splits the xilinx_spi driver into a generic part and a OF driver part. The reason for this is to later add in a platform driver as well. Tested-by: John Linn Signed-off-by: Richard Röjfors Signed-off-by: Grant Likely --- drivers/spi/Kconfig | 9 ++- drivers/spi/Makefile | 1 + drivers/spi/xilinx_spi.c | 159 ++++++++++------------------------------- drivers/spi/xilinx_spi.h | 32 +++++++++ drivers/spi/xilinx_spi_of.c | 133 ++++++++++++++++++++++++++++++++++ include/linux/spi/xilinx_spi.h | 16 +++++ 6 files changed, 229 insertions(+), 121 deletions(-) create mode 100644 drivers/spi/xilinx_spi.h create mode 100644 drivers/spi/xilinx_spi_of.c create mode 100644 include/linux/spi/xilinx_spi.h (limited to 'include') diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 2a4ba1993083..f34a2d16d18f 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -244,14 +244,21 @@ config SPI_TXX9 config SPI_XILINX tristate "Xilinx SPI controller" - depends on (XILINX_VIRTEX || MICROBLAZE) && EXPERIMENTAL + depends on EXPERIMENTAL select SPI_BITBANG + select SPI_XILINX_OF if (XILINX_VIRTEX || MICROBLAZE) help This exposes the SPI controller IP from the Xilinx EDK. See the "OPB Serial Peripheral Interface (SPI) (v1.00e)" Product Specification document (DS464) for hardware details. +config SPI_XILINX_OF + tristate "Xilinx SPI controller OF device" + depends on SPI_XILINX && (XILINX_VIRTEX || MICROBLAZE) + help + This is the OF driver for the SPI controller IP from the Xilinx EDK. 
+ # # Add new SPI master controllers in alphabetical order above this line # diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index e3f092a9afa5..01c409548044 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -32,6 +32,7 @@ obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx.o obj-$(CONFIG_SPI_TXX9) += spi_txx9.o obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o +obj-$(CONFIG_SPI_XILINX_OF) += xilinx_spi_of.o obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.o obj-$(CONFIG_SPI_STMP3XXX) += spi_stmp.o # ... add above this line ... diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c index 5a143b9f6361..69fa26d82ce4 100644 --- a/drivers/spi/xilinx_spi.c +++ b/drivers/spi/xilinx_spi.c @@ -14,16 +14,14 @@ #include #include #include -#include - -#include -#include -#include #include #include #include +#include "xilinx_spi.h" +#include + #define XILINX_SPI_NAME "xilinx_spi" /* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e) @@ -78,7 +76,7 @@ struct xilinx_spi { /* bitbang has to be first */ struct spi_bitbang bitbang; struct completion done; - + struct resource mem; /* phys mem */ void __iomem *regs; /* virt. address of the control registers */ u32 irq; @@ -284,40 +282,22 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id) return IRQ_HANDLED; } -static int __init xilinx_spi_of_probe(struct of_device *ofdev, - const struct of_device_id *match) +struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem, + u32 irq, s16 bus_num) { struct spi_master *master; struct xilinx_spi *xspi; - struct resource r_irq_struct; - struct resource r_mem_struct; - - struct resource *r_irq = &r_irq_struct; - struct resource *r_mem = &r_mem_struct; - int rc = 0; - const u32 *prop; - int len; - - /* Get resources(memory, IRQ) associated with the device */ - master = spi_alloc_master(&ofdev->dev, sizeof(struct xilinx_spi)); + struct xspi_platform_data *pdata = dev->platform_data; + int ret; - if (master == NULL) { - return -ENOMEM; - } - - dev_set_drvdata(&ofdev->dev, master); - - rc = of_address_to_resource(ofdev->node, 0, r_mem); - if (rc) { - dev_warn(&ofdev->dev, "invalid address\n"); - goto put_master; + if (!pdata) { + dev_err(dev, "No platform data attached\n"); + return NULL; } - rc = of_irq_to_resource(ofdev->node, 0, r_irq); - if (rc == NO_IRQ) { - dev_warn(&ofdev->dev, "no IRQ found\n"); - goto put_master; - } + master = spi_alloc_master(dev, sizeof(struct xilinx_spi)); + if (!master) + return NULL; /* the spi->mode bits understood by this driver: */ master->mode_bits = SPI_CPOL | SPI_CPHA; @@ -330,128 +310,67 @@ static int __init xilinx_spi_of_probe(struct of_device *ofdev, xspi->bitbang.master->setup = xilinx_spi_setup; init_completion(&xspi->done); - xspi->irq = r_irq->start; - - if (!request_mem_region(r_mem->start, - r_mem->end - r_mem->start + 1, XILINX_SPI_NAME)) { - rc = -ENXIO; - dev_warn(&ofdev->dev, "memory request failure\n"); + if (!request_mem_region(mem->start, resource_size(mem), + XILINX_SPI_NAME)) goto put_master; - } - xspi->regs = ioremap(r_mem->start, r_mem->end - r_mem->start + 1); + xspi->regs = ioremap(mem->start, resource_size(mem)); if (xspi->regs == NULL) { - rc = -ENOMEM; - dev_warn(&ofdev->dev, "ioremap failure\n"); - goto release_mem; + dev_warn(dev, "ioremap failure\n"); + goto map_failed; } - xspi->irq = r_irq->start; - /* dynamic bus assignment */ - master->bus_num = -1; + master->bus_num = bus_num; + master->num_chipselect = pdata->num_chipselect; - /* number of slave 
select bits is required */ - prop = of_get_property(ofdev->node, "xlnx,num-ss-bits", &len); - if (!prop || len < sizeof(*prop)) { - dev_warn(&ofdev->dev, "no 'xlnx,num-ss-bits' property\n"); - goto unmap_io; - } - master->num_chipselect = *prop; + xspi->mem = *mem; + xspi->irq = irq; /* SPI controller initializations */ xspi_init_hw(xspi->regs); /* Register for SPI Interrupt */ - rc = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi); - if (rc != 0) { - dev_warn(&ofdev->dev, "irq request failure: %d\n", xspi->irq); + ret = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi); + if (ret) goto unmap_io; - } - rc = spi_bitbang_start(&xspi->bitbang); - if (rc != 0) { - dev_err(&ofdev->dev, "spi_bitbang_start FAILED\n"); + ret = spi_bitbang_start(&xspi->bitbang); + if (ret) { + dev_err(dev, "spi_bitbang_start FAILED\n"); goto free_irq; } - dev_info(&ofdev->dev, "at 0x%08X mapped to 0x%08X, irq=%d\n", - (unsigned int)r_mem->start, (u32)xspi->regs, xspi->irq); - - /* Add any subnodes on the SPI bus */ - of_register_spi_devices(master, ofdev->node); - - return rc; + dev_info(dev, "at 0x%08X mapped to 0x%08X, irq=%d\n", + (u32)mem->start, (u32)xspi->regs, xspi->irq); + return master; free_irq: free_irq(xspi->irq, xspi); unmap_io: iounmap(xspi->regs); -release_mem: - release_mem_region(r_mem->start, resource_size(r_mem)); +map_failed: + release_mem_region(mem->start, resource_size(mem)); put_master: spi_master_put(master); - return rc; + return NULL; } +EXPORT_SYMBOL(xilinx_spi_init); -static int __devexit xilinx_spi_remove(struct of_device *ofdev) +void xilinx_spi_deinit(struct spi_master *master) { struct xilinx_spi *xspi; - struct spi_master *master; - struct resource r_mem; - master = platform_get_drvdata(ofdev); xspi = spi_master_get_devdata(master); spi_bitbang_stop(&xspi->bitbang); free_irq(xspi->irq, xspi); iounmap(xspi->regs); - if (!of_address_to_resource(ofdev->node, 0, &r_mem)) - release_mem_region(r_mem.start, resource_size(&r_mem)); - dev_set_drvdata(&ofdev->dev, 0); - spi_master_put(xspi->bitbang.master); - - return 0; -} - -/* work with hotplug and coldplug */ -MODULE_ALIAS("platform:" XILINX_SPI_NAME); - -static int __exit xilinx_spi_of_remove(struct of_device *op) -{ - return xilinx_spi_remove(op); -} -static struct of_device_id xilinx_spi_of_match[] = { - { .compatible = "xlnx,xps-spi-2.00.a", }, - { .compatible = "xlnx,xps-spi-2.00.b", }, - {} -}; - -MODULE_DEVICE_TABLE(of, xilinx_spi_of_match); - -static struct of_platform_driver xilinx_spi_of_driver = { - .owner = THIS_MODULE, - .name = "xilinx-xps-spi", - .match_table = xilinx_spi_of_match, - .probe = xilinx_spi_of_probe, - .remove = __exit_p(xilinx_spi_of_remove), - .driver = { - .name = "xilinx-xps-spi", - .owner = THIS_MODULE, - }, -}; - -static int __init xilinx_spi_init(void) -{ - return of_register_platform_driver(&xilinx_spi_of_driver); + release_mem_region(xspi->mem.start, resource_size(&xspi->mem)); + spi_master_put(xspi->bitbang.master); } -module_init(xilinx_spi_init); +EXPORT_SYMBOL(xilinx_spi_deinit); -static void __exit xilinx_spi_exit(void) -{ - of_unregister_platform_driver(&xilinx_spi_of_driver); -} -module_exit(xilinx_spi_exit); MODULE_AUTHOR("MontaVista Software, Inc. 
"); MODULE_DESCRIPTION("Xilinx SPI driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/spi/xilinx_spi.h b/drivers/spi/xilinx_spi.h new file mode 100644 index 000000000000..d211accf68d2 --- /dev/null +++ b/drivers/spi/xilinx_spi.h @@ -0,0 +1,32 @@ +/* + * Xilinx SPI device driver API and platform data header file + * + * Copyright (c) 2009 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef _XILINX_SPI_H_ +#define _XILINX_SPI_H_ + +#include +#include + +#define XILINX_SPI_NAME "xilinx_spi" + +struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem, + u32 irq, s16 bus_num); + +void xilinx_spi_deinit(struct spi_master *master); +#endif diff --git a/drivers/spi/xilinx_spi_of.c b/drivers/spi/xilinx_spi_of.c new file mode 100644 index 000000000000..151aa13494bd --- /dev/null +++ b/drivers/spi/xilinx_spi_of.c @@ -0,0 +1,133 @@ +/* + * Xilinx SPI OF device driver + * + * Copyright (c) 2009 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/* Supports: + * Xilinx SPI devices as OF devices + * + * Inspired by xilinx_spi.c, 2002-2007 (c) MontaVista Software, Inc. 
+ */ + +#include +#include +#include +#include + +#include +#include +#include + +#include +#include "xilinx_spi.h" + + +static int __devinit xilinx_spi_of_probe(struct of_device *ofdev, + const struct of_device_id *match) +{ + struct spi_master *master; + struct xspi_platform_data *pdata; + struct resource r_mem; + struct resource r_irq; + int rc = 0; + const u32 *prop; + int len; + + rc = of_address_to_resource(ofdev->node, 0, &r_mem); + if (rc) { + dev_warn(&ofdev->dev, "invalid address\n"); + return rc; + } + + rc = of_irq_to_resource(ofdev->node, 0, &r_irq); + if (rc == NO_IRQ) { + dev_warn(&ofdev->dev, "no IRQ found\n"); + return -ENODEV; + } + + ofdev->dev.platform_data = + kzalloc(sizeof(struct xspi_platform_data), GFP_KERNEL); + pdata = ofdev->dev.platform_data; + if (!pdata) + return -ENOMEM; + + /* number of slave select bits is required */ + prop = of_get_property(ofdev->node, "xlnx,num-ss-bits", &len); + if (!prop || len < sizeof(*prop)) { + dev_warn(&ofdev->dev, "no 'xlnx,num-ss-bits' property\n"); + return -EINVAL; + } + pdata->num_chipselect = *prop; + master = xilinx_spi_init(&ofdev->dev, &r_mem, r_irq.start, -1); + if (!master) + return -ENODEV; + + dev_set_drvdata(&ofdev->dev, master); + + /* Add any subnodes on the SPI bus */ + of_register_spi_devices(master, ofdev->node); + + return 0; +} + +static int __devexit xilinx_spi_remove(struct of_device *ofdev) +{ + xilinx_spi_deinit(dev_get_drvdata(&ofdev->dev)); + dev_set_drvdata(&ofdev->dev, 0); + kfree(ofdev->dev.platform_data); + ofdev->dev.platform_data = NULL; + return 0; +} + +static int __exit xilinx_spi_of_remove(struct of_device *op) +{ + return xilinx_spi_remove(op); +} + +static struct of_device_id xilinx_spi_of_match[] = { + { .compatible = "xlnx,xps-spi-2.00.a", }, + { .compatible = "xlnx,xps-spi-2.00.b", }, + {} +}; + +MODULE_DEVICE_TABLE(of, xilinx_spi_of_match); + +static struct of_platform_driver xilinx_spi_of_driver = { + .match_table = xilinx_spi_of_match, + .probe = xilinx_spi_of_probe, + .remove = __exit_p(xilinx_spi_of_remove), + .driver = { + .name = "xilinx-xps-spi", + .owner = THIS_MODULE, + }, +}; + +static int __init xilinx_spi_of_init(void) +{ + return of_register_platform_driver(&xilinx_spi_of_driver); +} +module_init(xilinx_spi_of_init); + +static void __exit xilinx_spi_of_exit(void) +{ + of_unregister_platform_driver(&xilinx_spi_of_driver); +} +module_exit(xilinx_spi_of_exit); + +MODULE_AUTHOR("Mocean Laboratories "); +MODULE_DESCRIPTION("Xilinx SPI platform driver"); +MODULE_LICENSE("GPL v2"); diff --git a/include/linux/spi/xilinx_spi.h b/include/linux/spi/xilinx_spi.h new file mode 100644 index 000000000000..06df0aba3aee --- /dev/null +++ b/include/linux/spi/xilinx_spi.h @@ -0,0 +1,16 @@ +#ifndef __LINUX_SPI_XILINX_SPI_H +#define __LINUX_SPI_XILINX_SPI_H + +/** + * struct xspi_platform_data - Platform data of the Xilinx SPI driver + * @num_chipselect: Number of chip select by the IP + * @devices: Devices to add when the driver is probed. + * @num_devices: Number of devices in the devices array. + */ +struct xspi_platform_data { + u16 num_chipselect; + struct spi_board_info *devices; + u8 num_devices; +}; + +#endif /* __LINUX_SPI_XILINX_SPI_H */ -- cgit v1.2.3 From 86fc593599c11b62a11c85b4d7b709089df15c29 Mon Sep 17 00:00:00 2001 From: Richard Röjfors Date: Fri, 13 Nov 2009 12:28:49 +0100 Subject: xilinx_spi: Switch to iomem functions and support little endian. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch changes the out_(be)(8|16|32) and in_(be)(8|16|32) calls to 32 bits ioread/iowrite. The read and write function are attached to the internal struct as callbacks, callback is selected depending on endianess. This will also build on platforms not supporting the in/out calls for instance x86. Acked-by: Grant Likely Tested-by: John Linn Signed-off-by: Richard Röjfors Signed-off-by: Grant Likely --- drivers/spi/Kconfig | 2 +- drivers/spi/xilinx_spi.c | 96 ++++++++++++++++++++++++------------------ include/linux/spi/xilinx_spi.h | 2 + 3 files changed, 57 insertions(+), 43 deletions(-) (limited to 'include') diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index f34a2d16d18f..b528271c72ef 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -244,7 +244,7 @@ config SPI_TXX9 config SPI_XILINX tristate "Xilinx SPI controller" - depends on EXPERIMENTAL + depends on HAS_IOMEM && EXPERIMENTAL select SPI_BITBANG select SPI_XILINX_OF if (XILINX_VIRTEX || MICROBLAZE) help diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c index 69fa26d82ce4..135a3a5931a4 100644 --- a/drivers/spi/xilinx_spi.c +++ b/drivers/spi/xilinx_spi.c @@ -27,7 +27,7 @@ /* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e) * Product Specification", DS464 */ -#define XSPI_CR_OFFSET 0x62 /* 16-bit Control Register */ +#define XSPI_CR_OFFSET 0x60 /* 16-bit Control Register */ #define XSPI_CR_ENABLE 0x02 #define XSPI_CR_MASTER_MODE 0x04 @@ -39,7 +39,7 @@ #define XSPI_CR_MANUAL_SSELECT 0x80 #define XSPI_CR_TRANS_INHIBIT 0x100 -#define XSPI_SR_OFFSET 0x67 /* 8-bit Status Register */ +#define XSPI_SR_OFFSET 0x64 /* 8-bit Status Register */ #define XSPI_SR_RX_EMPTY_MASK 0x01 /* Receive FIFO is empty */ #define XSPI_SR_RX_FULL_MASK 0x02 /* Receive FIFO is full */ @@ -47,8 +47,8 @@ #define XSPI_SR_TX_FULL_MASK 0x08 /* Transmit FIFO is full */ #define XSPI_SR_MODE_FAULT_MASK 0x10 /* Mode fault error */ -#define XSPI_TXD_OFFSET 0x6b /* 8-bit Data Transmit Register */ -#define XSPI_RXD_OFFSET 0x6f /* 8-bit Data Receive Register */ +#define XSPI_TXD_OFFSET 0x68 /* 8-bit Data Transmit Register */ +#define XSPI_RXD_OFFSET 0x6c /* 8-bit Data Receive Register */ #define XSPI_SSR_OFFSET 0x70 /* 32-bit Slave Select Register */ @@ -86,25 +86,29 @@ struct xilinx_spi { u8 *rx_ptr; /* pointer in the Tx buffer */ const u8 *tx_ptr; /* pointer in the Rx buffer */ int remaining_bytes; /* the number of bytes left to transfer */ + unsigned int (*read_fn) (void __iomem *); + void (*write_fn) (u32, void __iomem *); }; -static void xspi_init_hw(void __iomem *regs_base) +static void xspi_init_hw(struct xilinx_spi *xspi) { + void __iomem *regs_base = xspi->regs; + /* Reset the SPI device */ - out_be32(regs_base + XIPIF_V123B_RESETR_OFFSET, - XIPIF_V123B_RESET_MASK); + xspi->write_fn(XIPIF_V123B_RESET_MASK, + regs_base + XIPIF_V123B_RESETR_OFFSET); /* Disable all the interrupts just in case */ - out_be32(regs_base + XIPIF_V123B_IIER_OFFSET, 0); + xspi->write_fn(0, regs_base + XIPIF_V123B_IIER_OFFSET); /* Enable the global IPIF interrupt */ - out_be32(regs_base + XIPIF_V123B_DGIER_OFFSET, - XIPIF_V123B_GINTR_ENABLE); + xspi->write_fn(XIPIF_V123B_GINTR_ENABLE, + regs_base + XIPIF_V123B_DGIER_OFFSET); /* Deselect the slave on the SPI bus */ - out_be32(regs_base + XSPI_SSR_OFFSET, 0xffff); + xspi->write_fn(0xffff, regs_base + XSPI_SSR_OFFSET); /* Disable the transmitter, enable Manual Slave Select Assertion, * put SPI 
controller into master mode, and enable it */ - out_be16(regs_base + XSPI_CR_OFFSET, - XSPI_CR_TRANS_INHIBIT | XSPI_CR_MANUAL_SSELECT - | XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE); + xspi->write_fn(XSPI_CR_TRANS_INHIBIT | XSPI_CR_MANUAL_SSELECT | + XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE, + regs_base + XSPI_CR_OFFSET); } static void xilinx_spi_chipselect(struct spi_device *spi, int is_on) @@ -113,16 +117,16 @@ static void xilinx_spi_chipselect(struct spi_device *spi, int is_on) if (is_on == BITBANG_CS_INACTIVE) { /* Deselect the slave on the SPI bus */ - out_be32(xspi->regs + XSPI_SSR_OFFSET, 0xffff); + xspi->write_fn(0xffff, xspi->regs + XSPI_SSR_OFFSET); } else if (is_on == BITBANG_CS_ACTIVE) { /* Set the SPI clock phase and polarity */ - u16 cr = in_be16(xspi->regs + XSPI_CR_OFFSET) + u16 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) & ~XSPI_CR_MODE_MASK; if (spi->mode & SPI_CPHA) cr |= XSPI_CR_CPHA; if (spi->mode & SPI_CPOL) cr |= XSPI_CR_CPOL; - out_be16(xspi->regs + XSPI_CR_OFFSET, cr); + xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET); /* We do not check spi->max_speed_hz here as the SPI clock * frequency is not software programmable (the IP block design @@ -130,8 +134,8 @@ static void xilinx_spi_chipselect(struct spi_device *spi, int is_on) */ /* Activate the chip select */ - out_be32(xspi->regs + XSPI_SSR_OFFSET, - ~(0x0001 << spi->chip_select)); + xspi->write_fn(~(0x0001 << spi->chip_select), + xspi->regs + XSPI_SSR_OFFSET); } } @@ -178,15 +182,15 @@ static void xilinx_spi_fill_tx_fifo(struct xilinx_spi *xspi) u8 sr; /* Fill the Tx FIFO with as many bytes as possible */ - sr = in_8(xspi->regs + XSPI_SR_OFFSET); + sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); while ((sr & XSPI_SR_TX_FULL_MASK) == 0 && xspi->remaining_bytes > 0) { - if (xspi->tx_ptr) { - out_8(xspi->regs + XSPI_TXD_OFFSET, *xspi->tx_ptr++); - } else { - out_8(xspi->regs + XSPI_TXD_OFFSET, 0); - } + if (xspi->tx_ptr) + xspi->write_fn(*xspi->tx_ptr++, + xspi->regs + XSPI_TXD_OFFSET); + else + xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET); xspi->remaining_bytes--; - sr = in_8(xspi->regs + XSPI_SR_OFFSET); + sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); } } @@ -208,18 +212,19 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) /* Enable the transmit empty interrupt, which we use to determine * progress on the transmission. 
*/ - ipif_ier = in_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET); - out_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET, - ipif_ier | XSPI_INTR_TX_EMPTY); + ipif_ier = xspi->read_fn(xspi->regs + XIPIF_V123B_IIER_OFFSET); + xspi->write_fn(ipif_ier | XSPI_INTR_TX_EMPTY, + xspi->regs + XIPIF_V123B_IIER_OFFSET); /* Start the transfer by not inhibiting the transmitter any longer */ - cr = in_be16(xspi->regs + XSPI_CR_OFFSET) & ~XSPI_CR_TRANS_INHIBIT; - out_be16(xspi->regs + XSPI_CR_OFFSET, cr); + cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) & + ~XSPI_CR_TRANS_INHIBIT; + xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET); wait_for_completion(&xspi->done); /* Disable the transmit empty interrupt */ - out_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET, ipif_ier); + xspi->write_fn(ipif_ier, xspi->regs + XIPIF_V123B_IIER_OFFSET); return t->len - xspi->remaining_bytes; } @@ -236,8 +241,8 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id) u32 ipif_isr; /* Get the IPIF interrupts, and clear them immediately */ - ipif_isr = in_be32(xspi->regs + XIPIF_V123B_IISR_OFFSET); - out_be32(xspi->regs + XIPIF_V123B_IISR_OFFSET, ipif_isr); + ipif_isr = xspi->read_fn(xspi->regs + XIPIF_V123B_IISR_OFFSET); + xspi->write_fn(ipif_isr, xspi->regs + XIPIF_V123B_IISR_OFFSET); if (ipif_isr & XSPI_INTR_TX_EMPTY) { /* Transmission completed */ u16 cr; @@ -248,20 +253,20 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id) * transmitter while the Isr refills the transmit register/FIFO, * or make sure it is stopped if we're done. */ - cr = in_be16(xspi->regs + XSPI_CR_OFFSET); - out_be16(xspi->regs + XSPI_CR_OFFSET, - cr | XSPI_CR_TRANS_INHIBIT); + cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET); + xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT, + xspi->regs + XSPI_CR_OFFSET); /* Read out all the data from the Rx FIFO */ - sr = in_8(xspi->regs + XSPI_SR_OFFSET); + sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) { u8 data; - data = in_8(xspi->regs + XSPI_RXD_OFFSET); + data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET); if (xspi->rx_ptr) { *xspi->rx_ptr++ = data; } - sr = in_8(xspi->regs + XSPI_SR_OFFSET); + sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); } /* See if there is more data to send */ @@ -270,7 +275,7 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id) /* Start the transfer by not inhibiting the * transmitter any longer */ - out_be16(xspi->regs + XSPI_CR_OFFSET, cr); + xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET); } else { /* No more data to send. * Indicate the transfer is completed. @@ -325,9 +330,16 @@ struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem, xspi->mem = *mem; xspi->irq = irq; + if (pdata->little_endian) { + xspi->read_fn = ioread32; + xspi->write_fn = iowrite32; + } else { + xspi->read_fn = ioread32be; + xspi->write_fn = iowrite32be; + } /* SPI controller initializations */ - xspi_init_hw(xspi->regs); + xspi_init_hw(xspi); /* Register for SPI Interrupt */ ret = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi); diff --git a/include/linux/spi/xilinx_spi.h b/include/linux/spi/xilinx_spi.h index 06df0aba3aee..a705ad85eebe 100644 --- a/include/linux/spi/xilinx_spi.h +++ b/include/linux/spi/xilinx_spi.h @@ -4,11 +4,13 @@ /** * struct xspi_platform_data - Platform data of the Xilinx SPI driver * @num_chipselect: Number of chip select by the IP + * @little_endian If registers should be accessed little endian or not * @devices: Devices to add when the driver is probed. 
* @num_devices: Number of devices in the devices array. */ struct xspi_platform_data { u16 num_chipselect; + bool little_endian; struct spi_board_info *devices; u8 num_devices; }; -- cgit v1.2.3 From c9da2e125588677d74324df5088149063d578e8f Mon Sep 17 00:00:00 2001 From: Richard Röjfors Date: Fri, 13 Nov 2009 12:28:55 +0100 Subject: xilinx_spi: add support for the DS570 IP. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch adds in support for the DS570 IP. It's register compatible with the DS464, but adds support for 8/16/32 SPI. The 8/16/32 support is added by attaching callbacks reading/writing the proper amount of data. To indicate to the driver which amount of bits to use a new field is introduced in the platform data struct. Acked-by: Grant Likely Tested-by: John Linn Signed-off-by: Richard Röjfors Signed-off-by: Grant Likely --- drivers/spi/Kconfig | 4 +- drivers/spi/xilinx_spi.c | 118 ++++++++++++++++++++++++++++++----------- drivers/spi/xilinx_spi_of.c | 1 + include/linux/spi/xilinx_spi.h | 6 ++- 4 files changed, 95 insertions(+), 34 deletions(-) (limited to 'include') diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index b528271c72ef..4dda097c239e 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -243,7 +243,7 @@ config SPI_TXX9 SPI driver for Toshiba TXx9 MIPS SoCs config SPI_XILINX - tristate "Xilinx SPI controller" + tristate "Xilinx SPI controller common module" depends on HAS_IOMEM && EXPERIMENTAL select SPI_BITBANG select SPI_XILINX_OF if (XILINX_VIRTEX || MICROBLAZE) @@ -253,6 +253,8 @@ config SPI_XILINX See the "OPB Serial Peripheral Interface (SPI) (v1.00e)" Product Specification document (DS464) for hardware details. + Or for the DS570, see "XPS Serial Peripheral Interface (SPI) (v2.00b)" + config SPI_XILINX_OF tristate "Xilinx SPI controller OF device" depends on SPI_XILINX && (XILINX_VIRTEX || MICROBLAZE) diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c index 135a3a5931a4..b927812822c1 100644 --- a/drivers/spi/xilinx_spi.c +++ b/drivers/spi/xilinx_spi.c @@ -27,7 +27,7 @@ /* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e) * Product Specification", DS464 */ -#define XSPI_CR_OFFSET 0x60 /* 16-bit Control Register */ +#define XSPI_CR_OFFSET 0x60 /* Control Register */ #define XSPI_CR_ENABLE 0x02 #define XSPI_CR_MASTER_MODE 0x04 @@ -38,8 +38,9 @@ #define XSPI_CR_RXFIFO_RESET 0x40 #define XSPI_CR_MANUAL_SSELECT 0x80 #define XSPI_CR_TRANS_INHIBIT 0x100 +#define XSPI_CR_LSB_FIRST 0x200 -#define XSPI_SR_OFFSET 0x64 /* 8-bit Status Register */ +#define XSPI_SR_OFFSET 0x64 /* Status Register */ #define XSPI_SR_RX_EMPTY_MASK 0x01 /* Receive FIFO is empty */ #define XSPI_SR_RX_FULL_MASK 0x02 /* Receive FIFO is full */ @@ -47,8 +48,8 @@ #define XSPI_SR_TX_FULL_MASK 0x08 /* Transmit FIFO is full */ #define XSPI_SR_MODE_FAULT_MASK 0x10 /* Mode fault error */ -#define XSPI_TXD_OFFSET 0x68 /* 8-bit Data Transmit Register */ -#define XSPI_RXD_OFFSET 0x6c /* 8-bit Data Receive Register */ +#define XSPI_TXD_OFFSET 0x68 /* Data Transmit Register */ +#define XSPI_RXD_OFFSET 0x6c /* Data Receive Register */ #define XSPI_SSR_OFFSET 0x70 /* 32-bit Slave Select Register */ @@ -68,6 +69,7 @@ #define XSPI_INTR_TX_UNDERRUN 0x08 /* TxFIFO was underrun */ #define XSPI_INTR_RX_FULL 0x10 /* RxFIFO is full */ #define XSPI_INTR_RX_OVERRUN 0x20 /* RxFIFO was overrun */ +#define XSPI_INTR_TX_HALF_EMPTY 0x40 /* TxFIFO is half empty */ #define XIPIF_V123B_RESETR_OFFSET 0x40 /* IPIF reset register 
*/ #define XIPIF_V123B_RESET_MASK 0x0a /* the value to write */ @@ -81,15 +83,61 @@ struct xilinx_spi { u32 irq; - u32 speed_hz; /* SCK has a fixed frequency of speed_hz Hz */ - u8 *rx_ptr; /* pointer in the Tx buffer */ const u8 *tx_ptr; /* pointer in the Rx buffer */ int remaining_bytes; /* the number of bytes left to transfer */ + u8 bits_per_word; unsigned int (*read_fn) (void __iomem *); void (*write_fn) (u32, void __iomem *); + void (*tx_fn) (struct xilinx_spi *); + void (*rx_fn) (struct xilinx_spi *); }; +static void xspi_tx8(struct xilinx_spi *xspi) +{ + xspi->write_fn(*xspi->tx_ptr, xspi->regs + XSPI_TXD_OFFSET); + xspi->tx_ptr++; +} + +static void xspi_tx16(struct xilinx_spi *xspi) +{ + xspi->write_fn(*(u16 *)(xspi->tx_ptr), xspi->regs + XSPI_TXD_OFFSET); + xspi->tx_ptr += 2; +} + +static void xspi_tx32(struct xilinx_spi *xspi) +{ + xspi->write_fn(*(u32 *)(xspi->tx_ptr), xspi->regs + XSPI_TXD_OFFSET); + xspi->tx_ptr += 4; +} + +static void xspi_rx8(struct xilinx_spi *xspi) +{ + u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET); + if (xspi->rx_ptr) { + *xspi->rx_ptr = data & 0xff; + xspi->rx_ptr++; + } +} + +static void xspi_rx16(struct xilinx_spi *xspi) +{ + u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET); + if (xspi->rx_ptr) { + *(u16 *)(xspi->rx_ptr) = data & 0xffff; + xspi->rx_ptr += 2; + } +} + +static void xspi_rx32(struct xilinx_spi *xspi) +{ + u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET); + if (xspi->rx_ptr) { + *(u32 *)(xspi->rx_ptr) = data; + xspi->rx_ptr += 4; + } +} + static void xspi_init_hw(struct xilinx_spi *xspi) { void __iomem *regs_base = xspi->regs; @@ -107,8 +155,8 @@ static void xspi_init_hw(struct xilinx_spi *xspi) /* Disable the transmitter, enable Manual Slave Select Assertion, * put SPI controller into master mode, and enable it */ xspi->write_fn(XSPI_CR_TRANS_INHIBIT | XSPI_CR_MANUAL_SSELECT | - XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE, - regs_base + XSPI_CR_OFFSET); + XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE | XSPI_CR_TXFIFO_RESET | + XSPI_CR_RXFIFO_RESET, regs_base + XSPI_CR_OFFSET); } static void xilinx_spi_chipselect(struct spi_device *spi, int is_on) @@ -141,18 +189,20 @@ static void xilinx_spi_chipselect(struct spi_device *spi, int is_on) /* spi_bitbang requires custom setup_transfer() to be defined if there is a * custom txrx_bufs(). We have nothing to setup here as the SPI IP block - * supports just 8 bits per word, and SPI clock can't be changed in software. - * Check for 8 bits per word. Chip select delay calculations could be + * supports 8 or 16 bits per word which cannot be changed in software. + * SPI clock can't be changed in software either. + * Check for correct bits per word. Chip select delay calculations could be * added here as soon as bitbang_work() can be made aware of the delay value. */ static int xilinx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) { + struct xilinx_spi *xspi = spi_master_get_devdata(spi->master); u8 bits_per_word; bits_per_word = (t && t->bits_per_word) ? 
t->bits_per_word : spi->bits_per_word; - if (bits_per_word != 8) { + if (bits_per_word != xspi->bits_per_word) { dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n", __func__, bits_per_word); return -EINVAL; @@ -163,17 +213,16 @@ static int xilinx_spi_setup_transfer(struct spi_device *spi, static int xilinx_spi_setup(struct spi_device *spi) { - struct spi_bitbang *bitbang; - struct xilinx_spi *xspi; - int retval; - - xspi = spi_master_get_devdata(spi->master); - bitbang = &xspi->bitbang; - - retval = xilinx_spi_setup_transfer(spi, NULL); - if (retval < 0) - return retval; - + /* always return 0, we can not check the number of bits. + * There are cases when SPI setup is called before any driver is + * there, in that case the SPI core defaults to 8 bits, which we + * do not support in some cases. But if we return an error, the + * SPI device would not be registered and no driver can get hold of it + * When the driver is there, it will call SPI setup again with the + * correct number of bits per transfer. + * If a driver setups with the wrong bit number, it will fail when + * it tries to do a transfer + */ return 0; } @@ -185,11 +234,10 @@ static void xilinx_spi_fill_tx_fifo(struct xilinx_spi *xspi) sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); while ((sr & XSPI_SR_TX_FULL_MASK) == 0 && xspi->remaining_bytes > 0) { if (xspi->tx_ptr) - xspi->write_fn(*xspi->tx_ptr++, - xspi->regs + XSPI_TXD_OFFSET); + xspi->tx_fn(xspi); else xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET); - xspi->remaining_bytes--; + xspi->remaining_bytes -= xspi->bits_per_word / 8; sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); } } @@ -260,12 +308,7 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id) /* Read out all the data from the Rx FIFO */ sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) { - u8 data; - - data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET); - if (xspi->rx_ptr) { - *xspi->rx_ptr++ = data; - } + xspi->rx_fn(xspi); sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); } @@ -337,6 +380,19 @@ struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem, xspi->read_fn = ioread32be; xspi->write_fn = iowrite32be; } + xspi->bits_per_word = pdata->bits_per_word; + if (xspi->bits_per_word == 8) { + xspi->tx_fn = xspi_tx8; + xspi->rx_fn = xspi_rx8; + } else if (xspi->bits_per_word == 16) { + xspi->tx_fn = xspi_tx16; + xspi->rx_fn = xspi_rx16; + } else if (xspi->bits_per_word == 32) { + xspi->tx_fn = xspi_tx32; + xspi->rx_fn = xspi_rx32; + } else + goto unmap_io; + /* SPI controller initializations */ xspi_init_hw(xspi); diff --git a/drivers/spi/xilinx_spi_of.c b/drivers/spi/xilinx_spi_of.c index 151aa13494bd..71dc3adc0495 100644 --- a/drivers/spi/xilinx_spi_of.c +++ b/drivers/spi/xilinx_spi_of.c @@ -72,6 +72,7 @@ static int __devinit xilinx_spi_of_probe(struct of_device *ofdev, return -EINVAL; } pdata->num_chipselect = *prop; + pdata->bits_per_word = 8; master = xilinx_spi_init(&ofdev->dev, &r_mem, r_irq.start, -1); if (!master) return -ENODEV; diff --git a/include/linux/spi/xilinx_spi.h b/include/linux/spi/xilinx_spi.h index a705ad85eebe..6f17278810b0 100644 --- a/include/linux/spi/xilinx_spi.h +++ b/include/linux/spi/xilinx_spi.h @@ -3,14 +3,16 @@ /** * struct xspi_platform_data - Platform data of the Xilinx SPI driver - * @num_chipselect: Number of chip select by the IP - * @little_endian If registers should be accessed little endian or not + * @num_chipselect: Number of chip select by the IP. 
+ * @little_endian: If registers should be accessed little endian or not. + * @bits_per_word: Number of bits per word. * @devices: Devices to add when the driver is probed. * @num_devices: Number of devices in the devices array. */ struct xspi_platform_data { u16 num_chipselect; bool little_endian; + u8 bits_per_word; struct spi_board_info *devices; u8 num_devices; }; -- cgit v1.2.3 From 937041e21634ffecc92d05cf693423a2c95b7252 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Tue, 24 Nov 2009 17:18:31 -0700 Subject: spi/mpc52xx-spi: minor cleanups - drop own, obsolete include-file - drop IRQF_SAMPLE_RANDOM (deprecated feature) - drop 'if' above kfree() - typos, braces & whitespaces Signed-off-by: Wolfram Sang Acked-by: Luotao Fu Signed-off-by: Grant Likely --- drivers/spi/mpc52xx_spi.c | 26 +++++++++++--------------- include/linux/spi/mpc52xx_spi.h | 10 ---------- 2 files changed, 11 insertions(+), 25 deletions(-) delete mode 100644 include/linux/spi/mpc52xx_spi.h (limited to 'include') diff --git a/drivers/spi/mpc52xx_spi.c b/drivers/spi/mpc52xx_spi.c index 97beba2895e7..45bfe6458173 100644 --- a/drivers/spi/mpc52xx_spi.c +++ b/drivers/spi/mpc52xx_spi.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include @@ -54,7 +53,7 @@ MODULE_LICENSE("GPL"); /* FSM state return values */ #define FSM_STOP 0 /* Nothing more for the state machine to */ /* do. If something interesting happens */ - /* then and IRQ will be received */ + /* then an IRQ will be received */ #define FSM_POLL 1 /* need to poll for completion, an IRQ is */ /* not expected */ #define FSM_CONTINUE 2 /* Keep iterating the state machine */ @@ -62,13 +61,12 @@ MODULE_LICENSE("GPL"); /* Driver internal data */ struct mpc52xx_spi { struct spi_master *master; - u32 sysclk; void __iomem *regs; int irq0; /* MODF irq */ int irq1; /* SPIF irq */ - int ipb_freq; + unsigned int ipb_freq; - /* Statistics */ + /* Statistics; not used now, but will be reintroduced for debugfs */ int msg_count; int wcol_count; int wcol_ticks; @@ -229,7 +227,7 @@ static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms, ms->wcol_tx_timestamp = get_tbl(); data = 0; if (ms->tx_buf) - data = *(ms->tx_buf-1); + data = *(ms->tx_buf - 1); out_8(ms->regs + SPI_DATA, data); /* try again */ return FSM_CONTINUE; } else if (status & SPI_STATUS_MODF) { @@ -481,8 +479,9 @@ static int __devinit mpc52xx_spi_probe(struct of_device *op, gpio_direction_output(gpio_cs, 1); ms->gpio_cs[i] = gpio_cs; } - } else + } else { master->num_chipselect = 1; + } spin_lock_init(&ms->lock); INIT_LIST_HEAD(&ms->queue); @@ -490,10 +489,10 @@ static int __devinit mpc52xx_spi_probe(struct of_device *op, /* Decide if interrupts can be used */ if (ms->irq0 && ms->irq1) { - rc = request_irq(ms->irq0, mpc52xx_spi_irq, IRQF_SAMPLE_RANDOM, + rc = request_irq(ms->irq0, mpc52xx_spi_irq, 0, "mpc5200-spi-modf", ms); - rc |= request_irq(ms->irq1, mpc52xx_spi_irq, IRQF_SAMPLE_RANDOM, - "mpc5200-spi-spiF", ms); + rc |= request_irq(ms->irq1, mpc52xx_spi_irq, 0, + "mpc5200-spi-spif", ms); if (rc) { free_irq(ms->irq0, ms); free_irq(ms->irq1, ms); @@ -524,8 +523,7 @@ static int __devinit mpc52xx_spi_probe(struct of_device *op, while (i-- > 0) gpio_free(ms->gpio_cs[i]); - if (ms->gpio_cs != NULL) - kfree(ms->gpio_cs); + kfree(ms->gpio_cs); err_alloc: err_init: iounmap(regs); @@ -544,9 +542,7 @@ static int __devexit mpc52xx_spi_remove(struct of_device *op) for (i = 0; i < ms->gpio_cs_count; i++) gpio_free(ms->gpio_cs[i]); - if (ms->gpio_cs != NULL) - kfree(ms->gpio_cs); - + 
kfree(ms->gpio_cs); spi_unregister_master(master); spi_master_put(master); iounmap(ms->regs); diff --git a/include/linux/spi/mpc52xx_spi.h b/include/linux/spi/mpc52xx_spi.h deleted file mode 100644 index d1004cf09241..000000000000 --- a/include/linux/spi/mpc52xx_spi.h +++ /dev/null @@ -1,10 +0,0 @@ - -#ifndef INCLUDE_MPC5200_SPI_H -#define INCLUDE_MPC5200_SPI_H - -extern void mpc52xx_spi_set_premessage_hook(struct spi_master *master, - void (*hook)(struct spi_message *m, - void *context), - void *hook_context); - -#endif -- cgit v1.2.3 From ef61aae4ddf1dbd0e9b6ad21e2e57632a8fe76f6 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Mon, 7 Dec 2009 14:20:06 +0000 Subject: sh: add a start_transfer() callback to the LCDC driver This patch adds a ->start_transfer() callback to the driver sh_mobile_lcdcfb.c. The callback is used to program the LCDC panel in the case of one-shot mode. Needed by the LCD controller used on the KFR2R09 board. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- drivers/video/sh_mobile_lcdcfb.c | 10 +++++++++- include/video/sh_mobile_lcdc.h | 2 ++ 2 files changed, 11 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index b4b5de930cf5..d346bbab6cad 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c @@ -281,6 +281,7 @@ static void sh_mobile_lcdc_deferred_io(struct fb_info *info, struct list_head *pagelist) { struct sh_mobile_lcdc_chan *ch = info->par; + struct sh_mobile_lcdc_board_cfg *bcfg = &ch->cfg.board_cfg; /* enable clocks before accessing hardware */ sh_mobile_lcdc_clk_on(ch->lcdc); @@ -305,10 +306,17 @@ static void sh_mobile_lcdc_deferred_io(struct fb_info *info, /* trigger panel update */ dma_map_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE); + if (bcfg->start_transfer) + bcfg->start_transfer(bcfg->board_data, ch, + &sh_mobile_lcdc_sys_bus_ops); lcdc_write_chan(ch, LDSM2R, 1); dma_unmap_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE); - } else + } else { + if (bcfg->start_transfer) + bcfg->start_transfer(bcfg->board_data, ch, + &sh_mobile_lcdc_sys_bus_ops); lcdc_write_chan(ch, LDSM2R, 1); + } } static void sh_mobile_lcdc_deferred_io_touch(struct fb_info *info) diff --git a/include/video/sh_mobile_lcdc.h b/include/video/sh_mobile_lcdc.h index 25144ab22b95..288205457713 100644 --- a/include/video/sh_mobile_lcdc.h +++ b/include/video/sh_mobile_lcdc.h @@ -50,6 +50,8 @@ struct sh_mobile_lcdc_board_cfg { void *board_data; int (*setup_sys)(void *board_data, void *sys_ops_handle, struct sh_mobile_lcdc_sys_bus_ops *sys_ops); + void (*start_transfer)(void *board_data, void *sys_ops_handle, + struct sh_mobile_lcdc_sys_bus_ops *sys_ops); void (*display_on)(void *board_data); void (*display_off)(void *board_data); }; -- cgit v1.2.3 From 9327f7053e3993c125944fdb137a0618319ef2a0 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 4 Dec 2009 03:46:54 +0000 Subject: tcp: Fix a connect() race with timewait sockets First patch changes __inet_hash_nolisten() and __inet6_hash() to get a timewait parameter to be able to unhash it from ehash at same time the new socket is inserted in hash. This makes sure timewait socket wont be found by a concurrent writer in __inet_check_established() Reported-by: kapil dakhane Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/inet6_hashtables.h | 2 +- include/net/inet_hashtables.h | 8 +++++--- net/dccp/ipv4.c | 2 +- net/dccp/ipv6.c | 4 ++-- net/ipv4/inet_hashtables.c | 22 ++++++++++++++++------ net/ipv4/tcp_ipv4.c | 2 +- net/ipv6/inet6_hashtables.c | 8 +++++++- net/ipv6/tcp_ipv6.c | 4 ++-- 8 files changed, 35 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h index 92838d3a1ab7..e46674d5daea 100644 --- a/include/net/inet6_hashtables.h +++ b/include/net/inet6_hashtables.h @@ -53,7 +53,7 @@ static inline int inet6_sk_ehashfn(const struct sock *sk) return inet6_ehashfn(net, laddr, lport, faddr, fport); } -extern void __inet6_hash(struct sock *sk); +extern int __inet6_hash(struct sock *sk, struct inet_timewait_sock *twp); /* * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index 41cbddd25b70..74358d1b3f43 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -251,7 +251,7 @@ extern void inet_put_port(struct sock *sk); void inet_hashinfo_init(struct inet_hashinfo *h); -extern void __inet_hash_nolisten(struct sock *sk); +extern int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw); extern void inet_hash(struct sock *sk); extern void inet_unhash(struct sock *sk); @@ -391,10 +391,12 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo, } extern int __inet_hash_connect(struct inet_timewait_death_row *death_row, - struct sock *sk, u32 port_offset, + struct sock *sk, + u32 port_offset, int (*check_established)(struct inet_timewait_death_row *, struct sock *, __u16, struct inet_timewait_sock **), - void (*hash)(struct sock *sk)); + int (*hash)(struct sock *sk, struct inet_timewait_sock *twp)); + extern int inet_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk); #endif /* _INET_HASHTABLES_H */ diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index efbcfdc12796..dad7bc4878e0 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -408,7 +408,7 @@ struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb, dccp_sync_mss(newsk, dst_mtu(dst)); - __inet_hash_nolisten(newsk); + __inet_hash_nolisten(newsk, NULL); __inet_inherit_port(sk, newsk); return newsk; diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 6574215a1f51..baf05cf43c28 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -46,7 +46,7 @@ static void dccp_v6_hash(struct sock *sk) return; } local_bh_disable(); - __inet6_hash(sk); + __inet6_hash(sk, NULL); local_bh_enable(); } } @@ -644,7 +644,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk, newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6; newinet->inet_rcv_saddr = LOOPBACK4_IPV6; - __inet6_hash(newsk); + __inet6_hash(newsk, NULL); __inet_inherit_port(sk, newsk); return newsk; diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 21e5e32d8c60..c4201b7ece38 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -351,12 +351,13 @@ static inline u32 inet_sk_port_offset(const struct sock *sk) inet->inet_dport); } -void __inet_hash_nolisten(struct sock *sk) +int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw) { struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; struct hlist_nulls_head *list; spinlock_t *lock; struct inet_ehash_bucket *head; + int twrefcnt = 0; WARN_ON(!sk_unhashed(sk)); @@ -367,8 +368,13 @@ void 
__inet_hash_nolisten(struct sock *sk) spin_lock(lock); __sk_nulls_add_node_rcu(sk, list); + if (tw) { + WARN_ON(sk->sk_hash != tw->tw_hash); + twrefcnt = inet_twsk_unhash(tw); + } spin_unlock(lock); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); + return twrefcnt; } EXPORT_SYMBOL_GPL(__inet_hash_nolisten); @@ -378,7 +384,7 @@ static void __inet_hash(struct sock *sk) struct inet_listen_hashbucket *ilb; if (sk->sk_state != TCP_LISTEN) { - __inet_hash_nolisten(sk); + __inet_hash_nolisten(sk, NULL); return; } @@ -427,7 +433,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk, u32 port_offset, int (*check_established)(struct inet_timewait_death_row *, struct sock *, __u16, struct inet_timewait_sock **), - void (*hash)(struct sock *sk)) + int (*hash)(struct sock *sk, struct inet_timewait_sock *twp)) { struct inet_hashinfo *hinfo = death_row->hashinfo; const unsigned short snum = inet_sk(sk)->inet_num; @@ -435,6 +441,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, struct inet_bind_bucket *tb; int ret; struct net *net = sock_net(sk); + int twrefcnt = 1; if (!snum) { int i, remaining, low, high, port; @@ -493,13 +500,16 @@ ok: inet_bind_hash(sk, tb, port); if (sk_unhashed(sk)) { inet_sk(sk)->inet_sport = htons(port); - hash(sk); + twrefcnt += hash(sk, tw); } spin_unlock(&head->lock); if (tw) { inet_twsk_deschedule(tw, death_row); - inet_twsk_put(tw); + while (twrefcnt) { + twrefcnt--; + inet_twsk_put(tw); + } } ret = 0; @@ -510,7 +520,7 @@ ok: tb = inet_csk(sk)->icsk_bind_hash; spin_lock_bh(&head->lock); if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { - hash(sk); + hash(sk, NULL); spin_unlock_bh(&head->lock); return 0; } else { diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 29002ab26e0d..15e96030ce47 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1464,7 +1464,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, } #endif - __inet_hash_nolisten(newsk); + __inet_hash_nolisten(newsk, NULL); __inet_inherit_port(sk, newsk); return newsk; diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index c813e294ec0c..633a6c266136 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -22,9 +22,10 @@ #include #include -void __inet6_hash(struct sock *sk) +int __inet6_hash(struct sock *sk, struct inet_timewait_sock *tw) { struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; + int twrefcnt = 0; WARN_ON(!sk_unhashed(sk)); @@ -45,10 +46,15 @@ void __inet6_hash(struct sock *sk) lock = inet_ehash_lockp(hashinfo, hash); spin_lock(lock); __sk_nulls_add_node_rcu(sk, list); + if (tw) { + WARN_ON(sk->sk_hash != tw->tw_hash); + twrefcnt = inet_twsk_unhash(tw); + } spin_unlock(lock); } sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); + return twrefcnt; } EXPORT_SYMBOL(__inet6_hash); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index aadd7cef73b3..ee9cf62458d4 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -96,7 +96,7 @@ static void tcp_v6_hash(struct sock *sk) return; } local_bh_disable(); - __inet6_hash(sk); + __inet6_hash(sk, NULL); local_bh_enable(); } } @@ -1496,7 +1496,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, } #endif - __inet6_hash(newsk); + __inet6_hash(newsk, NULL); __inet_inherit_port(sk, newsk); return newsk; -- cgit v1.2.3 From 3cdaedae635b17ce23c738ce7d364b442310cdec Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 4 Dec 2009 03:47:42 +0000 Subject: tcp: Fix a 
connect() race with timewait sockets When we find a timewait connection in __inet_hash_connect() and reuse it for a new connection request, we have a race window, releasing bind list lock and reacquiring it in __inet_twsk_kill() to remove timewait socket from list. Another thread might find the timewait socket we already chose, leading to list corruption and crashes. Fix is to remove timewait socket from bind list before releasing the bind lock. Note: This problem happens if sysctl_tcp_tw_reuse is set. Reported-by: kapil dakhane Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/inet_timewait_sock.h | 3 +++ net/ipv4/inet_hashtables.c | 2 ++ net/ipv4/inet_timewait_sock.c | 29 +++++++++++++++++++++-------- 3 files changed, 26 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index b801ade2295e..79f67eae8a7e 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h @@ -201,6 +201,9 @@ extern void inet_twsk_put(struct inet_timewait_sock *tw); extern int inet_twsk_unhash(struct inet_timewait_sock *tw); +extern int inet_twsk_bind_unhash(struct inet_timewait_sock *tw, + struct inet_hashinfo *hashinfo); + extern struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state); diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index c4201b7ece38..2b79377b468d 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -502,6 +502,8 @@ ok: inet_sk(sk)->inet_sport = htons(port); twrefcnt += hash(sk, tw); } + if (tw) + twrefcnt += inet_twsk_bind_unhash(tw, hinfo); spin_unlock(&head->lock); if (tw) { diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index 0fdf45e4c90c..bf4b1e2a4305 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c @@ -29,12 +29,29 @@ int inet_twsk_unhash(struct inet_timewait_sock *tw) return 1; } +/* + * unhash a timewait socket from bind hash + * lock must be hold by caller + */ +int inet_twsk_bind_unhash(struct inet_timewait_sock *tw, + struct inet_hashinfo *hashinfo) +{ + struct inet_bind_bucket *tb = tw->tw_tb; + + if (!tb) + return 0; + + __hlist_del(&tw->tw_bind_node); + tw->tw_tb = NULL; + inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb); + return 1; +} + /* Must be called with locally disabled BHs. */ static void __inet_twsk_kill(struct inet_timewait_sock *tw, struct inet_hashinfo *hashinfo) { struct inet_bind_hashbucket *bhead; - struct inet_bind_bucket *tb; int refcnt; /* Unlink from established hashes. */ spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash); @@ -46,15 +63,11 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw, /* Disassociate with bind bucket. 
*/ bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num, hashinfo->bhash_size)]; + spin_lock(&bhead->lock); - tb = tw->tw_tb; - if (tb) { - __hlist_del(&tw->tw_bind_node); - tw->tw_tb = NULL; - inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb); - refcnt++; - } + refcnt += inet_twsk_bind_unhash(tw, hashinfo); spin_unlock(&bhead->lock); + #ifdef SOCK_REFCNT_DEBUG if (atomic_read(&tw->tw_refcnt) != 1) { printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n", -- cgit v1.2.3 From 64f16603eae17e869d5fc8a60ae987394190e639 Mon Sep 17 00:00:00 2001 From: Tilman Schmidt Date: Sat, 5 Dec 2009 08:54:20 +0000 Subject: gigaset: documentation amendments Various additions and improvements to the Gigaset driver's README file, and added comments to its userspace visible include file. Signed-off-by: Tilman Schmidt Signed-off-by: David S. Miller --- Documentation/isdn/README.gigaset | 116 ++++++++++++++++++++++++++++++-------- include/linux/gigaset_dev.h | 22 +++++--- 2 files changed, 106 insertions(+), 32 deletions(-) (limited to 'include') diff --git a/Documentation/isdn/README.gigaset b/Documentation/isdn/README.gigaset index 0fc9831d7ecb..794941fc9493 100644 --- a/Documentation/isdn/README.gigaset +++ b/Documentation/isdn/README.gigaset @@ -68,22 +68,38 @@ GigaSet 307x Device Driver for troubleshooting or to pass module parameters. The module ser_gigaset provides a serial line discipline N_GIGASET_M101 - which drives the device through the regular serial line driver. It must - be attached to the serial line to which the M101 is connected with the - ldattach(8) command (requires util-linux-ng release 2.14 or later), for - example: - ldattach GIGASET_M101 /dev/ttyS1 + which uses the regular serial port driver to access the device, and must + therefore be attached to the serial device to which the M101 is connected. + The ldattach(8) command (included in util-linux-ng release 2.14 or later) + can be used for that purpose, for example: + ldattach GIGASET_M101 /dev/ttyS1 This will open the device file, attach the line discipline to it, and then sleep in the background, keeping the device open so that the line discipline remains active. To deactivate it, kill the daemon, for example with - killall ldattach + killall ldattach before disconnecting the device. To have this happen automatically at system startup/shutdown on an LSB compatible system, create and activate an appropriate LSB startup script /etc/init.d/gigaset. (The init name 'gigaset' is officially assigned to this project by LANANA.) Alternatively, just add the 'ldattach' command line to /etc/rc.local. + The modules accept the following parameters: + + Module Parameter Meaning + + gigaset debug debug level (see section 3.2.) + + startmode initial operation mode (see section 2.5.): + bas_gigaset ) 1=ISDN4linux/CAPI (default), 0=Unimodem + ser_gigaset ) + usb_gigaset ) cidmode initial Call-ID mode setting (see section + 2.5.): 1=on (default), 0=off + + Depending on your distribution you may want to create a separate module + configuration file /etc/modprobe.d/gigaset for these, or add them to a + custom file like /etc/modprobe.conf.local. + 2.2. Device nodes for user space programs ------------------------------------ The device can be accessed from user space (eg. 
by the user space tools @@ -93,11 +109,48 @@ GigaSet 307x Device Driver - /dev/ttyGU0 for M105 (USB data boxes) - /dev/ttyGB0 for the base driver (direct USB connection) - You can also select a "default device" which is used by the frontends when + If you connect more than one device of a type, they will get consecutive + device nodes, eg. /dev/ttyGU1 for a second M105. + + You can also set a "default device" for the user space tools to use when no device node is given as parameter, by creating a symlink /dev/ttyG to one of them, eg.: - ln -s /dev/ttyGB0 /dev/ttyG + ln -s /dev/ttyGB0 /dev/ttyG + + The devices accept the following device specific ioctl calls + (defined in gigaset_dev.h): + + ioctl(int fd, GIGASET_REDIR, int *cmd); + If cmd==1, the device is set to be controlled exclusively through the + character device node; access from the ISDN subsystem is blocked. + If cmd==0, the device is set to be used from the ISDN subsystem and does + not communicate through the character device node. + + ioctl(int fd, GIGASET_CONFIG, int *cmd); + (ser_gigaset and usb_gigaset only) + If cmd==1, the device is set to adapter configuration mode where commands + are interpreted by the M10x DECT adapter itself instead of being + forwarded to the base station. In this mode, the device accepts the + commands described in Siemens document "AT-Kommando Alignment M10x Data" + for setting the operation mode, associating with a base station and + querying parameters like field strengh and signal quality. + Note that there is no ioctl command for leaving adapter configuration + mode and returning to regular operation. In order to leave adapter + configuration mode, write the command ATO to the device. + + ioctl(int fd, GIGASET_BRKCHARS, unsigned char brkchars[6]); + (usb_gigaset only) + Set the break characters on an M105's internal serial adapter to the six + bytes stored in brkchars[]. Unused bytes should be set to zero. + + ioctl(int fd, GIGASET_VERSION, unsigned version[4]); + Retrieve version information from the driver. version[0] must be set to + one of: + - GIGVER_DRIVER: retrieve driver version + - GIGVER_COMPAT: retrieve interface compatibility version + - GIGVER_FWBASE: retrieve the firmware version of the base + Upon return, version[] is filled with the requested version information. 2.3. ISDN4linux ---------- @@ -113,15 +166,24 @@ GigaSet 307x Device Driver Connection State: 0, Response: -1 gigaset_process_response: resp_code -1 in ConState 0 ! Timeout occurred - you might need to use unimodem mode. (see section 2.5.) + you probably need to use unimodem mode. (see section 2.5.) 2.4. CAPI ---- If the driver is compiled with CAPI support (kernel configuration option GIGASET_CAPI, experimental) it can also be used with CAPI 2.0 kernel and - user space applications. ISDN4Linux is supported in this configuration + user space applications. For user space access, the module capi.ko must + be loaded. The capiinit command (included in the capi4k-utils package) + does this for you. + + The CAPI variant of the driver supports legacy ISDN4Linux applications via the capidrv compatibility driver. The kernel module capidrv.ko must - be loaded explicitly ("modprobe capidrv") if needed. + be loaded explicitly with the command + modprobe capidrv + if needed, and cannot be unloaded again without unloading the driver + first. (These are limitations of capidrv.) + + The note about unimodem mode in the preceding section applies here, too. 2.5. 
Unimodem mode ------------- @@ -134,9 +196,14 @@ GigaSet 307x Device Driver You can switch back using gigacontr --mode isdn - You can also load the driver using e.g. - modprobe usb_gigaset startmode=0 - to prevent the driver from starting in "isdn4linux mode". + You can also put the driver directly into Unimodem mode when it's loaded, + by passing the module parameter startmode=0 to the hardware specific + module, e.g. + modprobe usb_gigaset startmode=0 + or by adding a line like + options usb_gigaset startmode=0 + to an appropriate module configuration file, like /etc/modprobe.d/gigaset + or /etc/modprobe.conf.local. In this mode the device works like a modem connected to a serial port (the /dev/ttyGU0, ... mentioned above) which understands the commands @@ -164,9 +231,8 @@ GigaSet 307x Device Driver options ppp_async flag_time=0 - to /etc/modprobe.conf. If your distribution has some local module - configuration file like /etc/modprobe.conf.local, - using that should be preferred. + to an appropriate module configuration file, like /etc/modprobe.d/gigaset + or /etc/modprobe.conf.local. 2.6. Call-ID (CID) mode ------------------ @@ -189,12 +255,13 @@ GigaSet 307x Device Driver settings (CID mode). - If you have several DECT data devices (M10x) which you want to use in turn, select Unimodem mode by passing the parameter "cidmode=0" to - the driver ("modprobe usb_gigaset cidmode=0" or modprobe.conf). + the appropriate driver module (ser_gigaset or usb_gigaset). If you want both of these at once, you are out of luck. - You can also use /sys/class/tty/ttyGxy/cidmode for changing the CID mode - setting (ttyGxy is ttyGU0 or ttyGB0). + You can also use the tty class parameter "cidmode" of the device to + change its CID mode while the driver is loaded, eg. + echo 0 > /sys/class/tty/ttyGU0/cidmode 2.7. Unregistered Wireless Devices (M101/M105) ----------------------------------------- @@ -208,7 +275,7 @@ GigaSet 307x Device Driver driver. In that situation, a restricted set of functions is available which includes, in particular, those necessary for registering the device to a base or for switching it between Fixed Part and Portable Part - modes. + modes. See the gigacontr(8) manpage for details. 3. Troubleshooting --------------- @@ -222,9 +289,7 @@ GigaSet 307x Device Driver options isdn dialtimeout=15 - to /etc/modprobe.conf. If your distribution has some local module - configuration file like /etc/modprobe.conf.local, - using that should be preferred. + to /etc/modprobe.d/gigaset, /etc/modprobe.conf.local or a similar file. Problem: Your isdn script aborts with a message about isdnlog. @@ -264,7 +329,8 @@ GigaSet 307x Device Driver The initial value can be set using the debug parameter when loading the module "gigaset", e.g. by adding a line options gigaset debug=0 - to /etc/modprobe.conf, ... + to your module configuration file, eg. /etc/modprobe.d/gigaset or + /etc/modprobe.conf.local. Generated debugging information can be found - as output of the command diff --git a/include/linux/gigaset_dev.h b/include/linux/gigaset_dev.h index 5dc4a316ca37..258ba82937e7 100644 --- a/include/linux/gigaset_dev.h +++ b/include/linux/gigaset_dev.h @@ -16,15 +16,23 @@ #include +/* The magic IOCTL value for this interface. 
*/ #define GIGASET_IOCTL 0x47 -#define GIGVER_DRIVER 0 -#define GIGVER_COMPAT 1 -#define GIGVER_FWBASE 2 +/* enable/disable device control via character device (lock out ISDN subsys) */ +#define GIGASET_REDIR _IOWR(GIGASET_IOCTL, 0, int) -#define GIGASET_REDIR _IOWR (GIGASET_IOCTL, 0, int) -#define GIGASET_CONFIG _IOWR (GIGASET_IOCTL, 1, int) -#define GIGASET_BRKCHARS _IOW (GIGASET_IOCTL, 2, unsigned char[6]) //FIXME [6] okay? -#define GIGASET_VERSION _IOWR (GIGASET_IOCTL, 3, unsigned[4]) +/* enable adapter configuration mode (M10x only) */ +#define GIGASET_CONFIG _IOWR(GIGASET_IOCTL, 1, int) + +/* set break characters (M105 only) */ +#define GIGASET_BRKCHARS _IOW(GIGASET_IOCTL, 2, unsigned char[6]) + +/* get version information selected by arg[0] */ +#define GIGASET_VERSION _IOWR(GIGASET_IOCTL, 3, unsigned[4]) +/* values for GIGASET_VERSION arg[0] */ +#define GIGVER_DRIVER 0 /* get driver version */ +#define GIGVER_COMPAT 1 /* get interface compatibility version */ +#define GIGVER_FWBASE 2 /* get base station firmware version */ #endif -- cgit v1.2.3 From b38310e99ed09163062902285edd6d7b3fc136d6 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 6 Dec 2009 10:35:30 +0000 Subject: include/linux/if_ether.h: Remove unused defines MAC_BUF_SIZE and DECLARE_MAC_BUF Signed-off-by: Joe Perches Signed-off-by: David S. Miller --- include/linux/if_ether.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include') diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index 005e1525ab86..299b4121f914 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h @@ -137,8 +137,6 @@ extern struct ctl_table ether_table[]; extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len); #define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x" -#define MAC_BUF_SIZE 18 -#define DECLARE_MAC_BUF(var) char var[MAC_BUF_SIZE] #endif -- cgit v1.2.3 From 07f29bc5bbae4e53e982ab956fed7207990a7786 Mon Sep 17 00:00:00 2001 From: Damian Lukowski Date: Mon, 7 Dec 2009 06:06:15 +0000 Subject: tcp: Stalling connections: Fix timeout calculation routine MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch fixes a problem in the TCP connection timeout calculation. Currently, timeout decisions are made on the basis of the current tcp_time_stamp and retrans_stamp, which is usually set at the first retransmission. However, if the retransmission fails in tcp_retransmit_skb(), retrans_stamp is not updated and remains zero. This leads to wrong decisions in retransmits_timed_out() if tcp_time_stamp is larger than the specified timeout, which is very likely. In this case, the TCP connection dies after the first attempted (and unsuccessful) retransmission. With this patch, tcp_skb_cb->when is used instead, when retrans_stamp is not available. This bug has been introduced together with retransmits_timed_out() in 2.6.32, as the number of retransmissions has been used for timeout decisions before. The corresponding commit was 6fa12c85031485dff38ce550c24f10da23b0adaa (Revert Backoff [v3]: Calculate TCP's connection close threshold as a time value.). Thanks to Ilpo Järvinen for code suggestions and Frederic Leroy for testing. Reported-by: Frederic Leroy Signed-off-by: Damian Lukowski Acked-by: Ilpo Järvinen Signed-off-by: David S. 
Miller --- include/net/tcp.h | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/net/tcp.h b/include/net/tcp.h index e2d2ca2509be..e54bd85d9d40 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1265,14 +1265,20 @@ static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_bu * TCP connection after "boundary" unsucessful, exponentially backed-off * retransmissions with an initial RTO of TCP_RTO_MIN. */ -static inline bool retransmits_timed_out(const struct sock *sk, +static inline bool retransmits_timed_out(struct sock *sk, unsigned int boundary) { unsigned int timeout, linear_backoff_thresh; + unsigned int start_ts; if (!inet_csk(sk)->icsk_retransmits) return false; + if (unlikely(!tcp_sk(sk)->retrans_stamp)) + start_ts = TCP_SKB_CB(tcp_write_queue_head(sk))->when; + else + start_ts = tcp_sk(sk)->retrans_stamp; + linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN); if (boundary <= linear_backoff_thresh) @@ -1281,7 +1287,7 @@ static inline bool retransmits_timed_out(const struct sock *sk, timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN + (boundary - linear_backoff_thresh) * TCP_RTO_MAX; - return (tcp_time_stamp - tcp_sk(sk)->retrans_stamp) >= timeout; + return (tcp_time_stamp - start_ts) >= timeout; } static inline struct sk_buff *tcp_send_head(struct sock *sk) -- cgit v1.2.3 From 2f7de5710a4d394920405febc2a9937c69e16dda Mon Sep 17 00:00:00 2001 From: Damian Lukowski Date: Mon, 7 Dec 2009 06:06:16 +0000 Subject: tcp: Stalling connections: Move timeout calculation routine MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch moves retransmits_timed_out() from include/net/tcp.h to tcp_timer.c, where it is used. Reported-by: Frederic Leroy Signed-off-by: Damian Lukowski Acked-by: Ilpo Järvinen Signed-off-by: David S. Miller --- include/net/tcp.h | 28 ---------------------------- net/ipv4/tcp_timer.c | 29 +++++++++++++++++++++++++++++ 2 files changed, 29 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/include/net/tcp.h b/include/net/tcp.h index e54bd85d9d40..0248c181a92c 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1261,34 +1261,6 @@ static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_bu #define tcp_for_write_queue_from_safe(skb, tmp, sk) \ skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp) -/* This function calculates a "timeout" which is equivalent to the timeout of a - * TCP connection after "boundary" unsucessful, exponentially backed-off - * retransmissions with an initial RTO of TCP_RTO_MIN. 
- */ -static inline bool retransmits_timed_out(struct sock *sk, - unsigned int boundary) -{ - unsigned int timeout, linear_backoff_thresh; - unsigned int start_ts; - - if (!inet_csk(sk)->icsk_retransmits) - return false; - - if (unlikely(!tcp_sk(sk)->retrans_stamp)) - start_ts = TCP_SKB_CB(tcp_write_queue_head(sk))->when; - else - start_ts = tcp_sk(sk)->retrans_stamp; - - linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN); - - if (boundary <= linear_backoff_thresh) - timeout = ((2 << boundary) - 1) * TCP_RTO_MIN; - else - timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN + - (boundary - linear_backoff_thresh) * TCP_RTO_MAX; - - return (tcp_time_stamp - start_ts) >= timeout; -} static inline struct sk_buff *tcp_send_head(struct sock *sk) { diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 8353a538cd4c..8816a20c2597 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -132,6 +132,35 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk) } } +/* This function calculates a "timeout" which is equivalent to the timeout of a + * TCP connection after "boundary" unsucessful, exponentially backed-off + * retransmissions with an initial RTO of TCP_RTO_MIN. + */ +static bool retransmits_timed_out(struct sock *sk, + unsigned int boundary) +{ + unsigned int timeout, linear_backoff_thresh; + unsigned int start_ts; + + if (!inet_csk(sk)->icsk_retransmits) + return false; + + if (unlikely(!tcp_sk(sk)->retrans_stamp)) + start_ts = TCP_SKB_CB(tcp_write_queue_head(sk))->when; + else + start_ts = tcp_sk(sk)->retrans_stamp; + + linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN); + + if (boundary <= linear_backoff_thresh) + timeout = ((2 << boundary) - 1) * TCP_RTO_MIN; + else + timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN + + (boundary - linear_backoff_thresh) * TCP_RTO_MAX; + + return (tcp_time_stamp - start_ts) >= timeout; +} + /* A write timeout has occurred. Process the after effects. */ static int tcp_write_timeout(struct sock *sk) { -- cgit v1.2.3 From 12633e803a2a556f6469e0933d08233d0844a2d9 Mon Sep 17 00:00:00 2001 From: Nathan Fontenot Date: Wed, 25 Nov 2009 17:23:25 +0000 Subject: sysfs/cpu: Add probe/release files Version 3 of this patch is updated with documentation added to Documentation/ABI. There are no changes to any of the C code from v2 of the patch. In order to support kernel DLPAR of CPU resources we need to provide an interface to add (probe) and remove (release) the resource from the system. This patch Creates new generic probe and release sysfs files to facilitate cpu probe/release. The probe/release interface provides for allowing each arch to supply their own routines for implementing the backend of adding and removing cpus to/from the system. This also creates the powerpc specific stubs to handle the arch callouts from writes to the sysfs files. The creation and use of these files is regulated by the CONFIG_ARCH_CPU_PROBE_RELEASE option so that only architectures that need the capability will have the files created. 
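As a rough illustration of the ABI added here, a minimal user-space sketch (not part of the patch) might look like the following. The token value is hypothetical; as the ABI entry notes, the accepted format is architecture specific, e.g. a drc-index on pseries.

/*
 * Hedged sketch: write an architecture-specific identifier to the new
 * /sys/devices/system/cpu/probe file. The kernel side routes the buffer
 * to arch_cpu_probe(); the "0x10000000" token below is made up.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *token = "0x10000000";	/* hypothetical identifier */
	int fd = open("/sys/devices/system/cpu/probe", O_WRONLY);

	if (fd < 0) {
		perror("open /sys/devices/system/cpu/probe");
		return 1;
	}
	if (write(fd, token, strlen(token)) < 0)
		perror("write");	/* arch_cpu_probe() rejected the request */
	close(fd);
	return 0;
}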
Signed-off-by: Nathan Fontenot Signed-off-by: Benjamin Herrenschmidt --- Documentation/ABI/testing/sysfs-devices-system-cpu | 15 ++++++++++ arch/powerpc/Kconfig | 4 +++ arch/powerpc/include/asm/machdep.h | 5 ++++ arch/powerpc/kernel/sysfs.c | 19 +++++++++++++ drivers/base/cpu.c | 32 ++++++++++++++++++++++ include/linux/cpu.h | 2 ++ 6 files changed, 77 insertions(+) (limited to 'include') diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index a703b9e9aeb9..d868a11c94a5 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -62,6 +62,21 @@ Description: CPU topology files that describe kernel limits related to See Documentation/cputopology.txt for more information. +What: /sys/devices/system/cpu/probe + /sys/devices/system/cpu/release +Date: November 2009 +Contact: Linux kernel mailing list +Description: Dynamic addition and removal of CPU's. This is not hotplug + removal, this is meant complete removal/addition of the CPU + from the system. + + probe: writes to this file will dynamically add a CPU to the + system. Information written to the file to add CPU's is + architecture specific. + + release: writes to this file dynamically remove a CPU from + the system. Information writtento the file to remove CPU's + is architecture specific. What: /sys/devices/system/cpu/cpu#/node Date: October 2009 diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 5dbd375a3f2a..0df57466e783 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -320,6 +320,10 @@ config HOTPLUG_CPU Say N if you are unsure. +config ARCH_CPU_PROBE_RELEASE + def_bool y + depends on HOTPLUG_CPU + config ARCH_ENABLE_MEMORY_HOTPLUG def_bool y diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index 9efa2be78331..9f0fc9e6ce0d 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -266,6 +266,11 @@ struct machdep_calls { void (*suspend_disable_irqs)(void); void (*suspend_enable_irqs)(void); #endif + +#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE + ssize_t (*cpu_probe)(const char *, size_t); + ssize_t (*cpu_release)(const char *, size_t); +#endif }; extern void e500_idle(void); diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index 956ab33fd73f..e235e52dc4fe 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -461,6 +461,25 @@ static void unregister_cpu_online(unsigned int cpu) cacheinfo_cpu_offline(cpu); } + +#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE +ssize_t arch_cpu_probe(const char *buf, size_t count) +{ + if (ppc_md.cpu_probe) + return ppc_md.cpu_probe(buf, count); + + return -EINVAL; +} + +ssize_t arch_cpu_release(const char *buf, size_t count) +{ + if (ppc_md.cpu_release) + return ppc_md.cpu_release(buf, count); + + return -EINVAL; +} +#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */ + #endif /* CONFIG_HOTPLUG_CPU */ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self, diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index e62a4ccea54d..7c03af7b84a9 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -72,6 +72,38 @@ void unregister_cpu(struct cpu *cpu) per_cpu(cpu_sys_devices, logical_cpu) = NULL; return; } + +#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE +static ssize_t cpu_probe_store(struct class *class, const char *buf, + size_t count) +{ + return arch_cpu_probe(buf, count); +} + +static ssize_t cpu_release_store(struct class *class, const char *buf, + size_t 
count) +{ + return arch_cpu_release(buf, count); +} + +static CLASS_ATTR(probe, S_IWUSR, NULL, cpu_probe_store); +static CLASS_ATTR(release, S_IWUSR, NULL, cpu_release_store); + +int __init cpu_probe_release_init(void) +{ + int rc; + + rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj, + &class_attr_probe.attr); + if (!rc) + rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj, + &class_attr_release.attr); + + return rc; +} +device_initcall(cpu_probe_release_init); +#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */ + #else /* ... !CONFIG_HOTPLUG_CPU */ static inline void register_cpu_control(struct cpu *cpu) { diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 47536197ffdd..c972f7ccb7d3 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -43,6 +43,8 @@ extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls); #ifdef CONFIG_HOTPLUG_CPU extern void unregister_cpu(struct cpu *cpu); +extern ssize_t arch_cpu_probe(const char *, size_t); +extern ssize_t arch_cpu_release(const char *, size_t); #endif struct notifier_block; -- cgit v1.2.3 From 51badebdcf394cc5fd574a524b55b3f6085e5e9c Mon Sep 17 00:00:00 2001 From: Gautham R Shenoy Date: Thu, 26 Nov 2009 09:59:05 +0000 Subject: powerpc/pseries: Serialize cpu hotplug operations during deactivate Vs deallocate Currently the cpu-allocation/deallocation process comprises of two steps: - Set the indicators and to update the device tree with DLPAR node information. - Online/offline the allocated/deallocated CPU. This is achieved by writing to the sysfs tunables "probe" during allocation and "release" during deallocation. At the sametime, the userspace can independently online/offline the CPUs of the system using the sysfs tunable "online". It is quite possible that when a userspace tool offlines a CPU for the purpose of deallocation and is in the process of updating the device tree, some other userspace tool could bring the CPU back online by writing to the "online" sysfs tunable thereby causing the deallocate process to fail. The solution to this is to serialize writes to the "probe/release" sysfs tunable with the writes to the "online" sysfs tunable. 
This patch employs a mutex to provide this serialization, which is a no-op on all architectures except PPC_PSERIES Signed-off-by: Gautham R Shenoy Acked-by: Vaidyanathan Srinivasan Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/platforms/pseries/dlpar.c | 45 ++++++++++++++++++++++++++-------- drivers/base/cpu.c | 2 ++ include/linux/cpu.h | 13 ++++++++++ 3 files changed, 50 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index 642e1b2e5c42..fd2f0afeb4de 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -436,6 +436,18 @@ int dlpar_release_drc(u32 drc_index) #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE +static DEFINE_MUTEX(pseries_cpu_hotplug_mutex); + +void cpu_hotplug_driver_lock() +{ + mutex_lock(&pseries_cpu_hotplug_mutex); +} + +void cpu_hotplug_driver_unlock() +{ + mutex_unlock(&pseries_cpu_hotplug_mutex); +} + static ssize_t dlpar_cpu_probe(const char *buf, size_t count) { struct device_node *dn; @@ -443,13 +455,18 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count) char *cpu_name; int rc; + cpu_hotplug_driver_lock(); rc = strict_strtoul(buf, 0, &drc_index); - if (rc) - return -EINVAL; + if (rc) { + rc = -EINVAL; + goto out; + } dn = dlpar_configure_connector(drc_index); - if (!dn) - return -EINVAL; + if (!dn) { + rc = -EINVAL; + goto out; + } /* configure-connector reports cpus as living in the base * directory of the device tree. CPUs actually live in the @@ -459,7 +476,8 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count) GFP_KERNEL); if (!cpu_name) { dlpar_free_cc_nodes(dn); - return -ENOMEM; + rc = -ENOMEM; + goto out; } sprintf(cpu_name, "/cpus%s", dn->full_name); @@ -469,7 +487,8 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count) rc = dlpar_acquire_drc(drc_index); if (rc) { dlpar_free_cc_nodes(dn); - return -EINVAL; + rc = -EINVAL; + goto out; } rc = dlpar_attach_node(dn); @@ -479,6 +498,8 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count) } rc = online_node_cpus(dn); +out: + cpu_hotplug_driver_unlock(); return rc ? rc : count; } @@ -499,26 +520,30 @@ static ssize_t dlpar_cpu_release(const char *buf, size_t count) return -EINVAL; } + cpu_hotplug_driver_lock(); rc = offline_node_cpus(dn); if (rc) { of_node_put(dn); - return -EINVAL; + rc = -EINVAL; + goto out; } rc = dlpar_release_drc(*drc_index); if (rc) { of_node_put(dn); - return -EINVAL; + goto out; } rc = dlpar_detach_node(dn); if (rc) { dlpar_acquire_drc(*drc_index); - return rc; + goto out; } of_node_put(dn); - return count; +out: + cpu_hotplug_driver_unlock(); + return rc ? 
rc : count; } static int __init pseries_dlpar_init(void) diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 7c03af7b84a9..27fd775375b0 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -35,6 +35,7 @@ static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribut struct cpu *cpu = container_of(dev, struct cpu, sysdev); ssize_t ret; + cpu_hotplug_driver_lock(); switch (buf[0]) { case '0': ret = cpu_down(cpu->sysdev.id); @@ -49,6 +50,7 @@ static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribut default: ret = -EINVAL; } + cpu_hotplug_driver_unlock(); if (ret >= 0) ret = count; diff --git a/include/linux/cpu.h b/include/linux/cpu.h index c972f7ccb7d3..e287863ac053 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -117,6 +117,19 @@ extern void put_online_cpus(void); #define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb) int cpu_down(unsigned int cpu); +#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE +extern void cpu_hotplug_driver_lock(void); +extern void cpu_hotplug_driver_unlock(void); +#else +static inline void cpu_hotplug_driver_lock(void) +{ +} + +static inline void cpu_hotplug_driver_unlock(void) +{ +} +#endif + #else /* CONFIG_HOTPLUG_CPU */ #define get_online_cpus() do { } while (0) -- cgit v1.2.3 From 44234adcdce38f83c56e05f808ce656175b4beeb Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 9 Dec 2009 09:25:48 +0100 Subject: hw-breakpoints: Modify breakpoints without unregistering them Currently, when ptrace needs to modify a breakpoint, like disabling it, changing its address, type or len, it calls modify_user_hw_breakpoint(). This latter will perform the heavy and racy task of unregistering the old breakpoint and registering a new one. This is racy as someone else might steal the reserved breakpoint slot under us, which is undesired as the breakpoint is only supposed to be modified, sometimes in the middle of a debugging workflow. We don't want our slot to be stolen in the middle. So instead of unregistering/registering the breakpoint, just disable it while we modify its breakpoint fields and re-enable it after if necessary. 
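For callers such as ptrace, the visible change is that modify_user_hw_breakpoint() now updates the breakpoint in place and returns 0 or a negative error instead of a new perf_event pointer. A hypothetical caller (demo_move_breakpoint is an invented name, shown only as a sketch) keeps its existing pointer:

#include <linux/hw_breakpoint.h>

/*
 * Sketch: move an existing breakpoint to a new address. There is no
 * unregister/re-register cycle, so the reserved slot cannot be stolen
 * while the breakpoint is being modified.
 */
static int demo_move_breakpoint(struct perf_event *bp, unsigned long new_addr)
{
	struct perf_event_attr attr = bp->attr;	/* start from current settings */

	attr.bp_addr = new_addr;			/* only the address changes */
	return modify_user_hw_breakpoint(bp, &attr);	/* 0 or -errno */
}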
Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Prasad LKML-Reference: <1260347148-5519-1-git-send-regression-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/ptrace.c | 57 ++++++++++++++++++++----------------------- include/linux/hw_breakpoint.h | 4 +-- include/linux/perf_event.h | 5 +++- kernel/hw_breakpoint.c | 42 +++++++++++++++++++++++-------- kernel/perf_event.c | 4 +-- 5 files changed, 66 insertions(+), 46 deletions(-) (limited to 'include') diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index b361d28061d0..7079ddaf0731 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -595,7 +595,7 @@ static unsigned long ptrace_get_dr7(struct perf_event *bp[]) return dr7; } -static struct perf_event * +static int ptrace_modify_breakpoint(struct perf_event *bp, int len, int type, struct task_struct *tsk, int disabled) { @@ -609,11 +609,11 @@ ptrace_modify_breakpoint(struct perf_event *bp, int len, int type, * written the address register first */ if (!bp) - return ERR_PTR(-EINVAL); + return -EINVAL; err = arch_bp_generic_fields(len, type, &gen_len, &gen_type); if (err) - return ERR_PTR(err); + return err; attr = bp->attr; attr.bp_len = gen_len; @@ -658,28 +658,17 @@ restore: if (!second_pass) continue; - thread->ptrace_bps[i] = NULL; - bp = ptrace_modify_breakpoint(bp, len, type, + rc = ptrace_modify_breakpoint(bp, len, type, tsk, 1); - if (IS_ERR(bp)) { - rc = PTR_ERR(bp); - thread->ptrace_bps[i] = NULL; + if (rc) break; - } - thread->ptrace_bps[i] = bp; } continue; } - bp = ptrace_modify_breakpoint(bp, len, type, tsk, 0); - - /* Incorrect bp, or we have a bug in bp API */ - if (IS_ERR(bp)) { - rc = PTR_ERR(bp); - thread->ptrace_bps[i] = NULL; + rc = ptrace_modify_breakpoint(bp, len, type, tsk, 0); + if (rc) break; - } - thread->ptrace_bps[i] = bp; } /* * Make a second pass to free the remaining unused breakpoints @@ -737,26 +726,32 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, attr.disabled = 1; bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk); + + /* + * CHECKME: the previous code returned -EIO if the addr wasn't + * a valid task virtual addr. The new one will return -EINVAL in + * this case. + * -EINVAL may be what we want for in-kernel breakpoints users, + * but -EIO looks better for ptrace, since we refuse a register + * writing for the user. And anyway this is the previous + * behaviour. + */ + if (IS_ERR(bp)) + return PTR_ERR(bp); + + t->ptrace_bps[nr] = bp; } else { + int err; + bp = t->ptrace_bps[nr]; - t->ptrace_bps[nr] = NULL; attr = bp->attr; attr.bp_addr = addr; - bp = modify_user_hw_breakpoint(bp, &attr); + err = modify_user_hw_breakpoint(bp, &attr); + if (err) + return err; } - /* - * CHECKME: the previous code returned -EIO if the addr wasn't a - * valid task virtual addr. The new one will return -EINVAL in this - * case. - * -EINVAL may be what we want for in-kernel breakpoints users, but - * -EIO looks better for ptrace, since we refuse a register writing - * for the user. And anyway this is the previous behaviour. 
- */ - if (IS_ERR(bp)) - return PTR_ERR(bp); - t->ptrace_bps[nr] = bp; return 0; } diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h index 42da1ce19ec0..69f07a9f1277 100644 --- a/include/linux/hw_breakpoint.h +++ b/include/linux/hw_breakpoint.h @@ -55,7 +55,7 @@ register_user_hw_breakpoint(struct perf_event_attr *attr, struct task_struct *tsk); /* FIXME: only change from the attr, and don't unregister */ -extern struct perf_event * +extern int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr); /* @@ -91,7 +91,7 @@ static inline struct perf_event * register_user_hw_breakpoint(struct perf_event_attr *attr, perf_overflow_handler_t triggered, struct task_struct *tsk) { return NULL; } -static inline struct perf_event * +static inline int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr) { return NULL; } static inline struct perf_event * diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index bf3329413e18..64a53f74c9a9 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -872,6 +872,8 @@ extern void perf_output_copy(struct perf_output_handle *handle, const void *buf, unsigned int len); extern int perf_swevent_get_recursion_context(void); extern void perf_swevent_put_recursion_context(int rctx); +extern void perf_event_enable(struct perf_event *event); +extern void perf_event_disable(struct perf_event *event); #else static inline void perf_event_task_sched_in(struct task_struct *task, int cpu) { } @@ -902,7 +904,8 @@ static inline void perf_event_fork(struct task_struct *tsk) { } static inline void perf_event_init(void) { } static inline int perf_swevent_get_recursion_context(void) { return -1; } static inline void perf_swevent_put_recursion_context(int rctx) { } - +static inline void perf_event_enable(struct perf_event *event) { } +static inline void perf_event_disable(struct perf_event *event) { } #endif #define perf_output_put(handle, x) \ diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c index 03a0773ac2b2..366eedf949c0 100644 --- a/kernel/hw_breakpoint.c +++ b/kernel/hw_breakpoint.c @@ -320,18 +320,40 @@ EXPORT_SYMBOL_GPL(register_user_hw_breakpoint); * @triggered: callback to trigger when we hit the breakpoint * @tsk: pointer to 'task_struct' of the process to which the address belongs */ -struct perf_event * -modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr) +int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr) { - /* - * FIXME: do it without unregistering - * - We don't want to lose our slot - * - If the new bp is incorrect, don't lose the older one - */ - unregister_hw_breakpoint(bp); + u64 old_addr = bp->attr.bp_addr; + int old_type = bp->attr.bp_type; + int old_len = bp->attr.bp_len; + int err = 0; + + perf_event_disable(bp); + + bp->attr.bp_addr = attr->bp_addr; + bp->attr.bp_type = attr->bp_type; + bp->attr.bp_len = attr->bp_len; + + if (attr->disabled) + goto end; - return perf_event_create_kernel_counter(attr, -1, bp->ctx->task->pid, - bp->overflow_handler); + err = arch_validate_hwbkpt_settings(bp, bp->ctx->task); + if (!err) + perf_event_enable(bp); + + if (err) { + bp->attr.bp_addr = old_addr; + bp->attr.bp_type = old_type; + bp->attr.bp_len = old_len; + if (!bp->attr.disabled) + perf_event_enable(bp); + + return err; + } + +end: + bp->attr.disabled = attr->disabled; + + return 0; } EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint); diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 
fd43ff4ac860..3b0cf86eee84 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -567,7 +567,7 @@ static void __perf_event_disable(void *info) * is the current context on this CPU and preemption is disabled, * hence we can't get into perf_event_task_sched_out for this context. */ -static void perf_event_disable(struct perf_event *event) +void perf_event_disable(struct perf_event *event) { struct perf_event_context *ctx = event->ctx; struct task_struct *task = ctx->task; @@ -971,7 +971,7 @@ static void __perf_event_enable(void *info) * perf_event_for_each_child or perf_event_for_each as described * for perf_event_disable. */ -static void perf_event_enable(struct perf_event *event) +void perf_event_enable(struct perf_event *event) { struct perf_event_context *ctx = event->ctx; struct task_struct *task = ctx->task; -- cgit v1.2.3 From dba091b9e3522b9d32fc9975e48d3b69633b45f0 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 9 Dec 2009 09:32:03 +0100 Subject: sched: Protect sched_rr_get_param() access to task->sched_class sched_rr_get_param calls task->sched_class->get_rr_interval(task) without protection against a concurrent sched_setscheduler() call which modifies task->sched_class. Serialize the access with task_rq_lock(task) and hand the rq pointer into get_rr_interval() as it's needed at least in the sched_fair implementation. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/sched.h | 3 ++- kernel/sched.c | 6 +++++- kernel/sched_fair.c | 6 +----- kernel/sched_idletask.c | 2 +- kernel/sched_rt.c | 2 +- 5 files changed, 10 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index 89115ec7d43f..9b2402725088 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1111,7 +1111,8 @@ struct sched_class { void (*prio_changed) (struct rq *this_rq, struct task_struct *task, int oldprio, int running); - unsigned int (*get_rr_interval) (struct task_struct *task); + unsigned int (*get_rr_interval) (struct rq *rq, + struct task_struct *task); #ifdef CONFIG_FAIR_GROUP_SCHED void (*moved_group) (struct task_struct *p); diff --git a/kernel/sched.c b/kernel/sched.c index c4635f74540c..68db5a2e6545 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -6887,6 +6887,8 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, { struct task_struct *p; unsigned int time_slice; + unsigned long flags; + struct rq *rq; int retval; struct timespec t; @@ -6903,7 +6905,9 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, if (retval) goto out_unlock; - time_slice = p->sched_class->get_rr_interval(p); + rq = task_rq_lock(p, &flags); + time_slice = p->sched_class->get_rr_interval(rq, p); + task_rq_unlock(rq, &flags); read_unlock(&tasklist_lock); jiffies_to_timespec(time_slice, &t); diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index f61837ad336d..613c1c749677 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -2014,21 +2014,17 @@ static void moved_group_fair(struct task_struct *p) } #endif -unsigned int get_rr_interval_fair(struct task_struct *task) +unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task) { struct sched_entity *se = &task->se; - unsigned long flags; - struct rq *rq; unsigned int rr_interval = 0; /* * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise * idle runqueue: */ - rq = task_rq_lock(task, &flags); if (rq->cfs.load.weight) rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se)); - task_rq_unlock(rq, 
&flags); return rr_interval; } diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c index b133a28fcde3..33d5384a73a8 100644 --- a/kernel/sched_idletask.c +++ b/kernel/sched_idletask.c @@ -97,7 +97,7 @@ static void prio_changed_idle(struct rq *rq, struct task_struct *p, check_preempt_curr(rq, p, 0); } -unsigned int get_rr_interval_idle(struct task_struct *task) +unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task) { return 0; } diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 5c5fef378415..aecbd9c6b20c 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -1721,7 +1721,7 @@ static void set_curr_task_rt(struct rq *rq) dequeue_pushable_task(rq, p); } -unsigned int get_rr_interval_rt(struct task_struct *task) +unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task) { /* * Time slice is 0 for SCHED_FIFO tasks -- cgit v1.2.3 From 6b314d0e11924c803bf8cd944e87fd58cdb5088c Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 2 Dec 2009 18:58:05 +0100 Subject: sched: Remove sysctl.sched_features Since we've had a much saner debugfs interface to this, remove the sysctl one. Signed-off-by: Peter Zijlstra LKML-Reference: [ v2: build fix ] Signed-off-by: Ingo Molnar --- include/linux/sched.h | 1 - kernel/sysctl.c | 8 -------- 2 files changed, 9 deletions(-) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index 9b2402725088..ca72ed42ac34 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1905,7 +1905,6 @@ extern unsigned int sysctl_sched_shares_ratelimit; extern unsigned int sysctl_sched_shares_thresh; extern unsigned int sysctl_sched_child_runs_first; #ifdef CONFIG_SCHED_DEBUG -extern unsigned int sysctl_sched_features; extern unsigned int sysctl_sched_migration_cost; extern unsigned int sysctl_sched_nr_migrate; extern unsigned int sysctl_sched_time_avg; diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 4dbf93a52ee9..e5cc53514caa 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -314,14 +314,6 @@ static struct ctl_table kern_table[] = { .strategy = &sysctl_intvec, .extra1 = &zero, }, - { - .ctl_name = CTL_UNNUMBERED, - .procname = "sched_features", - .data = &sysctl_sched_features, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, { .ctl_name = CTL_UNNUMBERED, .procname = "sched_migration_cost", -- cgit v1.2.3 From cd29fe6f2637cc2ccbda5ac65f5332d6bf5fa3c6 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 27 Nov 2009 17:32:46 +0100 Subject: sched: Sanitize fork() handling Currently we try to do task placement in wake_up_new_task() after we do the load-balance pass in sched_fork(). This yields complicated semantics in that we have to deal with tasks on different RQs and the set_task_cpu() calls in copy_process() and sched_fork() Rename ->task_new() to ->task_fork() and call it from sched_fork() before the balancing, this gives the policy a clear point to place the task. 
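In practice a scheduling class now wires up ->task_fork() instead of ->task_new(). A minimal sketch with invented names (demo_task_fork, demo_sched_class) of what that looks like after this patch:

/*
 * Sketch: ->task_fork() is called from sched_fork(), in the parent's
 * context and with the child not yet on the tasklist, so the class can
 * place the child (e.g. set its vruntime) before wake_up_new_task()
 * activates it. The old ->task_new(rq, p) ran after fork balancing.
 */
static void demo_task_fork(struct task_struct *p)
{
	/* initial placement of the child goes here */
}

static const struct sched_class demo_sched_class = {
	/* ... enqueue_task, dequeue_task and the other methods ... */
	.task_fork	= demo_task_fork,	/* replaces .task_new */
};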
Signed-off-by: Peter Zijlstra LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/sched.h | 2 +- kernel/sched.c | 47 ++++++++++++++++++----------------------------- kernel/sched_fair.c | 28 +++++++++++++++------------- 3 files changed, 34 insertions(+), 43 deletions(-) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index ca72ed42ac34..31d9dec78675 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1102,7 +1102,7 @@ struct sched_class { void (*set_curr_task) (struct rq *rq); void (*task_tick) (struct rq *rq, struct task_struct *p, int queued); - void (*task_new) (struct rq *rq, struct task_struct *p); + void (*task_fork) (struct task_struct *p); void (*switched_from) (struct rq *this_rq, struct task_struct *task, int running); diff --git a/kernel/sched.c b/kernel/sched.c index c92670f8e097..33c903573132 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -1811,6 +1811,20 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares) static void calc_load_account_active(struct rq *this_rq); +static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) +{ + set_task_rq(p, cpu); +#ifdef CONFIG_SMP + /* + * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be + * successfuly executed on another CPU. We must ensure that updates of + * per-task data have been completed by this moment. + */ + smp_wmb(); + task_thread_info(p)->cpu = cpu; +#endif +} + #include "sched_stats.h" #include "sched_idletask.c" #include "sched_fair.c" @@ -1967,20 +1981,6 @@ inline int task_curr(const struct task_struct *p) return cpu_curr(task_cpu(p)) == p; } -static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) -{ - set_task_rq(p, cpu); -#ifdef CONFIG_SMP - /* - * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be - * successfuly executed on another CPU. We must ensure that updates of - * per-task data have been completed by this moment. 
- */ - smp_wmb(); - task_thread_info(p)->cpu = cpu; -#endif -} - static inline void check_class_changed(struct rq *rq, struct task_struct *p, const struct sched_class *prev_class, int oldprio, int running) @@ -2552,7 +2552,6 @@ static void __sched_fork(struct task_struct *p) void sched_fork(struct task_struct *p, int clone_flags) { int cpu = get_cpu(); - unsigned long flags; __sched_fork(p); @@ -2586,13 +2585,13 @@ void sched_fork(struct task_struct *p, int clone_flags) if (!rt_prio(p->prio)) p->sched_class = &fair_sched_class; + if (p->sched_class->task_fork) + p->sched_class->task_fork(p); + #ifdef CONFIG_SMP cpu = select_task_rq(p, SD_BALANCE_FORK, 0); #endif - local_irq_save(flags); - update_rq_clock(cpu_rq(cpu)); set_task_cpu(p, cpu); - local_irq_restore(flags); #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) if (likely(sched_info_on())) @@ -2625,17 +2624,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags) rq = task_rq_lock(p, &flags); BUG_ON(p->state != TASK_RUNNING); update_rq_clock(rq); - - if (!p->sched_class->task_new || !current->se.on_rq) { - activate_task(rq, p, 0); - } else { - /* - * Let the scheduling class do new task startup - * management (if any): - */ - p->sched_class->task_new(rq, p); - inc_nr_running(rq); - } + activate_task(rq, p, 0); trace_sched_wakeup_new(rq, p, 1); check_preempt_curr(rq, p, WF_FORK); #ifdef CONFIG_SMP diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 613c1c749677..44ec80ccfa85 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -1922,28 +1922,30 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) } /* - * Share the fairness runtime between parent and child, thus the - * total amount of pressure for CPU stays equal - new tasks - * get a chance to run but frequent forkers are not allowed to - * monopolize the CPU. Note: the parent runqueue is locked, - * the child is not running yet. + * called on fork with the child task as argument from the parent's context + * - child not yet on the tasklist + * - preemption disabled */ -static void task_new_fair(struct rq *rq, struct task_struct *p) +static void task_fork_fair(struct task_struct *p) { - struct cfs_rq *cfs_rq = task_cfs_rq(p); + struct cfs_rq *cfs_rq = task_cfs_rq(current); struct sched_entity *se = &p->se, *curr = cfs_rq->curr; int this_cpu = smp_processor_id(); + struct rq *rq = this_rq(); + unsigned long flags; + + spin_lock_irqsave(&rq->lock, flags); - sched_info_queued(p); + if (unlikely(task_cpu(p) != this_cpu)) + __set_task_cpu(p, this_cpu); update_curr(cfs_rq); + if (curr) se->vruntime = curr->vruntime; place_entity(cfs_rq, se, 1); - /* 'curr' will be NULL if the child belongs to a different group */ - if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) && - curr && entity_before(curr, se)) { + if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) { /* * Upon rescheduling, sched_class::put_prev_task() will place * 'current' within the tree based on its new key value. 
@@ -1952,7 +1954,7 @@ static void task_new_fair(struct rq *rq, struct task_struct *p) resched_task(rq->curr); } - enqueue_task_fair(rq, p, 0); + spin_unlock_irqrestore(&rq->lock, flags); } /* @@ -2052,7 +2054,7 @@ static const struct sched_class fair_sched_class = { .set_curr_task = set_curr_task_fair, .task_tick = task_tick_fair, - .task_new = task_new_fair, + .task_fork = task_fork_fair, .prio_changed = prio_changed_fair, .switched_to = switched_to_fair, -- cgit v1.2.3 From 6cecd084d0fd27bb1e498e2829fd45846d806856 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 30 Nov 2009 13:00:37 +0100 Subject: sched: Discard some old bits WAKEUP_RUNNING was an experiment, not sure why that ever ended up being merged... Signed-off-by: Peter Zijlstra LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/sched.h | 2 -- kernel/sched.c | 17 +++++++---------- kernel/sched_debug.c | 1 - kernel/sched_fair.c | 3 --- kernel/sched_features.h | 5 ----- 5 files changed, 7 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index 31d9dec78675..4b1ebd3280c6 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1152,8 +1152,6 @@ struct sched_entity { u64 start_runtime; u64 avg_wakeup; - u64 avg_running; - #ifdef CONFIG_SCHEDSTATS u64 wait_start; u64 wait_max; diff --git a/kernel/sched.c b/kernel/sched.c index 33c903573132..0170735bdafc 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -2493,7 +2493,6 @@ static void __sched_fork(struct task_struct *p) p->se.avg_overlap = 0; p->se.start_runtime = 0; p->se.avg_wakeup = sysctl_sched_wakeup_granularity; - p->se.avg_running = 0; #ifdef CONFIG_SCHEDSTATS p->se.wait_start = 0; @@ -5379,13 +5378,14 @@ static inline void schedule_debug(struct task_struct *prev) #endif } -static void put_prev_task(struct rq *rq, struct task_struct *p) +static void put_prev_task(struct rq *rq, struct task_struct *prev) { - u64 runtime = p->se.sum_exec_runtime - p->se.prev_sum_exec_runtime; + if (prev->state == TASK_RUNNING) { + u64 runtime = prev->se.sum_exec_runtime; - update_avg(&p->se.avg_running, runtime); + runtime -= prev->se.prev_sum_exec_runtime; + runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost); - if (p->state == TASK_RUNNING) { /* * In order to avoid avg_overlap growing stale when we are * indeed overlapping and hence not getting put to sleep, grow @@ -5395,12 +5395,9 @@ static void put_prev_task(struct rq *rq, struct task_struct *p) * correlates to the amount of cache footprint a task can * build up. 
*/ - runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost); - update_avg(&p->se.avg_overlap, runtime); - } else { - update_avg(&p->se.avg_running, 0); + update_avg(&prev->se.avg_overlap, runtime); } - p->sched_class->put_prev_task(rq, p); + prev->sched_class->put_prev_task(rq, prev); } /* diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c index 6988cf08f705..5fda66615fee 100644 --- a/kernel/sched_debug.c +++ b/kernel/sched_debug.c @@ -399,7 +399,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m) PN(se.sum_exec_runtime); PN(se.avg_overlap); PN(se.avg_wakeup); - PN(se.avg_running); nr_switches = p->nvcsw + p->nivcsw; diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 76b5792c4198..e9f5daee12c7 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -1689,9 +1689,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ pse->avg_overlap < sysctl_sched_migration_cost) goto preempt; - if (sched_feat(WAKEUP_RUNNING) && pse->avg_running < se->avg_running) - goto preempt; - if (!sched_feat(WAKEUP_PREEMPT)) return; diff --git a/kernel/sched_features.h b/kernel/sched_features.h index 0d94083582c7..d5059fd761d9 100644 --- a/kernel/sched_features.h +++ b/kernel/sched_features.h @@ -53,11 +53,6 @@ SCHED_FEAT(WAKEUP_SYNC, 0) */ SCHED_FEAT(WAKEUP_OVERLAP, 0) -/* - * Wakeup preemption towards tasks that run short - */ -SCHED_FEAT(WAKEUP_RUNNING, 0) - /* * Use the SYNC wakeup hint, pipes and the likes use this to indicate * the remote end is likely to consume the data we just wrote, and -- cgit v1.2.3 From 1983a922a1bc843806b9a36cf3a370b242783140 Mon Sep 17 00:00:00 2001 From: Christian Ehrhardt Date: Mon, 30 Nov 2009 12:16:47 +0100 Subject: sched: Make tunable scaling style configurable As scaling now takes place on all kind of cpu add/remove events a user that configures values via proc should be able to configure if his set values are still rescaled or kept whatever happens. As the comments state that log2 was just a second guess that worked the interface is not just designed for on/off, but to choose a scaling type. Currently this allows none, log and linear, but more important it allwos us to keep the interface even if someone has an even better idea how to scale the values. 
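The effect of each mode on the sched_* tunables can be seen from the factor it produces; the plain user-space program below (an illustration, not kernel code) mirrors update_sysctl() from the patch, including the cap of the CPU count at 8:

#include <stdio.h>

static unsigned int ilog2_uint(unsigned int x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int online = 16;			/* pretend 16 CPUs are online */
	unsigned int cpus = online < 8 ? online : 8;	/* min(num_online_cpus(), 8U) */

	printf("SCHED_TUNABLESCALING_NONE:   factor %u\n", 1U);
	printf("SCHED_TUNABLESCALING_LOG:    factor %u (default)\n", 1 + ilog2_uint(cpus));
	printf("SCHED_TUNABLESCALING_LINEAR: factor %u\n", cpus);
	return 0;
}

The mode itself is selected at run time through the new kernel.sched_tunable_scaling sysctl (0 = none, 1 = logarithmic, 2 = linear), matching the enum order added to sched.h.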
Signed-off-by: Christian Ehrhardt Signed-off-by: Peter Zijlstra LKML-Reference: <1259579808-11357-3-git-send-email-ehrhardt@linux.vnet.ibm.com> Signed-off-by: Ingo Molnar --- include/linux/sched.h | 11 ++++++++++- kernel/sched.c | 15 ++++++++++++++- kernel/sched_debug.c | 10 ++++++++++ kernel/sched_fair.c | 13 +++++++++++++ kernel/sysctl.c | 14 ++++++++++++++ 5 files changed, 61 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index 4b1ebd3280c6..ee9f200d12d3 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1902,13 +1902,22 @@ extern unsigned int sysctl_sched_wakeup_granularity; extern unsigned int sysctl_sched_shares_ratelimit; extern unsigned int sysctl_sched_shares_thresh; extern unsigned int sysctl_sched_child_runs_first; + +enum sched_tunable_scaling { + SCHED_TUNABLESCALING_NONE, + SCHED_TUNABLESCALING_LOG, + SCHED_TUNABLESCALING_LINEAR, + SCHED_TUNABLESCALING_END, +}; +extern enum sched_tunable_scaling sysctl_sched_tunable_scaling; + #ifdef CONFIG_SCHED_DEBUG extern unsigned int sysctl_sched_migration_cost; extern unsigned int sysctl_sched_nr_migrate; extern unsigned int sysctl_sched_time_avg; extern unsigned int sysctl_timer_migration; -int sched_nr_latency_handler(struct ctl_table *table, int write, +int sched_proc_update_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos); #endif diff --git a/kernel/sched.c b/kernel/sched.c index b54ecf84b6be..116efed962c6 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -7033,7 +7033,20 @@ cpumask_var_t nohz_cpu_mask; static void update_sysctl(void) { unsigned int cpus = min(num_online_cpus(), 8U); - unsigned int factor = 1 + ilog2(cpus); + unsigned int factor; + + switch (sysctl_sched_tunable_scaling) { + case SCHED_TUNABLESCALING_NONE: + factor = 1; + break; + case SCHED_TUNABLESCALING_LINEAR: + factor = cpus; + break; + case SCHED_TUNABLESCALING_LOG: + default: + factor = 1 + ilog2(cpus); + break; + } #define SET_SYSCTL(name) \ (sysctl_##name = (factor) * normalized_sysctl_##name) diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c index 5fda66615fee..0fc5287fe80f 100644 --- a/kernel/sched_debug.c +++ b/kernel/sched_debug.c @@ -309,6 +309,12 @@ static void print_cpu(struct seq_file *m, int cpu) print_rq(m, rq, cpu); } +static const char *sched_tunable_scaling_names[] = { + "none", + "logaritmic", + "linear" +}; + static int sched_debug_show(struct seq_file *m, void *v) { u64 now = ktime_to_ns(ktime_get()); @@ -334,6 +340,10 @@ static int sched_debug_show(struct seq_file *m, void *v) #undef PN #undef P + SEQ_printf(m, " .%-40s: %d (%s)\n", "sysctl_sched_tunable_scaling", + sysctl_sched_tunable_scaling, + sched_tunable_scaling_names[sysctl_sched_tunable_scaling]); + for_each_online_cpu(cpu) print_cpu(m, cpu); diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 71b3458245e5..455106d318a8 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -21,6 +21,7 @@ */ #include +#include /* * Targeted preemption latency for CPU-bound tasks: @@ -37,6 +38,18 @@ unsigned int sysctl_sched_latency = 5000000ULL; unsigned int normalized_sysctl_sched_latency = 5000000ULL; +/* + * The initial- and re-scaling of tunables is configurable + * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)) + * + * Options are: + * SCHED_TUNABLESCALING_NONE - unscaled, always *1 + * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus) + * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus + */ +enum sched_tunable_scaling 
sysctl_sched_tunable_scaling + = SCHED_TUNABLESCALING_LOG; + /* * Minimal preemption granularity for CPU-bound tasks: * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds) diff --git a/kernel/sysctl.c b/kernel/sysctl.c index e5cc53514caa..d10406e5fdfe 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -251,6 +251,8 @@ static int min_sched_granularity_ns = 100000; /* 100 usecs */ static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */ static int min_wakeup_granularity_ns; /* 0 usecs */ static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */ +static int min_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE; +static int max_sched_tunable_scaling = SCHED_TUNABLESCALING_END-1; #endif static struct ctl_table kern_table[] = { @@ -304,6 +306,18 @@ static struct ctl_table kern_table[] = { .mode = 0644, .proc_handler = &proc_dointvec, }, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "sched_tunable_scaling", + .data = &sysctl_sched_tunable_scaling, + .maxlen = sizeof(enum sched_tunable_scaling), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .strategy = &sysctl_intvec, + .extra1 = &min_sched_tunable_scaling, + .extra2 = &max_sched_tunable_scaling, + }, + { .ctl_name = CTL_UNNUMBERED, .procname = "sched_shares_thresh", -- cgit v1.2.3 From 91773a00f8235e4b697217867529f73e298298df Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Mon, 3 Aug 2009 15:06:36 +0300 Subject: OMAP: OMAPFB: split omapfb.h Split arch/arm/plat-omap/include/mach/omapfb.h into two files: include/linux/omapfb.h - ioctls etc for userspace and some kernel stuff for board files drivers/video/omap/omapfb.h - for omapfb internal use This cleans up omapfb.h and also makes it easier for the upcoming new DSS driver to co-exist with the old driver. Signed-off-by: Tomi Valkeinen Acked-by: Tony Lindgren --- arch/arm/mach-omap1/board-nokia770.c | 2 +- arch/arm/mach-omap2/io.c | 2 +- arch/arm/plat-omap/fb.c | 2 +- arch/arm/plat-omap/include/plat/omapfb.h | 398 ------------------------------- drivers/video/omap/blizzard.c | 2 +- drivers/video/omap/dispc.c | 2 +- drivers/video/omap/hwa742.c | 3 +- drivers/video/omap/lcd_2430sdp.c | 3 +- drivers/video/omap/lcd_ams_delta.c | 3 +- drivers/video/omap/lcd_apollon.c | 3 +- drivers/video/omap/lcd_h3.c | 2 +- drivers/video/omap/lcd_h4.c | 2 +- drivers/video/omap/lcd_htcherald.c | 2 +- drivers/video/omap/lcd_inn1510.c | 2 +- drivers/video/omap/lcd_inn1610.c | 2 +- drivers/video/omap/lcd_ldp.c | 3 +- drivers/video/omap/lcd_mipid.c | 3 +- drivers/video/omap/lcd_omap2evm.c | 3 +- drivers/video/omap/lcd_omap3beagle.c | 4 +- drivers/video/omap/lcd_omap3evm.c | 3 +- drivers/video/omap/lcd_osk.c | 2 +- drivers/video/omap/lcd_overo.c | 3 +- drivers/video/omap/lcd_palmte.c | 2 +- drivers/video/omap/lcd_palmtt.c | 2 +- drivers/video/omap/lcd_palmz71.c | 2 +- drivers/video/omap/lcdc.c | 3 +- drivers/video/omap/omapfb.h | 227 ++++++++++++++++++ drivers/video/omap/omapfb_main.c | 2 +- drivers/video/omap/rfbi.c | 3 +- drivers/video/omap/sossi.c | 3 +- include/linux/omapfb.h | 197 +++++++++++++++ 31 files changed, 465 insertions(+), 427 deletions(-) delete mode 100644 arch/arm/plat-omap/include/plat/omapfb.h create mode 100644 drivers/video/omap/omapfb.h create mode 100644 include/linux/omapfb.h (limited to 'include') diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c index 5a275bab2dfe..71e1a3fad0ea 100644 --- a/arch/arm/mach-omap1/board-nokia770.c +++ b/arch/arm/mach-omap1/board-nokia770.c @@ -14,6 +14,7 @@ #include #include #include 
+#include #include #include @@ -32,7 +33,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c index 59d28b2fd8c5..9f22c201ef9d 100644 --- a/arch/arm/mach-omap2/io.c +++ b/arch/arm/mach-omap2/io.c @@ -22,13 +22,13 @@ #include #include #include +#include #include #include #include -#include #include #include #include diff --git a/arch/arm/plat-omap/fb.c b/arch/arm/plat-omap/fb.c index 78a4ce538dbd..18cf1d4b7931 100644 --- a/arch/arm/plat-omap/fb.c +++ b/arch/arm/plat-omap/fb.c @@ -28,13 +28,13 @@ #include #include #include +#include #include #include #include #include -#include #if defined(CONFIG_FB_OMAP) || defined(CONFIG_FB_OMAP_MODULE) diff --git a/arch/arm/plat-omap/include/plat/omapfb.h b/arch/arm/plat-omap/include/plat/omapfb.h deleted file mode 100644 index bfef7ab95f17..000000000000 --- a/arch/arm/plat-omap/include/plat/omapfb.h +++ /dev/null @@ -1,398 +0,0 @@ -/* - * File: arch/arm/plat-omap/include/mach/omapfb.h - * - * Framebuffer driver for TI OMAP boards - * - * Copyright (C) 2004 Nokia Corporation - * Author: Imre Deak - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - */ - -#ifndef __OMAPFB_H -#define __OMAPFB_H - -#include -#include - -/* IOCTL commands. 
*/ - -#define OMAP_IOW(num, dtype) _IOW('O', num, dtype) -#define OMAP_IOR(num, dtype) _IOR('O', num, dtype) -#define OMAP_IOWR(num, dtype) _IOWR('O', num, dtype) -#define OMAP_IO(num) _IO('O', num) - -#define OMAPFB_MIRROR OMAP_IOW(31, int) -#define OMAPFB_SYNC_GFX OMAP_IO(37) -#define OMAPFB_VSYNC OMAP_IO(38) -#define OMAPFB_SET_UPDATE_MODE OMAP_IOW(40, int) -#define OMAPFB_GET_CAPS OMAP_IOR(42, struct omapfb_caps) -#define OMAPFB_GET_UPDATE_MODE OMAP_IOW(43, int) -#define OMAPFB_LCD_TEST OMAP_IOW(45, int) -#define OMAPFB_CTRL_TEST OMAP_IOW(46, int) -#define OMAPFB_UPDATE_WINDOW_OLD OMAP_IOW(47, struct omapfb_update_window_old) -#define OMAPFB_SET_COLOR_KEY OMAP_IOW(50, struct omapfb_color_key) -#define OMAPFB_GET_COLOR_KEY OMAP_IOW(51, struct omapfb_color_key) -#define OMAPFB_SETUP_PLANE OMAP_IOW(52, struct omapfb_plane_info) -#define OMAPFB_QUERY_PLANE OMAP_IOW(53, struct omapfb_plane_info) -#define OMAPFB_UPDATE_WINDOW OMAP_IOW(54, struct omapfb_update_window) -#define OMAPFB_SETUP_MEM OMAP_IOW(55, struct omapfb_mem_info) -#define OMAPFB_QUERY_MEM OMAP_IOW(56, struct omapfb_mem_info) - -#define OMAPFB_CAPS_GENERIC_MASK 0x00000fff -#define OMAPFB_CAPS_LCDC_MASK 0x00fff000 -#define OMAPFB_CAPS_PANEL_MASK 0xff000000 - -#define OMAPFB_CAPS_MANUAL_UPDATE 0x00001000 -#define OMAPFB_CAPS_TEARSYNC 0x00002000 -#define OMAPFB_CAPS_PLANE_RELOCATE_MEM 0x00004000 -#define OMAPFB_CAPS_PLANE_SCALE 0x00008000 -#define OMAPFB_CAPS_WINDOW_PIXEL_DOUBLE 0x00010000 -#define OMAPFB_CAPS_WINDOW_SCALE 0x00020000 -#define OMAPFB_CAPS_WINDOW_OVERLAY 0x00040000 -#define OMAPFB_CAPS_WINDOW_ROTATE 0x00080000 -#define OMAPFB_CAPS_SET_BACKLIGHT 0x01000000 - -/* Values from DSP must map to lower 16-bits */ -#define OMAPFB_FORMAT_MASK 0x00ff -#define OMAPFB_FORMAT_FLAG_DOUBLE 0x0100 -#define OMAPFB_FORMAT_FLAG_TEARSYNC 0x0200 -#define OMAPFB_FORMAT_FLAG_FORCE_VSYNC 0x0400 -#define OMAPFB_FORMAT_FLAG_ENABLE_OVERLAY 0x0800 -#define OMAPFB_FORMAT_FLAG_DISABLE_OVERLAY 0x1000 - -#define OMAPFB_EVENT_READY 1 -#define OMAPFB_EVENT_DISABLED 2 - -#define OMAPFB_MEMTYPE_SDRAM 0 -#define OMAPFB_MEMTYPE_SRAM 1 -#define OMAPFB_MEMTYPE_MAX 1 - -enum omapfb_color_format { - OMAPFB_COLOR_RGB565 = 0, - OMAPFB_COLOR_YUV422, - OMAPFB_COLOR_YUV420, - OMAPFB_COLOR_CLUT_8BPP, - OMAPFB_COLOR_CLUT_4BPP, - OMAPFB_COLOR_CLUT_2BPP, - OMAPFB_COLOR_CLUT_1BPP, - OMAPFB_COLOR_RGB444, - OMAPFB_COLOR_YUY422, -}; - -struct omapfb_update_window { - __u32 x, y; - __u32 width, height; - __u32 format; - __u32 out_x, out_y; - __u32 out_width, out_height; - __u32 reserved[8]; -}; - -struct omapfb_update_window_old { - __u32 x, y; - __u32 width, height; - __u32 format; -}; - -enum omapfb_plane { - OMAPFB_PLANE_GFX = 0, - OMAPFB_PLANE_VID1, - OMAPFB_PLANE_VID2, -}; - -enum omapfb_channel_out { - OMAPFB_CHANNEL_OUT_LCD = 0, - OMAPFB_CHANNEL_OUT_DIGIT, -}; - -struct omapfb_plane_info { - __u32 pos_x; - __u32 pos_y; - __u8 enabled; - __u8 channel_out; - __u8 mirror; - __u8 reserved1; - __u32 out_width; - __u32 out_height; - __u32 reserved2[12]; -}; - -struct omapfb_mem_info { - __u32 size; - __u8 type; - __u8 reserved[3]; -}; - -struct omapfb_caps { - __u32 ctrl; - __u32 plane_color; - __u32 wnd_color; -}; - -enum omapfb_color_key_type { - OMAPFB_COLOR_KEY_DISABLED = 0, - OMAPFB_COLOR_KEY_GFX_DST, - OMAPFB_COLOR_KEY_VID_SRC, -}; - -struct omapfb_color_key { - __u8 channel_out; - __u32 background; - __u32 trans_key; - __u8 key_type; -}; - -enum omapfb_update_mode { - OMAPFB_UPDATE_DISABLED = 0, - OMAPFB_AUTO_UPDATE, - OMAPFB_MANUAL_UPDATE -}; - -#ifdef 
__KERNEL__ - -#include -#include -#include -#include - -#include - -#define OMAP_LCDC_INV_VSYNC 0x0001 -#define OMAP_LCDC_INV_HSYNC 0x0002 -#define OMAP_LCDC_INV_PIX_CLOCK 0x0004 -#define OMAP_LCDC_INV_OUTPUT_EN 0x0008 -#define OMAP_LCDC_HSVS_RISING_EDGE 0x0010 -#define OMAP_LCDC_HSVS_OPPOSITE 0x0020 - -#define OMAP_LCDC_SIGNAL_MASK 0x003f - -#define OMAP_LCDC_PANEL_TFT 0x0100 - -#define OMAPFB_PLANE_XRES_MIN 8 -#define OMAPFB_PLANE_YRES_MIN 8 - -#ifdef CONFIG_ARCH_OMAP1 -#define OMAPFB_PLANE_NUM 1 -#else -#define OMAPFB_PLANE_NUM 3 -#endif - -struct omapfb_device; - -struct lcd_panel { - const char *name; - int config; /* TFT/STN, signal inversion */ - int bpp; /* Pixel format in fb mem */ - int data_lines; /* Lines on LCD HW interface */ - - int x_res, y_res; - int pixel_clock; /* In kHz */ - int hsw; /* Horizontal synchronization - pulse width */ - int hfp; /* Horizontal front porch */ - int hbp; /* Horizontal back porch */ - int vsw; /* Vertical synchronization - pulse width */ - int vfp; /* Vertical front porch */ - int vbp; /* Vertical back porch */ - int acb; /* ac-bias pin frequency */ - int pcd; /* pixel clock divider. - Obsolete use pixel_clock instead */ - - int (*init) (struct lcd_panel *panel, - struct omapfb_device *fbdev); - void (*cleanup) (struct lcd_panel *panel); - int (*enable) (struct lcd_panel *panel); - void (*disable) (struct lcd_panel *panel); - unsigned long (*get_caps) (struct lcd_panel *panel); - int (*set_bklight_level)(struct lcd_panel *panel, - unsigned int level); - unsigned int (*get_bklight_level)(struct lcd_panel *panel); - unsigned int (*get_bklight_max) (struct lcd_panel *panel); - int (*run_test) (struct lcd_panel *panel, int test_num); -}; - -struct extif_timings { - int cs_on_time; - int cs_off_time; - int we_on_time; - int we_off_time; - int re_on_time; - int re_off_time; - int we_cycle_time; - int re_cycle_time; - int cs_pulse_width; - int access_time; - - int clk_div; - - u32 tim[5]; /* set by extif->convert_timings */ - - int converted; -}; - -struct lcd_ctrl_extif { - int (*init) (struct omapfb_device *fbdev); - void (*cleanup) (void); - void (*get_clk_info) (u32 *clk_period, u32 *max_clk_div); - unsigned long (*get_max_tx_rate)(void); - int (*convert_timings) (struct extif_timings *timings); - void (*set_timings) (const struct extif_timings *timings); - void (*set_bits_per_cycle)(int bpc); - void (*write_command) (const void *buf, unsigned int len); - void (*read_data) (void *buf, unsigned int len); - void (*write_data) (const void *buf, unsigned int len); - void (*transfer_area) (int width, int height, - void (callback)(void * data), void *data); - int (*setup_tearsync) (unsigned pin_cnt, - unsigned hs_pulse_time, unsigned vs_pulse_time, - int hs_pol_inv, int vs_pol_inv, int div); - int (*enable_tearsync) (int enable, unsigned line); - - unsigned long max_transmit_size; -}; - -struct omapfb_notifier_block { - struct notifier_block nb; - void *data; - int plane_idx; -}; - -typedef int (*omapfb_notifier_callback_t)(struct notifier_block *, - unsigned long event, - void *fbi); - -struct omapfb_mem_region { - u32 paddr; - void __iomem *vaddr; - unsigned long size; - u8 type; /* OMAPFB_PLANE_MEM_* */ - unsigned alloc:1; /* allocated by the driver */ - unsigned map:1; /* kernel mapped by the driver */ -}; - -struct omapfb_mem_desc { - int region_cnt; - struct omapfb_mem_region region[OMAPFB_PLANE_NUM]; -}; - -struct lcd_ctrl { - const char *name; - void *data; - - int (*init) (struct omapfb_device *fbdev, - int ext_mode, - struct omapfb_mem_desc 
*req_md); - void (*cleanup) (void); - void (*bind_client) (struct omapfb_notifier_block *nb); - void (*get_caps) (int plane, struct omapfb_caps *caps); - int (*set_update_mode)(enum omapfb_update_mode mode); - enum omapfb_update_mode (*get_update_mode)(void); - int (*setup_plane) (int plane, int channel_out, - unsigned long offset, - int screen_width, - int pos_x, int pos_y, int width, - int height, int color_mode); - int (*set_rotate) (int angle); - int (*setup_mem) (int plane, size_t size, - int mem_type, unsigned long *paddr); - int (*mmap) (struct fb_info *info, - struct vm_area_struct *vma); - int (*set_scale) (int plane, - int orig_width, int orig_height, - int out_width, int out_height); - int (*enable_plane) (int plane, int enable); - int (*update_window) (struct fb_info *fbi, - struct omapfb_update_window *win, - void (*callback)(void *), - void *callback_data); - void (*sync) (void); - void (*suspend) (void); - void (*resume) (void); - int (*run_test) (int test_num); - int (*setcolreg) (u_int regno, u16 red, u16 green, - u16 blue, u16 transp, - int update_hw_mem); - int (*set_color_key) (struct omapfb_color_key *ck); - int (*get_color_key) (struct omapfb_color_key *ck); -}; - -enum omapfb_state { - OMAPFB_DISABLED = 0, - OMAPFB_SUSPENDED= 99, - OMAPFB_ACTIVE = 100 -}; - -struct omapfb_plane_struct { - int idx; - struct omapfb_plane_info info; - enum omapfb_color_format color_mode; - struct omapfb_device *fbdev; -}; - -struct omapfb_device { - int state; - int ext_lcdc; /* Using external - LCD controller */ - struct mutex rqueue_mutex; - - int palette_size; - u32 pseudo_palette[17]; - - struct lcd_panel *panel; /* LCD panel */ - const struct lcd_ctrl *ctrl; /* LCD controller */ - const struct lcd_ctrl *int_ctrl; /* internal LCD ctrl */ - struct lcd_ctrl_extif *ext_if; /* LCD ctrl external - interface */ - struct device *dev; - struct fb_var_screeninfo new_var; /* for mode changes */ - - struct omapfb_mem_desc mem_desc; - struct fb_info *fb_info[OMAPFB_PLANE_NUM]; -}; - -struct omapfb_platform_data { - struct omap_lcd_config lcd; - struct omapfb_mem_desc mem_desc; - void *ctrl_platform_data; -}; - -#ifdef CONFIG_ARCH_OMAP1 -extern struct lcd_ctrl omap1_lcd_ctrl; -#else -extern struct lcd_ctrl omap2_disp_ctrl; -#endif - -extern void omapfb_reserve_sdram(void); -extern void omapfb_register_panel(struct lcd_panel *panel); -extern void omapfb_write_first_pixel(struct omapfb_device *fbdev, u16 pixval); -extern void omapfb_notify_clients(struct omapfb_device *fbdev, - unsigned long event); -extern int omapfb_register_client(struct omapfb_notifier_block *nb, - omapfb_notifier_callback_t callback, - void *callback_data); -extern int omapfb_unregister_client(struct omapfb_notifier_block *nb); -extern int omapfb_update_window_async(struct fb_info *fbi, - struct omapfb_update_window *win, - void (*callback)(void *), - void *callback_data); - -/* in arch/arm/plat-omap/fb.c */ -extern void omapfb_set_ctrl_platform_data(void *pdata); - -#endif /* __KERNEL__ */ - -#endif /* __OMAPFB_H */ diff --git a/drivers/video/omap/blizzard.c b/drivers/video/omap/blizzard.c index f5d75f22cef9..2ffb34af4c59 100644 --- a/drivers/video/omap/blizzard.c +++ b/drivers/video/omap/blizzard.c @@ -27,9 +27,9 @@ #include #include -#include #include +#include "omapfb.h" #include "dispc.h" #define MODULE_NAME "blizzard" diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c index 7c833db4f9b7..b8f75a7936a2 100644 --- a/drivers/video/omap/dispc.c +++ b/drivers/video/omap/dispc.c @@ -26,9 +26,9 @@ #include 
#include -#include #include +#include "omapfb.h" #include "dispc.h" #define MODULE_NAME "dispc" diff --git a/drivers/video/omap/hwa742.c b/drivers/video/omap/hwa742.c index 17a975e4c9c9..0016f77cd13f 100644 --- a/drivers/video/omap/hwa742.c +++ b/drivers/video/omap/hwa742.c @@ -25,10 +25,11 @@ #include #include #include +#include #include -#include #include +#include "omapfb.h" #define HWA742_REV_CODE_REG 0x0 #define HWA742_CONFIG_REG 0x2 diff --git a/drivers/video/omap/lcd_2430sdp.c b/drivers/video/omap/lcd_2430sdp.c index fea7feee0b77..760645d9dbb6 100644 --- a/drivers/video/omap/lcd_2430sdp.c +++ b/drivers/video/omap/lcd_2430sdp.c @@ -28,9 +28,10 @@ #include #include -#include #include +#include "omapfb.h" + #define SDP2430_LCD_PANEL_BACKLIGHT_GPIO 91 #define SDP2430_LCD_PANEL_ENABLE_GPIO 154 #define SDP3430_LCD_PANEL_BACKLIGHT_GPIO 24 diff --git a/drivers/video/omap/lcd_ams_delta.c b/drivers/video/omap/lcd_ams_delta.c index 3d5277252ca3..9340ca3c1c81 100644 --- a/drivers/video/omap/lcd_ams_delta.c +++ b/drivers/video/omap/lcd_ams_delta.c @@ -27,7 +27,8 @@ #include #include -#include + +#include "omapfb.h" #define AMS_DELTA_DEFAULT_CONTRAST 112 diff --git a/drivers/video/omap/lcd_apollon.c b/drivers/video/omap/lcd_apollon.c index 4c5cefc5153b..2be94eb3bbf5 100644 --- a/drivers/video/omap/lcd_apollon.c +++ b/drivers/video/omap/lcd_apollon.c @@ -26,7 +26,8 @@ #include #include -#include + +#include "omapfb.h" /* #define USE_35INCH_LCD 1 */ diff --git a/drivers/video/omap/lcd_h3.c b/drivers/video/omap/lcd_h3.c index 240b4fb10741..8df688748b5a 100644 --- a/drivers/video/omap/lcd_h3.c +++ b/drivers/video/omap/lcd_h3.c @@ -24,7 +24,7 @@ #include #include -#include +#include "omapfb.h" #define MODULE_NAME "omapfb-lcd_h3" diff --git a/drivers/video/omap/lcd_h4.c b/drivers/video/omap/lcd_h4.c index 720625da1f4e..03a06a982750 100644 --- a/drivers/video/omap/lcd_h4.c +++ b/drivers/video/omap/lcd_h4.c @@ -22,7 +22,7 @@ #include #include -#include +#include "omapfb.h" static int h4_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev) { diff --git a/drivers/video/omap/lcd_htcherald.c b/drivers/video/omap/lcd_htcherald.c index 2e0c81ea7483..a9007c5d1fad 100644 --- a/drivers/video/omap/lcd_htcherald.c +++ b/drivers/video/omap/lcd_htcherald.c @@ -29,7 +29,7 @@ #include #include -#include +#include "omapfb.h" static int htcherald_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev) diff --git a/drivers/video/omap/lcd_inn1510.c b/drivers/video/omap/lcd_inn1510.c index aafe9b497e2d..3271f1643b26 100644 --- a/drivers/video/omap/lcd_inn1510.c +++ b/drivers/video/omap/lcd_inn1510.c @@ -24,7 +24,7 @@ #include #include -#include +#include "omapfb.h" static int innovator1510_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev) diff --git a/drivers/video/omap/lcd_inn1610.c b/drivers/video/omap/lcd_inn1610.c index 0de338264a8a..9fff86f67bde 100644 --- a/drivers/video/omap/lcd_inn1610.c +++ b/drivers/video/omap/lcd_inn1610.c @@ -23,7 +23,7 @@ #include #include -#include +#include "omapfb.h" #define MODULE_NAME "omapfb-lcd_h3" diff --git a/drivers/video/omap/lcd_ldp.c b/drivers/video/omap/lcd_ldp.c index 6a260dfdadc5..5bb7f6f14601 100644 --- a/drivers/video/omap/lcd_ldp.c +++ b/drivers/video/omap/lcd_ldp.c @@ -28,9 +28,10 @@ #include #include -#include #include +#include "omapfb.h" + #define LCD_PANEL_BACKLIGHT_GPIO (15 + OMAP_MAX_GPIO_LINES) #define LCD_PANEL_ENABLE_GPIO (7 + OMAP_MAX_GPIO_LINES) diff --git a/drivers/video/omap/lcd_mipid.c b/drivers/video/omap/lcd_mipid.c index 
2162eb09e0fe..46ca90d1d177 100644 --- a/drivers/video/omap/lcd_mipid.c +++ b/drivers/video/omap/lcd_mipid.c @@ -23,9 +23,10 @@ #include #include -#include #include +#include "omapfb.h" + #define MIPID_MODULE_NAME "lcd_mipid" #define MIPID_CMD_READ_DISP_ID 0x04 diff --git a/drivers/video/omap/lcd_omap2evm.c b/drivers/video/omap/lcd_omap2evm.c index e1a38abca3e7..006c2fe7360e 100644 --- a/drivers/video/omap/lcd_omap2evm.c +++ b/drivers/video/omap/lcd_omap2evm.c @@ -27,9 +27,10 @@ #include #include -#include #include +#include "omapfb.h" + #define LCD_PANEL_ENABLE_GPIO 154 #define LCD_PANEL_LR 128 #define LCD_PANEL_UD 129 diff --git a/drivers/video/omap/lcd_omap3beagle.c b/drivers/video/omap/lcd_omap3beagle.c index ccec084ed647..fc503d8f3c24 100644 --- a/drivers/video/omap/lcd_omap3beagle.c +++ b/drivers/video/omap/lcd_omap3beagle.c @@ -26,9 +26,11 @@ #include #include -#include +#include #include +#include "omapfb.h" + #define LCD_PANEL_ENABLE_GPIO 170 static int omap3beagle_panel_init(struct lcd_panel *panel, diff --git a/drivers/video/omap/lcd_omap3evm.c b/drivers/video/omap/lcd_omap3evm.c index 556eb31db24c..ae2edc4081a8 100644 --- a/drivers/video/omap/lcd_omap3evm.c +++ b/drivers/video/omap/lcd_omap3evm.c @@ -26,9 +26,10 @@ #include #include -#include #include +#include "omapfb.h" + #define LCD_PANEL_ENABLE_GPIO 153 #define LCD_PANEL_LR 2 #define LCD_PANEL_UD 3 diff --git a/drivers/video/omap/lcd_osk.c b/drivers/video/omap/lcd_osk.c index bb21d7dca39e..b87e8b83f29c 100644 --- a/drivers/video/omap/lcd_osk.c +++ b/drivers/video/omap/lcd_osk.c @@ -25,7 +25,7 @@ #include #include -#include +#include "omapfb.h" static int osk_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev) { diff --git a/drivers/video/omap/lcd_overo.c b/drivers/video/omap/lcd_overo.c index b0f86e514cde..56ee192e9ee2 100644 --- a/drivers/video/omap/lcd_overo.c +++ b/drivers/video/omap/lcd_overo.c @@ -25,9 +25,10 @@ #include #include -#include #include +#include "omapfb.h" + #define LCD_ENABLE 144 static int overo_panel_init(struct lcd_panel *panel, diff --git a/drivers/video/omap/lcd_palmte.c b/drivers/video/omap/lcd_palmte.c index d30289603ce8..4cb301750d02 100644 --- a/drivers/video/omap/lcd_palmte.c +++ b/drivers/video/omap/lcd_palmte.c @@ -24,7 +24,7 @@ #include #include -#include +#include "omapfb.h" static int palmte_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev) diff --git a/drivers/video/omap/lcd_palmtt.c b/drivers/video/omap/lcd_palmtt.c index 557424fb6df1..ff0e6d7ab3a2 100644 --- a/drivers/video/omap/lcd_palmtt.c +++ b/drivers/video/omap/lcd_palmtt.c @@ -30,7 +30,7 @@ GPIO13 - screen blanking #include #include -#include +#include "omapfb.h" static int palmtt_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev) diff --git a/drivers/video/omap/lcd_palmz71.c b/drivers/video/omap/lcd_palmz71.c index 5f4b5b2c1f41..2334e56536bc 100644 --- a/drivers/video/omap/lcd_palmz71.c +++ b/drivers/video/omap/lcd_palmz71.c @@ -24,7 +24,7 @@ #include #include -#include +#include "omapfb.h" static int palmz71_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev) diff --git a/drivers/video/omap/lcdc.c b/drivers/video/omap/lcdc.c index 5f32cafbf74c..b831e1df629e 100644 --- a/drivers/video/omap/lcdc.c +++ b/drivers/video/omap/lcdc.c @@ -30,10 +30,11 @@ #include #include -#include #include +#include "omapfb.h" + #include "lcdc.h" #define MODULE_NAME "lcdc" diff --git a/drivers/video/omap/omapfb.h b/drivers/video/omap/omapfb.h new file mode 100644 index 000000000000..46e4714014e8 --- 
/dev/null +++ b/drivers/video/omap/omapfb.h @@ -0,0 +1,227 @@ +/* + * File: drivers/video/omap/omapfb.h + * + * Framebuffer driver for TI OMAP boards + * + * Copyright (C) 2004 Nokia Corporation + * Author: Imre Deak + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef __OMAPFB_H +#define __OMAPFB_H + +#include +#include +#include + +#define OMAPFB_EVENT_READY 1 +#define OMAPFB_EVENT_DISABLED 2 + +#define OMAP_LCDC_INV_VSYNC 0x0001 +#define OMAP_LCDC_INV_HSYNC 0x0002 +#define OMAP_LCDC_INV_PIX_CLOCK 0x0004 +#define OMAP_LCDC_INV_OUTPUT_EN 0x0008 +#define OMAP_LCDC_HSVS_RISING_EDGE 0x0010 +#define OMAP_LCDC_HSVS_OPPOSITE 0x0020 + +#define OMAP_LCDC_SIGNAL_MASK 0x003f + +#define OMAP_LCDC_PANEL_TFT 0x0100 + +#define OMAPFB_PLANE_XRES_MIN 8 +#define OMAPFB_PLANE_YRES_MIN 8 + +struct omapfb_device; + +struct lcd_panel { + const char *name; + int config; /* TFT/STN, signal inversion */ + int bpp; /* Pixel format in fb mem */ + int data_lines; /* Lines on LCD HW interface */ + + int x_res, y_res; + int pixel_clock; /* In kHz */ + int hsw; /* Horizontal synchronization + pulse width */ + int hfp; /* Horizontal front porch */ + int hbp; /* Horizontal back porch */ + int vsw; /* Vertical synchronization + pulse width */ + int vfp; /* Vertical front porch */ + int vbp; /* Vertical back porch */ + int acb; /* ac-bias pin frequency */ + int pcd; /* pixel clock divider. 
+ Obsolete use pixel_clock instead */ + + int (*init) (struct lcd_panel *panel, + struct omapfb_device *fbdev); + void (*cleanup) (struct lcd_panel *panel); + int (*enable) (struct lcd_panel *panel); + void (*disable) (struct lcd_panel *panel); + unsigned long (*get_caps) (struct lcd_panel *panel); + int (*set_bklight_level)(struct lcd_panel *panel, + unsigned int level); + unsigned int (*get_bklight_level)(struct lcd_panel *panel); + unsigned int (*get_bklight_max) (struct lcd_panel *panel); + int (*run_test) (struct lcd_panel *panel, int test_num); +}; + +struct extif_timings { + int cs_on_time; + int cs_off_time; + int we_on_time; + int we_off_time; + int re_on_time; + int re_off_time; + int we_cycle_time; + int re_cycle_time; + int cs_pulse_width; + int access_time; + + int clk_div; + + u32 tim[5]; /* set by extif->convert_timings */ + + int converted; +}; + +struct lcd_ctrl_extif { + int (*init) (struct omapfb_device *fbdev); + void (*cleanup) (void); + void (*get_clk_info) (u32 *clk_period, u32 *max_clk_div); + unsigned long (*get_max_tx_rate)(void); + int (*convert_timings) (struct extif_timings *timings); + void (*set_timings) (const struct extif_timings *timings); + void (*set_bits_per_cycle)(int bpc); + void (*write_command) (const void *buf, unsigned int len); + void (*read_data) (void *buf, unsigned int len); + void (*write_data) (const void *buf, unsigned int len); + void (*transfer_area) (int width, int height, + void (callback)(void *data), void *data); + int (*setup_tearsync) (unsigned pin_cnt, + unsigned hs_pulse_time, unsigned vs_pulse_time, + int hs_pol_inv, int vs_pol_inv, int div); + int (*enable_tearsync) (int enable, unsigned line); + + unsigned long max_transmit_size; +}; + +struct omapfb_notifier_block { + struct notifier_block nb; + void *data; + int plane_idx; +}; + +typedef int (*omapfb_notifier_callback_t)(struct notifier_block *, + unsigned long event, + void *fbi); + +struct lcd_ctrl { + const char *name; + void *data; + + int (*init) (struct omapfb_device *fbdev, + int ext_mode, + struct omapfb_mem_desc *req_md); + void (*cleanup) (void); + void (*bind_client) (struct omapfb_notifier_block *nb); + void (*get_caps) (int plane, struct omapfb_caps *caps); + int (*set_update_mode)(enum omapfb_update_mode mode); + enum omapfb_update_mode (*get_update_mode)(void); + int (*setup_plane) (int plane, int channel_out, + unsigned long offset, + int screen_width, + int pos_x, int pos_y, int width, + int height, int color_mode); + int (*set_rotate) (int angle); + int (*setup_mem) (int plane, size_t size, + int mem_type, unsigned long *paddr); + int (*mmap) (struct fb_info *info, + struct vm_area_struct *vma); + int (*set_scale) (int plane, + int orig_width, int orig_height, + int out_width, int out_height); + int (*enable_plane) (int plane, int enable); + int (*update_window) (struct fb_info *fbi, + struct omapfb_update_window *win, + void (*callback)(void *), + void *callback_data); + void (*sync) (void); + void (*suspend) (void); + void (*resume) (void); + int (*run_test) (int test_num); + int (*setcolreg) (u_int regno, u16 red, u16 green, + u16 blue, u16 transp, + int update_hw_mem); + int (*set_color_key) (struct omapfb_color_key *ck); + int (*get_color_key) (struct omapfb_color_key *ck); +}; + +enum omapfb_state { + OMAPFB_DISABLED = 0, + OMAPFB_SUSPENDED = 99, + OMAPFB_ACTIVE = 100 +}; + +struct omapfb_plane_struct { + int idx; + struct omapfb_plane_info info; + enum omapfb_color_format color_mode; + struct omapfb_device *fbdev; +}; + +struct omapfb_device { + int 
state; + int ext_lcdc; /* Using external + LCD controller */ + struct mutex rqueue_mutex; + + int palette_size; + u32 pseudo_palette[17]; + + struct lcd_panel *panel; /* LCD panel */ + const struct lcd_ctrl *ctrl; /* LCD controller */ + const struct lcd_ctrl *int_ctrl; /* internal LCD ctrl */ + struct lcd_ctrl_extif *ext_if; /* LCD ctrl external + interface */ + struct device *dev; + struct fb_var_screeninfo new_var; /* for mode changes */ + + struct omapfb_mem_desc mem_desc; + struct fb_info *fb_info[OMAPFB_PLANE_NUM]; +}; + +#ifdef CONFIG_ARCH_OMAP1 +extern struct lcd_ctrl omap1_lcd_ctrl; +#else +extern struct lcd_ctrl omap2_disp_ctrl; +#endif + +extern void omapfb_register_panel(struct lcd_panel *panel); +extern void omapfb_write_first_pixel(struct omapfb_device *fbdev, u16 pixval); +extern void omapfb_notify_clients(struct omapfb_device *fbdev, + unsigned long event); +extern int omapfb_register_client(struct omapfb_notifier_block *nb, + omapfb_notifier_callback_t callback, + void *callback_data); +extern int omapfb_unregister_client(struct omapfb_notifier_block *nb); +extern int omapfb_update_window_async(struct fb_info *fbi, + struct omapfb_update_window *win, + void (*callback)(void *), + void *callback_data); + +#endif /* __OMAPFB_H */ diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c index f900a43db8d7..c7f59a5ccdbc 100644 --- a/drivers/video/omap/omapfb_main.c +++ b/drivers/video/omap/omapfb_main.c @@ -29,8 +29,8 @@ #include #include -#include +#include "omapfb.h" #include "lcdc.h" #include "dispc.h" diff --git a/drivers/video/omap/rfbi.c b/drivers/video/omap/rfbi.c index c90fa39486b4..fed7b1bda19c 100644 --- a/drivers/video/omap/rfbi.c +++ b/drivers/video/omap/rfbi.c @@ -27,8 +27,7 @@ #include #include -#include - +#include "omapfb.h" #include "dispc.h" /* To work around an RFBI transfer rate limitation */ diff --git a/drivers/video/omap/sossi.c b/drivers/video/omap/sossi.c index 79dc84f09713..8fb7c708f563 100644 --- a/drivers/video/omap/sossi.c +++ b/drivers/video/omap/sossi.c @@ -23,10 +23,11 @@ #include #include #include +#include #include -#include +#include "omapfb.h" #include "lcdc.h" #define MODULE_NAME "omapfb-sossi" diff --git a/include/linux/omapfb.h b/include/linux/omapfb.h new file mode 100644 index 000000000000..a8efa92c6c35 --- /dev/null +++ b/include/linux/omapfb.h @@ -0,0 +1,197 @@ +/* + * File: include/linux/omapfb.h + * + * Framebuffer driver for TI OMAP boards + * + * Copyright (C) 2004 Nokia Corporation + * Author: Imre Deak + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef __LINUX_OMAPFB_H__ +#define __LINUX_OMAPFB_H__ + +#include +#include + +/* IOCTL commands. 
*/ + +#define OMAP_IOW(num, dtype) _IOW('O', num, dtype) +#define OMAP_IOR(num, dtype) _IOR('O', num, dtype) +#define OMAP_IOWR(num, dtype) _IOWR('O', num, dtype) +#define OMAP_IO(num) _IO('O', num) + +#define OMAPFB_MIRROR OMAP_IOW(31, int) +#define OMAPFB_SYNC_GFX OMAP_IO(37) +#define OMAPFB_VSYNC OMAP_IO(38) +#define OMAPFB_SET_UPDATE_MODE OMAP_IOW(40, int) +#define OMAPFB_GET_CAPS OMAP_IOR(42, struct omapfb_caps) +#define OMAPFB_GET_UPDATE_MODE OMAP_IOW(43, int) +#define OMAPFB_LCD_TEST OMAP_IOW(45, int) +#define OMAPFB_CTRL_TEST OMAP_IOW(46, int) +#define OMAPFB_UPDATE_WINDOW_OLD OMAP_IOW(47, struct omapfb_update_window_old) +#define OMAPFB_SET_COLOR_KEY OMAP_IOW(50, struct omapfb_color_key) +#define OMAPFB_GET_COLOR_KEY OMAP_IOW(51, struct omapfb_color_key) +#define OMAPFB_SETUP_PLANE OMAP_IOW(52, struct omapfb_plane_info) +#define OMAPFB_QUERY_PLANE OMAP_IOW(53, struct omapfb_plane_info) +#define OMAPFB_UPDATE_WINDOW OMAP_IOW(54, struct omapfb_update_window) +#define OMAPFB_SETUP_MEM OMAP_IOW(55, struct omapfb_mem_info) +#define OMAPFB_QUERY_MEM OMAP_IOW(56, struct omapfb_mem_info) + +#define OMAPFB_CAPS_GENERIC_MASK 0x00000fff +#define OMAPFB_CAPS_LCDC_MASK 0x00fff000 +#define OMAPFB_CAPS_PANEL_MASK 0xff000000 + +#define OMAPFB_CAPS_MANUAL_UPDATE 0x00001000 +#define OMAPFB_CAPS_TEARSYNC 0x00002000 +#define OMAPFB_CAPS_PLANE_RELOCATE_MEM 0x00004000 +#define OMAPFB_CAPS_PLANE_SCALE 0x00008000 +#define OMAPFB_CAPS_WINDOW_PIXEL_DOUBLE 0x00010000 +#define OMAPFB_CAPS_WINDOW_SCALE 0x00020000 +#define OMAPFB_CAPS_WINDOW_OVERLAY 0x00040000 +#define OMAPFB_CAPS_WINDOW_ROTATE 0x00080000 +#define OMAPFB_CAPS_SET_BACKLIGHT 0x01000000 + +/* Values from DSP must map to lower 16-bits */ +#define OMAPFB_FORMAT_MASK 0x00ff +#define OMAPFB_FORMAT_FLAG_DOUBLE 0x0100 +#define OMAPFB_FORMAT_FLAG_TEARSYNC 0x0200 +#define OMAPFB_FORMAT_FLAG_FORCE_VSYNC 0x0400 +#define OMAPFB_FORMAT_FLAG_ENABLE_OVERLAY 0x0800 +#define OMAPFB_FORMAT_FLAG_DISABLE_OVERLAY 0x1000 + +#define OMAPFB_MEMTYPE_SDRAM 0 +#define OMAPFB_MEMTYPE_SRAM 1 +#define OMAPFB_MEMTYPE_MAX 1 + +enum omapfb_color_format { + OMAPFB_COLOR_RGB565 = 0, + OMAPFB_COLOR_YUV422, + OMAPFB_COLOR_YUV420, + OMAPFB_COLOR_CLUT_8BPP, + OMAPFB_COLOR_CLUT_4BPP, + OMAPFB_COLOR_CLUT_2BPP, + OMAPFB_COLOR_CLUT_1BPP, + OMAPFB_COLOR_RGB444, + OMAPFB_COLOR_YUY422, +}; + +struct omapfb_update_window { + __u32 x, y; + __u32 width, height; + __u32 format; + __u32 out_x, out_y; + __u32 out_width, out_height; + __u32 reserved[8]; +}; + +struct omapfb_update_window_old { + __u32 x, y; + __u32 width, height; + __u32 format; +}; + +enum omapfb_plane { + OMAPFB_PLANE_GFX = 0, + OMAPFB_PLANE_VID1, + OMAPFB_PLANE_VID2, +}; + +enum omapfb_channel_out { + OMAPFB_CHANNEL_OUT_LCD = 0, + OMAPFB_CHANNEL_OUT_DIGIT, +}; + +struct omapfb_plane_info { + __u32 pos_x; + __u32 pos_y; + __u8 enabled; + __u8 channel_out; + __u8 mirror; + __u8 reserved1; + __u32 out_width; + __u32 out_height; + __u32 reserved2[12]; +}; + +struct omapfb_mem_info { + __u32 size; + __u8 type; + __u8 reserved[3]; +}; + +struct omapfb_caps { + __u32 ctrl; + __u32 plane_color; + __u32 wnd_color; +}; + +enum omapfb_color_key_type { + OMAPFB_COLOR_KEY_DISABLED = 0, + OMAPFB_COLOR_KEY_GFX_DST, + OMAPFB_COLOR_KEY_VID_SRC, +}; + +struct omapfb_color_key { + __u8 channel_out; + __u32 background; + __u32 trans_key; + __u8 key_type; +}; + +enum omapfb_update_mode { + OMAPFB_UPDATE_DISABLED = 0, + OMAPFB_AUTO_UPDATE, + OMAPFB_MANUAL_UPDATE +}; + +#ifdef __KERNEL__ + +#include + +#ifdef CONFIG_ARCH_OMAP1 +#define 
OMAPFB_PLANE_NUM 1 +#else +#define OMAPFB_PLANE_NUM 3 +#endif + +struct omapfb_mem_region { + u32 paddr; + void __iomem *vaddr; + unsigned long size; + u8 type; /* OMAPFB_PLANE_MEM_* */ + unsigned alloc:1; /* allocated by the driver */ + unsigned map:1; /* kernel mapped by the driver */ +}; + +struct omapfb_mem_desc { + int region_cnt; + struct omapfb_mem_region region[OMAPFB_PLANE_NUM]; +}; + +struct omapfb_platform_data { + struct omap_lcd_config lcd; + struct omapfb_mem_desc mem_desc; + void *ctrl_platform_data; +}; + +/* in arch/arm/plat-omap/fb.c */ +extern void omapfb_set_ctrl_platform_data(void *pdata); +extern void omapfb_reserve_sdram(void); + +#endif + +#endif /* __OMAPFB_H */ -- cgit v1.2.3 From b39a982ddecf1d95ed96f8457c39d3ea11df93f6 Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Tue, 4 Aug 2009 16:12:50 +0300 Subject: OMAP: DSS2: omapfb driver Signed-off-by: Tomi Valkeinen --- arch/arm/plat-omap/fb.c | 47 +- drivers/video/omap/Kconfig | 5 +- drivers/video/omap2/Kconfig | 1 + drivers/video/omap2/Makefile | 1 + drivers/video/omap2/omapfb/Kconfig | 37 + drivers/video/omap2/omapfb/Makefile | 2 + drivers/video/omap2/omapfb/omapfb-ioctl.c | 755 ++++++++++ drivers/video/omap2/omapfb/omapfb-main.c | 2261 +++++++++++++++++++++++++++++ drivers/video/omap2/omapfb/omapfb-sysfs.c | 507 +++++++ drivers/video/omap2/omapfb/omapfb.h | 146 ++ include/linux/omapfb.h | 54 + 11 files changed, 3813 insertions(+), 3 deletions(-) create mode 100644 drivers/video/omap2/omapfb/Kconfig create mode 100644 drivers/video/omap2/omapfb/Makefile create mode 100644 drivers/video/omap2/omapfb/omapfb-ioctl.c create mode 100644 drivers/video/omap2/omapfb/omapfb-main.c create mode 100644 drivers/video/omap2/omapfb/omapfb-sysfs.c create mode 100644 drivers/video/omap2/omapfb/omapfb.h (limited to 'include') diff --git a/arch/arm/plat-omap/fb.c b/arch/arm/plat-omap/fb.c index 18cf1d4b7931..d3eea4f47533 100644 --- a/arch/arm/plat-omap/fb.c +++ b/arch/arm/plat-omap/fb.c @@ -55,6 +55,10 @@ static struct platform_device omap_fb_device = { .num_resources = 0, }; +void omapfb_set_platform_data(struct omapfb_platform_data *data) +{ +} + static inline int ranges_overlap(unsigned long start1, unsigned long size1, unsigned long start2, unsigned long size2) { @@ -327,7 +331,33 @@ static inline int omap_init_fb(void) arch_initcall(omap_init_fb); -#else +#elif defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE) + +static u64 omap_fb_dma_mask = ~(u32)0; +static struct omapfb_platform_data omapfb_config; + +static struct platform_device omap_fb_device = { + .name = "omapfb", + .id = -1, + .dev = { + .dma_mask = &omap_fb_dma_mask, + .coherent_dma_mask = ~(u32)0, + .platform_data = &omapfb_config, + }, + .num_resources = 0, +}; + +void omapfb_set_platform_data(struct omapfb_platform_data *data) +{ + omapfb_config = *data; +} + +static inline int omap_init_fb(void) +{ + return platform_device_register(&omap_fb_device); +} + +arch_initcall(omap_init_fb); void omapfb_reserve_sdram(void) {} unsigned long omapfb_reserve_sram(unsigned long sram_pstart, @@ -339,5 +369,20 @@ unsigned long omapfb_reserve_sram(unsigned long sram_pstart, return 0; } +#else + +void omapfb_set_platform_data(struct omapfb_platform_data *data) +{ +} + +void omapfb_reserve_sdram(void) {} +unsigned long omapfb_reserve_sram(unsigned long sram_pstart, + unsigned long sram_vstart, + unsigned long sram_size, + unsigned long start_avail, + unsigned long size_avail) +{ + return 0; +} #endif diff --git a/drivers/video/omap/Kconfig b/drivers/video/omap/Kconfig 
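Besides registering the "omapfb" platform device, the FB_OMAP2 branch added to arch/arm/plat-omap/fb.c above exports omapfb_set_platform_data() so board code can hand LCD and memory configuration to the new driver. A hypothetical board-file fragment is sketched below; the 2 MiB region size is an arbitrary example value and the declaration of omapfb_set_platform_data() is assumed to be reachable via <linux/omapfb.h>.

/* Hypothetical board-file sketch, not part of this series: hand one
 * SDRAM framebuffer region to omapfb via the hook added to
 * arch/arm/plat-omap/fb.c.  Size and layout are example values. */
#include <linux/init.h>
#include <linux/omapfb.h>

static struct omapfb_platform_data board_fb_data;

static void __init board_init_fb(void)
{
	board_fb_data.mem_desc.region_cnt = 1;
	board_fb_data.mem_desc.region[0].size = 2 * 1024 * 1024;	/* 2 MiB */
	board_fb_data.mem_desc.region[0].type = OMAPFB_MEMTYPE_SDRAM;

	omapfb_set_platform_data(&board_fb_data);
}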
index 551e3e9c4cbe..455c6055325d 100644 --- a/drivers/video/omap/Kconfig +++ b/drivers/video/omap/Kconfig @@ -1,6 +1,7 @@ config FB_OMAP tristate "OMAP frame buffer support (EXPERIMENTAL)" - depends on FB && ARCH_OMAP + depends on FB && ARCH_OMAP && (OMAP2_DSS = "n") + select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT @@ -72,7 +73,7 @@ config FB_OMAP_LCD_MIPID config FB_OMAP_BOOTLOADER_INIT bool "Check bootloader initialization" - depends on FB_OMAP + depends on FB_OMAP || FB_OMAP2 help Say Y here if you want to enable checking if the bootloader has already initialized the display controller. In this case the diff --git a/drivers/video/omap2/Kconfig b/drivers/video/omap2/Kconfig index 55b4c4265f57..3e60d7e88540 100644 --- a/drivers/video/omap2/Kconfig +++ b/drivers/video/omap2/Kconfig @@ -5,3 +5,4 @@ config OMAP2_VRFB bool source "drivers/video/omap2/dss/Kconfig" +source "drivers/video/omap2/omapfb/Kconfig" diff --git a/drivers/video/omap2/Makefile b/drivers/video/omap2/Makefile index ee0644f9d3c1..3ba6ef5e30d4 100644 --- a/drivers/video/omap2/Makefile +++ b/drivers/video/omap2/Makefile @@ -2,3 +2,4 @@ obj-$(CONFIG_OMAP2_VRAM) += vram.o obj-$(CONFIG_OMAP2_VRFB) += vrfb.o obj-y += dss/ +obj-y += omapfb/ diff --git a/drivers/video/omap2/omapfb/Kconfig b/drivers/video/omap2/omapfb/Kconfig new file mode 100644 index 000000000000..bb694cc52a50 --- /dev/null +++ b/drivers/video/omap2/omapfb/Kconfig @@ -0,0 +1,37 @@ +menuconfig FB_OMAP2 + tristate "OMAP2/3 frame buffer support (EXPERIMENTAL)" + depends on FB && OMAP2_DSS + + select OMAP2_VRAM + select OMAP2_VRFB + select FB_CFB_FILLRECT + select FB_CFB_COPYAREA + select FB_CFB_IMAGEBLIT + help + Frame buffer driver for OMAP2/3 based boards. + +config FB_OMAP2_DEBUG_SUPPORT + bool "Debug support for OMAP2/3 FB" + default y + depends on FB_OMAP2 + help + Support for debug output. You have to enable the actual printing + with debug module parameter. + +config FB_OMAP2_FORCE_AUTO_UPDATE + bool "Force main display to automatic update mode" + depends on FB_OMAP2 + help + Forces main display to automatic update mode (if possible), + and also enables tearsync (if possible). By default + displays that support manual update are started in manual + update mode. + +config FB_OMAP2_NUM_FBS + int "Number of framebuffers" + range 1 10 + default 3 + depends on FB_OMAP2 + help + Select the number of framebuffers created. OMAP2/3 has 3 overlays + so normally this would be 3. diff --git a/drivers/video/omap2/omapfb/Makefile b/drivers/video/omap2/omapfb/Makefile new file mode 100644 index 000000000000..51c2e00d9bf8 --- /dev/null +++ b/drivers/video/omap2/omapfb/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_FB_OMAP2) += omapfb.o +omapfb-y := omapfb-main.o omapfb-sysfs.o omapfb-ioctl.o diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c new file mode 100644 index 000000000000..4c4bafdfaa43 --- /dev/null +++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c @@ -0,0 +1,755 @@ +/* + * linux/drivers/video/omap2/omapfb-ioctl.c + * + * Copyright (C) 2008 Nokia Corporation + * Author: Tomi Valkeinen + * + * Some code and ideas taken from drivers/video/omap/ driver + * by Imre Deak. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "omapfb.h" + +static int omapfb_setup_plane(struct fb_info *fbi, struct omapfb_plane_info *pi) +{ + struct omapfb_info *ofbi = FB2OFB(fbi); + struct omapfb2_device *fbdev = ofbi->fbdev; + struct omap_overlay *ovl; + struct omap_overlay_info info; + int r = 0; + + DBG("omapfb_setup_plane\n"); + + if (ofbi->num_overlays != 1) { + r = -EINVAL; + goto out; + } + + /* XXX uses only the first overlay */ + ovl = ofbi->overlays[0]; + + if (pi->enabled && !ofbi->region.size) { + /* + * This plane's memory was freed, can't enable it + * until it's reallocated. + */ + r = -EINVAL; + goto out; + } + + ovl->get_overlay_info(ovl, &info); + + info.pos_x = pi->pos_x; + info.pos_y = pi->pos_y; + info.out_width = pi->out_width; + info.out_height = pi->out_height; + info.enabled = pi->enabled; + + r = ovl->set_overlay_info(ovl, &info); + if (r) + goto out; + + if (ovl->manager) { + r = ovl->manager->apply(ovl->manager); + if (r) + goto out; + } + +out: + if (r) + dev_err(fbdev->dev, "setup_plane failed\n"); + return r; +} + +static int omapfb_query_plane(struct fb_info *fbi, struct omapfb_plane_info *pi) +{ + struct omapfb_info *ofbi = FB2OFB(fbi); + + if (ofbi->num_overlays != 1) { + memset(pi, 0, sizeof(*pi)); + } else { + struct omap_overlay_info *ovli; + struct omap_overlay *ovl; + + ovl = ofbi->overlays[0]; + ovli = &ovl->info; + + pi->pos_x = ovli->pos_x; + pi->pos_y = ovli->pos_y; + pi->enabled = ovli->enabled; + pi->channel_out = 0; /* xxx */ + pi->mirror = 0; + pi->out_width = ovli->out_width; + pi->out_height = ovli->out_height; + } + + return 0; +} + +static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi) +{ + struct omapfb_info *ofbi = FB2OFB(fbi); + struct omapfb2_device *fbdev = ofbi->fbdev; + struct omapfb2_mem_region *rg; + int r, i; + size_t size; + + if (mi->type > OMAPFB_MEMTYPE_MAX) + return -EINVAL; + + size = PAGE_ALIGN(mi->size); + + rg = &ofbi->region; + + for (i = 0; i < ofbi->num_overlays; i++) { + if (ofbi->overlays[i]->info.enabled) + return -EBUSY; + } + + if (rg->size != size || rg->type != mi->type) { + r = omapfb_realloc_fbmem(fbi, size, mi->type); + if (r) { + dev_err(fbdev->dev, "realloc fbmem failed\n"); + return r; + } + } + + return 0; +} + +static int omapfb_query_mem(struct fb_info *fbi, struct omapfb_mem_info *mi) +{ + struct omapfb_info *ofbi = FB2OFB(fbi); + struct omapfb2_mem_region *rg; + + rg = &ofbi->region; + memset(mi, 0, sizeof(*mi)); + + mi->size = rg->size; + mi->type = rg->type; + + return 0; +} + +static int omapfb_update_window_nolock(struct fb_info *fbi, + u32 x, u32 y, u32 w, u32 h) +{ + struct omap_dss_device *display = fb2display(fbi); + u16 dw, dh; + + if (!display) + return 0; + + if (w == 0 || h == 0) + return 0; + + display->get_resolution(display, &dw, &dh); + + if (x + w > dw || y + h > dh) + return -EINVAL; + + return display->update(display, x, y, w, h); +} + +/* This function is exported for SGX driver use */ +int omapfb_update_window(struct fb_info *fbi, + u32 x, u32 y, u32 w, u32 h) +{ + struct omapfb_info *ofbi = FB2OFB(fbi); + struct 
omapfb2_device *fbdev = ofbi->fbdev; + int r; + + omapfb_lock(fbdev); + lock_fb_info(fbi); + + r = omapfb_update_window_nolock(fbi, x, y, w, h); + + unlock_fb_info(fbi); + omapfb_unlock(fbdev); + + return r; +} +EXPORT_SYMBOL(omapfb_update_window); + +static int omapfb_set_update_mode(struct fb_info *fbi, + enum omapfb_update_mode mode) +{ + struct omap_dss_device *display = fb2display(fbi); + enum omap_dss_update_mode um; + int r; + + if (!display || !display->set_update_mode) + return -EINVAL; + + switch (mode) { + case OMAPFB_UPDATE_DISABLED: + um = OMAP_DSS_UPDATE_DISABLED; + break; + + case OMAPFB_AUTO_UPDATE: + um = OMAP_DSS_UPDATE_AUTO; + break; + + case OMAPFB_MANUAL_UPDATE: + um = OMAP_DSS_UPDATE_MANUAL; + break; + + default: + return -EINVAL; + } + + r = display->set_update_mode(display, um); + + return r; +} + +static int omapfb_get_update_mode(struct fb_info *fbi, + enum omapfb_update_mode *mode) +{ + struct omap_dss_device *display = fb2display(fbi); + enum omap_dss_update_mode m; + + if (!display || !display->get_update_mode) + return -EINVAL; + + m = display->get_update_mode(display); + + switch (m) { + case OMAP_DSS_UPDATE_DISABLED: + *mode = OMAPFB_UPDATE_DISABLED; + break; + case OMAP_DSS_UPDATE_AUTO: + *mode = OMAPFB_AUTO_UPDATE; + break; + case OMAP_DSS_UPDATE_MANUAL: + *mode = OMAPFB_MANUAL_UPDATE; + break; + default: + BUG(); + } + + return 0; +} + +/* XXX this color key handling is a hack... */ +static struct omapfb_color_key omapfb_color_keys[2]; + +static int _omapfb_set_color_key(struct omap_overlay_manager *mgr, + struct omapfb_color_key *ck) +{ + struct omap_overlay_manager_info info; + enum omap_dss_trans_key_type kt; + int r; + + mgr->get_manager_info(mgr, &info); + + if (ck->key_type == OMAPFB_COLOR_KEY_DISABLED) { + info.trans_enabled = false; + omapfb_color_keys[mgr->id] = *ck; + + r = mgr->set_manager_info(mgr, &info); + if (r) + return r; + + r = mgr->apply(mgr); + + return r; + } + + switch (ck->key_type) { + case OMAPFB_COLOR_KEY_GFX_DST: + kt = OMAP_DSS_COLOR_KEY_GFX_DST; + break; + case OMAPFB_COLOR_KEY_VID_SRC: + kt = OMAP_DSS_COLOR_KEY_VID_SRC; + break; + default: + return -EINVAL; + } + + info.default_color = ck->background; + info.trans_key = ck->trans_key; + info.trans_key_type = kt; + info.trans_enabled = true; + + omapfb_color_keys[mgr->id] = *ck; + + r = mgr->set_manager_info(mgr, &info); + if (r) + return r; + + r = mgr->apply(mgr); + + return r; +} + +static int omapfb_set_color_key(struct fb_info *fbi, + struct omapfb_color_key *ck) +{ + struct omapfb_info *ofbi = FB2OFB(fbi); + struct omapfb2_device *fbdev = ofbi->fbdev; + int r; + int i; + struct omap_overlay_manager *mgr = NULL; + + omapfb_lock(fbdev); + + for (i = 0; i < ofbi->num_overlays; i++) { + if (ofbi->overlays[i]->manager) { + mgr = ofbi->overlays[i]->manager; + break; + } + } + + if (!mgr) { + r = -EINVAL; + goto err; + } + + r = _omapfb_set_color_key(mgr, ck); +err: + omapfb_unlock(fbdev); + + return r; +} + +static int omapfb_get_color_key(struct fb_info *fbi, + struct omapfb_color_key *ck) +{ + struct omapfb_info *ofbi = FB2OFB(fbi); + struct omapfb2_device *fbdev = ofbi->fbdev; + struct omap_overlay_manager *mgr = NULL; + int r = 0; + int i; + + omapfb_lock(fbdev); + + for (i = 0; i < ofbi->num_overlays; i++) { + if (ofbi->overlays[i]->manager) { + mgr = ofbi->overlays[i]->manager; + break; + } + } + + if (!mgr) { + r = -EINVAL; + goto err; + } + + *ck = omapfb_color_keys[mgr->id]; +err: + omapfb_unlock(fbdev); + + return r; +} + +static int omapfb_memory_read(struct 
fb_info *fbi, + struct omapfb_memory_read *mr) +{ + struct omap_dss_device *display = fb2display(fbi); + void *buf; + int r; + + if (!display || !display->memory_read) + return -ENOENT; + + if (!access_ok(VERIFY_WRITE, mr->buffer, mr->buffer_size)) + return -EFAULT; + + if (mr->w * mr->h * 3 > mr->buffer_size) + return -EINVAL; + + buf = vmalloc(mr->buffer_size); + if (!buf) { + DBG("vmalloc failed\n"); + return -ENOMEM; + } + + r = display->memory_read(display, buf, mr->buffer_size, + mr->x, mr->y, mr->w, mr->h); + + if (r > 0) { + if (copy_to_user(mr->buffer, buf, mr->buffer_size)) + r = -EFAULT; + } + + vfree(buf); + + return r; +} + +static int omapfb_get_ovl_colormode(struct omapfb2_device *fbdev, + struct omapfb_ovl_colormode *mode) +{ + int ovl_idx = mode->overlay_idx; + int mode_idx = mode->mode_idx; + struct omap_overlay *ovl; + enum omap_color_mode supported_modes; + struct fb_var_screeninfo var; + int i; + + if (ovl_idx >= fbdev->num_overlays) + return -ENODEV; + ovl = fbdev->overlays[ovl_idx]; + supported_modes = ovl->supported_modes; + + mode_idx = mode->mode_idx; + + for (i = 0; i < sizeof(supported_modes) * 8; i++) { + if (!(supported_modes & (1 << i))) + continue; + /* + * It's possible that the FB doesn't support a mode + * that is supported by the overlay, so call the + * following here. + */ + if (dss_mode_to_fb_mode(1 << i, &var) < 0) + continue; + + mode_idx--; + if (mode_idx < 0) + break; + } + + if (i == sizeof(supported_modes) * 8) + return -ENOENT; + + mode->bits_per_pixel = var.bits_per_pixel; + mode->nonstd = var.nonstd; + mode->red = var.red; + mode->green = var.green; + mode->blue = var.blue; + mode->transp = var.transp; + + return 0; +} + +static int omapfb_wait_for_go(struct fb_info *fbi) +{ + struct omapfb_info *ofbi = FB2OFB(fbi); + int r = 0; + int i; + + for (i = 0; i < ofbi->num_overlays; ++i) { + struct omap_overlay *ovl = ofbi->overlays[i]; + r = ovl->wait_for_go(ovl); + if (r) + break; + } + + return r; +} + +int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg) +{ + struct omapfb_info *ofbi = FB2OFB(fbi); + struct omapfb2_device *fbdev = ofbi->fbdev; + struct omap_dss_device *display = fb2display(fbi); + + union { + struct omapfb_update_window_old uwnd_o; + struct omapfb_update_window uwnd; + struct omapfb_plane_info plane_info; + struct omapfb_caps caps; + struct omapfb_mem_info mem_info; + struct omapfb_color_key color_key; + struct omapfb_ovl_colormode ovl_colormode; + enum omapfb_update_mode update_mode; + int test_num; + struct omapfb_memory_read memory_read; + struct omapfb_vram_info vram_info; + struct omapfb_tearsync_info tearsync_info; + } p; + + int r = 0; + + switch (cmd) { + case OMAPFB_SYNC_GFX: + DBG("ioctl SYNC_GFX\n"); + if (!display || !display->sync) { + /* DSS1 never returns an error here, so we neither */ + /*r = -EINVAL;*/ + break; + } + + r = display->sync(display); + break; + + case OMAPFB_UPDATE_WINDOW_OLD: + DBG("ioctl UPDATE_WINDOW_OLD\n"); + if (!display || !display->update) { + r = -EINVAL; + break; + } + + if (copy_from_user(&p.uwnd_o, + (void __user *)arg, + sizeof(p.uwnd_o))) { + r = -EFAULT; + break; + } + + r = omapfb_update_window_nolock(fbi, p.uwnd_o.x, p.uwnd_o.y, + p.uwnd_o.width, p.uwnd_o.height); + break; + + case OMAPFB_UPDATE_WINDOW: + DBG("ioctl UPDATE_WINDOW\n"); + if (!display || !display->update) { + r = -EINVAL; + break; + } + + if (copy_from_user(&p.uwnd, (void __user *)arg, + sizeof(p.uwnd))) { + r = -EFAULT; + break; + } + + r = omapfb_update_window_nolock(fbi, p.uwnd.x, p.uwnd.y, 
+ p.uwnd.width, p.uwnd.height); + break; + + case OMAPFB_SETUP_PLANE: + DBG("ioctl SETUP_PLANE\n"); + if (copy_from_user(&p.plane_info, (void __user *)arg, + sizeof(p.plane_info))) + r = -EFAULT; + else + r = omapfb_setup_plane(fbi, &p.plane_info); + break; + + case OMAPFB_QUERY_PLANE: + DBG("ioctl QUERY_PLANE\n"); + r = omapfb_query_plane(fbi, &p.plane_info); + if (r < 0) + break; + if (copy_to_user((void __user *)arg, &p.plane_info, + sizeof(p.plane_info))) + r = -EFAULT; + break; + + case OMAPFB_SETUP_MEM: + DBG("ioctl SETUP_MEM\n"); + if (copy_from_user(&p.mem_info, (void __user *)arg, + sizeof(p.mem_info))) + r = -EFAULT; + else + r = omapfb_setup_mem(fbi, &p.mem_info); + break; + + case OMAPFB_QUERY_MEM: + DBG("ioctl QUERY_MEM\n"); + r = omapfb_query_mem(fbi, &p.mem_info); + if (r < 0) + break; + if (copy_to_user((void __user *)arg, &p.mem_info, + sizeof(p.mem_info))) + r = -EFAULT; + break; + + case OMAPFB_GET_CAPS: + DBG("ioctl GET_CAPS\n"); + if (!display) { + r = -EINVAL; + break; + } + + memset(&p.caps, 0, sizeof(p.caps)); + if (display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) + p.caps.ctrl |= OMAPFB_CAPS_MANUAL_UPDATE; + if (display->caps & OMAP_DSS_DISPLAY_CAP_TEAR_ELIM) + p.caps.ctrl |= OMAPFB_CAPS_TEARSYNC; + + if (copy_to_user((void __user *)arg, &p.caps, sizeof(p.caps))) + r = -EFAULT; + break; + + case OMAPFB_GET_OVERLAY_COLORMODE: + DBG("ioctl GET_OVERLAY_COLORMODE\n"); + if (copy_from_user(&p.ovl_colormode, (void __user *)arg, + sizeof(p.ovl_colormode))) { + r = -EFAULT; + break; + } + r = omapfb_get_ovl_colormode(fbdev, &p.ovl_colormode); + if (r < 0) + break; + if (copy_to_user((void __user *)arg, &p.ovl_colormode, + sizeof(p.ovl_colormode))) + r = -EFAULT; + break; + + case OMAPFB_SET_UPDATE_MODE: + DBG("ioctl SET_UPDATE_MODE\n"); + if (get_user(p.update_mode, (int __user *)arg)) + r = -EFAULT; + else + r = omapfb_set_update_mode(fbi, p.update_mode); + break; + + case OMAPFB_GET_UPDATE_MODE: + DBG("ioctl GET_UPDATE_MODE\n"); + r = omapfb_get_update_mode(fbi, &p.update_mode); + if (r) + break; + if (put_user(p.update_mode, + (enum omapfb_update_mode __user *)arg)) + r = -EFAULT; + break; + + case OMAPFB_SET_COLOR_KEY: + DBG("ioctl SET_COLOR_KEY\n"); + if (copy_from_user(&p.color_key, (void __user *)arg, + sizeof(p.color_key))) + r = -EFAULT; + else + r = omapfb_set_color_key(fbi, &p.color_key); + break; + + case OMAPFB_GET_COLOR_KEY: + DBG("ioctl GET_COLOR_KEY\n"); + r = omapfb_get_color_key(fbi, &p.color_key); + if (r) + break; + if (copy_to_user((void __user *)arg, &p.color_key, + sizeof(p.color_key))) + r = -EFAULT; + break; + + case OMAPFB_WAITFORVSYNC: + DBG("ioctl WAITFORVSYNC\n"); + if (!display) { + r = -EINVAL; + break; + } + + r = display->wait_vsync(display); + break; + + case OMAPFB_WAITFORGO: + DBG("ioctl WAITFORGO\n"); + if (!display) { + r = -EINVAL; + break; + } + + r = omapfb_wait_for_go(fbi); + break; + + /* LCD and CTRL tests do the same thing for backward + * compatibility */ + case OMAPFB_LCD_TEST: + DBG("ioctl LCD_TEST\n"); + if (get_user(p.test_num, (int __user *)arg)) { + r = -EFAULT; + break; + } + if (!display || !display->run_test) { + r = -EINVAL; + break; + } + + r = display->run_test(display, p.test_num); + + break; + + case OMAPFB_CTRL_TEST: + DBG("ioctl CTRL_TEST\n"); + if (get_user(p.test_num, (int __user *)arg)) { + r = -EFAULT; + break; + } + if (!display || !display->run_test) { + r = -EINVAL; + break; + } + + r = display->run_test(display, p.test_num); + + break; + + case OMAPFB_MEMORY_READ: + DBG("ioctl MEMORY_READ\n"); + + if 
(copy_from_user(&p.memory_read, (void __user *)arg, + sizeof(p.memory_read))) { + r = -EFAULT; + break; + } + + r = omapfb_memory_read(fbi, &p.memory_read); + + break; + + case OMAPFB_GET_VRAM_INFO: { + unsigned long vram, free, largest; + + DBG("ioctl GET_VRAM_INFO\n"); + + omap_vram_get_info(&vram, &free, &largest); + p.vram_info.total = vram; + p.vram_info.free = free; + p.vram_info.largest_free_block = largest; + + if (copy_to_user((void __user *)arg, &p.vram_info, + sizeof(p.vram_info))) + r = -EFAULT; + break; + } + + case OMAPFB_SET_TEARSYNC: { + DBG("ioctl SET_TEARSYNC\n"); + + if (copy_from_user(&p.tearsync_info, (void __user *)arg, + sizeof(p.tearsync_info))) { + r = -EFAULT; + break; + } + + if (!display->enable_te) { + r = -ENODEV; + break; + } + + r = display->enable_te(display, !!p.tearsync_info.enabled); + + break; + } + + default: + dev_err(fbdev->dev, "Unknown ioctl 0x%x\n", cmd); + r = -EINVAL; + } + + if (r < 0) + DBG("ioctl failed: %d\n", r); + + return r; +} + + diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c new file mode 100644 index 000000000000..ef299839858a --- /dev/null +++ b/drivers/video/omap2/omapfb/omapfb-main.c @@ -0,0 +1,2261 @@ +/* + * linux/drivers/video/omap2/omapfb-main.c + * + * Copyright (C) 2008 Nokia Corporation + * Author: Tomi Valkeinen + * + * Some code and ideas taken from drivers/video/omap/ driver + * by Imre Deak. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
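The ioctl cases above all follow the same pattern: copy the argument struct in with copy_from_user(), act on it, and copy any result back with copy_to_user(). A minimal user-space sketch of one of them, OMAPFB_GET_VRAM_INFO, might look like the following; it assumes the ioctl number and struct omapfb_vram_info are exported through <linux/omapfb.h>, and the /dev/fb0 path is only an illustrative choice:

/* Hypothetical user-space sketch: query VRAM statistics through the
 * OMAPFB_GET_VRAM_INFO ioctl handled above. The device node path and
 * the availability of <linux/omapfb.h> are assumptions. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/omapfb.h>

int main(void)
{
	struct omapfb_vram_info vi;
	int fd = open("/dev/fb0", O_RDWR);

	if (fd < 0)
		return 1;

	if (ioctl(fd, OMAPFB_GET_VRAM_INFO, &vi) == 0)
		printf("total %u, free %u, largest free block %u\n",
		       vi.total, vi.free, vi.largest_free_block);

	close(fd);
	return 0;
}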
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "omapfb.h" + +#define MODULE_NAME "omapfb" + +#define OMAPFB_PLANE_XRES_MIN 8 +#define OMAPFB_PLANE_YRES_MIN 8 + +static char *def_mode; +static char *def_vram; +static int def_vrfb; +static int def_rotate; +static int def_mirror; + +#ifdef DEBUG +unsigned int omapfb_debug; +module_param_named(debug, omapfb_debug, bool, 0644); +static unsigned int omapfb_test_pattern; +module_param_named(test, omapfb_test_pattern, bool, 0644); +#endif + +static int omapfb_fb_init(struct omapfb2_device *fbdev, struct fb_info *fbi); + +#ifdef DEBUG +static void draw_pixel(struct fb_info *fbi, int x, int y, unsigned color) +{ + struct fb_var_screeninfo *var = &fbi->var; + struct fb_fix_screeninfo *fix = &fbi->fix; + void __iomem *addr = fbi->screen_base; + const unsigned bytespp = var->bits_per_pixel >> 3; + const unsigned line_len = fix->line_length / bytespp; + + int r = (color >> 16) & 0xff; + int g = (color >> 8) & 0xff; + int b = (color >> 0) & 0xff; + + if (var->bits_per_pixel == 16) { + u16 __iomem *p = (u16 __iomem *)addr; + p += y * line_len + x; + + r = r * 32 / 256; + g = g * 64 / 256; + b = b * 32 / 256; + + __raw_writew((r << 11) | (g << 5) | (b << 0), p); + } else if (var->bits_per_pixel == 24) { + u8 __iomem *p = (u8 __iomem *)addr; + p += (y * line_len + x) * 3; + + __raw_writeb(b, p + 0); + __raw_writeb(g, p + 1); + __raw_writeb(r, p + 2); + } else if (var->bits_per_pixel == 32) { + u32 __iomem *p = (u32 __iomem *)addr; + p += y * line_len + x; + __raw_writel(color, p); + } +} + +static void fill_fb(struct fb_info *fbi) +{ + struct fb_var_screeninfo *var = &fbi->var; + const short w = var->xres_virtual; + const short h = var->yres_virtual; + void __iomem *addr = fbi->screen_base; + int y, x; + + if (!addr) + return; + + DBG("fill_fb %dx%d, line_len %d bytes\n", w, h, fbi->fix.line_length); + + for (y = 0; y < h; y++) { + for (x = 0; x < w; x++) { + if (x < 20 && y < 20) + draw_pixel(fbi, x, y, 0xffffff); + else if (x < 20 && (y > 20 && y < h - 20)) + draw_pixel(fbi, x, y, 0xff); + else if (y < 20 && (x > 20 && x < w - 20)) + draw_pixel(fbi, x, y, 0xff00); + else if (x > w - 20 && (y > 20 && y < h - 20)) + draw_pixel(fbi, x, y, 0xff0000); + else if (y > h - 20 && (x > 20 && x < w - 20)) + draw_pixel(fbi, x, y, 0xffff00); + else if (x == 20 || x == w - 20 || + y == 20 || y == h - 20) + draw_pixel(fbi, x, y, 0xffffff); + else if (x == y || w - x == h - y) + draw_pixel(fbi, x, y, 0xff00ff); + else if (w - x == y || x == h - y) + draw_pixel(fbi, x, y, 0x00ffff); + else if (x > 20 && y > 20 && x < w - 20 && y < h - 20) { + int t = x * 3 / w; + unsigned r = 0, g = 0, b = 0; + unsigned c; + if (var->bits_per_pixel == 16) { + if (t == 0) + b = (y % 32) * 256 / 32; + else if (t == 1) + g = (y % 64) * 256 / 64; + else if (t == 2) + r = (y % 32) * 256 / 32; + } else { + if (t == 0) + b = (y % 256); + else if (t == 1) + g = (y % 256); + else if (t == 2) + r = (y % 256); + } + c = (r << 16) | (g << 8) | (b << 0); + draw_pixel(fbi, x, y, c); + } else { + draw_pixel(fbi, x, y, 0); + } + } + } +} +#endif + +static unsigned omapfb_get_vrfb_offset(struct omapfb_info *ofbi, int rot) +{ + struct vrfb *vrfb = &ofbi->region.vrfb; + unsigned offset; + + switch (rot) { + case FB_ROTATE_UR: + offset = 0; + break; + case FB_ROTATE_CW: + offset = vrfb->yoffset; + break; + case FB_ROTATE_UD: + offset = vrfb->yoffset * OMAP_VRFB_LINE_LEN + vrfb->xoffset; + break; + case FB_ROTATE_CCW: + offset 
= vrfb->xoffset * OMAP_VRFB_LINE_LEN; + break; + default: + BUG(); + } + + offset *= vrfb->bytespp; + + return offset; +} + +static u32 omapfb_get_region_rot_paddr(struct omapfb_info *ofbi, int rot) +{ + if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) { + return ofbi->region.vrfb.paddr[rot] + + omapfb_get_vrfb_offset(ofbi, rot); + } else { + return ofbi->region.paddr; + } +} + +static u32 omapfb_get_region_paddr(struct omapfb_info *ofbi) +{ + if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) + return ofbi->region.vrfb.paddr[0]; + else + return ofbi->region.paddr; +} + +static void __iomem *omapfb_get_region_vaddr(struct omapfb_info *ofbi) +{ + if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) + return ofbi->region.vrfb.vaddr[0]; + else + return ofbi->region.vaddr; +} + +static struct omapfb_colormode omapfb_colormodes[] = { + { + .dssmode = OMAP_DSS_COLOR_UYVY, + .bits_per_pixel = 16, + .nonstd = OMAPFB_COLOR_YUV422, + }, { + .dssmode = OMAP_DSS_COLOR_YUV2, + .bits_per_pixel = 16, + .nonstd = OMAPFB_COLOR_YUY422, + }, { + .dssmode = OMAP_DSS_COLOR_ARGB16, + .bits_per_pixel = 16, + .red = { .length = 4, .offset = 8, .msb_right = 0 }, + .green = { .length = 4, .offset = 4, .msb_right = 0 }, + .blue = { .length = 4, .offset = 0, .msb_right = 0 }, + .transp = { .length = 4, .offset = 12, .msb_right = 0 }, + }, { + .dssmode = OMAP_DSS_COLOR_RGB16, + .bits_per_pixel = 16, + .red = { .length = 5, .offset = 11, .msb_right = 0 }, + .green = { .length = 6, .offset = 5, .msb_right = 0 }, + .blue = { .length = 5, .offset = 0, .msb_right = 0 }, + .transp = { .length = 0, .offset = 0, .msb_right = 0 }, + }, { + .dssmode = OMAP_DSS_COLOR_RGB24P, + .bits_per_pixel = 24, + .red = { .length = 8, .offset = 16, .msb_right = 0 }, + .green = { .length = 8, .offset = 8, .msb_right = 0 }, + .blue = { .length = 8, .offset = 0, .msb_right = 0 }, + .transp = { .length = 0, .offset = 0, .msb_right = 0 }, + }, { + .dssmode = OMAP_DSS_COLOR_RGB24U, + .bits_per_pixel = 32, + .red = { .length = 8, .offset = 16, .msb_right = 0 }, + .green = { .length = 8, .offset = 8, .msb_right = 0 }, + .blue = { .length = 8, .offset = 0, .msb_right = 0 }, + .transp = { .length = 0, .offset = 0, .msb_right = 0 }, + }, { + .dssmode = OMAP_DSS_COLOR_ARGB32, + .bits_per_pixel = 32, + .red = { .length = 8, .offset = 16, .msb_right = 0 }, + .green = { .length = 8, .offset = 8, .msb_right = 0 }, + .blue = { .length = 8, .offset = 0, .msb_right = 0 }, + .transp = { .length = 8, .offset = 24, .msb_right = 0 }, + }, { + .dssmode = OMAP_DSS_COLOR_RGBA32, + .bits_per_pixel = 32, + .red = { .length = 8, .offset = 24, .msb_right = 0 }, + .green = { .length = 8, .offset = 16, .msb_right = 0 }, + .blue = { .length = 8, .offset = 8, .msb_right = 0 }, + .transp = { .length = 8, .offset = 0, .msb_right = 0 }, + }, { + .dssmode = OMAP_DSS_COLOR_RGBX32, + .bits_per_pixel = 32, + .red = { .length = 8, .offset = 24, .msb_right = 0 }, + .green = { .length = 8, .offset = 16, .msb_right = 0 }, + .blue = { .length = 8, .offset = 8, .msb_right = 0 }, + .transp = { .length = 0, .offset = 0, .msb_right = 0 }, + }, +}; + +static bool cmp_var_to_colormode(struct fb_var_screeninfo *var, + struct omapfb_colormode *color) +{ + bool cmp_component(struct fb_bitfield *f1, struct fb_bitfield *f2) + { + return f1->length == f2->length && + f1->offset == f2->offset && + f1->msb_right == f2->msb_right; + } + + if (var->bits_per_pixel == 0 || + var->red.length == 0 || + var->blue.length == 0 || + var->green.length == 0) + return 0; + + return var->bits_per_pixel == color->bits_per_pixel 
&& + cmp_component(&var->red, &color->red) && + cmp_component(&var->green, &color->green) && + cmp_component(&var->blue, &color->blue) && + cmp_component(&var->transp, &color->transp); +} + +static void assign_colormode_to_var(struct fb_var_screeninfo *var, + struct omapfb_colormode *color) +{ + var->bits_per_pixel = color->bits_per_pixel; + var->nonstd = color->nonstd; + var->red = color->red; + var->green = color->green; + var->blue = color->blue; + var->transp = color->transp; +} + +static int fb_mode_to_dss_mode(struct fb_var_screeninfo *var, + enum omap_color_mode *mode) +{ + enum omap_color_mode dssmode; + int i; + + /* first match with nonstd field */ + if (var->nonstd) { + for (i = 0; i < ARRAY_SIZE(omapfb_colormodes); ++i) { + struct omapfb_colormode *m = &omapfb_colormodes[i]; + if (var->nonstd == m->nonstd) { + assign_colormode_to_var(var, m); + *mode = m->dssmode; + return 0; + } + } + + return -EINVAL; + } + + /* then try exact match of bpp and colors */ + for (i = 0; i < ARRAY_SIZE(omapfb_colormodes); ++i) { + struct omapfb_colormode *m = &omapfb_colormodes[i]; + if (cmp_var_to_colormode(var, m)) { + assign_colormode_to_var(var, m); + *mode = m->dssmode; + return 0; + } + } + + /* match with bpp if user has not filled color fields + * properly */ + switch (var->bits_per_pixel) { + case 1: + dssmode = OMAP_DSS_COLOR_CLUT1; + break; + case 2: + dssmode = OMAP_DSS_COLOR_CLUT2; + break; + case 4: + dssmode = OMAP_DSS_COLOR_CLUT4; + break; + case 8: + dssmode = OMAP_DSS_COLOR_CLUT8; + break; + case 12: + dssmode = OMAP_DSS_COLOR_RGB12U; + break; + case 16: + dssmode = OMAP_DSS_COLOR_RGB16; + break; + case 24: + dssmode = OMAP_DSS_COLOR_RGB24P; + break; + case 32: + dssmode = OMAP_DSS_COLOR_RGB24U; + break; + default: + return -EINVAL; + } + + for (i = 0; i < ARRAY_SIZE(omapfb_colormodes); ++i) { + struct omapfb_colormode *m = &omapfb_colormodes[i]; + if (dssmode == m->dssmode) { + assign_colormode_to_var(var, m); + *mode = m->dssmode; + return 0; + } + } + + return -EINVAL; +} + +static int check_fb_res_bounds(struct fb_var_screeninfo *var) +{ + int xres_min = OMAPFB_PLANE_XRES_MIN; + int xres_max = 2048; + int yres_min = OMAPFB_PLANE_YRES_MIN; + int yres_max = 2048; + + /* XXX: some applications seem to set virtual res to 0. 
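fb_mode_to_dss_mode() above resolves a DSS color mode in three steps: an exact nonstd match, an exact bitfield match against omapfb_colormodes[], and finally a plain bits_per_pixel fallback. As an illustration (not part of the driver), a var layout that cmp_var_to_colormode() would match to the OMAP_DSS_COLOR_RGB16 table entry is:

/* Illustrative only: fill a var so the exact-bitfield comparison in
 * cmp_var_to_colormode() selects the RGB16 entry of omapfb_colormodes[]. */
#include <linux/fb.h>

static void example_fill_rgb565(struct fb_var_screeninfo *var)
{
	var->bits_per_pixel = 16;
	var->nonstd = 0;	/* skip the nonstd match, use the bitfields */
	var->red    = (struct fb_bitfield) { .length = 5, .offset = 11 };
	var->green  = (struct fb_bitfield) { .length = 6, .offset = 5 };
	var->blue   = (struct fb_bitfield) { .length = 5, .offset = 0 };
	var->transp = (struct fb_bitfield) { .length = 0, .offset = 0 };
}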
*/ + if (var->xres_virtual == 0) + var->xres_virtual = var->xres; + + if (var->yres_virtual == 0) + var->yres_virtual = var->yres; + + if (var->xres_virtual < xres_min || var->yres_virtual < yres_min) + return -EINVAL; + + if (var->xres < xres_min) + var->xres = xres_min; + if (var->yres < yres_min) + var->yres = yres_min; + if (var->xres > xres_max) + var->xres = xres_max; + if (var->yres > yres_max) + var->yres = yres_max; + + if (var->xres > var->xres_virtual) + var->xres = var->xres_virtual; + if (var->yres > var->yres_virtual) + var->yres = var->yres_virtual; + + return 0; +} + +static void shrink_height(unsigned long max_frame_size, + struct fb_var_screeninfo *var) +{ + DBG("can't fit FB into memory, reducing y\n"); + var->yres_virtual = max_frame_size / + (var->xres_virtual * var->bits_per_pixel >> 3); + + if (var->yres_virtual < OMAPFB_PLANE_YRES_MIN) + var->yres_virtual = OMAPFB_PLANE_YRES_MIN; + + if (var->yres > var->yres_virtual) + var->yres = var->yres_virtual; +} + +static void shrink_width(unsigned long max_frame_size, + struct fb_var_screeninfo *var) +{ + DBG("can't fit FB into memory, reducing x\n"); + var->xres_virtual = max_frame_size / var->yres_virtual / + (var->bits_per_pixel >> 3); + + if (var->xres_virtual < OMAPFB_PLANE_XRES_MIN) + var->xres_virtual = OMAPFB_PLANE_XRES_MIN; + + if (var->xres > var->xres_virtual) + var->xres = var->xres_virtual; +} + +static int check_vrfb_fb_size(unsigned long region_size, + const struct fb_var_screeninfo *var) +{ + unsigned long min_phys_size = omap_vrfb_min_phys_size(var->xres_virtual, + var->yres_virtual, var->bits_per_pixel >> 3); + + return min_phys_size > region_size ? -EINVAL : 0; +} + +static int check_fb_size(const struct omapfb_info *ofbi, + struct fb_var_screeninfo *var) +{ + unsigned long max_frame_size = ofbi->region.size; + int bytespp = var->bits_per_pixel >> 3; + unsigned long line_size = var->xres_virtual * bytespp; + + if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) { + /* One needs to check for both VRFB and OMAPFB limitations. */ + if (check_vrfb_fb_size(max_frame_size, var)) + shrink_height(omap_vrfb_max_height( + max_frame_size, var->xres_virtual, bytespp) * + line_size, var); + + if (check_vrfb_fb_size(max_frame_size, var)) { + DBG("cannot fit FB to memory\n"); + return -EINVAL; + } + + return 0; + } + + DBG("max frame size %lu, line size %lu\n", max_frame_size, line_size); + + if (line_size * var->yres_virtual > max_frame_size) + shrink_height(max_frame_size, var); + + if (line_size * var->yres_virtual > max_frame_size) { + shrink_width(max_frame_size, var); + line_size = var->xres_virtual * bytespp; + } + + if (line_size * var->yres_virtual > max_frame_size) { + DBG("cannot fit FB to memory\n"); + return -EINVAL; + } + + return 0; +} + +/* + * Consider if VRFB assisted rotation is in use and if the virtual space for + * the zero degree view needs to be mapped. The need for mapping also acts as + * the trigger for setting up the hardware on the context in question. This + * ensures that one does not attempt to access the virtual view before the + * hardware is serving the address translations. 
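The shrink_height()/shrink_width() fallbacks above are plain integer arithmetic on the region size. A worked example with made-up numbers: an 800-pixel-wide, 32 bpp virtual framebuffer has a line size of 800 * 4 = 3200 bytes, so in a 1 MiB region the largest yres_virtual that still fits is 1048576 / 3200 = 327 lines (clamped to OMAPFB_PLANE_YRES_MIN if it ends up smaller).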
+ */ +static int setup_vrfb_rotation(struct fb_info *fbi) +{ + struct omapfb_info *ofbi = FB2OFB(fbi); + struct omapfb2_mem_region *rg = &ofbi->region; + struct vrfb *vrfb = &rg->vrfb; + struct fb_var_screeninfo *var = &fbi->var; + struct fb_fix_screeninfo *fix = &fbi->fix; + unsigned bytespp; + bool yuv_mode; + enum omap_color_mode mode; + int r; + bool reconf; + + if (!rg->size || ofbi->rotation_type != OMAP_DSS_ROT_VRFB) + return 0; + + DBG("setup_vrfb_rotation\n"); + + r = fb_mode_to_dss_mode(var, &mode); + if (r) + return r; + + bytespp = var->bits_per_pixel >> 3; + + yuv_mode = mode == OMAP_DSS_COLOR_YUV2 || mode == OMAP_DSS_COLOR_UYVY; + + /* We need to reconfigure VRFB if the resolution changes, if yuv mode + * is enabled/disabled, or if bytes per pixel changes */ + + /* XXX we shouldn't allow this when framebuffer is mmapped */ + + reconf = false; + + if (yuv_mode != vrfb->yuv_mode) + reconf = true; + else if (bytespp != vrfb->bytespp) + reconf = true; + else if (vrfb->xres != var->xres_virtual || + vrfb->yres != var->yres_virtual) + reconf = true; + + if (vrfb->vaddr[0] && reconf) { + fbi->screen_base = NULL; + fix->smem_start = 0; + fix->smem_len = 0; + iounmap(vrfb->vaddr[0]); + vrfb->vaddr[0] = NULL; + DBG("setup_vrfb_rotation: reset fb\n"); + } + + if (vrfb->vaddr[0]) + return 0; + + omap_vrfb_setup(&rg->vrfb, rg->paddr, + var->xres_virtual, + var->yres_virtual, + bytespp, yuv_mode); + + /* Now one can ioremap the 0 angle view */ + r = omap_vrfb_map_angle(vrfb, var->yres_virtual, 0); + if (r) + return r; + + /* used by open/write in fbmem.c */ + fbi->screen_base = ofbi->region.vrfb.vaddr[0]; + + fix->smem_start = ofbi->region.vrfb.paddr[0]; + + switch (var->nonstd) { + case OMAPFB_COLOR_YUV422: + case OMAPFB_COLOR_YUY422: + fix->line_length = + (OMAP_VRFB_LINE_LEN * var->bits_per_pixel) >> 2; + break; + default: + fix->line_length = + (OMAP_VRFB_LINE_LEN * var->bits_per_pixel) >> 3; + break; + } + + fix->smem_len = var->yres_virtual * fix->line_length; + + return 0; +} + +int dss_mode_to_fb_mode(enum omap_color_mode dssmode, + struct fb_var_screeninfo *var) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(omapfb_colormodes); ++i) { + struct omapfb_colormode *mode = &omapfb_colormodes[i]; + if (dssmode == mode->dssmode) { + assign_colormode_to_var(var, mode); + return 0; + } + } + return -ENOENT; +} + +void set_fb_fix(struct fb_info *fbi) +{ + struct fb_fix_screeninfo *fix = &fbi->fix; + struct fb_var_screeninfo *var = &fbi->var; + struct omapfb_info *ofbi = FB2OFB(fbi); + struct omapfb2_mem_region *rg = &ofbi->region; + + DBG("set_fb_fix\n"); + + /* used by open/write in fbmem.c */ + fbi->screen_base = (char __iomem *)omapfb_get_region_vaddr(ofbi); + + /* used by mmap in fbmem.c */ + if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) { + switch (var->nonstd) { + case OMAPFB_COLOR_YUV422: + case OMAPFB_COLOR_YUY422: + fix->line_length = + (OMAP_VRFB_LINE_LEN * var->bits_per_pixel) >> 2; + break; + default: + fix->line_length = + (OMAP_VRFB_LINE_LEN * var->bits_per_pixel) >> 3; + break; + } + + fix->smem_len = var->yres_virtual * fix->line_length; + } else { + fix->line_length = + (var->xres_virtual * var->bits_per_pixel) >> 3; + fix->smem_len = rg->size; + } + + fix->smem_start = omapfb_get_region_paddr(ofbi); + + fix->type = FB_TYPE_PACKED_PIXELS; + + if (var->nonstd) + fix->visual = FB_VISUAL_PSEUDOCOLOR; + else { + switch (var->bits_per_pixel) { + case 32: + case 24: + case 16: + case 12: + fix->visual = FB_VISUAL_TRUECOLOR; + /* 12bpp is stored in 16 bits */ + break; + case 1: + case 
2: + case 4: + case 8: + fix->visual = FB_VISUAL_PSEUDOCOLOR; + break; + } + } + + fix->accel = FB_ACCEL_NONE; + + fix->xpanstep = 1; + fix->ypanstep = 1; +} + +/* check new var and possibly modify it to be ok */ +int check_fb_var(struct fb_info *fbi, struct fb_var_screeninfo *var) +{ + struct omapfb_info *ofbi = FB2OFB(fbi); + struct omap_dss_device *display = fb2display(fbi); + enum omap_color_mode mode = 0; + int i; + int r; + + DBG("check_fb_var %d\n", ofbi->id); + + if (ofbi->region.size == 0) + return 0; + + r = fb_mode_to_dss_mode(var, &mode); + if (r) { + DBG("cannot convert var to omap dss mode\n"); + return r; + } + + for (i = 0; i < ofbi->num_overlays; ++i) { + if ((ofbi->overlays[i]->supported_modes & mode) == 0) { + DBG("invalid mode\n"); + return -EINVAL; + } + } + + if (var->rotate < 0 || var->rotate > 3) + return -EINVAL; + + if (check_fb_res_bounds(var)) + return -EINVAL; + + if (check_fb_size(ofbi, var)) + return -EINVAL; + + if (var->xres + var->xoffset > var->xres_virtual) + var->xoffset = var->xres_virtual - var->xres; + if (var->yres + var->yoffset > var->yres_virtual) + var->yoffset = var->yres_virtual - var->yres; + + DBG("xres = %d, yres = %d, vxres = %d, vyres = %d\n", + var->xres, var->yres, + var->xres_virtual, var->yres_virtual); + + var->height = -1; + var->width = -1; + var->grayscale = 0; + + if (display && display->get_timings) { + struct omap_video_timings timings; + display->get_timings(display, &timings); + + /* pixclock in ps, the rest in pixclock */ + var->pixclock = timings.pixel_clock != 0 ? + KHZ2PICOS(timings.pixel_clock) : + 0; + var->left_margin = timings.hfp; + var->right_margin = timings.hbp; + var->upper_margin = timings.vfp; + var->lower_margin = timings.vbp; + var->hsync_len = timings.hsw; + var->vsync_len = timings.vsw; + } else { + var->pixclock = 0; + var->left_margin = 0; + var->right_margin = 0; + var->upper_margin = 0; + var->lower_margin = 0; + var->hsync_len = 0; + var->vsync_len = 0; + } + + /* TODO: get these from panel->config */ + var->vmode = FB_VMODE_NONINTERLACED; + var->sync = 0; + + return 0; +} + +/* + * --------------------------------------------------------------------------- + * fbdev framework callbacks + * --------------------------------------------------------------------------- + */ +static int omapfb_open(struct fb_info *fbi, int user) +{ + return 0; +} + +static int omapfb_release(struct fb_info *fbi, int user) +{ +#if 0 + struct omapfb_info *ofbi = FB2OFB(fbi); + struct omapfb2_device *fbdev = ofbi->fbdev; + struct omap_dss_device *display = fb2display(fbi); + + DBG("Closing fb with plane index %d\n", ofbi->id); + + omapfb_lock(fbdev); + + if (display && display->get_update_mode && display->update) { + /* XXX this update should be removed, I think. 
But it's + * good for debugging */ + if (display->get_update_mode(display) == + OMAP_DSS_UPDATE_MANUAL) { + u16 w, h; + + if (display->sync) + display->sync(display); + + display->get_resolution(display, &w, &h); + display->update(display, 0, 0, w, h); + } + } + + if (display && display->sync) + display->sync(display); + + omapfb_unlock(fbdev); +#endif + return 0; +} + +static unsigned calc_rotation_offset_dma(struct fb_var_screeninfo *var, + struct fb_fix_screeninfo *fix, int rotation) +{ + unsigned offset; + + offset = var->yoffset * fix->line_length + + var->xoffset * (var->bits_per_pixel >> 3); + + return offset; +} + +static unsigned calc_rotation_offset_vrfb(struct fb_var_screeninfo *var, + struct fb_fix_screeninfo *fix, int rotation) +{ + unsigned offset; + + if (rotation == FB_ROTATE_UD) + offset = (var->yres_virtual - var->yres) * + fix->line_length; + else if (rotation == FB_ROTATE_CW) + offset = (var->yres_virtual - var->yres) * + (var->bits_per_pixel >> 3); + else + offset = 0; + + if (rotation == FB_ROTATE_UR) + offset += var->yoffset * fix->line_length + + var->xoffset * (var->bits_per_pixel >> 3); + else if (rotation == FB_ROTATE_UD) + offset -= var->yoffset * fix->line_length + + var->xoffset * (var->bits_per_pixel >> 3); + else if (rotation == FB_ROTATE_CW) + offset -= var->xoffset * fix->line_length + + var->yoffset * (var->bits_per_pixel >> 3); + else if (rotation == FB_ROTATE_CCW) + offset += var->xoffset * fix->line_length + + var->yoffset * (var->bits_per_pixel >> 3); + + return offset; +} + + +/* setup overlay according to the fb */ +static int omapfb_setup_overlay(struct fb_info *fbi, struct omap_overlay *ovl, + u16 posx, u16 posy, u16 outw, u16 outh) +{ + int r = 0; + struct omapfb_info *ofbi = FB2OFB(fbi); + struct fb_var_screeninfo *var = &fbi->var; + struct fb_fix_screeninfo *fix = &fbi->fix; + enum omap_color_mode mode = 0; + int offset; + u32 data_start_p; + void __iomem *data_start_v; + struct omap_overlay_info info; + int xres, yres; + int screen_width; + int mirror; + int rotation = var->rotate; + int i; + + for (i = 0; i < ofbi->num_overlays; i++) { + if (ovl != ofbi->overlays[i]) + continue; + + rotation = (rotation + ofbi->rotation[i]) % 4; + break; + } + + DBG("setup_overlay %d, posx %d, posy %d, outw %d, outh %d\n", ofbi->id, + posx, posy, outw, outh); + + if (rotation == FB_ROTATE_CW || rotation == FB_ROTATE_CCW) { + xres = var->yres; + yres = var->xres; + } else { + xres = var->xres; + yres = var->yres; + } + + + if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) { + data_start_p = omapfb_get_region_rot_paddr(ofbi, rotation); + data_start_v = NULL; + } else { + data_start_p = omapfb_get_region_paddr(ofbi); + data_start_v = omapfb_get_region_vaddr(ofbi); + } + + if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) + offset = calc_rotation_offset_vrfb(var, fix, rotation); + else + offset = calc_rotation_offset_dma(var, fix, rotation); + + data_start_p += offset; + data_start_v += offset; + + if (offset) + DBG("offset %d, %d = %d\n", + var->xoffset, var->yoffset, offset); + + DBG("paddr %x, vaddr %p\n", data_start_p, data_start_v); + + r = fb_mode_to_dss_mode(var, &mode); + if (r) { + DBG("fb_mode_to_dss_mode failed"); + goto err; + } + + switch (var->nonstd) { + case OMAPFB_COLOR_YUV422: + case OMAPFB_COLOR_YUY422: + if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) { + screen_width = fix->line_length + / (var->bits_per_pixel >> 2); + break; + } + default: + screen_width = fix->line_length / (var->bits_per_pixel >> 3); + break; + } + + ovl->get_overlay_info(ovl, 
&info); + + if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) + mirror = 0; + else + mirror = ofbi->mirror; + + info.paddr = data_start_p; + info.vaddr = data_start_v; + info.screen_width = screen_width; + info.width = xres; + info.height = yres; + info.color_mode = mode; + info.rotation_type = ofbi->rotation_type; + info.rotation = rotation; + info.mirror = mirror; + + info.pos_x = posx; + info.pos_y = posy; + info.out_width = outw; + info.out_height = outh; + + r = ovl->set_overlay_info(ovl, &info); + if (r) { + DBG("ovl->setup_overlay_info failed\n"); + goto err; + } + + return 0; + +err: + DBG("setup_overlay failed\n"); + return r; +} + +/* apply var to the overlay */ +int omapfb_apply_changes(struct fb_info *fbi, int init) +{ + int r = 0; + struct omapfb_info *ofbi = FB2OFB(fbi); + struct fb_var_screeninfo *var = &fbi->var; + struct omap_overlay *ovl; + u16 posx, posy; + u16 outw, outh; + int i; + +#ifdef DEBUG + if (omapfb_test_pattern) + fill_fb(fbi); +#endif + + for (i = 0; i < ofbi->num_overlays; i++) { + ovl = ofbi->overlays[i]; + + DBG("apply_changes, fb %d, ovl %d\n", ofbi->id, ovl->id); + + if (ofbi->region.size == 0) { + /* the fb is not available. disable the overlay */ + omapfb_overlay_enable(ovl, 0); + if (!init && ovl->manager) + ovl->manager->apply(ovl->manager); + continue; + } + + if (init || (ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) { + int rotation = (var->rotate + ofbi->rotation[i]) % 4; + if (rotation == FB_ROTATE_CW || + rotation == FB_ROTATE_CCW) { + outw = var->yres; + outh = var->xres; + } else { + outw = var->xres; + outh = var->yres; + } + } else { + outw = ovl->info.out_width; + outh = ovl->info.out_height; + } + + if (init) { + posx = 0; + posy = 0; + } else { + posx = ovl->info.pos_x; + posy = ovl->info.pos_y; + } + + r = omapfb_setup_overlay(fbi, ovl, posx, posy, outw, outh); + if (r) + goto err; + + if (!init && ovl->manager) + ovl->manager->apply(ovl->manager); + } + return 0; +err: + DBG("apply_changes failed\n"); + return r; +} + +/* checks var and eventually tweaks it to something supported, + * DO NOT MODIFY PAR */ +static int omapfb_check_var(struct fb_var_screeninfo *var, struct fb_info *fbi) +{ + int r; + + DBG("check_var(%d)\n", FB2OFB(fbi)->id); + + r = check_fb_var(fbi, var); + + return r; +} + +/* set the video mode according to info->var */ +static int omapfb_set_par(struct fb_info *fbi) +{ + int r; + + DBG("set_par(%d)\n", FB2OFB(fbi)->id); + + set_fb_fix(fbi); + + r = setup_vrfb_rotation(fbi); + if (r) + return r; + + r = omapfb_apply_changes(fbi, 0); + + return r; +} + +static int omapfb_pan_display(struct fb_var_screeninfo *var, + struct fb_info *fbi) +{ + struct fb_var_screeninfo new_var; + int r; + + DBG("pan_display(%d)\n", FB2OFB(fbi)->id); + + if (var->xoffset == fbi->var.xoffset && + var->yoffset == fbi->var.yoffset) + return 0; + + new_var = fbi->var; + new_var.xoffset = var->xoffset; + new_var.yoffset = var->yoffset; + + fbi->var = new_var; + + r = omapfb_apply_changes(fbi, 0); + + return r; +} + +static void mmap_user_open(struct vm_area_struct *vma) +{ + struct omapfb_info *ofbi = (struct omapfb_info *)vma->vm_private_data; + + atomic_inc(&ofbi->map_count); +} + +static void mmap_user_close(struct vm_area_struct *vma) +{ + struct omapfb_info *ofbi = (struct omapfb_info *)vma->vm_private_data; + + atomic_dec(&ofbi->map_count); +} + +static struct vm_operations_struct mmap_user_ops = { + .open = mmap_user_open, + .close = mmap_user_close, +}; + +static int omapfb_mmap(struct fb_info *fbi, struct vm_area_struct *vma) +{ + struct 
omapfb_info *ofbi = FB2OFB(fbi); + struct fb_fix_screeninfo *fix = &fbi->fix; + unsigned long off; + unsigned long start; + u32 len; + + if (vma->vm_end - vma->vm_start == 0) + return 0; + if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) + return -EINVAL; + off = vma->vm_pgoff << PAGE_SHIFT; + + start = omapfb_get_region_paddr(ofbi); + len = fix->smem_len; + if (off >= len) + return -EINVAL; + if ((vma->vm_end - vma->vm_start + off) > len) + return -EINVAL; + + off += start; + + DBG("user mmap region start %lx, len %d, off %lx\n", start, len, off); + + vma->vm_pgoff = off >> PAGE_SHIFT; + vma->vm_flags |= VM_IO | VM_RESERVED; + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + vma->vm_ops = &mmap_user_ops; + vma->vm_private_data = ofbi; + if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, + vma->vm_end - vma->vm_start, vma->vm_page_prot)) + return -EAGAIN; + /* vm_ops.open won't be called for mmap itself. */ + atomic_inc(&ofbi->map_count); + return 0; +} + +/* Store a single color palette entry into a pseudo palette or the hardware + * palette if one is available. For now we support only 16bpp and thus store + * the entry only to the pseudo palette. + */ +static int _setcolreg(struct fb_info *fbi, u_int regno, u_int red, u_int green, + u_int blue, u_int transp, int update_hw_pal) +{ + /*struct omapfb_info *ofbi = FB2OFB(fbi);*/ + /*struct omapfb2_device *fbdev = ofbi->fbdev;*/ + struct fb_var_screeninfo *var = &fbi->var; + int r = 0; + + enum omapfb_color_format mode = OMAPFB_COLOR_RGB24U; /* XXX */ + + /*switch (plane->color_mode) {*/ + switch (mode) { + case OMAPFB_COLOR_YUV422: + case OMAPFB_COLOR_YUV420: + case OMAPFB_COLOR_YUY422: + r = -EINVAL; + break; + case OMAPFB_COLOR_CLUT_8BPP: + case OMAPFB_COLOR_CLUT_4BPP: + case OMAPFB_COLOR_CLUT_2BPP: + case OMAPFB_COLOR_CLUT_1BPP: + /* + if (fbdev->ctrl->setcolreg) + r = fbdev->ctrl->setcolreg(regno, red, green, blue, + transp, update_hw_pal); + */ + /* Fallthrough */ + r = -EINVAL; + break; + case OMAPFB_COLOR_RGB565: + case OMAPFB_COLOR_RGB444: + case OMAPFB_COLOR_RGB24P: + case OMAPFB_COLOR_RGB24U: + if (r != 0) + break; + + if (regno < 0) { + r = -EINVAL; + break; + } + + if (regno < 16) { + u16 pal; + pal = ((red >> (16 - var->red.length)) << + var->red.offset) | + ((green >> (16 - var->green.length)) << + var->green.offset) | + (blue >> (16 - var->blue.length)); + ((u32 *)(fbi->pseudo_palette))[regno] = pal; + } + break; + default: + BUG(); + } + return r; +} + +static int omapfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, + u_int transp, struct fb_info *info) +{ + DBG("setcolreg\n"); + + return _setcolreg(info, regno, red, green, blue, transp, 1); +} + +static int omapfb_setcmap(struct fb_cmap *cmap, struct fb_info *info) +{ + int count, index, r; + u16 *red, *green, *blue, *transp; + u16 trans = 0xffff; + + DBG("setcmap\n"); + + red = cmap->red; + green = cmap->green; + blue = cmap->blue; + transp = cmap->transp; + index = cmap->start; + + for (count = 0; count < cmap->len; count++) { + if (transp) + trans = *transp++; + r = _setcolreg(info, index++, *red++, *green++, *blue++, trans, + count == cmap->len - 1); + if (r != 0) + return r; + } + + return 0; +} + +static int omapfb_blank(int blank, struct fb_info *fbi) +{ + struct omapfb_info *ofbi = FB2OFB(fbi); + struct omapfb2_device *fbdev = ofbi->fbdev; + struct omap_dss_device *display = fb2display(fbi); + int do_update = 0; + int r = 0; + + omapfb_lock(fbdev); + + switch (blank) { + case FB_BLANK_UNBLANK: + if (display->state != 
OMAP_DSS_DISPLAY_SUSPENDED) + goto exit; + + if (display->resume) + r = display->resume(display); + + if (r == 0 && display->get_update_mode && + display->get_update_mode(display) == + OMAP_DSS_UPDATE_MANUAL) + do_update = 1; + + break; + + case FB_BLANK_NORMAL: + /* FB_BLANK_NORMAL could be implemented. + * Needs DSS additions. */ + case FB_BLANK_VSYNC_SUSPEND: + case FB_BLANK_HSYNC_SUSPEND: + case FB_BLANK_POWERDOWN: + if (display->state != OMAP_DSS_DISPLAY_ACTIVE) + goto exit; + + if (display->suspend) + r = display->suspend(display); + + break; + + default: + r = -EINVAL; + } + +exit: + omapfb_unlock(fbdev); + + if (r == 0 && do_update && display->update) { + u16 w, h; + display->get_resolution(display, &w, &h); + + r = display->update(display, 0, 0, w, h); + } + + return r; +} + +#if 0 +/* XXX fb_read and fb_write are needed for VRFB */ +ssize_t omapfb_write(struct fb_info *info, const char __user *buf, + size_t count, loff_t *ppos) +{ + DBG("omapfb_write %d, %lu\n", count, (unsigned long)*ppos); + /* XXX needed for VRFB */ + return count; +} +#endif + +static struct fb_ops omapfb_ops = { + .owner = THIS_MODULE, + .fb_open = omapfb_open, + .fb_release = omapfb_release, + .fb_fillrect = cfb_fillrect, + .fb_copyarea = cfb_copyarea, + .fb_imageblit = cfb_imageblit, + .fb_blank = omapfb_blank, + .fb_ioctl = omapfb_ioctl, + .fb_check_var = omapfb_check_var, + .fb_set_par = omapfb_set_par, + .fb_pan_display = omapfb_pan_display, + .fb_mmap = omapfb_mmap, + .fb_setcolreg = omapfb_setcolreg, + .fb_setcmap = omapfb_setcmap, + /*.fb_write = omapfb_write,*/ +}; + +static void omapfb_free_fbmem(struct fb_info *fbi) +{ + struct omapfb_info *ofbi = FB2OFB(fbi); + struct omapfb2_device *fbdev = ofbi->fbdev; + struct omapfb2_mem_region *rg; + + rg = &ofbi->region; + + if (rg->paddr) + if (omap_vram_free(rg->paddr, rg->size)) + dev_err(fbdev->dev, "VRAM FREE failed\n"); + + if (rg->vaddr) + iounmap(rg->vaddr); + + if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) { + /* unmap the 0 angle rotation */ + if (rg->vrfb.vaddr[0]) { + iounmap(rg->vrfb.vaddr[0]); + omap_vrfb_release_ctx(&rg->vrfb); + } + } + + rg->vaddr = NULL; + rg->paddr = 0; + rg->alloc = 0; + rg->size = 0; +} + +static void clear_fb_info(struct fb_info *fbi) +{ + memset(&fbi->var, 0, sizeof(fbi->var)); + memset(&fbi->fix, 0, sizeof(fbi->fix)); + strlcpy(fbi->fix.id, MODULE_NAME, sizeof(fbi->fix.id)); +} + +static int omapfb_free_all_fbmem(struct omapfb2_device *fbdev) +{ + int i; + + DBG("free all fbmem\n"); + + for (i = 0; i < fbdev->num_fbs; i++) { + struct fb_info *fbi = fbdev->fbs[i]; + omapfb_free_fbmem(fbi); + clear_fb_info(fbi); + } + + return 0; +} + +static int omapfb_alloc_fbmem(struct fb_info *fbi, unsigned long size, + unsigned long paddr) +{ + struct omapfb_info *ofbi = FB2OFB(fbi); + struct omapfb2_device *fbdev = ofbi->fbdev; + struct omapfb2_mem_region *rg; + void __iomem *vaddr; + int r; + + rg = &ofbi->region; + memset(rg, 0, sizeof(*rg)); + + size = PAGE_ALIGN(size); + + if (!paddr) { + DBG("allocating %lu bytes for fb %d\n", size, ofbi->id); + r = omap_vram_alloc(OMAP_VRAM_MEMTYPE_SDRAM, size, &paddr); + } else { + DBG("reserving %lu bytes at %lx for fb %d\n", size, paddr, + ofbi->id); + r = omap_vram_reserve(paddr, size); + } + + if (r) { + dev_err(fbdev->dev, "failed to allocate framebuffer\n"); + return -ENOMEM; + } + + if (ofbi->rotation_type != OMAP_DSS_ROT_VRFB) { + vaddr = ioremap_wc(paddr, size); + + if (!vaddr) { + dev_err(fbdev->dev, "failed to ioremap framebuffer\n"); + omap_vram_free(paddr, size); + return 
-ENOMEM; + } + + DBG("allocated VRAM paddr %lx, vaddr %p\n", paddr, vaddr); + } else { + r = omap_vrfb_request_ctx(&rg->vrfb); + if (r) { + dev_err(fbdev->dev, "vrfb create ctx failed\n"); + return r; + } + + vaddr = NULL; + } + + rg->paddr = paddr; + rg->vaddr = vaddr; + rg->size = size; + rg->alloc = 1; + + return 0; +} + +/* allocate fbmem using display resolution as reference */ +static int omapfb_alloc_fbmem_display(struct fb_info *fbi, unsigned long size, + unsigned long paddr) +{ + struct omapfb_info *ofbi = FB2OFB(fbi); + struct omap_dss_device *display; + int bytespp; + + display = fb2display(fbi); + + if (!display) + return 0; + + switch (display->get_recommended_bpp(display)) { + case 16: + bytespp = 2; + break; + case 24: + bytespp = 4; + break; + default: + bytespp = 4; + break; + } + + if (!size) { + u16 w, h; + + display->get_resolution(display, &w, &h); + + if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) { + size = max(omap_vrfb_min_phys_size(w, h, bytespp), + omap_vrfb_min_phys_size(h, w, bytespp)); + + DBG("adjusting fb mem size for VRFB, %u -> %lu\n", + w * h * bytespp, size); + } else { + size = w * h * bytespp; + } + } + + if (!size) + return 0; + + return omapfb_alloc_fbmem(fbi, size, paddr); +} + +static enum omap_color_mode fb_format_to_dss_mode(enum omapfb_color_format fmt) +{ + enum omap_color_mode mode; + + switch (fmt) { + case OMAPFB_COLOR_RGB565: + mode = OMAP_DSS_COLOR_RGB16; + break; + case OMAPFB_COLOR_YUV422: + mode = OMAP_DSS_COLOR_YUV2; + break; + case OMAPFB_COLOR_CLUT_8BPP: + mode = OMAP_DSS_COLOR_CLUT8; + break; + case OMAPFB_COLOR_CLUT_4BPP: + mode = OMAP_DSS_COLOR_CLUT4; + break; + case OMAPFB_COLOR_CLUT_2BPP: + mode = OMAP_DSS_COLOR_CLUT2; + break; + case OMAPFB_COLOR_CLUT_1BPP: + mode = OMAP_DSS_COLOR_CLUT1; + break; + case OMAPFB_COLOR_RGB444: + mode = OMAP_DSS_COLOR_RGB12U; + break; + case OMAPFB_COLOR_YUY422: + mode = OMAP_DSS_COLOR_UYVY; + break; + case OMAPFB_COLOR_ARGB16: + mode = OMAP_DSS_COLOR_ARGB16; + break; + case OMAPFB_COLOR_RGB24U: + mode = OMAP_DSS_COLOR_RGB24U; + break; + case OMAPFB_COLOR_RGB24P: + mode = OMAP_DSS_COLOR_RGB24P; + break; + case OMAPFB_COLOR_ARGB32: + mode = OMAP_DSS_COLOR_ARGB32; + break; + case OMAPFB_COLOR_RGBA32: + mode = OMAP_DSS_COLOR_RGBA32; + break; + case OMAPFB_COLOR_RGBX32: + mode = OMAP_DSS_COLOR_RGBX32; + break; + default: + mode = -EINVAL; + } + + return mode; +} + +static int omapfb_parse_vram_param(const char *param, int max_entries, + unsigned long *sizes, unsigned long *paddrs) +{ + int fbnum; + unsigned long size; + unsigned long paddr = 0; + char *p, *start; + + start = (char *)param; + + while (1) { + p = start; + + fbnum = simple_strtoul(p, &p, 10); + + if (p == param) + return -EINVAL; + + if (*p != ':') + return -EINVAL; + + if (fbnum >= max_entries) + return -EINVAL; + + size = memparse(p + 1, &p); + + if (!size) + return -EINVAL; + + paddr = 0; + + if (*p == '@') { + paddr = simple_strtoul(p + 1, &p, 16); + + if (!paddr) + return -EINVAL; + + } + + paddrs[fbnum] = paddr; + sizes[fbnum] = size; + + if (*p == 0) + break; + + if (*p != ',') + return -EINVAL; + + ++p; + + start = p; + } + + return 0; +} + +static int omapfb_allocate_all_fbs(struct omapfb2_device *fbdev) +{ + int i, r; + unsigned long vram_sizes[10]; + unsigned long vram_paddrs[10]; + + memset(&vram_sizes, 0, sizeof(vram_sizes)); + memset(&vram_paddrs, 0, sizeof(vram_paddrs)); + + if (def_vram && omapfb_parse_vram_param(def_vram, 10, + vram_sizes, vram_paddrs)) { + dev_err(fbdev->dev, "failed to parse vram parameter\n"); + + 
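For reference, omapfb_parse_vram_param() above accepts a comma-separated list of fbnum:size entries, each optionally followed by @paddr for a fixed physical address; the size goes through memparse(), so suffixes such as K and M work. A sketch of the syntax with made-up values, given either on the kernel command line (omapfb.vram=...) or as a module option (vram=...):

/*
 * Illustrative vram= usage (sizes and the physical address are
 * made-up example values):
 *
 *     omapfb.vram=0:4M,1:2M@0x87000000
 *
 * fb0 gets 4 MiB allocated anywhere, fb1 gets 2 MiB reserved at the
 * given physical address.
 */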
memset(&vram_sizes, 0, sizeof(vram_sizes)); + memset(&vram_paddrs, 0, sizeof(vram_paddrs)); + } + + if (fbdev->dev->platform_data) { + struct omapfb_platform_data *opd; + opd = fbdev->dev->platform_data; + for (i = 0; i < opd->mem_desc.region_cnt; ++i) { + if (!vram_sizes[i]) { + unsigned long size; + unsigned long paddr; + + size = opd->mem_desc.region[i].size; + paddr = opd->mem_desc.region[i].paddr; + + vram_sizes[i] = size; + vram_paddrs[i] = paddr; + } + } + } + + for (i = 0; i < fbdev->num_fbs; i++) { + /* allocate memory automatically only for fb0, or if + * excplicitly defined with vram or plat data option */ + if (i == 0 || vram_sizes[i] != 0) { + r = omapfb_alloc_fbmem_display(fbdev->fbs[i], + vram_sizes[i], vram_paddrs[i]); + + if (r) + return r; + } + } + + for (i = 0; i < fbdev->num_fbs; i++) { + struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[i]); + struct omapfb2_mem_region *rg; + rg = &ofbi->region; + + DBG("region%d phys %08x virt %p size=%lu\n", + i, + rg->paddr, + rg->vaddr, + rg->size); + } + + return 0; +} + +int omapfb_realloc_fbmem(struct fb_info *fbi, unsigned long size, int type) +{ + struct omapfb_info *ofbi = FB2OFB(fbi); + struct omapfb2_device *fbdev = ofbi->fbdev; + struct omap_dss_device *display = fb2display(fbi); + struct omapfb2_mem_region *rg = &ofbi->region; + unsigned long old_size = rg->size; + unsigned long old_paddr = rg->paddr; + int old_type = rg->type; + int r; + + if (type > OMAPFB_MEMTYPE_MAX) + return -EINVAL; + + size = PAGE_ALIGN(size); + + if (old_size == size && old_type == type) + return 0; + + if (display && display->sync) + display->sync(display); + + omapfb_free_fbmem(fbi); + + if (size == 0) { + clear_fb_info(fbi); + return 0; + } + + r = omapfb_alloc_fbmem(fbi, size, 0); + + if (r) { + if (old_size) + omapfb_alloc_fbmem(fbi, old_size, old_paddr); + + if (rg->size == 0) + clear_fb_info(fbi); + + return r; + } + + if (old_size == size) + return 0; + + if (old_size == 0) { + DBG("initializing fb %d\n", ofbi->id); + r = omapfb_fb_init(fbdev, fbi); + if (r) { + DBG("omapfb_fb_init failed\n"); + goto err; + } + r = omapfb_apply_changes(fbi, 1); + if (r) { + DBG("omapfb_apply_changes failed\n"); + goto err; + } + } else { + struct fb_var_screeninfo new_var; + memcpy(&new_var, &fbi->var, sizeof(new_var)); + r = check_fb_var(fbi, &new_var); + if (r) + goto err; + memcpy(&fbi->var, &new_var, sizeof(fbi->var)); + set_fb_fix(fbi); + r = setup_vrfb_rotation(fbi); + if (r) + goto err; + } + + return 0; +err: + omapfb_free_fbmem(fbi); + clear_fb_info(fbi); + return r; +} + +/* initialize fb_info, var, fix to something sane based on the display */ +static int omapfb_fb_init(struct omapfb2_device *fbdev, struct fb_info *fbi) +{ + struct fb_var_screeninfo *var = &fbi->var; + struct omap_dss_device *display = fb2display(fbi); + struct omapfb_info *ofbi = FB2OFB(fbi); + int r = 0; + + fbi->fbops = &omapfb_ops; + fbi->flags = FBINFO_FLAG_DEFAULT; + fbi->pseudo_palette = fbdev->pseudo_palette; + + if (ofbi->region.size == 0) { + clear_fb_info(fbi); + return 0; + } + + var->nonstd = 0; + var->bits_per_pixel = 0; + + var->rotate = def_rotate; + + /* + * Check if there is a default color format set in the board file, + * and use this format instead the default deducted from the + * display bpp. 
+ */ + if (fbdev->dev->platform_data) { + struct omapfb_platform_data *opd; + int id = ofbi->id; + + opd = fbdev->dev->platform_data; + if (opd->mem_desc.region[id].format_used) { + enum omap_color_mode mode; + enum omapfb_color_format format; + + format = opd->mem_desc.region[id].format; + mode = fb_format_to_dss_mode(format); + if (mode < 0) { + r = mode; + goto err; + } + r = dss_mode_to_fb_mode(mode, var); + if (r < 0) + goto err; + } + } + + if (display) { + u16 w, h; + int rotation = (var->rotate + ofbi->rotation[0]) % 4; + + display->get_resolution(display, &w, &h); + + if (rotation == FB_ROTATE_CW || + rotation == FB_ROTATE_CCW) { + var->xres = h; + var->yres = w; + } else { + var->xres = w; + var->yres = h; + } + + var->xres_virtual = var->xres; + var->yres_virtual = var->yres; + + if (!var->bits_per_pixel) { + switch (display->get_recommended_bpp(display)) { + case 16: + var->bits_per_pixel = 16; + break; + case 24: + var->bits_per_pixel = 32; + break; + default: + dev_err(fbdev->dev, "illegal display " + "bpp\n"); + return -EINVAL; + } + } + } else { + /* if there's no display, let's just guess some basic values */ + var->xres = 320; + var->yres = 240; + var->xres_virtual = var->xres; + var->yres_virtual = var->yres; + if (!var->bits_per_pixel) + var->bits_per_pixel = 16; + } + + r = check_fb_var(fbi, var); + if (r) + goto err; + + set_fb_fix(fbi); + r = setup_vrfb_rotation(fbi); + if (r) + goto err; + + r = fb_alloc_cmap(&fbi->cmap, 256, 0); + if (r) + dev_err(fbdev->dev, "unable to allocate color map memory\n"); + +err: + return r; +} + +static void fbinfo_cleanup(struct omapfb2_device *fbdev, struct fb_info *fbi) +{ + fb_dealloc_cmap(&fbi->cmap); +} + + +static void omapfb_free_resources(struct omapfb2_device *fbdev) +{ + int i; + + DBG("free_resources\n"); + + if (fbdev == NULL) + return; + + for (i = 0; i < fbdev->num_fbs; i++) + unregister_framebuffer(fbdev->fbs[i]); + + /* free the reserved fbmem */ + omapfb_free_all_fbmem(fbdev); + + for (i = 0; i < fbdev->num_fbs; i++) { + fbinfo_cleanup(fbdev, fbdev->fbs[i]); + framebuffer_release(fbdev->fbs[i]); + } + + for (i = 0; i < fbdev->num_displays; i++) { + if (fbdev->displays[i]->state != OMAP_DSS_DISPLAY_DISABLED) + fbdev->displays[i]->disable(fbdev->displays[i]); + + omap_dss_put_device(fbdev->displays[i]); + } + + dev_set_drvdata(fbdev->dev, NULL); + kfree(fbdev); +} + +static int omapfb_create_framebuffers(struct omapfb2_device *fbdev) +{ + int r, i; + + fbdev->num_fbs = 0; + + DBG("create %d framebuffers\n", CONFIG_FB_OMAP2_NUM_FBS); + + /* allocate fb_infos */ + for (i = 0; i < CONFIG_FB_OMAP2_NUM_FBS; i++) { + struct fb_info *fbi; + struct omapfb_info *ofbi; + + fbi = framebuffer_alloc(sizeof(struct omapfb_info), + fbdev->dev); + + if (fbi == NULL) { + dev_err(fbdev->dev, + "unable to allocate memory for plane info\n"); + return -ENOMEM; + } + + clear_fb_info(fbi); + + fbdev->fbs[i] = fbi; + + ofbi = FB2OFB(fbi); + ofbi->fbdev = fbdev; + ofbi->id = i; + + /* assign these early, so that fb alloc can use them */ + ofbi->rotation_type = def_vrfb ? 
OMAP_DSS_ROT_VRFB : + OMAP_DSS_ROT_DMA; + ofbi->mirror = def_mirror; + + fbdev->num_fbs++; + } + + DBG("fb_infos allocated\n"); + + /* assign overlays for the fbs */ + for (i = 0; i < min(fbdev->num_fbs, fbdev->num_overlays); i++) { + struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[i]); + + ofbi->overlays[0] = fbdev->overlays[i]; + ofbi->num_overlays = 1; + } + + /* allocate fb memories */ + r = omapfb_allocate_all_fbs(fbdev); + if (r) { + dev_err(fbdev->dev, "failed to allocate fbmem\n"); + return r; + } + + DBG("fbmems allocated\n"); + + /* setup fb_infos */ + for (i = 0; i < fbdev->num_fbs; i++) { + r = omapfb_fb_init(fbdev, fbdev->fbs[i]); + if (r) { + dev_err(fbdev->dev, "failed to setup fb_info\n"); + return r; + } + } + + DBG("fb_infos initialized\n"); + + for (i = 0; i < fbdev->num_fbs; i++) { + r = register_framebuffer(fbdev->fbs[i]); + if (r != 0) { + dev_err(fbdev->dev, + "registering framebuffer %d failed\n", i); + return r; + } + } + + DBG("framebuffers registered\n"); + + for (i = 0; i < fbdev->num_fbs; i++) { + r = omapfb_apply_changes(fbdev->fbs[i], 1); + if (r) { + dev_err(fbdev->dev, "failed to change mode\n"); + return r; + } + } + + DBG("create sysfs for fbs\n"); + r = omapfb_create_sysfs(fbdev); + if (r) { + dev_err(fbdev->dev, "failed to create sysfs entries\n"); + return r; + } + + /* Enable fb0 */ + if (fbdev->num_fbs > 0) { + struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[0]); + + if (ofbi->num_overlays > 0) { + struct omap_overlay *ovl = ofbi->overlays[0]; + + r = omapfb_overlay_enable(ovl, 1); + + if (r) { + dev_err(fbdev->dev, + "failed to enable overlay\n"); + return r; + } + } + } + + DBG("create_framebuffers done\n"); + + return 0; +} + +static int omapfb_mode_to_timings(const char *mode_str, + struct omap_video_timings *timings, u8 *bpp) +{ + struct fb_info fbi; + struct fb_var_screeninfo var; + struct fb_ops fbops; + int r; + +#ifdef CONFIG_OMAP2_DSS_VENC + if (strcmp(mode_str, "pal") == 0) { + *timings = omap_dss_pal_timings; + *bpp = 0; + return 0; + } else if (strcmp(mode_str, "ntsc") == 0) { + *timings = omap_dss_ntsc_timings; + *bpp = 0; + return 0; + } +#endif + + /* this is quite a hack, but I wanted to use the modedb and for + * that we need fb_info and var, so we create dummy ones */ + + memset(&fbi, 0, sizeof(fbi)); + memset(&var, 0, sizeof(var)); + memset(&fbops, 0, sizeof(fbops)); + fbi.fbops = &fbops; + + r = fb_find_mode(&var, &fbi, mode_str, NULL, 0, NULL, 24); + + if (r != 0) { + timings->pixel_clock = PICOS2KHZ(var.pixclock); + timings->hfp = var.left_margin; + timings->hbp = var.right_margin; + timings->vfp = var.upper_margin; + timings->vbp = var.lower_margin; + timings->hsw = var.hsync_len; + timings->vsw = var.vsync_len; + timings->x_res = var.xres; + timings->y_res = var.yres; + + switch (var.bits_per_pixel) { + case 16: + *bpp = 16; + break; + case 24: + case 32: + default: + *bpp = 24; + break; + } + + return 0; + } else { + return -EINVAL; + } +} + +static int omapfb_set_def_mode(struct omap_dss_device *display, char *mode_str) +{ + int r; + u8 bpp; + struct omap_video_timings timings; + + r = omapfb_mode_to_timings(mode_str, &timings, &bpp); + if (r) + return r; + + display->panel.recommended_bpp = bpp; + + if (!display->check_timings || !display->set_timings) + return -EINVAL; + + r = display->check_timings(display, &timings); + if (r) + return r; + + display->set_timings(display, &timings); + + return 0; +} + +static int omapfb_parse_def_modes(struct omapfb2_device *fbdev) +{ + char *str, *options, *this_opt; + int r = 0; + + str = 
kmalloc(strlen(def_mode) + 1, GFP_KERNEL); + strcpy(str, def_mode); + options = str; + + while (!r && (this_opt = strsep(&options, ",")) != NULL) { + char *p, *display_str, *mode_str; + struct omap_dss_device *display; + int i; + + p = strchr(this_opt, ':'); + if (!p) { + r = -EINVAL; + break; + } + + *p = 0; + display_str = this_opt; + mode_str = p + 1; + + display = NULL; + for (i = 0; i < fbdev->num_displays; ++i) { + if (strcmp(fbdev->displays[i]->name, + display_str) == 0) { + display = fbdev->displays[i]; + break; + } + } + + if (!display) { + r = -EINVAL; + break; + } + + r = omapfb_set_def_mode(display, mode_str); + if (r) + break; + } + + kfree(str); + + return r; +} + +static int omapfb_probe(struct platform_device *pdev) +{ + struct omapfb2_device *fbdev = NULL; + int r = 0; + int i; + struct omap_overlay *ovl; + struct omap_dss_device *def_display; + struct omap_dss_device *dssdev; + + DBG("omapfb_probe\n"); + + if (pdev->num_resources != 0) { + dev_err(&pdev->dev, "probed for an unknown device\n"); + r = -ENODEV; + goto err0; + } + + fbdev = kzalloc(sizeof(struct omapfb2_device), GFP_KERNEL); + if (fbdev == NULL) { + r = -ENOMEM; + goto err0; + } + + mutex_init(&fbdev->mtx); + + fbdev->dev = &pdev->dev; + platform_set_drvdata(pdev, fbdev); + + fbdev->num_displays = 0; + dssdev = NULL; + for_each_dss_dev(dssdev) { + omap_dss_get_device(dssdev); + fbdev->displays[fbdev->num_displays++] = dssdev; + } + + if (fbdev->num_displays == 0) { + dev_err(&pdev->dev, "no displays\n"); + r = -EINVAL; + goto cleanup; + } + + fbdev->num_overlays = omap_dss_get_num_overlays(); + for (i = 0; i < fbdev->num_overlays; i++) + fbdev->overlays[i] = omap_dss_get_overlay(i); + + fbdev->num_managers = omap_dss_get_num_overlay_managers(); + for (i = 0; i < fbdev->num_managers; i++) + fbdev->managers[i] = omap_dss_get_overlay_manager(i); + + if (def_mode && strlen(def_mode) > 0) { + if (omapfb_parse_def_modes(fbdev)) + dev_warn(&pdev->dev, "cannot parse default modes\n"); + } + + r = omapfb_create_framebuffers(fbdev); + if (r) + goto cleanup; + + for (i = 0; i < fbdev->num_managers; i++) { + struct omap_overlay_manager *mgr; + mgr = fbdev->managers[i]; + r = mgr->apply(mgr); + if (r) + dev_warn(fbdev->dev, "failed to apply dispc config\n"); + } + + DBG("mgr->apply'ed\n"); + + /* gfx overlay should be the default one. 
find a display + * connected to that, and use it as default display */ + ovl = omap_dss_get_overlay(0); + if (ovl->manager && ovl->manager->device) { + def_display = ovl->manager->device; + } else { + dev_warn(&pdev->dev, "cannot find default display\n"); + def_display = NULL; + } + + if (def_display) { +#ifndef CONFIG_FB_OMAP2_FORCE_AUTO_UPDATE + u16 w, h; +#endif + r = def_display->enable(def_display); + if (r) + dev_warn(fbdev->dev, "Failed to enable display '%s'\n", + def_display->name); + + /* set the update mode */ + if (def_display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) { +#ifdef CONFIG_FB_OMAP2_FORCE_AUTO_UPDATE + if (def_display->enable_te) + def_display->enable_te(def_display, 1); + if (def_display->set_update_mode) + def_display->set_update_mode(def_display, + OMAP_DSS_UPDATE_AUTO); +#else /* MANUAL_UPDATE */ + if (def_display->enable_te) + def_display->enable_te(def_display, 0); + if (def_display->set_update_mode) + def_display->set_update_mode(def_display, + OMAP_DSS_UPDATE_MANUAL); + + def_display->get_resolution(def_display, &w, &h); + def_display->update(def_display, 0, 0, w, h); +#endif + } else { + if (def_display->set_update_mode) + def_display->set_update_mode(def_display, + OMAP_DSS_UPDATE_AUTO); + } + } + + return 0; + +cleanup: + omapfb_free_resources(fbdev); +err0: + dev_err(&pdev->dev, "failed to setup omapfb\n"); + return r; +} + +static int omapfb_remove(struct platform_device *pdev) +{ + struct omapfb2_device *fbdev = platform_get_drvdata(pdev); + + /* FIXME: wait till completion of pending events */ + + omapfb_remove_sysfs(fbdev); + + omapfb_free_resources(fbdev); + + return 0; +} + +static struct platform_driver omapfb_driver = { + .probe = omapfb_probe, + .remove = omapfb_remove, + .driver = { + .name = "omapfb", + .owner = THIS_MODULE, + }, +}; + +static int __init omapfb_init(void) +{ + DBG("omapfb_init\n"); + + if (platform_driver_register(&omapfb_driver)) { + printk(KERN_ERR "failed to register omapfb driver\n"); + return -ENODEV; + } + + return 0; +} + +static void __exit omapfb_exit(void) +{ + DBG("omapfb_exit\n"); + platform_driver_unregister(&omapfb_driver); +} + +module_param_named(mode, def_mode, charp, 0); +module_param_named(vram, def_vram, charp, 0); +module_param_named(rotate, def_rotate, int, 0); +module_param_named(vrfb, def_vrfb, bool, 0); +module_param_named(mirror, def_mirror, bool, 0); + +/* late_initcall to let panel/ctrl drivers loaded first. + * I guess better option would be a more dynamic approach, + * so that omapfb reacts to new panels when they are loaded */ +late_initcall(omapfb_init); +/*module_init(omapfb_init);*/ +module_exit(omapfb_exit); + +MODULE_AUTHOR("Tomi Valkeinen "); +MODULE_DESCRIPTION("OMAP2/3 Framebuffer"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/video/omap2/omapfb/omapfb-sysfs.c b/drivers/video/omap2/omapfb/omapfb-sysfs.c new file mode 100644 index 000000000000..62bb88f5c192 --- /dev/null +++ b/drivers/video/omap2/omapfb/omapfb-sysfs.c @@ -0,0 +1,507 @@ +/* + * linux/drivers/video/omap2/omapfb-sysfs.c + * + * Copyright (C) 2008 Nokia Corporation + * Author: Tomi Valkeinen + * + * Some code and ideas taken from drivers/video/omap/ driver + * by Imre Deak. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
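Similarly, omapfb_parse_def_modes() above takes the mode= option as a comma-separated list of display-name:mode pairs, where each mode string is either handed to fb_find_mode() or is one of the literal pal/ntsc timings. The display names come from the board's DSS devices, so the values below are purely illustrative:

/*
 * Illustrative mode= usage (display names and the video mode are
 * made-up examples that depend on the board's DSS devices):
 *
 *     omapfb.mode=lcd:800x480-16@60,tv:pal
 */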
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "omapfb.h" + +static ssize_t show_rotate_type(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fb_info *fbi = dev_get_drvdata(dev); + struct omapfb_info *ofbi = FB2OFB(fbi); + + return snprintf(buf, PAGE_SIZE, "%d\n", ofbi->rotation_type); +} + +static ssize_t store_rotate_type(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fb_info *fbi = dev_get_drvdata(dev); + struct omapfb_info *ofbi = FB2OFB(fbi); + enum omap_dss_rotation_type rot_type; + int r; + + rot_type = simple_strtoul(buf, NULL, 0); + + if (rot_type != OMAP_DSS_ROT_DMA && rot_type != OMAP_DSS_ROT_VRFB) + return -EINVAL; + + lock_fb_info(fbi); + + r = 0; + if (rot_type == ofbi->rotation_type) + goto out; + + if (ofbi->region.size) { + r = -EBUSY; + goto out; + } + + ofbi->rotation_type = rot_type; + + /* + * Since the VRAM for this FB is not allocated at the moment we don't + * need to do any further parameter checking at this point. + */ +out: + unlock_fb_info(fbi); + + return r ? r : count; +} + + +static ssize_t show_mirror(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fb_info *fbi = dev_get_drvdata(dev); + struct omapfb_info *ofbi = FB2OFB(fbi); + + return snprintf(buf, PAGE_SIZE, "%d\n", ofbi->mirror); +} + +static ssize_t store_mirror(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fb_info *fbi = dev_get_drvdata(dev); + struct omapfb_info *ofbi = FB2OFB(fbi); + bool mirror; + int r; + struct fb_var_screeninfo new_var; + + mirror = simple_strtoul(buf, NULL, 0); + + if (mirror != 0 && mirror != 1) + return -EINVAL; + + lock_fb_info(fbi); + + ofbi->mirror = mirror; + + memcpy(&new_var, &fbi->var, sizeof(new_var)); + r = check_fb_var(fbi, &new_var); + if (r) + goto out; + memcpy(&fbi->var, &new_var, sizeof(fbi->var)); + + set_fb_fix(fbi); + + r = omapfb_apply_changes(fbi, 0); + if (r) + goto out; + + r = count; +out: + unlock_fb_info(fbi); + + return r; +} + +static ssize_t show_overlays(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fb_info *fbi = dev_get_drvdata(dev); + struct omapfb_info *ofbi = FB2OFB(fbi); + struct omapfb2_device *fbdev = ofbi->fbdev; + ssize_t l = 0; + int t; + + omapfb_lock(fbdev); + lock_fb_info(fbi); + + for (t = 0; t < ofbi->num_overlays; t++) { + struct omap_overlay *ovl = ofbi->overlays[t]; + int ovlnum; + + for (ovlnum = 0; ovlnum < fbdev->num_overlays; ++ovlnum) + if (ovl == fbdev->overlays[ovlnum]) + break; + + l += snprintf(buf + l, PAGE_SIZE - l, "%s%d", + t == 0 ? 
"" : ",", ovlnum); + } + + l += snprintf(buf + l, PAGE_SIZE - l, "\n"); + + unlock_fb_info(fbi); + omapfb_unlock(fbdev); + + return l; +} + +static struct omapfb_info *get_overlay_fb(struct omapfb2_device *fbdev, + struct omap_overlay *ovl) +{ + int i, t; + + for (i = 0; i < fbdev->num_fbs; i++) { + struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[i]); + + for (t = 0; t < ofbi->num_overlays; t++) { + if (ofbi->overlays[t] == ovl) + return ofbi; + } + } + + return NULL; +} + +static ssize_t store_overlays(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fb_info *fbi = dev_get_drvdata(dev); + struct omapfb_info *ofbi = FB2OFB(fbi); + struct omapfb2_device *fbdev = ofbi->fbdev; + struct omap_overlay *ovls[OMAPFB_MAX_OVL_PER_FB]; + struct omap_overlay *ovl; + int num_ovls, r, i; + int len; + bool added = false; + + num_ovls = 0; + + len = strlen(buf); + if (buf[len - 1] == '\n') + len = len - 1; + + omapfb_lock(fbdev); + lock_fb_info(fbi); + + if (len > 0) { + char *p = (char *)buf; + int ovlnum; + + while (p < buf + len) { + int found; + if (num_ovls == OMAPFB_MAX_OVL_PER_FB) { + r = -EINVAL; + goto out; + } + + ovlnum = simple_strtoul(p, &p, 0); + if (ovlnum > fbdev->num_overlays) { + r = -EINVAL; + goto out; + } + + found = 0; + for (i = 0; i < num_ovls; ++i) { + if (ovls[i] == fbdev->overlays[ovlnum]) { + found = 1; + break; + } + } + + if (!found) + ovls[num_ovls++] = fbdev->overlays[ovlnum]; + + p++; + } + } + + for (i = 0; i < num_ovls; ++i) { + struct omapfb_info *ofbi2 = get_overlay_fb(fbdev, ovls[i]); + if (ofbi2 && ofbi2 != ofbi) { + dev_err(fbdev->dev, "overlay already in use\n"); + r = -EINVAL; + goto out; + } + } + + /* detach unused overlays */ + for (i = 0; i < ofbi->num_overlays; ++i) { + int t, found; + + ovl = ofbi->overlays[i]; + + found = 0; + + for (t = 0; t < num_ovls; ++t) { + if (ovl == ovls[t]) { + found = 1; + break; + } + } + + if (found) + continue; + + DBG("detaching %d\n", ofbi->overlays[i]->id); + + omapfb_overlay_enable(ovl, 0); + + if (ovl->manager) + ovl->manager->apply(ovl->manager); + + for (t = i + 1; t < ofbi->num_overlays; t++) { + ofbi->rotation[t-1] = ofbi->rotation[t]; + ofbi->overlays[t-1] = ofbi->overlays[t]; + } + + ofbi->num_overlays--; + i--; + } + + for (i = 0; i < num_ovls; ++i) { + int t, found; + + ovl = ovls[i]; + + found = 0; + + for (t = 0; t < ofbi->num_overlays; ++t) { + if (ovl == ofbi->overlays[t]) { + found = 1; + break; + } + } + + if (found) + continue; + ofbi->rotation[ofbi->num_overlays] = 0; + ofbi->overlays[ofbi->num_overlays++] = ovl; + + added = true; + } + + if (added) { + r = omapfb_apply_changes(fbi, 0); + if (r) + goto out; + } + + r = count; +out: + unlock_fb_info(fbi); + omapfb_unlock(fbdev); + + return r; +} + +static ssize_t show_overlays_rotate(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fb_info *fbi = dev_get_drvdata(dev); + struct omapfb_info *ofbi = FB2OFB(fbi); + ssize_t l = 0; + int t; + + lock_fb_info(fbi); + + for (t = 0; t < ofbi->num_overlays; t++) { + l += snprintf(buf + l, PAGE_SIZE - l, "%s%d", + t == 0 ? 
"" : ",", ofbi->rotation[t]); + } + + l += snprintf(buf + l, PAGE_SIZE - l, "\n"); + + unlock_fb_info(fbi); + + return l; +} + +static ssize_t store_overlays_rotate(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct fb_info *fbi = dev_get_drvdata(dev); + struct omapfb_info *ofbi = FB2OFB(fbi); + int num_ovls = 0, r, i; + int len; + bool changed = false; + u8 rotation[OMAPFB_MAX_OVL_PER_FB]; + + len = strlen(buf); + if (buf[len - 1] == '\n') + len = len - 1; + + lock_fb_info(fbi); + + if (len > 0) { + char *p = (char *)buf; + + while (p < buf + len) { + int rot; + + if (num_ovls == ofbi->num_overlays) { + r = -EINVAL; + goto out; + } + + rot = simple_strtoul(p, &p, 0); + if (rot < 0 || rot > 3) { + r = -EINVAL; + goto out; + } + + if (ofbi->rotation[num_ovls] != rot) + changed = true; + + rotation[num_ovls++] = rot; + + p++; + } + } + + if (num_ovls != ofbi->num_overlays) { + r = -EINVAL; + goto out; + } + + if (changed) { + for (i = 0; i < num_ovls; ++i) + ofbi->rotation[i] = rotation[i]; + + r = omapfb_apply_changes(fbi, 0); + if (r) + goto out; + + /* FIXME error handling? */ + } + + r = count; +out: + unlock_fb_info(fbi); + + return r; +} + +static ssize_t show_size(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fb_info *fbi = dev_get_drvdata(dev); + struct omapfb_info *ofbi = FB2OFB(fbi); + + return snprintf(buf, PAGE_SIZE, "%lu\n", ofbi->region.size); +} + +static ssize_t store_size(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fb_info *fbi = dev_get_drvdata(dev); + struct omapfb_info *ofbi = FB2OFB(fbi); + unsigned long size; + int r; + int i; + + size = PAGE_ALIGN(simple_strtoul(buf, NULL, 0)); + + lock_fb_info(fbi); + + for (i = 0; i < ofbi->num_overlays; i++) { + if (ofbi->overlays[i]->info.enabled) { + r = -EBUSY; + goto out; + } + } + + if (size != ofbi->region.size) { + r = omapfb_realloc_fbmem(fbi, size, ofbi->region.type); + if (r) { + dev_err(dev, "realloc fbmem failed\n"); + goto out; + } + } + + r = count; +out: + unlock_fb_info(fbi); + + return r; +} + +static ssize_t show_phys(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fb_info *fbi = dev_get_drvdata(dev); + struct omapfb_info *ofbi = FB2OFB(fbi); + + return snprintf(buf, PAGE_SIZE, "%0x\n", ofbi->region.paddr); +} + +static ssize_t show_virt(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fb_info *fbi = dev_get_drvdata(dev); + struct omapfb_info *ofbi = FB2OFB(fbi); + + return snprintf(buf, PAGE_SIZE, "%p\n", ofbi->region.vaddr); +} + +static struct device_attribute omapfb_attrs[] = { + __ATTR(rotate_type, S_IRUGO | S_IWUSR, show_rotate_type, + store_rotate_type), + __ATTR(mirror, S_IRUGO | S_IWUSR, show_mirror, store_mirror), + __ATTR(size, S_IRUGO | S_IWUSR, show_size, store_size), + __ATTR(overlays, S_IRUGO | S_IWUSR, show_overlays, store_overlays), + __ATTR(overlays_rotate, S_IRUGO | S_IWUSR, show_overlays_rotate, + store_overlays_rotate), + __ATTR(phys_addr, S_IRUGO, show_phys, NULL), + __ATTR(virt_addr, S_IRUGO, show_virt, NULL), +}; + +int omapfb_create_sysfs(struct omapfb2_device *fbdev) +{ + int i; + int r; + + DBG("create sysfs for fbs\n"); + for (i = 0; i < fbdev->num_fbs; i++) { + int t; + for (t = 0; t < ARRAY_SIZE(omapfb_attrs); t++) { + r = device_create_file(fbdev->fbs[i]->dev, + &omapfb_attrs[t]); + + if (r) { + dev_err(fbdev->dev, "failed to create sysfs " + "file\n"); + return r; + } + } + } + + return 0; +} + +void 
omapfb_remove_sysfs(struct omapfb2_device *fbdev) +{ + int i, t; + + DBG("remove sysfs for fbs\n"); + for (i = 0; i < fbdev->num_fbs; i++) { + for (t = 0; t < ARRAY_SIZE(omapfb_attrs); t++) + device_remove_file(fbdev->fbs[i]->dev, + &omapfb_attrs[t]); + } +} + diff --git a/drivers/video/omap2/omapfb/omapfb.h b/drivers/video/omap2/omapfb/omapfb.h new file mode 100644 index 000000000000..f7c9c739e5ef --- /dev/null +++ b/drivers/video/omap2/omapfb/omapfb.h @@ -0,0 +1,146 @@ +/* + * linux/drivers/video/omap2/omapfb.h + * + * Copyright (C) 2008 Nokia Corporation + * Author: Tomi Valkeinen + * + * Some code and ideas taken from drivers/video/omap/ driver + * by Imre Deak. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#ifndef __DRIVERS_VIDEO_OMAP2_OMAPFB_H__ +#define __DRIVERS_VIDEO_OMAP2_OMAPFB_H__ + +#ifdef CONFIG_FB_OMAP2_DEBUG_SUPPORT +#define DEBUG +#endif + +#include + +#ifdef DEBUG +extern unsigned int omapfb_debug; +#define DBG(format, ...) \ + if (omapfb_debug) \ + printk(KERN_DEBUG "OMAPFB: " format, ## __VA_ARGS__) +#else +#define DBG(format, ...) +#endif + +#define FB2OFB(fb_info) ((struct omapfb_info *)(fb_info->par)) + +/* max number of overlays to which a framebuffer data can be direct */ +#define OMAPFB_MAX_OVL_PER_FB 3 + +struct omapfb2_mem_region { + u32 paddr; + void __iomem *vaddr; + struct vrfb vrfb; + unsigned long size; + u8 type; /* OMAPFB_PLANE_MEM_* */ + bool alloc; /* allocated by the driver */ + bool map; /* kernel mapped by the driver */ +}; + +/* appended to fb_info */ +struct omapfb_info { + int id; + struct omapfb2_mem_region region; + atomic_t map_count; + int num_overlays; + struct omap_overlay *overlays[OMAPFB_MAX_OVL_PER_FB]; + struct omapfb2_device *fbdev; + enum omap_dss_rotation_type rotation_type; + u8 rotation[OMAPFB_MAX_OVL_PER_FB]; + bool mirror; +}; + +struct omapfb2_device { + struct device *dev; + struct mutex mtx; + + u32 pseudo_palette[17]; + + int state; + + unsigned num_fbs; + struct fb_info *fbs[10]; + + unsigned num_displays; + struct omap_dss_device *displays[10]; + unsigned num_overlays; + struct omap_overlay *overlays[10]; + unsigned num_managers; + struct omap_overlay_manager *managers[10]; +}; + +struct omapfb_colormode { + enum omap_color_mode dssmode; + u32 bits_per_pixel; + u32 nonstd; + struct fb_bitfield red; + struct fb_bitfield green; + struct fb_bitfield blue; + struct fb_bitfield transp; +}; + +void set_fb_fix(struct fb_info *fbi); +int check_fb_var(struct fb_info *fbi, struct fb_var_screeninfo *var); +int omapfb_realloc_fbmem(struct fb_info *fbi, unsigned long size, int type); +int omapfb_apply_changes(struct fb_info *fbi, int init); + +int omapfb_create_sysfs(struct omapfb2_device *fbdev); +void omapfb_remove_sysfs(struct omapfb2_device *fbdev); + +int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg); + +int dss_mode_to_fb_mode(enum omap_color_mode dssmode, + struct fb_var_screeninfo *var); + +/* find the display connected to this fb, if any */ +static inline struct omap_dss_device 
*fb2display(struct fb_info *fbi) +{ + struct omapfb_info *ofbi = FB2OFB(fbi); + int i; + + /* XXX: returns the display connected to first attached overlay */ + for (i = 0; i < ofbi->num_overlays; i++) { + if (ofbi->overlays[i]->manager) + return ofbi->overlays[i]->manager->device; + } + + return NULL; +} + +static inline void omapfb_lock(struct omapfb2_device *fbdev) +{ + mutex_lock(&fbdev->mtx); +} + +static inline void omapfb_unlock(struct omapfb2_device *fbdev) +{ + mutex_unlock(&fbdev->mtx); +} + +static inline int omapfb_overlay_enable(struct omap_overlay *ovl, + int enable) +{ + struct omap_overlay_info info; + + ovl->get_overlay_info(ovl, &info); + info.enabled = enable; + return ovl->set_overlay_info(ovl, &info); +} + +#endif diff --git a/include/linux/omapfb.h b/include/linux/omapfb.h index a8efa92c6c35..f46c40ac6d45 100644 --- a/include/linux/omapfb.h +++ b/include/linux/omapfb.h @@ -24,6 +24,7 @@ #ifndef __LINUX_OMAPFB_H__ #define __LINUX_OMAPFB_H__ +#include #include #include @@ -50,6 +51,12 @@ #define OMAPFB_UPDATE_WINDOW OMAP_IOW(54, struct omapfb_update_window) #define OMAPFB_SETUP_MEM OMAP_IOW(55, struct omapfb_mem_info) #define OMAPFB_QUERY_MEM OMAP_IOW(56, struct omapfb_mem_info) +#define OMAPFB_WAITFORVSYNC OMAP_IO(57) +#define OMAPFB_MEMORY_READ OMAP_IOR(58, struct omapfb_memory_read) +#define OMAPFB_GET_OVERLAY_COLORMODE OMAP_IOR(59, struct omapfb_ovl_colormode) +#define OMAPFB_WAITFORGO OMAP_IO(60) +#define OMAPFB_GET_VRAM_INFO OMAP_IOR(61, struct omapfb_vram_info) +#define OMAPFB_SET_TEARSYNC OMAP_IOW(62, struct omapfb_tearsync_info) #define OMAPFB_CAPS_GENERIC_MASK 0x00000fff #define OMAPFB_CAPS_LCDC_MASK 0x00fff000 @@ -87,6 +94,13 @@ enum omapfb_color_format { OMAPFB_COLOR_CLUT_1BPP, OMAPFB_COLOR_RGB444, OMAPFB_COLOR_YUY422, + + OMAPFB_COLOR_ARGB16, + OMAPFB_COLOR_RGB24U, /* RGB24, 32-bit container */ + OMAPFB_COLOR_RGB24P, /* RGB24, 24-bit container */ + OMAPFB_COLOR_ARGB32, + OMAPFB_COLOR_RGBA32, + OMAPFB_COLOR_RGBX32, }; struct omapfb_update_window { @@ -158,6 +172,40 @@ enum omapfb_update_mode { OMAPFB_MANUAL_UPDATE }; +struct omapfb_memory_read { + __u16 x; + __u16 y; + __u16 w; + __u16 h; + size_t buffer_size; + void __user *buffer; +}; + +struct omapfb_ovl_colormode { + __u8 overlay_idx; + __u8 mode_idx; + __u32 bits_per_pixel; + __u32 nonstd; + struct fb_bitfield red; + struct fb_bitfield green; + struct fb_bitfield blue; + struct fb_bitfield transp; +}; + +struct omapfb_vram_info { + __u32 total; + __u32 free; + __u32 largest_free_block; + __u32 reserved[5]; +}; + +struct omapfb_tearsync_info { + __u8 enabled; + __u8 reserved1[3]; + __u16 line; + __u16 reserved2; +}; + #ifdef __KERNEL__ #include @@ -173,6 +221,11 @@ struct omapfb_mem_region { void __iomem *vaddr; unsigned long size; u8 type; /* OMAPFB_PLANE_MEM_* */ + enum omapfb_color_format format;/* OMAPFB_COLOR_* */ + unsigned format_used:1; /* Must be set when format is set. 
+ * Needed b/c of the badly chosen 0 + * base for OMAPFB_COLOR_* values + */ unsigned alloc:1; /* allocated by the driver */ unsigned map:1; /* kernel mapped by the driver */ }; @@ -189,6 +242,7 @@ struct omapfb_platform_data { }; /* in arch/arm/plat-omap/fb.c */ +extern void omapfb_set_platform_data(struct omapfb_platform_data *data); extern void omapfb_set_ctrl_platform_data(void *pdata); extern void omapfb_reserve_sdram(void); -- cgit v1.2.3 From a63ce5b306855bccdacba95c03bfc293316c8ae3 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 7 Dec 2009 09:11:39 -0500 Subject: tracing: Buffer the output of seq_file in case of filled buffer If the seq_read fills the buffer it will call s_start again on the next itertation with the same position. This causes a problem with the function_graph tracer because it consumes the iteration in order to determine leaf functions. What happens is that the iterator stores the entry, and the function graph plugin will look at the next entry. If that next entry is a return of the same function and task, then the function is a leaf and the function_graph plugin calls ring_buffer_read which moves the ring buffer iterator forward (the trace iterator still points to the function start entry). The copying of the trace_seq to the seq_file buffer will fail if the seq_file buffer is full. The seq_read will not show this entry. The next read by userspace will cause seq_read to again call s_start which will reuse the trace iterator entry (the function start entry). But the function return entry was already consumed. The function graph plugin will think that this entry is a nested function and not a leaf. To solve this, the trace code now checks the return status of the seq_printf (trace_print_seq). If the writing to the seq_file buffer fails, we set a flag in the iterator (leftover) and we do not reset the trace_seq buffer. On the next call to s_start, we check the leftover flag, and if it is set, we just reuse the trace_seq buffer and do not call into the plugin print functions. Before this patch: 2) | fput() { 2) | __fput() { 2) 0.550 us | inotify_inode_queue_event(); 2) | __fsnotify_parent() { 2) 0.540 us | inotify_dentry_parent_queue_event(); After the patch: 2) | fput() { 2) | __fput() { 2) 0.550 us | inotify_inode_queue_event(); 2) 0.548 us | __fsnotify_parent(); 2) 0.540 us | inotify_dentry_parent_queue_event(); [ Updated the patch to fix a missing return 0 from the trace_print_seq() stub when CONFIG_TRACING is disabled. 
Reported-by: Ingo Molnar ] Reported-by: Jiri Olsa Cc: Frederic Weisbecker Signed-off-by: Steven Rostedt --- include/linux/ftrace_event.h | 1 + include/linux/trace_seq.h | 5 +++-- kernel/trace/trace.c | 35 ++++++++++++++++++++++++++++++++--- kernel/trace/trace_output.c | 14 +++++++++++--- 4 files changed, 47 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 47bbdf9c38d0..38f8d6553831 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -57,6 +57,7 @@ struct trace_iterator { /* The below is zeroed out in pipe_read */ struct trace_seq seq; struct trace_entry *ent; + int leftover; int cpu; u64 ts; diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h index 09077f6ed128..fad6857bc0f3 100644 --- a/include/linux/trace_seq.h +++ b/include/linux/trace_seq.h @@ -33,7 +33,7 @@ extern int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args) __attribute__ ((format (printf, 2, 0))); extern int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); -extern void trace_print_seq(struct seq_file *m, struct trace_seq *s); +extern int trace_print_seq(struct seq_file *m, struct trace_seq *s); extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt); extern int trace_seq_puts(struct trace_seq *s, const char *str); @@ -55,8 +55,9 @@ trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) return 0; } -static inline void trace_print_seq(struct seq_file *m, struct trace_seq *s) +static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s) { + return 0; } static inline ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index dc937e1baa91..484114d70743 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1516,6 +1516,8 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos) int i = (int)*pos; void *ent; + WARN_ON_ONCE(iter->leftover); + (*pos)++; /* can't go backwards */ @@ -1614,8 +1616,16 @@ static void *s_start(struct seq_file *m, loff_t *pos) ; } else { - l = *pos - 1; - p = s_next(m, p, &l); + /* + * If we overflowed the seq_file before, then we want + * to just reuse the trace_seq buffer again. + */ + if (iter->leftover) + p = iter; + else { + l = *pos - 1; + p = s_next(m, p, &l); + } } trace_event_read_lock(); @@ -1923,6 +1933,7 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter) static int s_show(struct seq_file *m, void *v) { struct trace_iterator *iter = v; + int ret; if (iter->ent == NULL) { if (iter->tr) { @@ -1942,9 +1953,27 @@ static int s_show(struct seq_file *m, void *v) if (!(trace_flags & TRACE_ITER_VERBOSE)) print_func_help_header(m); } + } else if (iter->leftover) { + /* + * If we filled the seq_file buffer earlier, we + * want to just show it now. + */ + ret = trace_print_seq(m, &iter->seq); + + /* ret should this time be zero, but you never know */ + iter->leftover = ret; + } else { print_trace_line(iter); - trace_print_seq(m, &iter->seq); + ret = trace_print_seq(m, &iter->seq); + /* + * If we overflow the seq_file buffer, then it will + * ask us for this data again at start up. + * Use that instead. + * ret is 0 if seq_file write succeeded. + * -1 otherwise. 
+ */ + iter->leftover = ret; } return 0; diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index b6c12c6a1bcd..e5cf90fef34e 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -23,13 +23,21 @@ static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly; static int next_event_type = __TRACE_LAST_TYPE + 1; -void trace_print_seq(struct seq_file *m, struct trace_seq *s) +int trace_print_seq(struct seq_file *m, struct trace_seq *s) { int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len; + int ret; + + ret = seq_write(m, s->buffer, len); - seq_write(m, s->buffer, len); + /* + * Only reset this buffer if we successfully wrote to the + * seq_file buffer. + */ + if (!ret) + trace_seq_init(s); - trace_seq_init(s); + return ret; } enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter) -- cgit v1.2.3 From d184b31c0e403580aafb3f8955ecc185a3d04801 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 25 Nov 2009 16:10:14 +0100 Subject: tracing: Add full state to trace_seq The trace_seq buffer might fill up, and right now one needs to check the return value of each printf into the buffer to check for that. Instead, have the buffer keep track of whether it is full or not, and reject more input if it is full or would have overflowed with an input that wasn't added. Cc: Lai Jiangshan Signed-off-by: Johannes Berg Signed-off-by: Steven Rostedt --- include/linux/trace_seq.h | 2 ++ kernel/trace/trace_output.c | 61 +++++++++++++++++++++++++++++++++++++-------- 2 files changed, 52 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h index fad6857bc0f3..5cf397ceb726 100644 --- a/include/linux/trace_seq.h +++ b/include/linux/trace_seq.h @@ -14,6 +14,7 @@ struct trace_seq { unsigned char buffer[PAGE_SIZE]; unsigned int len; unsigned int readpos; + int full; }; static inline void @@ -21,6 +22,7 @@ trace_seq_init(struct trace_seq *s) { s->len = 0; s->readpos = 0; + s->full = 0; } /* diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index e5cf90fef34e..8e46b3323cdc 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -93,7 +93,7 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...) va_list ap; int ret; - if (!len) + if (s->full || !len) return 0; va_start(ap, fmt); @@ -101,8 +101,10 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...) 
va_end(ap); /* If we can't write it all, don't bother writing anything */ - if (ret >= len) + if (ret >= len) { + s->full = 1; return 0; + } s->len += ret; @@ -127,14 +129,16 @@ trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args) int len = (PAGE_SIZE - 1) - s->len; int ret; - if (!len) + if (s->full || !len) return 0; ret = vsnprintf(s->buffer + s->len, len, fmt, args); /* If we can't write it all, don't bother writing anything */ - if (ret >= len) + if (ret >= len) { + s->full = 1; return 0; + } s->len += ret; @@ -147,14 +151,16 @@ int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) int len = (PAGE_SIZE - 1) - s->len; int ret; - if (!len) + if (s->full || !len) return 0; ret = bstr_printf(s->buffer + s->len, len, fmt, binary); /* If we can't write it all, don't bother writing anything */ - if (ret >= len) + if (ret >= len) { + s->full = 1; return 0; + } s->len += ret; @@ -175,9 +181,14 @@ int trace_seq_puts(struct trace_seq *s, const char *str) { int len = strlen(str); - if (len > ((PAGE_SIZE - 1) - s->len)) + if (s->full) return 0; + if (len > ((PAGE_SIZE - 1) - s->len)) { + s->full = 1; + return 0; + } + memcpy(s->buffer + s->len, str, len); s->len += len; @@ -186,9 +197,14 @@ int trace_seq_puts(struct trace_seq *s, const char *str) int trace_seq_putc(struct trace_seq *s, unsigned char c) { - if (s->len >= (PAGE_SIZE - 1)) + if (s->full) return 0; + if (s->len >= (PAGE_SIZE - 1)) { + s->full = 1; + return 0; + } + s->buffer[s->len++] = c; return 1; @@ -196,9 +212,14 @@ int trace_seq_putc(struct trace_seq *s, unsigned char c) int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len) { - if (len > ((PAGE_SIZE - 1) - s->len)) + if (s->full) return 0; + if (len > ((PAGE_SIZE - 1) - s->len)) { + s->full = 1; + return 0; + } + memcpy(s->buffer + s->len, mem, len); s->len += len; @@ -211,6 +232,9 @@ int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len) const unsigned char *data = mem; int i, j; + if (s->full) + return 0; + #ifdef __BIG_ENDIAN for (i = 0, j = 0; i < len; i++) { #else @@ -228,8 +252,13 @@ void *trace_seq_reserve(struct trace_seq *s, size_t len) { void *ret; - if (len > ((PAGE_SIZE - 1) - s->len)) + if (s->full) + return 0; + + if (len > ((PAGE_SIZE - 1) - s->len)) { + s->full = 1; return NULL; + } ret = s->buffer + s->len; s->len += len; @@ -241,8 +270,14 @@ int trace_seq_path(struct trace_seq *s, struct path *path) { unsigned char *p; - if (s->len >= (PAGE_SIZE - 1)) + if (s->full) return 0; + + if (s->len >= (PAGE_SIZE - 1)) { + s->full = 1; + return 0; + } + p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len); if (!IS_ERR(p)) { p = mangle_path(s->buffer + s->len, p, "\n"); @@ -255,6 +290,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path) return 1; } + s->full = 1; return 0; } @@ -381,6 +417,9 @@ int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm, unsigned long vmstart = 0; int ret = 1; + if (s->full) + return 0; + if (mm) { const struct vm_area_struct *vma; -- cgit v1.2.3 From 876fba43cc810e3c37ce26995933f9547b83cb0e Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Wed, 11 Nov 2009 15:22:15 +0100 Subject: ACPI: add const to acpi_check_resource_conflict() acpi_check_resource_conflict() doesn't change the resource it operates on, so the res parameter can be marked const. 
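
As a minimal illustration (not part of the patch itself; names and values below are hypothetical), the const qualifier lets a caller hand in read-only resource data without a cast:

	#include <linux/acpi.h>
	#include <linux/ioport.h>

	/* Illustrative resource only; the address range is made up. */
	static const struct resource example_res = {
		.start = 0x0295,
		.end   = 0x0296,
		.flags = IORESOURCE_IO,
	};

	static int example_probe(void)
	{
		/*
		 * Non-zero return means an ACPI OperationRegion claims the
		 * range (when resource enforcement is strict).
		 */
		if (acpi_check_resource_conflict(&example_res))
			return -EBUSY;
		return 0;
	}
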
Signed-off-by: Jean Delvare Signed-off-by: Len Brown --- drivers/acpi/osl.c | 2 +- include/linux/acpi.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 7c1c59ea9ec6..02e8464e480f 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -1118,7 +1118,7 @@ __setup("acpi_enforce_resources=", acpi_enforce_resources_setup); /* Check for resource conflicts between ACPI OperationRegions and native * drivers */ -int acpi_check_resource_conflict(struct resource *res) +int acpi_check_resource_conflict(const struct resource *res) { struct acpi_res_list *res_list_elem; int ioport; diff --git a/include/linux/acpi.h b/include/linux/acpi.h index dfcd920c3e54..c920d2def4d3 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -240,7 +240,7 @@ extern int pnpacpi_disabled; #define PXM_INVAL (-1) #define NID_INVAL (-1) -int acpi_check_resource_conflict(struct resource *res); +int acpi_check_resource_conflict(const struct resource *res); int acpi_check_region(resource_size_t start, resource_size_t n, const char *name); -- cgit v1.2.3 From 55464d461bdcffc4422aebfb750eacf99e3c0f27 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 9 Dec 2009 14:20:04 -0800 Subject: IB: Clarify the documentation of ib_post_send() Clarify the behavior of ib_post_send() when a list of work requests is passed in and an immediate error is returned. Signed-off-by: Bart Van Assche Signed-off-by: Roland Dreier --- include/rdma/ib_verbs.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include') diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index c179318edd92..09509edb1c5f 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1425,6 +1425,11 @@ int ib_destroy_qp(struct ib_qp *qp); * @send_wr: A list of work requests to post on the send queue. * @bad_send_wr: On an immediate failure, this parameter will reference * the work request that failed to be posted on the QP. + * + * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate + * error is returned, the QP state shall not be affected, + * ib_post_send() will return an immediate error after queueing any + * earlier work requests in the list. */ static inline int ib_post_send(struct ib_qp *qp, struct ib_send_wr *send_wr, -- cgit v1.2.3 From a2e68e92d384d37c8cc6bb7206d43b1eb9bc3f08 Mon Sep 17 00:00:00 2001 From: Jerome Glisse Date: Mon, 7 Dec 2009 15:52:56 +0100 Subject: drm: Add search/get functions to get a block in a specific range These are required for changes to TTM. 
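
As a rough usage sketch (illustrative only, not part of the patch; locking and the pre-get/atomic retry loop that TTM uses are omitted), a caller can now confine an allocation to a given range of pages with the new helpers:

	/* Assumes the usual DRM headers (drm_mm.h) are already included. */
	static struct drm_mm_node *example_get_node_in_range(struct drm_mm *mm,
							     unsigned long num_pages,
							     unsigned alignment,
							     unsigned long start,
							     unsigned long end)
	{
		struct drm_mm_node *node;

		/* Find a free hole that can satisfy the request inside [start, end). */
		node = drm_mm_search_free_in_range(mm, num_pages, alignment,
						   start, end, 1);
		if (!node)
			return NULL;

		/* Carve the block out of that hole, clipped to the same range. */
		return drm_mm_get_block_range(node, num_pages, alignment,
					      start, end);
	}

TTM uses this to honour per-placement page-range limits (fpfn/lpfn) when validating buffers, as introduced by the follow-up placement rework below.
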
Signed-off-by: Jerome Glisse Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_mm.c | 88 ++++++++++++++++++++++++++++++++++++++++++++++++ include/drm/drm_mm.h | 34 +++++++++++++++++++ 2 files changed, 122 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 1f0d717dbad6..a5c2773ccf27 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -226,6 +226,44 @@ struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node, } EXPORT_SYMBOL(drm_mm_get_block_generic); +struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node, + unsigned long size, + unsigned alignment, + unsigned long start, + unsigned long end, + int atomic) +{ + struct drm_mm_node *align_splitoff = NULL; + unsigned tmp = 0; + unsigned wasted = 0; + + if (node->start < start) + wasted += start - node->start; + if (alignment) + tmp = ((node->start + wasted) % alignment); + + if (tmp) + wasted += alignment - tmp; + if (wasted) { + align_splitoff = drm_mm_split_at_start(node, wasted, atomic); + if (unlikely(align_splitoff == NULL)) + return NULL; + } + + if (node->size == size) { + list_del_init(&node->fl_entry); + node->free = 0; + } else { + node = drm_mm_split_at_start(node, size, atomic); + } + + if (align_splitoff) + drm_mm_put_block(align_splitoff); + + return node; +} +EXPORT_SYMBOL(drm_mm_get_block_range_generic); + /* * Put a block. Merge with the previous and / or next block if they are free. * Otherwise add to the free stack. @@ -331,6 +369,56 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, } EXPORT_SYMBOL(drm_mm_search_free); +struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm, + unsigned long size, + unsigned alignment, + unsigned long start, + unsigned long end, + int best_match) +{ + struct list_head *list; + const struct list_head *free_stack = &mm->fl_entry; + struct drm_mm_node *entry; + struct drm_mm_node *best; + unsigned long best_size; + unsigned wasted; + + best = NULL; + best_size = ~0UL; + + list_for_each(list, free_stack) { + entry = list_entry(list, struct drm_mm_node, fl_entry); + wasted = 0; + + if (entry->size < size) + continue; + + if (entry->start > end || (entry->start+entry->size) < start) + continue; + + if (entry->start < start) + wasted += start - entry->start; + + if (alignment) { + register unsigned tmp = (entry->start + wasted) % alignment; + if (tmp) + wasted += alignment - tmp; + } + + if (entry->size >= size + wasted) { + if (!best_match) + return entry; + if (size < best_size) { + best = entry; + best_size = entry->size; + } + } + } + + return best; +} +EXPORT_SYMBOL(drm_mm_search_free_in_range); + int drm_mm_clean(struct drm_mm * mm) { struct list_head *head = &mm->ml_entry; diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index 62329f9a42cb..b40b2f062039 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h @@ -66,6 +66,13 @@ extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node, unsigned long size, unsigned alignment, int atomic); +extern struct drm_mm_node *drm_mm_get_block_range_generic( + struct drm_mm_node *node, + unsigned long size, + unsigned alignment, + unsigned long start, + unsigned long end, + int atomic); static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent, unsigned long size, unsigned alignment) @@ -78,11 +85,38 @@ static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *pa { return drm_mm_get_block_generic(parent, size, alignment, 1); } +static inline 
struct drm_mm_node *drm_mm_get_block_range( + struct drm_mm_node *parent, + unsigned long size, + unsigned alignment, + unsigned long start, + unsigned long end) +{ + return drm_mm_get_block_range_generic(parent, size, alignment, + start, end, 0); +} +static inline struct drm_mm_node *drm_mm_get_block_atomic_range( + struct drm_mm_node *parent, + unsigned long size, + unsigned alignment, + unsigned long start, + unsigned long end) +{ + return drm_mm_get_block_range_generic(parent, size, alignment, + start, end, 1); +} extern void drm_mm_put_block(struct drm_mm_node *cur); extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size, unsigned alignment, int best_match); +extern struct drm_mm_node *drm_mm_search_free_in_range( + const struct drm_mm *mm, + unsigned long size, + unsigned alignment, + unsigned long start, + unsigned long end, + int best_match); extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size); extern void drm_mm_takedown(struct drm_mm *mm); -- cgit v1.2.3 From ca262a9998d46196750bb19a9dc4bd465b170ff7 Mon Sep 17 00:00:00 2001 From: Jerome Glisse Date: Tue, 8 Dec 2009 15:33:32 +0100 Subject: drm/ttm: Rework validation & memory space allocation (V3) This change allow driver to pass sorted memory placement, from most prefered placement to least prefered placement. In order to avoid long function prototype a structure is used to gather memory placement informations such as range restriction (if you need a buffer to be in given range). Range restriction is determined by fpfn & lpfn which are the first page and last page number btw which allocation can happen. If those fields are set to 0 ttm will assume buffer can be put anywhere in the address space (thus it avoids putting a burden on the driver to always properly set those fields). This patch also factor few functions like evicting first entry of lru list or getting a memory space. This avoid code duplication. V2: Change API to use placement flags and array instead of packing placement order into a quadword. V3: Make sure we set the appropriate mem.placement flag when validating or allocation memory space. [Pending Thomas Hellstrom further review but okay from preliminary review so far]. Signed-off-by: Jerome Glisse Signed-off-by: Dave Airlie --- drivers/gpu/drm/ttm/ttm_bo.c | 463 +++++++++++++++++++--------------------- include/drm/ttm/ttm_bo_api.h | 42 ++-- include/drm/ttm/ttm_bo_driver.h | 20 +- 3 files changed, 256 insertions(+), 269 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index e13fd23f3334..60d8179a8bcd 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -27,6 +27,14 @@ /* * Authors: Thomas Hellstrom */ +/* Notes: + * + * We store bo pointer in drm_mm_node struct so we know which bo own a + * specific node. There is no protection on the pointer, thus to make + * sure things don't go berserk you have to access this pointer while + * holding the global lru lock and make sure anytime you free a node you + * reset the pointer to NULL. + */ #include "ttm/ttm_module.h" #include "ttm/ttm_bo_driver.h" @@ -247,7 +255,6 @@ EXPORT_SYMBOL(ttm_bo_unreserve); /* * Call bo->mutex locked. 
*/ - static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) { struct ttm_bo_device *bdev = bo->bdev; @@ -329,14 +336,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, } if (bo->mem.mem_type == TTM_PL_SYSTEM) { - - struct ttm_mem_reg *old_mem = &bo->mem; - uint32_t save_flags = old_mem->placement; - - *old_mem = *mem; + bo->mem = *mem; mem->mm_node = NULL; - ttm_flag_masked(&save_flags, mem->placement, - TTM_PL_MASK_MEMTYPE); goto moved; } @@ -419,6 +420,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all) kref_put(&bo->list_kref, ttm_bo_ref_bug); } if (bo->mem.mm_node) { + bo->mem.mm_node->private = NULL; drm_mm_put_block(bo->mem.mm_node); bo->mem.mm_node = NULL; } @@ -555,17 +557,14 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo) } EXPORT_SYMBOL(ttm_bo_unref); -static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type, - bool interruptible, bool no_wait) +static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, + bool no_wait) { - int ret = 0; struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_global *glob = bo->glob; struct ttm_mem_reg evict_mem; - uint32_t proposed_placement; - - if (bo->mem.mem_type != mem_type) - goto out; + struct ttm_placement placement; + int ret = 0; spin_lock(&bo->lock); ret = ttm_bo_wait(bo, false, interruptible, no_wait); @@ -585,14 +584,9 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type, evict_mem = bo->mem; evict_mem.mm_node = NULL; - proposed_placement = bdev->driver->evict_flags(bo); - - ret = ttm_bo_mem_space(bo, proposed_placement, - &evict_mem, interruptible, no_wait); - if (unlikely(ret != 0 && ret != -ERESTART)) - ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM, - &evict_mem, interruptible, no_wait); - + bdev->driver->evict_flags(bo, &placement); + ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible, + no_wait); if (ret) { if (ret != -ERESTART) printk(KERN_ERR TTM_PFX @@ -606,95 +600,117 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type, if (ret) { if (ret != -ERESTART) printk(KERN_ERR TTM_PFX "Buffer eviction failed\n"); + spin_lock(&glob->lru_lock); + if (evict_mem.mm_node) { + evict_mem.mm_node->private = NULL; + drm_mm_put_block(evict_mem.mm_node); + evict_mem.mm_node = NULL; + } + spin_unlock(&glob->lru_lock); goto out; } + bo->evicted = true; +out: + return ret; +} + +static int ttm_mem_evict_first(struct ttm_bo_device *bdev, + uint32_t mem_type, + bool interruptible, bool no_wait) +{ + struct ttm_bo_global *glob = bdev->glob; + struct ttm_mem_type_manager *man = &bdev->man[mem_type]; + struct ttm_buffer_object *bo; + int ret, put_count = 0; spin_lock(&glob->lru_lock); - if (evict_mem.mm_node) { - drm_mm_put_block(evict_mem.mm_node); - evict_mem.mm_node = NULL; - } + bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru); + kref_get(&bo->list_kref); + ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, false, 0); + if (likely(ret == 0)) + put_count = ttm_bo_del_from_lru(bo); spin_unlock(&glob->lru_lock); - bo->evicted = true; -out: + if (unlikely(ret != 0)) + return ret; + while (put_count--) + kref_put(&bo->list_kref, ttm_bo_ref_bug); + ret = ttm_bo_evict(bo, interruptible, no_wait); + ttm_bo_unreserve(bo); + kref_put(&bo->list_kref, ttm_bo_release_list); return ret; } +static int ttm_bo_man_get_node(struct ttm_buffer_object *bo, + struct ttm_mem_type_manager *man, + struct ttm_placement *placement, + struct ttm_mem_reg *mem, + struct drm_mm_node **node) +{ + struct 
ttm_bo_global *glob = bo->glob; + unsigned long lpfn; + int ret; + + lpfn = placement->lpfn; + if (!lpfn) + lpfn = man->size; + *node = NULL; + do { + ret = drm_mm_pre_get(&man->manager); + if (unlikely(ret)) + return ret; + + spin_lock(&glob->lru_lock); + *node = drm_mm_search_free_in_range(&man->manager, + mem->num_pages, mem->page_alignment, + placement->fpfn, lpfn, 1); + if (unlikely(*node == NULL)) { + spin_unlock(&glob->lru_lock); + return 0; + } + *node = drm_mm_get_block_atomic_range(*node, mem->num_pages, + mem->page_alignment, + placement->fpfn, + lpfn); + spin_unlock(&glob->lru_lock); + } while (*node == NULL); + return 0; +} + /** * Repeatedly evict memory from the LRU for @mem_type until we create enough * space, or we've evicted everything and there isn't enough space. */ -static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem, - uint32_t mem_type, - bool interruptible, bool no_wait) +static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, + uint32_t mem_type, + struct ttm_placement *placement, + struct ttm_mem_reg *mem, + bool interruptible, bool no_wait) { + struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_global *glob = bdev->glob; - struct drm_mm_node *node; - struct ttm_buffer_object *entry; struct ttm_mem_type_manager *man = &bdev->man[mem_type]; - struct list_head *lru; - unsigned long num_pages = mem->num_pages; - int put_count = 0; + struct drm_mm_node *node; int ret; -retry_pre_get: - ret = drm_mm_pre_get(&man->manager); - if (unlikely(ret != 0)) - return ret; - - spin_lock(&glob->lru_lock); do { - node = drm_mm_search_free(&man->manager, num_pages, - mem->page_alignment, 1); + ret = ttm_bo_man_get_node(bo, man, placement, mem, &node); + if (unlikely(ret != 0)) + return ret; if (node) break; - - lru = &man->lru; - if (list_empty(lru)) + spin_lock(&glob->lru_lock); + if (list_empty(&man->lru)) { + spin_unlock(&glob->lru_lock); break; - - entry = list_first_entry(lru, struct ttm_buffer_object, lru); - kref_get(&entry->list_kref); - - ret = - ttm_bo_reserve_locked(entry, interruptible, no_wait, - false, 0); - - if (likely(ret == 0)) - put_count = ttm_bo_del_from_lru(entry); - + } spin_unlock(&glob->lru_lock); - + ret = ttm_mem_evict_first(bdev, mem_type, interruptible, + no_wait); if (unlikely(ret != 0)) return ret; - - while (put_count--) - kref_put(&entry->list_kref, ttm_bo_ref_bug); - - ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait); - - ttm_bo_unreserve(entry); - - kref_put(&entry->list_kref, ttm_bo_release_list); - if (ret) - return ret; - - spin_lock(&glob->lru_lock); } while (1); - - if (!node) { - spin_unlock(&glob->lru_lock); + if (node == NULL) return -ENOMEM; - } - - node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment); - if (unlikely(!node)) { - spin_unlock(&glob->lru_lock); - goto retry_pre_get; - } - - spin_unlock(&glob->lru_lock); mem->mm_node = node; mem->mem_type = mem_type; return 0; @@ -725,7 +741,6 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, return result; } - static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, bool disallow_fixed, uint32_t mem_type, @@ -749,6 +764,18 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, return true; } +static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type) +{ + int i; + + for (i = 0; i <= TTM_PL_PRIV5; i++) + if (flags & (1 << i)) { + *mem_type = i; + return 0; + } + return -EINVAL; +} + /** * Creates space for memory region @mem according to its type. 
* @@ -758,66 +785,55 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, * space. */ int ttm_bo_mem_space(struct ttm_buffer_object *bo, - uint32_t proposed_placement, - struct ttm_mem_reg *mem, - bool interruptible, bool no_wait) + struct ttm_placement *placement, + struct ttm_mem_reg *mem, + bool interruptible, bool no_wait) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_bo_global *glob = bo->glob; struct ttm_mem_type_manager *man; - - uint32_t num_prios = bdev->driver->num_mem_type_prio; - const uint32_t *prios = bdev->driver->mem_type_prio; - uint32_t i; uint32_t mem_type = TTM_PL_SYSTEM; uint32_t cur_flags = 0; bool type_found = false; bool type_ok = false; bool has_eagain = false; struct drm_mm_node *node = NULL; - int ret; + int i, ret; mem->mm_node = NULL; - for (i = 0; i < num_prios; ++i) { - mem_type = prios[i]; + for (i = 0; i <= placement->num_placement; ++i) { + ret = ttm_mem_type_from_flags(placement->placement[i], + &mem_type); + if (ret) + return ret; man = &bdev->man[mem_type]; type_ok = ttm_bo_mt_compatible(man, - bo->type == ttm_bo_type_user, - mem_type, proposed_placement, - &cur_flags); + bo->type == ttm_bo_type_user, + mem_type, + placement->placement[i], + &cur_flags); if (!type_ok) continue; cur_flags = ttm_bo_select_caching(man, bo->mem.placement, cur_flags); + /* + * Use the access and other non-mapping-related flag bits from + * the memory placement flags to the current flags + */ + ttm_flag_masked(&cur_flags, placement->placement[i], + ~TTM_PL_MASK_MEMTYPE); if (mem_type == TTM_PL_SYSTEM) break; if (man->has_type && man->use_type) { type_found = true; - do { - ret = drm_mm_pre_get(&man->manager); - if (unlikely(ret)) - return ret; - - spin_lock(&glob->lru_lock); - node = drm_mm_search_free(&man->manager, - mem->num_pages, - mem->page_alignment, - 1); - if (unlikely(!node)) { - spin_unlock(&glob->lru_lock); - break; - } - node = drm_mm_get_block_atomic(node, - mem->num_pages, - mem-> - page_alignment); - spin_unlock(&glob->lru_lock); - } while (!node); + ret = ttm_bo_man_get_node(bo, man, placement, mem, + &node); + if (unlikely(ret)) + return ret; } if (node) break; @@ -827,43 +843,48 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, mem->mm_node = node; mem->mem_type = mem_type; mem->placement = cur_flags; + if (node) + node->private = bo; return 0; } if (!type_found) return -EINVAL; - num_prios = bdev->driver->num_mem_busy_prio; - prios = bdev->driver->mem_busy_prio; - - for (i = 0; i < num_prios; ++i) { - mem_type = prios[i]; + for (i = 0; i <= placement->num_busy_placement; ++i) { + ret = ttm_mem_type_from_flags(placement->placement[i], + &mem_type); + if (ret) + return ret; man = &bdev->man[mem_type]; - if (!man->has_type) continue; - if (!ttm_bo_mt_compatible(man, - bo->type == ttm_bo_type_user, - mem_type, - proposed_placement, &cur_flags)) + bo->type == ttm_bo_type_user, + mem_type, + placement->placement[i], + &cur_flags)) continue; cur_flags = ttm_bo_select_caching(man, bo->mem.placement, cur_flags); + /* + * Use the access and other non-mapping-related flag bits from + * the memory placement flags to the current flags + */ + ttm_flag_masked(&cur_flags, placement->placement[i], + ~TTM_PL_MASK_MEMTYPE); - ret = ttm_bo_mem_force_space(bdev, mem, mem_type, - interruptible, no_wait); - + ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem, + interruptible, no_wait); if (ret == 0 && mem->mm_node) { mem->placement = cur_flags; + mem->mm_node->private = bo; return 0; } - if (ret == -ERESTART) has_eagain = true; } - ret = 
(has_eagain) ? -ERESTART : -ENOMEM; return ret; } @@ -886,8 +907,8 @@ int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait) } int ttm_bo_move_buffer(struct ttm_buffer_object *bo, - uint32_t proposed_placement, - bool interruptible, bool no_wait) + struct ttm_placement *placement, + bool interruptible, bool no_wait) { struct ttm_bo_global *glob = bo->glob; int ret = 0; @@ -900,101 +921,82 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo, * Have the driver move function wait for idle when necessary, * instead of doing it here. */ - spin_lock(&bo->lock); ret = ttm_bo_wait(bo, false, interruptible, no_wait); spin_unlock(&bo->lock); - if (ret) return ret; - mem.num_pages = bo->num_pages; mem.size = mem.num_pages << PAGE_SHIFT; mem.page_alignment = bo->mem.page_alignment; - /* * Determine where to move the buffer. */ - - ret = ttm_bo_mem_space(bo, proposed_placement, &mem, - interruptible, no_wait); + ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait); if (ret) goto out_unlock; - ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait); - out_unlock: if (ret && mem.mm_node) { spin_lock(&glob->lru_lock); + mem.mm_node->private = NULL; drm_mm_put_block(mem.mm_node); spin_unlock(&glob->lru_lock); } return ret; } -static int ttm_bo_mem_compat(uint32_t proposed_placement, +static int ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_mem_reg *mem) { - if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0) - return 0; - if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0) - return 0; - - return 1; + int i; + + for (i = 0; i < placement->num_placement; i++) { + if ((placement->placement[i] & mem->placement & + TTM_PL_MASK_CACHING) && + (placement->placement[i] & mem->placement & + TTM_PL_MASK_MEM)) + return i; + } + return -1; } int ttm_buffer_object_validate(struct ttm_buffer_object *bo, - uint32_t proposed_placement, - bool interruptible, bool no_wait) + struct ttm_placement *placement, + bool interruptible, bool no_wait) { int ret; BUG_ON(!atomic_read(&bo->reserved)); - bo->proposed_placement = proposed_placement; - - TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n", - (unsigned long)proposed_placement, - (unsigned long)bo->mem.placement); - + /* Check that range is valid */ + if (placement->lpfn || placement->fpfn) + if (placement->fpfn > placement->lpfn || + (placement->lpfn - placement->fpfn) < bo->num_pages) + return -EINVAL; /* * Check whether we need to move buffer. */ - - if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) { - ret = ttm_bo_move_buffer(bo, bo->proposed_placement, - interruptible, no_wait); - if (ret) { - if (ret != -ERESTART) - printk(KERN_ERR TTM_PFX - "Failed moving buffer. " - "Proposed placement 0x%08x\n", - bo->proposed_placement); - if (ret == -ENOMEM) - printk(KERN_ERR TTM_PFX - "Out of aperture space or " - "DRM memory quota.\n"); + ret = ttm_bo_mem_compat(placement, &bo->mem); + if (ret < 0) { + ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait); + if (ret) return ret; - } + } else { + /* + * Use the access and other non-mapping-related flag bits from + * the compatible memory placement flags to the active flags + */ + ttm_flag_masked(&bo->mem.placement, placement->placement[ret], + ~TTM_PL_MASK_MEMTYPE); } - /* * We might need to add a TTM. 
*/ - if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { ret = ttm_bo_add_ttm(bo, true); if (ret) return ret; } - /* - * Validation has succeeded, move the access and other - * non-mapping-related flag bits from the proposed flags to - * the active flags - */ - - ttm_flag_masked(&bo->mem.placement, bo->proposed_placement, - ~TTM_PL_MASK_MEMTYPE); - return 0; } EXPORT_SYMBOL(ttm_buffer_object_validate); @@ -1042,8 +1044,10 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev, size_t acc_size, void (*destroy) (struct ttm_buffer_object *)) { - int ret = 0; + int i, c, ret = 0; unsigned long num_pages; + uint32_t placements[8]; + struct ttm_placement placement; size += buffer_start & ~PAGE_MASK; num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; @@ -1100,7 +1104,16 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev, goto out_err; } - ret = ttm_buffer_object_validate(bo, flags, interruptible, false); + placement.fpfn = 0; + placement.lpfn = 0; + for (i = 0, c = 0; i <= TTM_PL_PRIV5; i++) + if (flags & (1 << i)) + placements[c++] = (flags & ~TTM_PL_MASK_MEM) | (1 << i); + placement.placement = placements; + placement.num_placement = c; + placement.busy_placement = placements; + placement.num_busy_placement = c; + ret = ttm_buffer_object_validate(bo, &placement, interruptible, false); if (ret) goto out_err; @@ -1135,8 +1148,8 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev, struct ttm_buffer_object **p_bo) { struct ttm_buffer_object *bo; - int ret; struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; + int ret; size_t acc_size = ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT); @@ -1161,66 +1174,32 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev, return ret; } -static int ttm_bo_leave_list(struct ttm_buffer_object *bo, - uint32_t mem_type, bool allow_errors) -{ - int ret; - - spin_lock(&bo->lock); - ret = ttm_bo_wait(bo, false, false, false); - spin_unlock(&bo->lock); - - if (ret && allow_errors) - goto out; - - if (bo->mem.mem_type == mem_type) - ret = ttm_bo_evict(bo, mem_type, false, false); - - if (ret) { - if (allow_errors) { - goto out; - } else { - ret = 0; - printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n"); - } - } - -out: - return ret; -} - static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, - struct list_head *head, - unsigned mem_type, bool allow_errors) + unsigned mem_type, bool allow_errors) { + struct ttm_mem_type_manager *man = &bdev->man[mem_type]; struct ttm_bo_global *glob = bdev->glob; - struct ttm_buffer_object *entry; int ret; - int put_count; /* * Can't use standard list traversal since we're unlocking. 
*/ spin_lock(&glob->lru_lock); - - while (!list_empty(head)) { - entry = list_first_entry(head, struct ttm_buffer_object, lru); - kref_get(&entry->list_kref); - ret = ttm_bo_reserve_locked(entry, false, false, false, 0); - put_count = ttm_bo_del_from_lru(entry); + while (!list_empty(&man->lru)) { spin_unlock(&glob->lru_lock); - while (put_count--) - kref_put(&entry->list_kref, ttm_bo_ref_bug); - BUG_ON(ret); - ret = ttm_bo_leave_list(entry, mem_type, allow_errors); - ttm_bo_unreserve(entry); - kref_put(&entry->list_kref, ttm_bo_release_list); + ret = ttm_mem_evict_first(bdev, mem_type, false, false); + if (ret) { + if (allow_errors) { + return ret; + } else { + printk(KERN_ERR TTM_PFX + "Cleanup eviction failed\n"); + } + } spin_lock(&glob->lru_lock); } - spin_unlock(&glob->lru_lock); - return 0; } @@ -1247,7 +1226,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) ret = 0; if (mem_type > 0) { - ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false); + ttm_bo_force_list_clean(bdev, mem_type, false); spin_lock(&glob->lru_lock); if (drm_mm_clean(&man->manager)) @@ -1280,12 +1259,12 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) return 0; } - return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true); + return ttm_bo_force_list_clean(bdev, mem_type, true); } EXPORT_SYMBOL(ttm_bo_evict_mm); int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, - unsigned long p_offset, unsigned long p_size) + unsigned long p_size) { int ret = -EINVAL; struct ttm_mem_type_manager *man; @@ -1315,7 +1294,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, type); return ret; } - ret = drm_mm_init(&man->manager, p_offset, p_size); + ret = drm_mm_init(&man->manager, 0, p_size); if (ret) return ret; } @@ -1464,7 +1443,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, * Initialize the system memory buffer type. * Other types need to be driver / IOCTL initialized. */ - ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0); + ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0); if (unlikely(ret != 0)) goto out_no_sys; diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 491146170522..2f7f56da2147 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -44,6 +44,29 @@ struct ttm_bo_device; struct drm_mm_node; + +/** + * struct ttm_placement + * + * @fpfn: first valid page frame number to put the object + * @lpfn: last valid page frame number to put the object + * @num_placement: number of prefered placements + * @placement: prefered placements + * @num_busy_placement: number of prefered placements when need to evict buffer + * @busy_placement: prefered placements when need to evict buffer + * + * Structure indicating the placement you request for an object. + */ +struct ttm_placement { + unsigned fpfn; + unsigned lpfn; + unsigned num_placement; + const uint32_t *placement; + unsigned num_busy_placement; + const uint32_t *busy_placement; +}; + + /** * struct ttm_mem_reg * @@ -109,10 +132,6 @@ struct ttm_tt; * the object is destroyed. * @event_queue: Queue for processes waiting on buffer object status change. * @lock: spinlock protecting mostly synchronization members. - * @proposed_placement: Proposed placement for the buffer. Changed only by the - * creator prior to validation as opposed to bo->mem.proposed_flags which is - * changed by the implementation prior to a buffer move if it wants to outsmart - * the buffer creator / user. This latter happens, for example, at eviction. 
* @mem: structure describing current placement. * @persistant_swap_storage: Usually the swap storage is deleted for buffers * pinned in physical memory. If this behaviour is not desired, this member @@ -177,7 +196,6 @@ struct ttm_buffer_object { * Members protected by the bo::reserved lock. */ - uint32_t proposed_placement; struct ttm_mem_reg mem; struct file *persistant_swap_storage; struct ttm_tt *ttm; @@ -293,21 +311,22 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, * ttm_buffer_object_validate * * @bo: The buffer object. - * @proposed_placement: Proposed_placement for the buffer object. + * @placement: Proposed placement for the buffer object. * @interruptible: Sleep interruptible if sleeping. * @no_wait: Return immediately if the buffer is busy. * * Changes placement and caching policy of the buffer object - * according to bo::proposed_flags. + * according proposed placement. * Returns - * -EINVAL on invalid proposed_flags. + * -EINVAL on invalid proposed placement. * -ENOMEM on out-of-memory condition. * -EBUSY if no_wait is true and buffer busy. * -ERESTART if interrupted by a signal. */ extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo, - uint32_t proposed_placement, - bool interruptible, bool no_wait); + struct ttm_placement *placement, + bool interruptible, bool no_wait); + /** * ttm_bo_unref * @@ -445,7 +464,6 @@ extern int ttm_bo_check_placement(struct ttm_buffer_object *bo, * * @bdev: Pointer to a ttm_bo_device struct. * @mem_type: The memory type. - * @p_offset: offset for managed area in pages. * @p_size: size managed area in pages. * * Initialize a manager for a given memory type. @@ -458,7 +476,7 @@ extern int ttm_bo_check_placement(struct ttm_buffer_object *bo, */ extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, - unsigned long p_offset, unsigned long p_size); + unsigned long p_size); /** * ttm_bo_clean_mm * diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 7a39ab9aa1d1..fa5c9e51ee7e 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -242,12 +242,6 @@ struct ttm_mem_type_manager { /** * struct ttm_bo_driver * - * @mem_type_prio: Priority array of memory types to place a buffer object in - * if it fits without evicting buffers from any of these memory types. - * @mem_busy_prio: Priority array of memory types to place a buffer object in - * if it needs to evict buffers to make room. - * @num_mem_type_prio: Number of elements in the @mem_type_prio array. - * @num_mem_busy_prio: Number of elements in the @num_mem_busy_prio array. * @create_ttm_backend_entry: Callback to create a struct ttm_backend. * @invalidate_caches: Callback to invalidate read caches when a buffer object * has been evicted. @@ -265,11 +259,6 @@ struct ttm_mem_type_manager { */ struct ttm_bo_driver { - const uint32_t *mem_type_prio; - const uint32_t *mem_busy_prio; - uint32_t num_mem_type_prio; - uint32_t num_mem_busy_prio; - /** * struct ttm_bo_driver member create_ttm_backend_entry * @@ -306,7 +295,8 @@ struct ttm_bo_driver { * finished, they'll end up in bo->mem.flags */ - uint32_t(*evict_flags) (struct ttm_buffer_object *bo); + void(*evict_flags) (struct ttm_buffer_object *bo, + struct ttm_placement *placement); /** * struct ttm_bo_driver member move: * @@ -651,9 +641,9 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, * -ERESTART: An interruptible sleep was interrupted by a signal. 
*/ extern int ttm_bo_mem_space(struct ttm_buffer_object *bo, - uint32_t proposed_placement, - struct ttm_mem_reg *mem, - bool interruptible, bool no_wait); + struct ttm_placement *placement, + struct ttm_mem_reg *mem, + bool interruptible, bool no_wait); /** * ttm_bo_wait_for_cpu * -- cgit v1.2.3 From 98ffc4158e12008102cb6ae242a7fc46f9243f0d Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Mon, 7 Dec 2009 18:36:18 +0100 Subject: drm/ttm: Have the TTM code return -ERESTARTSYS instead of -ERESTART. Return -ERESTARTSYS instead of -ERESTART when interrupted by a signal. The -ERESTARTSYS is converted to an -EINTR by the kernel signal layer before returned to user-space. Signed-off-by: Thomas Hellstrom Signed-off-by: Jerome Glisse Signed-off-by: Dave Airlie --- drivers/gpu/drm/ttm/ttm_bo.c | 29 +++++++++++------------------ drivers/gpu/drm/ttm/ttm_bo_vm.c | 7 +------ include/drm/ttm/ttm_bo_api.h | 14 +++++++------- include/drm/ttm/ttm_bo_driver.h | 8 ++++---- 4 files changed, 23 insertions(+), 35 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 60d8179a8bcd..640fb265dd5a 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -125,7 +125,7 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible) ret = wait_event_interruptible(bo->event_queue, atomic_read(&bo->reserved) == 0); if (unlikely(ret != 0)) - return -ERESTART; + return ret; } else { wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0); } @@ -571,7 +571,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, spin_unlock(&bo->lock); if (unlikely(ret != 0)) { - if (ret != -ERESTART) { + if (ret != -ERESTARTSYS) { printk(KERN_ERR TTM_PFX "Failed to expire sync object before " "buffer eviction.\n"); @@ -588,7 +588,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible, no_wait); if (ret) { - if (ret != -ERESTART) + if (ret != -ERESTARTSYS) printk(KERN_ERR TTM_PFX "Failed to find memory space for " "buffer 0x%p eviction.\n", bo); @@ -598,7 +598,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, no_wait); if (ret) { - if (ret != -ERESTART) + if (ret != -ERESTARTSYS) printk(KERN_ERR TTM_PFX "Buffer eviction failed\n"); spin_lock(&glob->lru_lock); if (evict_mem.mm_node) { @@ -795,7 +795,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, uint32_t cur_flags = 0; bool type_found = false; bool type_ok = false; - bool has_eagain = false; + bool has_erestartsys = false; struct drm_mm_node *node = NULL; int i, ret; @@ -882,28 +882,21 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, mem->mm_node->private = bo; return 0; } - if (ret == -ERESTART) - has_eagain = true; + if (ret == -ERESTARTSYS) + has_erestartsys = true; } - ret = (has_eagain) ? -ERESTART : -ENOMEM; + ret = (has_erestartsys) ? 
-ERESTARTSYS : -ENOMEM; return ret; } EXPORT_SYMBOL(ttm_bo_mem_space); int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait) { - int ret = 0; - if ((atomic_read(&bo->cpu_writers) > 0) && no_wait) return -EBUSY; - ret = wait_event_interruptible(bo->event_queue, - atomic_read(&bo->cpu_writers) == 0); - - if (ret == -ERESTARTSYS) - ret = -ERESTART; - - return ret; + return wait_event_interruptible(bo->event_queue, + atomic_read(&bo->cpu_writers) == 0); } int ttm_bo_move_buffer(struct ttm_buffer_object *bo, @@ -1673,7 +1666,7 @@ int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible, ret = wait_event_interruptible (bo->event_queue, atomic_read(&bo->reserved) == 0); if (unlikely(ret != 0)) - return -ERESTART; + return ret; } else { wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0); diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 1c040d040338..609a85a4d855 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -114,7 +114,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ret = ttm_bo_wait(bo, false, true, false); spin_unlock(&bo->lock); if (unlikely(ret != 0)) { - retval = (ret != -ERESTART) ? + retval = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE; goto out_unlock; } @@ -349,9 +349,6 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp, switch (ret) { case 0: break; - case -ERESTART: - ret = -EINTR; - goto out_unref; case -EBUSY: ret = -EAGAIN; goto out_unref; @@ -421,8 +418,6 @@ ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf, switch (ret) { case 0: break; - case -ERESTART: - return -EINTR; case -EBUSY: return -EAGAIN; default: diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 2f7f56da2147..4fd498523ce3 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -303,7 +303,7 @@ ttm_bo_reference(struct ttm_buffer_object *bo) * Note: It might be necessary to block validations before the * wait by reserving the buffer. * Returns -EBUSY if no_wait is true and the buffer is busy. - * Returns -ERESTART if interrupted by a signal. + * Returns -ERESTARTSYS if interrupted by a signal. */ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, bool interruptible, bool no_wait); @@ -321,7 +321,7 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, * -EINVAL on invalid proposed placement. * -ENOMEM on out-of-memory condition. * -EBUSY if no_wait is true and buffer busy. - * -ERESTART if interrupted by a signal. + * -ERESTARTSYS if interrupted by a signal. */ extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo, struct ttm_placement *placement, @@ -347,7 +347,7 @@ extern void ttm_bo_unref(struct ttm_buffer_object **bo); * waiting for buffer idle. This lock is recursive. * Returns * -EBUSY if the buffer is busy and no_wait is true. - * -ERESTART if interrupted by a signal. + * -ERESTARTSYS if interrupted by a signal. */ extern int @@ -390,7 +390,7 @@ extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo); * Returns * -ENOMEM: Out of memory. * -EINVAL: Invalid placement flags. - * -ERESTART: Interrupted by signal while sleeping waiting for resources. + * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources. */ extern int ttm_buffer_object_init(struct ttm_bo_device *bdev, @@ -430,7 +430,7 @@ extern int ttm_buffer_object_init(struct ttm_bo_device *bdev, * Returns * -ENOMEM: Out of memory. 
* -EINVAL: Invalid placement flags. - * -ERESTART: Interrupted by signal while waiting for resources. + * -ERESTARTSYS: Interrupted by signal while waiting for resources. */ extern int ttm_buffer_object_create(struct ttm_bo_device *bdev, @@ -521,7 +521,7 @@ extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type); * * Returns: * -EINVAL: Invalid or uninitialized memory type. - * -ERESTART: The call was interrupted by a signal while waiting to + * -ERESTARTSYS: The call was interrupted by a signal while waiting to * evict a buffer. */ @@ -624,7 +624,7 @@ extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, * be called from the fops::read and fops::write method. * Returns: * See man (2) write, man(2) read. In particular, - * the function may return -EINTR if + * the function may return -ERESTARTSYS if * interrupted by a signal. */ diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index fa5c9e51ee7e..ff7664e0c3cd 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -638,7 +638,7 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, * -EBUSY: No space available (only if no_wait == 1). * -ENOMEM: Could not allocate memory for the buffer object, either due to * fragmentation or concurrent allocators. - * -ERESTART: An interruptible sleep was interrupted by a signal. + * -ERESTARTSYS: An interruptible sleep was interrupted by a signal. */ extern int ttm_bo_mem_space(struct ttm_buffer_object *bo, struct ttm_placement *placement, @@ -653,7 +653,7 @@ extern int ttm_bo_mem_space(struct ttm_buffer_object *bo, * Wait until a buffer object is no longer sync'ed for CPU access. * Returns: * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1). - * -ERESTART: An interruptible sleep was interrupted by a signal. + * -ERESTARTSYS: An interruptible sleep was interrupted by a signal. */ extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait); @@ -757,7 +757,7 @@ extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); * -EAGAIN: The reservation may cause a deadlock. * Release all buffer reservations, wait for @bo to become unreserved and * try again. (only if use_sequence == 1). - * -ERESTART: A wait for the buffer to become unreserved was interrupted by + * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by * a signal. Release all buffer reservations and return to user-space. */ extern int ttm_bo_reserve(struct ttm_buffer_object *bo, @@ -798,7 +798,7 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, * * Returns: * -EBUSY: If no_wait == 1 and the buffer is already reserved. - * -ERESTART: If interruptible == 1 and the process received a signal + * -ERESTARTSYS: If interruptible == 1 and the process received a signal * while sleeping. */ extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo, -- cgit v1.2.3 From 99d7e48e8cb867f303439ad40e995e203841bd94 Mon Sep 17 00:00:00 2001 From: Jerome Glisse Date: Wed, 9 Dec 2009 21:55:09 +0100 Subject: drm: Add memory manager debug function drm_mm_debug_table will print the memory manager state in table allowing to give a snapshot of the manager at given point in time. Usefull for debugging. 
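For illustration only (not part of this patch), a driver could dump its manager state from a debug path roughly as follows; the wrapper function and the "vram" prefix are placeholders, only drm_mm_debug_table() itself comes from this change:

#include <drm/drm_mm.h>

/* sketch: emits one KERN_DEBUG line per node plus a used/free summary */
static void example_dump_vram_mm(struct drm_mm *mm)
{
	drm_mm_debug_table(mm, "vram");
}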
Signed-off-by: Jerome Glisse Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_mm.c | 20 ++++++++++++++++++++ include/drm/drm_mm.h | 1 + 2 files changed, 21 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index a5c2773ccf27..d7d7eac3ddd2 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -469,6 +469,26 @@ void drm_mm_takedown(struct drm_mm * mm) } EXPORT_SYMBOL(drm_mm_takedown); +void drm_mm_debug_table(struct drm_mm *mm, const char *prefix) +{ + struct drm_mm_node *entry; + int total_used = 0, total_free = 0, total = 0; + + list_for_each_entry(entry, &mm->ml_entry, ml_entry) { + printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n", + prefix, entry->start, entry->start + entry->size, + entry->size, entry->free ? "free" : "used"); + total += entry->size; + if (entry->free) + total_free += entry->size; + else + total_used += entry->size; + } + printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total, + total_used, total_free); +} +EXPORT_SYMBOL(drm_mm_debug_table); + #if defined(CONFIG_DEBUG_FS) int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) { diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index b40b2f062039..4c10be39a43b 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h @@ -133,6 +133,7 @@ static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block) return block->mm; } +extern void drm_mm_debug_table(struct drm_mm *mm, const char *prefix); #ifdef CONFIG_DEBUG_FS int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm); #endif -- cgit v1.2.3 From 1bbfa6f25673019dc0acc9308b667c96f6cda8bf Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Wed, 9 Dec 2009 20:07:03 -0500 Subject: sched: Mark sched_clock() as notrace The core ftrace code (trace_clock_local) calls sched_clock() directly, so we don't want to recurisvely trigger the ftrace code. Rather than update every sched_clock() definition, tag the prototype for everyone as notrace. Signed-off-by: Mike Frysinger Cc: Peter Zijlstra LKML-Reference: <1260407223-10900-1-git-send-email-vapier@gentoo.org> Signed-off-by: Ingo Molnar --- include/linux/sched.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index 75e6e60bf583..576d838adf68 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1836,7 +1836,8 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) extern int sched_clock_stable; #endif -extern unsigned long long sched_clock(void); +/* ftrace calls sched_clock() directly */ +extern unsigned long long notrace sched_clock(void); extern void sched_clock_init(void); extern u64 sched_clock_cpu(int cpu); -- cgit v1.2.3 From 41d2e494937715d3150e5c75d01f0e75ae899337 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 13 Nov 2009 17:05:44 +0100 Subject: hrtimer: Tune hrtimer_interrupt hang logic The hrtimer_interrupt hang logic adjusts min_delta_ns based on the execution time of the hrtimer callbacks. This is error-prone for virtual machines, where a guest vcpu can be scheduled out during the execution of the callbacks (and the callbacks themselves can do operations that translate to blocking operations in the hypervisor), which in can lead to large min_delta_ns rendering the system unusable. Replace the current heuristics with something more reliable. Allow the interrupt code to try 3 times to catch up with the lost time. 
If that fails use the total time spent in the interrupt handler to defer the next timer interrupt so the system can catch up with other things which got delayed. Limit that deferment to 100ms. The retry events and the maximum time spent in the interrupt handler are recorded and exposed via /proc/timer_list Inspired by a patch from Marcelo. Reported-by: Michael Tokarev Signed-off-by: Thomas Gleixner Tested-by: Marcelo Tosatti Cc: kvm@vger.kernel.org --- include/linux/hrtimer.h | 13 +++++-- kernel/hrtimer.c | 97 ++++++++++++++++++++++++++++-------------------- kernel/time/timer_list.c | 5 ++- 3 files changed, 70 insertions(+), 45 deletions(-) (limited to 'include') diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 9bace4b9f4fe..040b6796ab4d 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -162,10 +162,11 @@ struct hrtimer_clock_base { * @expires_next: absolute time of the next event which was scheduled * via clock_set_next_event() * @hres_active: State of high resolution mode - * @check_clocks: Indictator, when set evaluate time source and clock - * event devices whether high resolution mode can be - * activated. - * @nr_events: Total number of timer interrupt events + * @hang_detected: The last hrtimer interrupt detected a hang + * @nr_events: Total number of hrtimer interrupt events + * @nr_retries: Total number of hrtimer interrupt retries + * @nr_hangs: Total number of hrtimer interrupt hangs + * @max_hang_time: Maximum time spent in hrtimer_interrupt */ struct hrtimer_cpu_base { spinlock_t lock; @@ -173,7 +174,11 @@ struct hrtimer_cpu_base { #ifdef CONFIG_HIGH_RES_TIMERS ktime_t expires_next; int hres_active; + int hang_detected; unsigned long nr_events; + unsigned long nr_retries; + unsigned long nr_hangs; + ktime_t max_hang_time; #endif }; diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index ede527708123..931a4d99bc55 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -557,7 +557,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal) static int hrtimer_reprogram(struct hrtimer *timer, struct hrtimer_clock_base *base) { - ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next; + struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset); int res; @@ -582,7 +582,16 @@ static int hrtimer_reprogram(struct hrtimer *timer, if (expires.tv64 < 0) return -ETIME; - if (expires.tv64 >= expires_next->tv64) + if (expires.tv64 >= cpu_base->expires_next.tv64) + return 0; + + /* + * If a hang was detected in the last timer interrupt then we + * do not schedule a timer which is earlier than the expiry + * which we enforced in the hang detection. We want the system + * to make progress. + */ + if (cpu_base->hang_detected) return 0; /* @@ -590,7 +599,7 @@ static int hrtimer_reprogram(struct hrtimer *timer, */ res = tick_program_event(expires, 0); if (!IS_ERR_VALUE(res)) - *expires_next = expires; + cpu_base->expires_next = expires; return res; } @@ -1217,30 +1226,6 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now) #ifdef CONFIG_HIGH_RES_TIMERS -static int force_clock_reprogram; - -/* - * After 5 iteration's attempts, we consider that hrtimer_interrupt() - * is hanging, which could happen with something that slows the interrupt - * such as the tracing. Then we force the clock reprogramming for each future - * hrtimer interrupts to avoid infinite loops and use the min_delta_ns - * threshold that we will overwrite. 
- * The next tick event will be scheduled to 3 times we currently spend on - * hrtimer_interrupt(). This gives a good compromise, the cpus will spend - * 1/4 of their time to process the hrtimer interrupts. This is enough to - * let it running without serious starvation. - */ - -static inline void -hrtimer_interrupt_hanging(struct clock_event_device *dev, - ktime_t try_time) -{ - force_clock_reprogram = 1; - dev->min_delta_ns = (unsigned long)try_time.tv64 * 3; - printk(KERN_WARNING "hrtimer: interrupt too slow, " - "forcing clock min delta to %llu ns\n", - (unsigned long long) dev->min_delta_ns); -} /* * High resolution timer interrupt * Called with interrupts disabled @@ -1249,21 +1234,15 @@ void hrtimer_interrupt(struct clock_event_device *dev) { struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); struct hrtimer_clock_base *base; - ktime_t expires_next, now; - int nr_retries = 0; - int i; + ktime_t expires_next, now, entry_time, delta; + int i, retries = 0; BUG_ON(!cpu_base->hres_active); cpu_base->nr_events++; dev->next_event.tv64 = KTIME_MAX; - retry: - /* 5 retries is enough to notice a hang */ - if (!(++nr_retries % 5)) - hrtimer_interrupt_hanging(dev, ktime_sub(ktime_get(), now)); - - now = ktime_get(); - + entry_time = now = ktime_get(); +retry: expires_next.tv64 = KTIME_MAX; spin_lock(&cpu_base->lock); @@ -1325,10 +1304,48 @@ void hrtimer_interrupt(struct clock_event_device *dev) spin_unlock(&cpu_base->lock); /* Reprogramming necessary ? */ - if (expires_next.tv64 != KTIME_MAX) { - if (tick_program_event(expires_next, force_clock_reprogram)) - goto retry; + if (expires_next.tv64 == KTIME_MAX || + !tick_program_event(expires_next, 0)) { + cpu_base->hang_detected = 0; + return; } + + /* + * The next timer was already expired due to: + * - tracing + * - long lasting callbacks + * - being scheduled away when running in a VM + * + * We need to prevent that we loop forever in the hrtimer + * interrupt routine. We give it 3 attempts to avoid + * overreacting on some spurious event. + */ + now = ktime_get(); + cpu_base->nr_retries++; + if (++retries < 3) + goto retry; + /* + * Give the system a chance to do something else than looping + * here. We stored the entry time, so we know exactly how long + * we spent here. We schedule the next event this amount of + * time away. + */ + cpu_base->nr_hangs++; + cpu_base->hang_detected = 1; + delta = ktime_sub(now, entry_time); + if (delta.tv64 > cpu_base->max_hang_time.tv64) + cpu_base->max_hang_time = delta; + /* + * Limit it to a sensible value as we enforce a longer + * delay. Give the CPU at least 100ms to catch up. 
+ */ + if (delta.tv64 > 100 * NSEC_PER_MSEC) + expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC); + else + expires_next = ktime_add(now, delta); + tick_program_event(expires_next, 1); + printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n", + ktime_to_ns(delta)); } /* diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c index 665c76edbf17..9d80db4747d4 100644 --- a/kernel/time/timer_list.c +++ b/kernel/time/timer_list.c @@ -150,6 +150,9 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now) P_ns(expires_next); P(hres_active); P(nr_events); + P(nr_retries); + P(nr_hangs); + P_ns(max_hang_time); #endif #undef P #undef P_ns @@ -254,7 +257,7 @@ static int timer_list_show(struct seq_file *m, void *v) u64 now = ktime_to_ns(ktime_get()); int cpu; - SEQ_printf(m, "Timer List Version: v0.4\n"); + SEQ_printf(m, "Timer List Version: v0.5\n"); SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES); SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now); -- cgit v1.2.3 From 5f201907dfe4ad42c44006ddfcec00ed12e59497 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 10 Dec 2009 10:56:29 +0100 Subject: hrtimer: move timer stats helper functions to hrtimer.c There is no reason to make timer_stats_hrtimer_set_start_info and friends visible to the rest of the kernel. So move all of them to hrtimer.c. Also make timer_stats_hrtimer_set_start_info a static inline function so it gets inlined and we avoid another function call. Based on a patch by Thomas Gleixner. Signed-off-by: Heiko Carstens LKML-Reference: <20091210095629.GC4144@osiris.boeblingen.de.ibm.com> Signed-off-by: Thomas Gleixner --- include/linux/hrtimer.h | 43 ------------------------------------------- kernel/hrtimer.c | 24 ++++++++++++++++++++---- 2 files changed, 20 insertions(+), 47 deletions(-) (limited to 'include') diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 040b6796ab4d..af634e95871d 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -440,47 +440,4 @@ extern u64 ktime_divns(const ktime_t kt, s64 div); /* Show pending timers: */ extern void sysrq_timer_list_show(void); -/* - * Timer-statistics info: - */ -#ifdef CONFIG_TIMER_STATS - -extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf, - void *timerf, char *comm, - unsigned int timer_flag); - -static inline void timer_stats_account_hrtimer(struct hrtimer *timer) -{ - if (likely(!timer_stats_active)) - return; - timer_stats_update_stats(timer, timer->start_pid, timer->start_site, - timer->function, timer->start_comm, 0); -} - -extern void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, - void *addr); - -static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer) -{ - __timer_stats_hrtimer_set_start_info(timer, __builtin_return_address(0)); -} - -static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer) -{ - timer->start_site = NULL; -} -#else -static inline void timer_stats_account_hrtimer(struct hrtimer *timer) -{ -} - -static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer) -{ -} - -static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer) -{ -} -#endif - #endif diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 931a4d99bc55..d2f9239dc6ba 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -756,17 +756,33 @@ static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { } #endif /* CONFIG_HIGH_RES_TIMERS */ -#ifdef CONFIG_TIMER_STATS -void 
__timer_stats_hrtimer_set_start_info(struct hrtimer *timer, void *addr) +static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer) { +#ifdef CONFIG_TIMER_STATS if (timer->start_site) return; - - timer->start_site = addr; + timer->start_site = __builtin_return_address(0); memcpy(timer->start_comm, current->comm, TASK_COMM_LEN); timer->start_pid = current->pid; +#endif } + +static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer) +{ +#ifdef CONFIG_TIMER_STATS + timer->start_site = NULL; #endif +} + +static inline void timer_stats_account_hrtimer(struct hrtimer *timer) +{ +#ifdef CONFIG_TIMER_STATS + if (likely(!timer_stats_active)) + return; + timer_stats_update_stats(timer, timer->start_pid, timer->start_site, + timer->function, timer->start_comm, 0); +#endif +} /* * Counterpart to lock_hrtimer_base above: -- cgit v1.2.3 From e9c0748b687aa70179a9e6d8ffc24b2874fe350b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 10 Dec 2009 13:23:19 +0100 Subject: itimer: Fix the itimer trace print format Compiling powerpc64 results in: include/trace/events/timer.h:279: warning: format '%lu' expects type 'long unsigned int', but argument 4 has type 'cputime_t' .... cputime_t on power is u64, which triggers the above warning. Cast the cputime_t to unsigned long long and fix the print format string. That works on both 32 and 64 bit architectures. While at it change the print format for long variables from %lu to %ld. Signed-off-by: Thomas Gleixner Cc: Xiao Guangrong --- include/trace/events/timer.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h index e5ce87a0498d..9496b965d62a 100644 --- a/include/trace/events/timer.h +++ b/include/trace/events/timer.h @@ -301,8 +301,8 @@ TRACE_EVENT(itimer_state, __entry->interval_usec = value->it_interval.tv_usec; ), - TP_printk("which=%d expires=%lu it_value=%lu.%lu it_interval=%lu.%lu", - __entry->which, __entry->expires, + TP_printk("which=%d expires=%llu it_value=%ld.%ld it_interval=%ld.%ld", + __entry->which, (unsigned long long)__entry->expires, __entry->value_sec, __entry->value_usec, __entry->interval_sec, __entry->interval_usec) ); @@ -331,8 +331,8 @@ TRACE_EVENT(itimer_expire, __entry->pid = pid_nr(pid); ), - TP_printk("which=%d pid=%d now=%lu", __entry->which, - (int) __entry->pid, __entry->now) + TP_printk("which=%d pid=%d now=%llu", __entry->which, + (int) __entry->pid, (unsigned long long)__entry->now) ); #endif /* _TRACE_TIMER_H */ -- cgit v1.2.3 From 6b2f3d1f769be5779b479c37800229d9a4809fc3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 27 Oct 2009 11:05:28 +0100 Subject: vfs: Implement proper O_SYNC semantics While Linux provided an O_SYNC flag basically since day 1, it took until Linux 2.4.0-test12pre2 to actually get it implemented for filesystems, since that day we had generic_osync_around with only minor changes and the great "For now, when the user asks for O_SYNC, we'll actually give O_DSYNC" comment. This patch intends to actually give us real O_SYNC semantics in addition to the O_DSYNC semantics. After Jan's O_SYNC patches which are required before this patch it's actually surprisingly simple, we just need to figure out when to set the datasync flag to vfs_fsync_range and when not. This patch renames the existing O_SYNC flag to O_DSYNC while keeping it's numerical value to keep binary compatibility, and adds a new real O_SYNC flag. 
To guarantee backwards compatiblity it is defined as expanding to both the O_DSYNC and the new additional binary flag (__O_SYNC) to make sure we are backwards-compatible when compiled against the new headers. This also means that all places that don't care about the differences can just check O_DSYNC and get the right behaviour for O_SYNC, too - only places that actuall care need to check __O_SYNC in addition. Drivers and network filesystems have been updated in a fail safe way to always do the full sync magic if O_DSYNC is set. The few places setting O_SYNC for lower layers are kept that way for now to stay failsafe. We enforce that O_DSYNC is set when __O_SYNC is set early in the open path to make sure we always get these sane options. Note that parisc really screwed up their headers as they already define a O_DSYNC that has always been a no-op. We try to repair it by using it for the new O_DSYNC and redefinining O_SYNC to send both the traditional O_SYNC numerical value _and_ the O_DSYNC one. Cc: Richard Henderson Cc: Ivan Kokshaysky Cc: Grant Grundler Cc: "David S. Miller" Cc: Ingo Molnar Cc: "H. Peter Anvin" Cc: Thomas Gleixner Cc: Al Viro Cc: Andreas Dilger Acked-by: Trond Myklebust Acked-by: Kyle McMartin Acked-by: Ulrich Drepper Signed-off-by: Christoph Hellwig Signed-off-by: Andrew Morton Signed-off-by: Jan Kara --- arch/alpha/include/asm/fcntl.h | 19 ++++++++++++++++--- arch/blackfin/include/asm/fcntl.h | 2 -- arch/mips/include/asm/fcntl.h | 17 ++++++++++++++++- arch/mips/kernel/kspd.c | 1 + arch/mips/loongson/common/mem.c | 2 +- arch/mips/mm/cache.c | 2 +- arch/parisc/include/asm/fcntl.h | 5 ++--- arch/sparc/include/asm/fcntl.h | 19 ++++++++++++++++--- arch/x86/mm/pat.c | 3 +-- drivers/char/mem.c | 6 +++--- drivers/usb/gadget/file_storage.c | 2 +- fs/afs/write.c | 5 +++-- fs/btrfs/file.c | 4 ++-- fs/cifs/dir.c | 3 ++- fs/cifs/file.c | 6 ++++-- fs/namei.c | 9 +++++++++ fs/nfs/file.c | 4 ++-- fs/nfs/write.c | 2 +- fs/ocfs2/file.c | 2 +- fs/sync.c | 5 +++-- fs/ubifs/file.c | 2 +- fs/xfs/linux-2.6/xfs_lrw.c | 2 +- include/asm-generic/fcntl.h | 25 +++++++++++++++++++++---- sound/core/rawmidi.c | 2 +- 24 files changed, 109 insertions(+), 40 deletions(-) (limited to 'include') diff --git a/arch/alpha/include/asm/fcntl.h b/arch/alpha/include/asm/fcntl.h index 25da0017ec87..21b1117a0c61 100644 --- a/arch/alpha/include/asm/fcntl.h +++ b/arch/alpha/include/asm/fcntl.h @@ -1,8 +1,6 @@ #ifndef _ALPHA_FCNTL_H #define _ALPHA_FCNTL_H -/* open/fcntl - O_SYNC is only implemented on blocks devices and on files - located on an ext2 file system */ #define O_CREAT 01000 /* not fcntl */ #define O_TRUNC 02000 /* not fcntl */ #define O_EXCL 04000 /* not fcntl */ @@ -10,13 +8,28 @@ #define O_NONBLOCK 00004 #define O_APPEND 00010 -#define O_SYNC 040000 +#define O_DSYNC 040000 /* used to be O_SYNC, see below */ #define O_DIRECTORY 0100000 /* must be a directory */ #define O_NOFOLLOW 0200000 /* don't follow links */ #define O_LARGEFILE 0400000 /* will be set by the kernel on every open */ #define O_DIRECT 02000000 /* direct disk access - should check with OSF/1 */ #define O_NOATIME 04000000 #define O_CLOEXEC 010000000 /* set close_on_exec */ +/* + * Before Linux 2.6.32 only O_DSYNC semantics were implemented, but using + * the O_SYNC flag. We continue to use the existing numerical value + * for O_DSYNC semantics now, but using the correct symbolic name for it. + * This new value is used to request true Posix O_SYNC semantics. 
It is + * defined in this strange way to make sure applications compiled against + * new headers get at least O_DSYNC semantics on older kernels. + * + * This has the nice side-effect that we can simply test for O_DSYNC + * wherever we do not care if O_DSYNC or O_SYNC is used. + * + * Note: __O_SYNC must never be used directly. + */ +#define __O_SYNC 020000000 +#define O_SYNC (__O_SYNC|O_DSYNC) #define F_GETLK 7 #define F_SETLK 8 diff --git a/arch/blackfin/include/asm/fcntl.h b/arch/blackfin/include/asm/fcntl.h index 8727b2b382f1..251c911d59c1 100644 --- a/arch/blackfin/include/asm/fcntl.h +++ b/arch/blackfin/include/asm/fcntl.h @@ -7,8 +7,6 @@ #ifndef _BFIN_FCNTL_H #define _BFIN_FCNTL_H -/* open/fcntl - O_SYNC is only implemented on blocks devices and on files - located on an ext2 file system */ #define O_DIRECTORY 040000 /* must be a directory */ #define O_NOFOLLOW 0100000 /* don't follow links */ #define O_DIRECT 0200000 /* direct disk access hint - currently ignored */ diff --git a/arch/mips/include/asm/fcntl.h b/arch/mips/include/asm/fcntl.h index 2a52333a062d..7c6681aa2ab8 100644 --- a/arch/mips/include/asm/fcntl.h +++ b/arch/mips/include/asm/fcntl.h @@ -10,7 +10,7 @@ #define O_APPEND 0x0008 -#define O_SYNC 0x0010 +#define O_DSYNC 0x0010 /* used to be O_SYNC, see below */ #define O_NONBLOCK 0x0080 #define O_CREAT 0x0100 /* not fcntl */ #define O_TRUNC 0x0200 /* not fcntl */ @@ -18,6 +18,21 @@ #define O_NOCTTY 0x0800 /* not fcntl */ #define FASYNC 0x1000 /* fcntl, for BSD compatibility */ #define O_LARGEFILE 0x2000 /* allow large file opens */ +/* + * Before Linux 2.6.32 only O_DSYNC semantics were implemented, but using + * the O_SYNC flag. We continue to use the existing numerical value + * for O_DSYNC semantics now, but using the correct symbolic name for it. + * This new value is used to request true Posix O_SYNC semantics. It is + * defined in this strange way to make sure applications compiled against + * new headers get at least O_DSYNC semantics on older kernels. + * + * This has the nice side-effect that we can simply test for O_DSYNC + * wherever we do not care if O_DSYNC or O_SYNC is used. + * + * Note: __O_SYNC must never be used directly. 
+ */ +#define __O_SYNC 0x4000 +#define O_SYNC (__O_SYNC|O_DSYNC) #define O_DIRECT 0x8000 /* direct disk access hint */ #define F_GETLK 14 diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c index ad4e017ed2f3..80e2ba694bab 100644 --- a/arch/mips/kernel/kspd.c +++ b/arch/mips/kernel/kspd.c @@ -82,6 +82,7 @@ static int sp_stopping; #define MTSP_O_SHLOCK 0x0010 #define MTSP_O_EXLOCK 0x0020 #define MTSP_O_ASYNC 0x0040 +/* XXX: check which of these is actually O_SYNC vs O_DSYNC */ #define MTSP_O_FSYNC O_SYNC #define MTSP_O_NOFOLLOW 0x0100 #define MTSP_O_SYNC 0x0080 diff --git a/arch/mips/loongson/common/mem.c b/arch/mips/loongson/common/mem.c index 7c92f79b6480..e94ef158f980 100644 --- a/arch/mips/loongson/common/mem.c +++ b/arch/mips/loongson/common/mem.c @@ -26,7 +26,7 @@ void __init prom_init_memory(void) /* override of arch/mips/mm/cache.c: __uncached_access */ int __uncached_access(struct file *file, unsigned long addr) { - if (file->f_flags & O_SYNC) + if (file->f_flags & O_DSYNC) return 1; return addr >= __pa(high_memory) || diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index 694d51f523d1..102b2dfa542a 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ -194,7 +194,7 @@ void __devinit cpu_cache_init(void) int __weak __uncached_access(struct file *file, unsigned long addr) { - if (file->f_flags & O_SYNC) + if (file->f_flags & O_DSYNC) return 1; return addr >= __pa(high_memory); diff --git a/arch/parisc/include/asm/fcntl.h b/arch/parisc/include/asm/fcntl.h index 1e1c824764ee..f357fc693c89 100644 --- a/arch/parisc/include/asm/fcntl.h +++ b/arch/parisc/include/asm/fcntl.h @@ -1,14 +1,13 @@ #ifndef _PARISC_FCNTL_H #define _PARISC_FCNTL_H -/* open/fcntl - O_SYNC is only implemented on blocks devices and on files - located on an ext2 file system */ #define O_APPEND 000000010 #define O_BLKSEEK 000000100 /* HPUX only */ #define O_CREAT 000000400 /* not fcntl */ #define O_EXCL 000002000 /* not fcntl */ #define O_LARGEFILE 000004000 -#define O_SYNC 000100000 +#define __O_SYNC 000100000 +#define O_SYNC (__O_SYNC|O_DSYNC) #define O_NONBLOCK 000200004 /* HPUX has separate NDELAY & NONBLOCK */ #define O_NOCTTY 000400000 /* not fcntl */ #define O_DSYNC 001000000 /* HPUX only */ diff --git a/arch/sparc/include/asm/fcntl.h b/arch/sparc/include/asm/fcntl.h index d4d9c9d852c3..3b9cfb39175e 100644 --- a/arch/sparc/include/asm/fcntl.h +++ b/arch/sparc/include/asm/fcntl.h @@ -1,14 +1,12 @@ #ifndef _SPARC_FCNTL_H #define _SPARC_FCNTL_H -/* open/fcntl - O_SYNC is only implemented on blocks devices and on files - located on an ext2 file system */ #define O_APPEND 0x0008 #define FASYNC 0x0040 /* fcntl, for BSD compatibility */ #define O_CREAT 0x0200 /* not fcntl */ #define O_TRUNC 0x0400 /* not fcntl */ #define O_EXCL 0x0800 /* not fcntl */ -#define O_SYNC 0x2000 +#define O_DSYNC 0x2000 /* used to be O_SYNC, see below */ #define O_NONBLOCK 0x4000 #if defined(__sparc__) && defined(__arch64__) #define O_NDELAY 0x0004 @@ -20,6 +18,21 @@ #define O_DIRECT 0x100000 /* direct disk access hint */ #define O_NOATIME 0x200000 #define O_CLOEXEC 0x400000 +/* + * Before Linux 2.6.32 only O_DSYNC semantics were implemented, but using + * the O_SYNC flag. We continue to use the existing numerical value + * for O_DSYNC semantics now, but using the correct symbolic name for it. + * This new value is used to request true Posix O_SYNC semantics. It is + * defined in this strange way to make sure applications compiled against + * new headers get at least O_DSYNC semantics on older kernels. 
+ * + * This has the nice side-effect that we can simply test for O_DSYNC + * wherever we do not care if O_DSYNC or O_SYNC is used. + * + * Note: __O_SYNC must never be used directly. + */ +#define __O_SYNC 0x800000 +#define O_SYNC (__O_SYNC|O_DSYNC) #define F_GETOWN 5 /* for sockets. */ #define F_SETOWN 6 /* for sockets. */ diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 66b55d6e69ed..ae9648eb1c7f 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c @@ -704,9 +704,8 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, if (!range_is_allowed(pfn, size)) return 0; - if (file->f_flags & O_SYNC) { + if (file->f_flags & O_DSYNC) flags = _PAGE_CACHE_UC_MINUS; - } #ifdef CONFIG_X86_32 /* diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 30eff80fed6f..fba76fb55abf 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -43,7 +43,7 @@ static inline int uncached_access(struct file *file, unsigned long addr) { #if defined(CONFIG_IA64) /* - * On ia64, we ignore O_SYNC because we cannot tolerate memory attribute aliases. + * On ia64, we ignore O_DSYNC because we cannot tolerate memory attribute aliases. */ return !(efi_mem_attributes(addr) & EFI_MEMORY_WB); #elif defined(CONFIG_MIPS) @@ -56,9 +56,9 @@ static inline int uncached_access(struct file *file, unsigned long addr) #else /* * Accessing memory above the top the kernel knows about or through a file pointer - * that was marked O_SYNC will be done non-cached. + * that was marked O_DSYNC will be done non-cached. */ - if (file->f_flags & O_SYNC) + if (file->f_flags & O_DSYNC) return 1; return addr >= __pa(high_memory); #endif diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c index 1e6aa504d58a..5e14dbaf65bc 100644 --- a/drivers/usb/gadget/file_storage.c +++ b/drivers/usb/gadget/file_storage.c @@ -1713,7 +1713,7 @@ static int do_write(struct fsg_dev *fsg) } if (fsg->cmnd[1] & 0x08) { // FUA spin_lock(&curlun->filp->f_lock); - curlun->filp->f_flags |= O_SYNC; + curlun->filp->f_flags |= O_DSYNC; spin_unlock(&curlun->filp->f_lock); } } diff --git a/fs/afs/write.c b/fs/afs/write.c index c63a3c8beb73..6be1bc31616a 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -692,8 +692,9 @@ ssize_t afs_file_write(struct kiocb *iocb, const struct iovec *iov, } /* return error values for O_SYNC and IS_SYNC() */ - if (IS_SYNC(&vnode->vfs_inode) || iocb->ki_filp->f_flags & O_SYNC) { - ret = afs_fsync(iocb->ki_filp, dentry, 1); + if (IS_SYNC(&vnode->vfs_inode) || iocb->ki_filp->f_flags & O_DSYNC) { + ret = afs_fsync(iocb->ki_filp, dentry, + (iocb->ki_filp->f_flags & __O_SYNC) ? 
0 : 1); if (ret < 0) result = ret; } diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 06550affbd27..77f759302e12 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -909,7 +909,7 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf, unsigned long last_index; int will_write; - will_write = ((file->f_flags & O_SYNC) || IS_SYNC(inode) || + will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) || (file->f_flags & O_DIRECT)); nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE, @@ -1076,7 +1076,7 @@ out_nolock: if (err) num_written = err; - if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) { + if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) { trans = btrfs_start_transaction(root, 1); ret = btrfs_log_dentry_safe(trans, root, file->f_dentry); diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 1f42f772865a..6ccf7262d1b7 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -214,7 +214,8 @@ int cifs_posix_open(char *full_path, struct inode **pinode, posix_flags |= SMB_O_EXCL; if (oflags & O_TRUNC) posix_flags |= SMB_O_TRUNC; - if (oflags & O_SYNC) + /* be safe and imply O_SYNC for O_DSYNC */ + if (oflags & O_DSYNC) posix_flags |= SMB_O_SYNC; if (oflags & O_DIRECTORY) posix_flags |= SMB_O_DIRECTORY; diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 429337eb7afe..057e1dae12ab 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -76,8 +76,10 @@ static inline fmode_t cifs_posix_convert_flags(unsigned int flags) reopening a file. They had their effect on the original open */ if (flags & O_APPEND) posix_flags |= (fmode_t)O_APPEND; - if (flags & O_SYNC) - posix_flags |= (fmode_t)O_SYNC; + if (flags & O_DSYNC) + posix_flags |= (fmode_t)O_DSYNC; + if (flags & __O_SYNC) + posix_flags |= (fmode_t)__O_SYNC; if (flags & O_DIRECTORY) posix_flags |= (fmode_t)O_DIRECTORY; if (flags & O_NOFOLLOW) diff --git a/fs/namei.c b/fs/namei.c index d11f404667e9..b83d38f614ff 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1678,6 +1678,15 @@ struct file *do_filp_open(int dfd, const char *pathname, int will_write; int flag = open_to_namei_flags(open_flag); + /* + * O_SYNC is implemented as __O_SYNC|O_DSYNC. As many places only + * check for O_DSYNC if the need any syncing at all we enforce it's + * always set instead of having to deal with possibly weird behaviour + * for malicious applications setting only __O_SYNC. 
+ */ + if (open_flag & __O_SYNC) + open_flag |= O_DSYNC; + if (!acc_mode) acc_mode = MAY_OPEN | ACC_MODE(flag); diff --git a/fs/nfs/file.c b/fs/nfs/file.c index f5fdd39e037a..6b891328f332 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -581,7 +581,7 @@ static int nfs_need_sync_write(struct file *filp, struct inode *inode) { struct nfs_open_context *ctx; - if (IS_SYNC(inode) || (filp->f_flags & O_SYNC)) + if (IS_SYNC(inode) || (filp->f_flags & O_DSYNC)) return 1; ctx = nfs_file_open_context(filp); if (test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags)) @@ -622,7 +622,7 @@ static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov, nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, count); result = generic_file_aio_write(iocb, iov, nr_segs, pos); - /* Return error values for O_SYNC and IS_SYNC() */ + /* Return error values for O_DSYNC and IS_SYNC() */ if (result >= 0 && nfs_need_sync_write(iocb->ki_filp, inode)) { int err = nfs_do_fsync(nfs_file_open_context(iocb->ki_filp), inode); if (err < 0) diff --git a/fs/nfs/write.c b/fs/nfs/write.c index c84b5cc1a943..b1ce2ea9b93b 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -774,7 +774,7 @@ int nfs_updatepage(struct file *file, struct page *page, */ if (nfs_write_pageuptodate(page, inode) && inode->i_flock == NULL && - !(file->f_flags & O_SYNC)) { + !(file->f_flags & O_DSYNC)) { count = max(count + offset, nfs_page_length(page)); offset = 0; } diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index de059f490586..3d30a1c974a8 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -2006,7 +2006,7 @@ out_dio: /* buffered aio wouldn't have proper lock coverage today */ BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT)); - if ((file->f_flags & O_SYNC && !direct_io) || IS_SYNC(inode)) { + if ((file->f_flags & O_DSYNC && !direct_io) || IS_SYNC(inode)) { ret = filemap_fdatawrite_range(file->f_mapping, pos, pos + count - 1); if (ret < 0) diff --git a/fs/sync.c b/fs/sync.c index d104591b066b..b75ca68dc081 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -295,10 +295,11 @@ SYSCALL_DEFINE1(fdatasync, unsigned int, fd) */ int generic_write_sync(struct file *file, loff_t pos, loff_t count) { - if (!(file->f_flags & O_SYNC) && !IS_SYNC(file->f_mapping->host)) + if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host)) return 0; return vfs_fsync_range(file, file->f_path.dentry, pos, - pos + count - 1, 1); + pos + count - 1, + (file->f_flags & __O_SYNC) ? 
0 : 1); } EXPORT_SYMBOL(generic_write_sync); diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 1009adc8d602..eaa3d480bc20 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -1401,7 +1401,7 @@ static ssize_t ubifs_aio_write(struct kiocb *iocb, const struct iovec *iov, if (ret < 0) return ret; - if (ret > 0 && (IS_SYNC(inode) || iocb->ki_filp->f_flags & O_SYNC)) { + if (ret > 0 && (IS_SYNC(inode) || iocb->ki_filp->f_flags & O_DSYNC)) { err = ubifs_sync_wbufs_by_inode(c, inode); if (err) return err; diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c index 072050f8d346..339c52b1a434 100644 --- a/fs/xfs/linux-2.6/xfs_lrw.c +++ b/fs/xfs/linux-2.6/xfs_lrw.c @@ -811,7 +811,7 @@ write_retry: XFS_STATS_ADD(xs_write_bytes, ret); /* Handle various SYNC-type writes */ - if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) { + if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) { loff_t end = pos + ret - 1; int error2; diff --git a/include/asm-generic/fcntl.h b/include/asm-generic/fcntl.h index 495dc8af4044..681ddf3e844c 100644 --- a/include/asm-generic/fcntl.h +++ b/include/asm-generic/fcntl.h @@ -3,8 +3,6 @@ #include -/* open/fcntl - O_SYNC is only implemented on blocks devices and on files - located on an ext2 file system */ #define O_ACCMODE 00000003 #define O_RDONLY 00000000 #define O_WRONLY 00000001 @@ -27,8 +25,8 @@ #ifndef O_NONBLOCK #define O_NONBLOCK 00004000 #endif -#ifndef O_SYNC -#define O_SYNC 00010000 +#ifndef O_DSYNC +#define O_DSYNC 00010000 /* used to be O_SYNC, see below */ #endif #ifndef FASYNC #define FASYNC 00020000 /* fcntl, for BSD compatibility */ @@ -51,6 +49,25 @@ #ifndef O_CLOEXEC #define O_CLOEXEC 02000000 /* set close_on_exec */ #endif + +/* + * Before Linux 2.6.32 only O_DSYNC semantics were implemented, but using + * the O_SYNC flag. We continue to use the existing numerical value + * for O_DSYNC semantics now, but using the correct symbolic name for it. + * This new value is used to request true Posix O_SYNC semantics. It is + * defined in this strange way to make sure applications compiled against + * new headers get at least O_DSYNC semantics on older kernels. + * + * This has the nice side-effect that we can simply test for O_DSYNC + * wherever we do not care if O_DSYNC or O_SYNC is used. + * + * Note: __O_SYNC must never be used directly. + */ +#ifndef O_SYNC +#define __O_SYNC 04000000 +#define O_SYNC (__O_SYNC|O_DSYNC) +#endif + #ifndef O_NDELAY #define O_NDELAY O_NONBLOCK #endif diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c index 2f766123b158..0f5a194695d9 100644 --- a/sound/core/rawmidi.c +++ b/sound/core/rawmidi.c @@ -1257,7 +1257,7 @@ static ssize_t snd_rawmidi_write(struct file *file, const char __user *buf, break; count -= count1; } - if (file->f_flags & O_SYNC) { + if (file->f_flags & O_DSYNC) { spin_lock_irq(&runtime->lock); while (runtime->avail != runtime->buffer_size) { wait_queue_t wait; -- cgit v1.2.3 From 94004ed726f38a841cc51f97c4a3f9eda9fbd0d9 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 30 Sep 2009 22:16:33 +0200 Subject: kill wait_on_page_writeback_range All callers really want the more logical filemap_fdatawait_range interface, so convert them to use it and merge wait_on_page_writeback_range into filemap_fdatawait_range. 
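A sketch of the caller-side conversion (mapping, start and end are placeholder variables; the new interface takes byte offsets with an inclusive end instead of page indexes):

/* before: page-index interface */
ret = wait_on_page_writeback_range(mapping, start >> PAGE_CACHE_SHIFT,
				   end >> PAGE_CACHE_SHIFT);
/* after: byte-range interface */
ret = filemap_fdatawait_range(mapping, start, end);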
Signed-off-by: Christoph Hellwig Signed-off-by: Jan Kara --- Documentation/filesystems/vfs.txt | 2 +- fs/jbd2/commit.c | 2 +- fs/sync.c | 8 ++----- include/linux/fs.h | 2 -- mm/filemap.c | 49 +++++++++++---------------------------- 5 files changed, 18 insertions(+), 45 deletions(-) (limited to 'include') diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt index 623f094c9d8d..3de2f32edd90 100644 --- a/Documentation/filesystems/vfs.txt +++ b/Documentation/filesystems/vfs.txt @@ -472,7 +472,7 @@ __sync_single_inode) to check if ->writepages has been successful in writing out the whole address_space. The Writeback tag is used by filemap*wait* and sync_page* functions, -via wait_on_page_writeback_range, to wait for all writeback to +via filemap_fdatawait_range, to wait for all writeback to complete. While waiting ->sync_page (if defined) will be called on each page that is found to require writeback. diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index d4cfd6d2779e..c5edc13ccdd0 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -286,7 +286,7 @@ static int journal_finish_inode_data_buffers(journal_t *journal, if (err) { /* * Because AS_EIO is cleared by - * wait_on_page_writeback_range(), set it again so + * filemap_fdatawait_range(), set it again so * that user process can get -EIO from fsync(). */ set_bit(AS_EIO, diff --git a/fs/sync.c b/fs/sync.c index b75ca68dc081..36752a683481 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -453,9 +453,7 @@ int do_sync_mapping_range(struct address_space *mapping, loff_t offset, ret = 0; if (flags & SYNC_FILE_RANGE_WAIT_BEFORE) { - ret = wait_on_page_writeback_range(mapping, - offset >> PAGE_CACHE_SHIFT, - endbyte >> PAGE_CACHE_SHIFT); + ret = filemap_fdatawait_range(mapping, offset, endbyte); if (ret < 0) goto out; } @@ -468,9 +466,7 @@ int do_sync_mapping_range(struct address_space *mapping, loff_t offset, } if (flags & SYNC_FILE_RANGE_WAIT_AFTER) { - ret = wait_on_page_writeback_range(mapping, - offset >> PAGE_CACHE_SHIFT, - endbyte >> PAGE_CACHE_SHIFT); + ret = filemap_fdatawait_range(mapping, offset, endbyte); } out: return ret; diff --git a/include/linux/fs.h b/include/linux/fs.h index 891f7d642e5c..a057f48eb156 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2091,8 +2091,6 @@ extern int filemap_fdatawait_range(struct address_space *, loff_t lstart, extern int filemap_write_and_wait(struct address_space *mapping); extern int filemap_write_and_wait_range(struct address_space *mapping, loff_t lstart, loff_t lend); -extern int wait_on_page_writeback_range(struct address_space *mapping, - pgoff_t start, pgoff_t end); extern int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end, int sync_mode); extern int filemap_fdatawrite_range(struct address_space *mapping, diff --git a/mm/filemap.c b/mm/filemap.c index c3d3506ecaba..8b4d88f9249e 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -260,27 +260,27 @@ int filemap_flush(struct address_space *mapping) EXPORT_SYMBOL(filemap_flush); /** - * wait_on_page_writeback_range - wait for writeback to complete - * @mapping: target address_space - * @start: beginning page index - * @end: ending page index + * filemap_fdatawait_range - wait for writeback to complete + * @mapping: address space structure to wait for + * @start_byte: offset in bytes where the range starts + * @end_byte: offset in bytes where the range ends (inclusive) * - * Wait for writeback to complete against pages indexed by start->end - * inclusive + * Walk the list of 
under-writeback pages of the given address space + * in the given range and wait for all of them. */ -int wait_on_page_writeback_range(struct address_space *mapping, - pgoff_t start, pgoff_t end) +int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte, + loff_t end_byte) { + pgoff_t index = start_byte >> PAGE_CACHE_SHIFT; + pgoff_t end = end_byte >> PAGE_CACHE_SHIFT; struct pagevec pvec; int nr_pages; int ret = 0; - pgoff_t index; - if (end < start) + if (end_byte < start_byte) return 0; pagevec_init(&pvec, 0); - index = start; while ((index <= end) && (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_WRITEBACK, @@ -310,25 +310,6 @@ int wait_on_page_writeback_range(struct address_space *mapping, return ret; } - -/** - * filemap_fdatawait_range - wait for all under-writeback pages to complete in a given range - * @mapping: address space structure to wait for - * @start: offset in bytes where the range starts - * @end: offset in bytes where the range ends (inclusive) - * - * Walk the list of under-writeback pages of the given address space - * in the given range and wait for all of them. - * - * This is just a simple wrapper so that callers don't have to convert offsets - * to page indexes themselves - */ -int filemap_fdatawait_range(struct address_space *mapping, loff_t start, - loff_t end) -{ - return wait_on_page_writeback_range(mapping, start >> PAGE_CACHE_SHIFT, - end >> PAGE_CACHE_SHIFT); -} EXPORT_SYMBOL(filemap_fdatawait_range); /** @@ -345,8 +326,7 @@ int filemap_fdatawait(struct address_space *mapping) if (i_size == 0) return 0; - return wait_on_page_writeback_range(mapping, 0, - (i_size - 1) >> PAGE_CACHE_SHIFT); + return filemap_fdatawait_range(mapping, 0, i_size - 1); } EXPORT_SYMBOL(filemap_fdatawait); @@ -393,9 +373,8 @@ int filemap_write_and_wait_range(struct address_space *mapping, WB_SYNC_ALL); /* See comment of filemap_write_and_wait() */ if (err != -EIO) { - int err2 = wait_on_page_writeback_range(mapping, - lstart >> PAGE_CACHE_SHIFT, - lend >> PAGE_CACHE_SHIFT); + int err2 = filemap_fdatawait_range(mapping, + lstart, lend); if (!err) err = err2; } -- cgit v1.2.3 From 1472da5fdc65f0cd286c655758d629346001e126 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Fri, 16 Oct 2009 15:26:03 +0400 Subject: const: struct quota_format_ops Signed-off-by: Alexey Dobriyan Signed-off-by: Jan Kara --- fs/ocfs2/quota_local.c | 2 +- fs/quota/quota_v1.c | 2 +- fs/quota/quota_v2.c | 2 +- include/linux/quota.h | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c index 1a2c50a759fa..21f9e71223ca 100644 --- a/fs/ocfs2/quota_local.c +++ b/fs/ocfs2/quota_local.c @@ -1325,7 +1325,7 @@ out: return status; } -static struct quota_format_ops ocfs2_format_ops = { +static const struct quota_format_ops ocfs2_format_ops = { .check_quota_file = ocfs2_local_check_quota_file, .read_file_info = ocfs2_local_read_info, .write_file_info = ocfs2_global_write_info, diff --git a/fs/quota/quota_v1.c b/fs/quota/quota_v1.c index 0edcf42b1778..2ae757e9c008 100644 --- a/fs/quota/quota_v1.c +++ b/fs/quota/quota_v1.c @@ -204,7 +204,7 @@ out: return ret; } -static struct quota_format_ops v1_format_ops = { +static const struct quota_format_ops v1_format_ops = { .check_quota_file = v1_check_quota_file, .read_file_info = v1_read_file_info, .write_file_info = v1_write_file_info, diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c index a5475fb1ae44..01f25eae684d 100644 --- 
a/fs/quota/quota_v2.c +++ b/fs/quota/quota_v2.c @@ -207,7 +207,7 @@ static int v2_free_file_info(struct super_block *sb, int type) return 0; } -static struct quota_format_ops v2_format_ops = { +static const struct quota_format_ops v2_format_ops = { .check_quota_file = v2_check_quota_file, .read_file_info = v2_read_file_info, .write_file_info = v2_write_file_info, diff --git a/include/linux/quota.h b/include/linux/quota.h index ce9a9b2e5cd4..f63c9d6ba784 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -334,7 +334,7 @@ struct quotactl_ops { struct quota_format_type { int qf_fmt_id; /* Quota format id */ - struct quota_format_ops *qf_ops; /* Operations of format */ + const struct quota_format_ops *qf_ops; /* Operations of format */ struct module *qf_owner; /* Module implementing quota format */ struct quota_format_type *qf_next; }; @@ -394,7 +394,7 @@ struct quota_info { struct rw_semaphore dqptr_sem; /* serialize ops using quota_info struct, pointers from inode to dquots */ struct inode *files[MAXQUOTAS]; /* inodes of quotafiles */ struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */ - struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */ + const struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */ }; int register_quota_format(struct quota_format_type *fmt); -- cgit v1.2.3 From ad888a1f07a72fc7d19286b4ce5c154172a06eed Mon Sep 17 00:00:00 2001 From: Jan Blunck Date: Wed, 18 Nov 2009 17:10:56 +0100 Subject: ext2: Explicitly assign values to on-disk enum of filetypes It is somewhat dangerous to use a straight enum here, because this will reassign values of later variables if one of the earlier ones is removed. Signed-off-by: Jan Blunck Cc: Andreas Dilger Signed-off-by: Jan Kara --- include/linux/ext2_fs.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/linux/ext2_fs.h b/include/linux/ext2_fs.h index 121720d74e15..2dfa7076e8b6 100644 --- a/include/linux/ext2_fs.h +++ b/include/linux/ext2_fs.h @@ -565,14 +565,14 @@ struct ext2_dir_entry_2 { * other bits are reserved for now. */ enum { - EXT2_FT_UNKNOWN, - EXT2_FT_REG_FILE, - EXT2_FT_DIR, - EXT2_FT_CHRDEV, - EXT2_FT_BLKDEV, - EXT2_FT_FIFO, - EXT2_FT_SOCK, - EXT2_FT_SYMLINK, + EXT2_FT_UNKNOWN = 0, + EXT2_FT_REG_FILE = 1, + EXT2_FT_DIR = 2, + EXT2_FT_CHRDEV = 3, + EXT2_FT_BLKDEV = 4, + EXT2_FT_FIFO = 5, + EXT2_FT_SOCK = 6, + EXT2_FT_SYMLINK = 7, EXT2_FT_MAX }; -- cgit v1.2.3 From 4cf46b67eb6de94532c1bea11d2479d085229d0e Mon Sep 17 00:00:00 2001 From: Alexey Fisher Date: Sun, 22 Nov 2009 20:38:55 +0100 Subject: ext3: Unify log messages in ext3 Make messages produced by ext3 more unified. It should be easy to parse. dmesg before patch: [ 4893.684892] reservations ON [ 4893.684896] xip option not supported [ 4893.684964] EXT3-fs warning: maximal mount count reached, running e2fsck is recommended dmesg after patch: [ 873.300792] EXT3-fs (loop0): using internal journaln [ 873.300796] EXT3-fs (loop0): mounted filesystem with writeback data mode [ 924.163657] EXT3-fs (loop0): error: can't find ext3 filesystem on dev loop0. [ 723.755642] EXT3-fs (loop0): error: bad blocksize 8192 [ 357.874687] EXT3-fs (loop0): error: no journal found. mounting ext3 over ext2? [ 873.300764] EXT3-fs (loop0): warning: maximal mount count reached, running e2fsck is recommended [ 924.163657] EXT3-fs (loop0): error: can't find ext3 filesystem on dev loop0. 
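As an illustrative sketch of the conversion pattern (the message text and the blocksize variable are only examples, not a hunk from this patch):

/* before */
printk(KERN_ERR "EXT3-fs error (device %s): bad blocksize %d\n",
       sb->s_id, blocksize);
/* after: ext3_msg() adds the "EXT3-fs (<dev>): " prefix and trailing newline */
ext3_msg(sb, KERN_ERR, "error: bad blocksize %d", blocksize);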
Signed-off-by: Alexey Fisher Signed-off-by: Jan Kara --- fs/ext3/super.c | 432 +++++++++++++++++++++++++----------------------- include/linux/ext3_fs.h | 2 + 2 files changed, 229 insertions(+), 205 deletions(-) (limited to 'include') diff --git a/fs/ext3/super.c b/fs/ext3/super.c index c1d128d765b6..46433b30e45f 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -135,12 +135,24 @@ void ext3_journal_abort_handle(const char *caller, const char *err_fn, if (is_handle_aborted(handle)) return; - printk(KERN_ERR "%s: aborting transaction: %s in %s\n", - caller, errstr, err_fn); + printk(KERN_ERR "EXT3-fs: %s: aborting transaction: %s in %s\n", + caller, errstr, err_fn); journal_abort_handle(handle); } +void ext3_msg(struct super_block *sb, const char *prefix, + const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + printk("%sEXT3-fs (%s): ", prefix, sb->s_id); + vprintk(fmt, args); + printk("\n"); + va_end(args); +} + /* Deal with the reporting of failure conditions on a filesystem such as * inconsistencies detected or read IO failures. * @@ -174,12 +186,13 @@ static void ext3_handle_error(struct super_block *sb) journal_abort(journal, -EIO); } if (test_opt (sb, ERRORS_RO)) { - printk (KERN_CRIT "Remounting filesystem read-only\n"); + ext3_msg(sb, KERN_CRIT, + "error: remounting filesystem read-only"); sb->s_flags |= MS_RDONLY; } ext3_commit_super(sb, es, 1); if (test_opt(sb, ERRORS_PANIC)) - panic("EXT3-fs (device %s): panic forced after error\n", + panic("EXT3-fs (%s): panic forced after error\n", sb->s_id); } @@ -247,8 +260,7 @@ void __ext3_std_error (struct super_block * sb, const char * function, return; errstr = ext3_decode_error(sb, errno, nbuf); - printk (KERN_CRIT "EXT3-fs error (device %s) in %s: %s\n", - sb->s_id, function, errstr); + ext3_msg(sb, KERN_CRIT, "error in %s: %s", function, errstr); ext3_handle_error(sb); } @@ -268,21 +280,20 @@ void ext3_abort (struct super_block * sb, const char * function, { va_list args; - printk (KERN_CRIT "ext3_abort called.\n"); - va_start(args, fmt); - printk(KERN_CRIT "EXT3-fs error (device %s): %s: ",sb->s_id, function); + printk(KERN_CRIT "EXT3-fs (%s): error: %s: ", sb->s_id, function); vprintk(fmt, args); printk("\n"); va_end(args); if (test_opt(sb, ERRORS_PANIC)) - panic("EXT3-fs panic from previous error\n"); + panic("EXT3-fs: panic from previous error\n"); if (sb->s_flags & MS_RDONLY) return; - printk(KERN_CRIT "Remounting filesystem read-only\n"); + ext3_msg(sb, KERN_CRIT, + "error: remounting filesystem read-only"); EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS; sb->s_flags |= MS_RDONLY; EXT3_SB(sb)->s_mount_opt |= EXT3_MOUNT_ABORT; @@ -296,7 +307,7 @@ void ext3_warning (struct super_block * sb, const char * function, va_list args; va_start(args, fmt); - printk(KERN_WARNING "EXT3-fs warning (device %s): %s: ", + printk(KERN_WARNING "EXT3-fs (%s): warning: %s: ", sb->s_id, function); vprintk(fmt, args); printk("\n"); @@ -310,10 +321,10 @@ void ext3_update_dynamic_rev(struct super_block *sb) if (le32_to_cpu(es->s_rev_level) > EXT3_GOOD_OLD_REV) return; - ext3_warning(sb, __func__, - "updating to rev %d because of new feature flag, " - "running e2fsck is recommended", - EXT3_DYNAMIC_REV); + ext3_msg(sb, KERN_WARNING, + "warning: updating to rev %d because of " + "new feature flag, running e2fsck is recommended", + EXT3_DYNAMIC_REV); es->s_first_ino = cpu_to_le32(EXT3_GOOD_OLD_FIRST_INO); es->s_inode_size = cpu_to_le16(EXT3_GOOD_OLD_INODE_SIZE); @@ -331,7 +342,7 @@ void ext3_update_dynamic_rev(struct super_block *sb) /* * Open 
the external journal device */ -static struct block_device *ext3_blkdev_get(dev_t dev) +static struct block_device *ext3_blkdev_get(dev_t dev, struct super_block *sb) { struct block_device *bdev; char b[BDEVNAME_SIZE]; @@ -342,8 +353,9 @@ static struct block_device *ext3_blkdev_get(dev_t dev) return bdev; fail: - printk(KERN_ERR "EXT3: failed to open journal device %s: %ld\n", - __bdevname(dev, b), PTR_ERR(bdev)); + ext3_msg(sb, "error: failed to open journal device %s: %ld", + __bdevname(dev, b), PTR_ERR(bdev)); + return NULL; } @@ -378,13 +390,13 @@ static void dump_orphan_list(struct super_block *sb, struct ext3_sb_info *sbi) { struct list_head *l; - printk(KERN_ERR "sb orphan head is %d\n", + ext3_msg(sb, KERN_ERR, "error: sb orphan head is %d", le32_to_cpu(sbi->s_es->s_last_orphan)); - printk(KERN_ERR "sb_info orphan list:\n"); + ext3_msg(sb, KERN_ERR, "sb_info orphan list:"); list_for_each(l, &sbi->s_orphan) { struct inode *inode = orphan_list_entry(l); - printk(KERN_ERR " " + ext3_msg(sb, KERN_ERR, " " "inode %s:%lu at %p: mode %o, nlink %d, next %d\n", inode->i_sb->s_id, inode->i_ino, inode, inode->i_mode, inode->i_nlink, @@ -849,7 +861,7 @@ static const match_table_t tokens = { {Opt_err, NULL}, }; -static ext3_fsblk_t get_sb_block(void **data) +static ext3_fsblk_t get_sb_block(void **data, struct super_block *sb) { ext3_fsblk_t sb_block; char *options = (char *) *data; @@ -860,7 +872,7 @@ static ext3_fsblk_t get_sb_block(void **data) /*todo: use simple_strtoll with >32bit ext3 */ sb_block = simple_strtoul(options, &options, 0); if (*options && *options != ',') { - printk("EXT3-fs: Invalid sb specification: %s\n", + ext3_msg(sb, "error: invalid sb specification: %s", (char *) *data); return 1; } @@ -960,7 +972,8 @@ static int parse_options (char *options, struct super_block *sb, #else case Opt_user_xattr: case Opt_nouser_xattr: - printk("EXT3 (no)user_xattr options not supported\n"); + ext3_msg(sb, KERN_INFO, + "(no)user_xattr options not supported"); break; #endif #ifdef CONFIG_EXT3_FS_POSIX_ACL @@ -973,7 +986,8 @@ static int parse_options (char *options, struct super_block *sb, #else case Opt_acl: case Opt_noacl: - printk("EXT3 (no)acl options not supported\n"); + ext3_msg(sb, KERN_INFO, + "(no)acl options not supported"); break; #endif case Opt_reservation: @@ -989,16 +1003,16 @@ static int parse_options (char *options, struct super_block *sb, user to specify an existing inode to be the journal file. */ if (is_remount) { - printk(KERN_ERR "EXT3-fs: cannot specify " - "journal on remount\n"); + ext3_msg(sb, KERN_ERR, "error: cannot specify " + "journal on remount"); return 0; } set_opt (sbi->s_mount_opt, UPDATE_JOURNAL); break; case Opt_journal_inum: if (is_remount) { - printk(KERN_ERR "EXT3-fs: cannot specify " - "journal on remount\n"); + ext3_msg(sb, KERN_ERR, "error: cannot specify " + "journal on remount"); return 0; } if (match_int(&args[0], &option)) @@ -1007,8 +1021,8 @@ static int parse_options (char *options, struct super_block *sb, break; case Opt_journal_dev: if (is_remount) { - printk(KERN_ERR "EXT3-fs: cannot specify " - "journal on remount\n"); + ext3_msg(sb, KERN_ERR, "error: cannot specify " + "journal on remount"); return 0; } if (match_int(&args[0], &option)) @@ -1040,12 +1054,11 @@ static int parse_options (char *options, struct super_block *sb, if ((sbi->s_mount_opt & EXT3_MOUNT_DATA_FLAGS) == data_opt) break; - printk(KERN_ERR - "EXT3-fs (device %s): Cannot change " + ext3_msg(sb, KERN_ERR, + "error: cannot change " "data mode on remount. 
The filesystem " "is mounted in data=%s mode and you " - "try to remount it in data=%s mode.\n", - sb->s_id, + "try to remount it in data=%s mode.", data_mode_string(sbi->s_mount_opt & EXT3_MOUNT_DATA_FLAGS), data_mode_string(data_opt)); @@ -1070,31 +1083,31 @@ static int parse_options (char *options, struct super_block *sb, set_qf_name: if (sb_any_quota_loaded(sb) && !sbi->s_qf_names[qtype]) { - printk(KERN_ERR - "EXT3-fs: Cannot change journaled " - "quota options when quota turned on.\n"); + ext3_msg(sb, KERN_ERR, + "error: cannot change journaled " + "quota options when quota turned on."); return 0; } qname = match_strdup(&args[0]); if (!qname) { - printk(KERN_ERR - "EXT3-fs: not enough memory for " - "storing quotafile name.\n"); + ext3_msg(sb, KERN_ERR, + "error: not enough memory for " + "storing quotafile name."); return 0; } if (sbi->s_qf_names[qtype] && strcmp(sbi->s_qf_names[qtype], qname)) { - printk(KERN_ERR - "EXT3-fs: %s quota file already " - "specified.\n", QTYPE2NAME(qtype)); + ext3_msg(sb, KERN_ERR, + "error: %s quota file already " + "specified.", QTYPE2NAME(qtype)); kfree(qname); return 0; } sbi->s_qf_names[qtype] = qname; if (strchr(sbi->s_qf_names[qtype], '/')) { - printk(KERN_ERR - "EXT3-fs: quotafile must be on " - "filesystem root.\n"); + ext3_msg(sb, KERN_ERR, + "error: quotafile must be on " + "filesystem root."); kfree(sbi->s_qf_names[qtype]); sbi->s_qf_names[qtype] = NULL; return 0; @@ -1109,9 +1122,9 @@ set_qf_name: clear_qf_name: if (sb_any_quota_loaded(sb) && sbi->s_qf_names[qtype]) { - printk(KERN_ERR "EXT3-fs: Cannot change " + ext3_msg(sb, KERN_ERR, "error: cannot change " "journaled quota options when " - "quota turned on.\n"); + "quota turned on."); return 0; } /* @@ -1128,9 +1141,9 @@ clear_qf_name: set_qf_format: if (sb_any_quota_loaded(sb) && sbi->s_jquota_fmt != qfmt) { - printk(KERN_ERR "EXT3-fs: Cannot change " + ext3_msg(sb, KERN_ERR, "error: cannot change " "journaled quota options when " - "quota turned on.\n"); + "quota turned on."); return 0; } sbi->s_jquota_fmt = qfmt; @@ -1146,8 +1159,8 @@ set_qf_format: break; case Opt_noquota: if (sb_any_quota_loaded(sb)) { - printk(KERN_ERR "EXT3-fs: Cannot change quota " - "options when quota turned on.\n"); + ext3_msg(sb, KERN_ERR, "error: cannot change " + "quota options when quota turned on."); return 0; } clear_opt(sbi->s_mount_opt, QUOTA); @@ -1158,8 +1171,8 @@ set_qf_format: case Opt_quota: case Opt_usrquota: case Opt_grpquota: - printk(KERN_ERR - "EXT3-fs: quota options not supported.\n"); + ext3_msg(sb, KERN_ERR, + "error: quota options not supported."); break; case Opt_usrjquota: case Opt_grpjquota: @@ -1167,9 +1180,9 @@ set_qf_format: case Opt_offgrpjquota: case Opt_jqfmt_vfsold: case Opt_jqfmt_vfsv0: - printk(KERN_ERR - "EXT3-fs: journaled quota options not " - "supported.\n"); + ext3_msg(sb, KERN_ERR, + "error: journaled quota options not " + "supported."); break; case Opt_noquota: break; @@ -1189,8 +1202,9 @@ set_qf_format: break; case Opt_resize: if (!is_remount) { - printk("EXT3-fs: resize option only available " - "for remount\n"); + ext3_msg(sb, KERN_ERR, + "error: resize option only available " + "for remount"); return 0; } if (match_int(&args[0], &option) != 0) @@ -1204,9 +1218,9 @@ set_qf_format: clear_opt(sbi->s_mount_opt, NOBH); break; default: - printk (KERN_ERR - "EXT3-fs: Unrecognized mount option \"%s\" " - "or missing value\n", p); + ext3_msg(sb, KERN_ERR, + "error: unrecognized mount option \"%s\" " + "or missing value", p); return 0; } } @@ -1224,21 +1238,21 @@ 
set_qf_format: (sbi->s_mount_opt & EXT3_MOUNT_GRPQUOTA)) || (sbi->s_qf_names[GRPQUOTA] && (sbi->s_mount_opt & EXT3_MOUNT_USRQUOTA))) { - printk(KERN_ERR "EXT3-fs: old and new quota " - "format mixing.\n"); + ext3_msg(sb, KERN_ERR, "error: old and new quota " + "format mixing."); return 0; } if (!sbi->s_jquota_fmt) { - printk(KERN_ERR "EXT3-fs: journaled quota format " - "not specified.\n"); + ext3_msg(sb, KERN_ERR, "error: journaled quota format " + "not specified."); return 0; } } else { if (sbi->s_jquota_fmt) { - printk(KERN_ERR "EXT3-fs: journaled quota format " + ext3_msg(sb, KERN_ERR, "error: journaled quota format " "specified with no journaling " - "enabled.\n"); + "enabled."); return 0; } } @@ -1253,31 +1267,33 @@ static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es, int res = 0; if (le32_to_cpu(es->s_rev_level) > EXT3_MAX_SUPP_REV) { - printk (KERN_ERR "EXT3-fs warning: revision level too high, " - "forcing read-only mode\n"); + ext3_msg(sb, KERN_ERR, + "error: revision level too high, " + "forcing read-only mode"); res = MS_RDONLY; } if (read_only) return res; if (!(sbi->s_mount_state & EXT3_VALID_FS)) - printk (KERN_WARNING "EXT3-fs warning: mounting unchecked fs, " - "running e2fsck is recommended\n"); + ext3_msg(sb, KERN_WARNING, + "warning: mounting unchecked fs, " + "running e2fsck is recommended"); else if ((sbi->s_mount_state & EXT3_ERROR_FS)) - printk (KERN_WARNING - "EXT3-fs warning: mounting fs with errors, " - "running e2fsck is recommended\n"); + ext3_msg(sb, KERN_WARNING, + "warning: mounting fs with errors, " + "running e2fsck is recommended"); else if ((__s16) le16_to_cpu(es->s_max_mnt_count) >= 0 && le16_to_cpu(es->s_mnt_count) >= (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count)) - printk (KERN_WARNING - "EXT3-fs warning: maximal mount count reached, " - "running e2fsck is recommended\n"); + ext3_msg(sb, KERN_WARNING, + "warning: maximal mount count reached, " + "running e2fsck is recommended"); else if (le32_to_cpu(es->s_checkinterval) && (le32_to_cpu(es->s_lastcheck) + le32_to_cpu(es->s_checkinterval) <= get_seconds())) - printk (KERN_WARNING - "EXT3-fs warning: checktime reached, " - "running e2fsck is recommended\n"); + ext3_msg(sb, KERN_WARNING, + "warning: checktime reached, " + "running e2fsck is recommended"); #if 0 /* @@@ We _will_ want to clear the valid bit if we find inconsistencies, to force a fsck at reboot. 
But for @@ -1294,22 +1310,20 @@ static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es, ext3_commit_super(sb, es, 1); if (test_opt(sb, DEBUG)) - printk(KERN_INFO "[EXT3 FS bs=%lu, gc=%lu, " - "bpg=%lu, ipg=%lu, mo=%04lx]\n", + ext3_msg(sb, KERN_INFO, "[bs=%lu, gc=%lu, " + "bpg=%lu, ipg=%lu, mo=%04lx]", sb->s_blocksize, sbi->s_groups_count, EXT3_BLOCKS_PER_GROUP(sb), EXT3_INODES_PER_GROUP(sb), sbi->s_mount_opt); - printk(KERN_INFO "EXT3 FS on %s, ", sb->s_id); if (EXT3_SB(sb)->s_journal->j_inode == NULL) { char b[BDEVNAME_SIZE]; - - printk("external journal on %s\n", + ext3_msg(sb, KERN_INFO, "using external journal on %s", bdevname(EXT3_SB(sb)->s_journal->j_dev, b)); } else { - printk("internal journal\n"); + ext3_msg(sb, KERN_INFO, "using internal journal"); } return res; } @@ -1403,8 +1417,8 @@ static void ext3_orphan_cleanup (struct super_block * sb, } if (bdev_read_only(sb->s_bdev)) { - printk(KERN_ERR "EXT3-fs: write access " - "unavailable, skipping orphan cleanup.\n"); + ext3_msg(sb, KERN_ERR, "error: write access " + "unavailable, skipping orphan cleanup."); return; } @@ -1418,8 +1432,7 @@ static void ext3_orphan_cleanup (struct super_block * sb, } if (s_flags & MS_RDONLY) { - printk(KERN_INFO "EXT3-fs: %s: orphan cleanup on readonly fs\n", - sb->s_id); + ext3_msg(sb, KERN_INFO, "orphan cleanup on readonly fs"); sb->s_flags &= ~MS_RDONLY; } #ifdef CONFIG_QUOTA @@ -1430,9 +1443,9 @@ static void ext3_orphan_cleanup (struct super_block * sb, if (EXT3_SB(sb)->s_qf_names[i]) { int ret = ext3_quota_on_mount(sb, i); if (ret < 0) - printk(KERN_ERR - "EXT3-fs: Cannot turn on journaled " - "quota: error %d\n", ret); + ext3_msg(sb, KERN_ERR, + "error: cannot turn on journaled " + "quota: %d", ret); } } #endif @@ -1470,11 +1483,11 @@ static void ext3_orphan_cleanup (struct super_block * sb, #define PLURAL(x) (x), ((x)==1) ? 
"" : "s" if (nr_orphans) - printk(KERN_INFO "EXT3-fs: %s: %d orphan inode%s deleted\n", - sb->s_id, PLURAL(nr_orphans)); + ext3_msg(sb, KERN_INFO, "%d orphan inode%s deleted", + PLURAL(nr_orphans)); if (nr_truncates) - printk(KERN_INFO "EXT3-fs: %s: %d truncate%s cleaned up\n", - sb->s_id, PLURAL(nr_truncates)); + ext3_msg(sb, KERN_INFO, "%d truncate%s cleaned up", + PLURAL(nr_truncates)); #ifdef CONFIG_QUOTA /* Turn quotas off */ for (i = 0; i < MAXQUOTAS; i++) { @@ -1558,7 +1571,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) struct ext3_super_block *es = NULL; struct ext3_sb_info *sbi; ext3_fsblk_t block; - ext3_fsblk_t sb_block = get_sb_block(&data); + ext3_fsblk_t sb_block = get_sb_block(&data, sb); ext3_fsblk_t logic_sb_block; unsigned long offset = 0; unsigned int journal_inum = 0; @@ -1594,7 +1607,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) blocksize = sb_min_blocksize(sb, EXT3_MIN_BLOCK_SIZE); if (!blocksize) { - printk(KERN_ERR "EXT3-fs: unable to set blocksize\n"); + ext3_msg(sb, KERN_ERR, "error: unable to set blocksize"); goto out_fail; } @@ -1610,7 +1623,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) } if (!(bh = sb_bread(sb, logic_sb_block))) { - printk (KERN_ERR "EXT3-fs: unable to read superblock\n"); + ext3_msg(sb, KERN_ERR, "error: unable to read superblock"); goto out_fail; } /* @@ -1669,9 +1682,9 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) (EXT3_HAS_COMPAT_FEATURE(sb, ~0U) || EXT3_HAS_RO_COMPAT_FEATURE(sb, ~0U) || EXT3_HAS_INCOMPAT_FEATURE(sb, ~0U))) - printk(KERN_WARNING - "EXT3-fs warning: feature flags set on rev 0 fs, " - "running e2fsck is recommended\n"); + ext3_msg(sb, KERN_WARNING, + "warning: feature flags set on rev 0 fs, " + "running e2fsck is recommended"); /* * Check feature flags regardless of the revision level, since we * previously didn't change the revision level when setting the flags, @@ -1679,25 +1692,25 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) */ features = EXT3_HAS_INCOMPAT_FEATURE(sb, ~EXT3_FEATURE_INCOMPAT_SUPP); if (features) { - printk(KERN_ERR "EXT3-fs: %s: couldn't mount because of " - "unsupported optional features (%x).\n", - sb->s_id, le32_to_cpu(features)); + ext3_msg(sb, KERN_ERR, + "error: couldn't mount because of unsupported " + "optional features (%x)", le32_to_cpu(features)); goto failed_mount; } features = EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP); if (!(sb->s_flags & MS_RDONLY) && features) { - printk(KERN_ERR "EXT3-fs: %s: couldn't mount RDWR because of " - "unsupported optional features (%x).\n", - sb->s_id, le32_to_cpu(features)); + ext3_msg(sb, KERN_ERR, + "error: couldn't mount RDWR because of unsupported " + "optional features (%x)", le32_to_cpu(features)); goto failed_mount; } blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size); if (blocksize < EXT3_MIN_BLOCK_SIZE || blocksize > EXT3_MAX_BLOCK_SIZE) { - printk(KERN_ERR - "EXT3-fs: Unsupported filesystem blocksize %d on %s.\n", - blocksize, sb->s_id); + ext3_msg(sb, KERN_ERR, + "error: couldn't mount because of unsupported " + "filesystem blocksize %d", blocksize); goto failed_mount; } @@ -1708,30 +1721,31 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) * than the hardware sectorsize for the machine. 
*/ if (blocksize < hblock) { - printk(KERN_ERR "EXT3-fs: blocksize %d too small for " - "device blocksize %d.\n", blocksize, hblock); + ext3_msg(sb, KERN_ERR, + "error: fsblocksize %d too small for " + "hardware sectorsize %d", blocksize, hblock); goto failed_mount; } brelse (bh); if (!sb_set_blocksize(sb, blocksize)) { - printk(KERN_ERR "EXT3-fs: bad blocksize %d.\n", - blocksize); + ext3_msg(sb, KERN_ERR, + "error: bad blocksize %d", blocksize); goto out_fail; } logic_sb_block = (sb_block * EXT3_MIN_BLOCK_SIZE) / blocksize; offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize; bh = sb_bread(sb, logic_sb_block); if (!bh) { - printk(KERN_ERR - "EXT3-fs: Can't read superblock on 2nd try.\n"); + ext3_msg(sb, KERN_ERR, + "error: can't read superblock on 2nd try"); goto failed_mount; } es = (struct ext3_super_block *)(((char *)bh->b_data) + offset); sbi->s_es = es; if (es->s_magic != cpu_to_le16(EXT3_SUPER_MAGIC)) { - printk (KERN_ERR - "EXT3-fs: Magic mismatch, very weird !\n"); + ext3_msg(sb, KERN_ERR, + "error: magic mismatch"); goto failed_mount; } } @@ -1747,8 +1761,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) if ((sbi->s_inode_size < EXT3_GOOD_OLD_INODE_SIZE) || (!is_power_of_2(sbi->s_inode_size)) || (sbi->s_inode_size > blocksize)) { - printk (KERN_ERR - "EXT3-fs: unsupported inode size: %d\n", + ext3_msg(sb, KERN_ERR, + "error: unsupported inode size: %d", sbi->s_inode_size); goto failed_mount; } @@ -1756,8 +1770,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) sbi->s_frag_size = EXT3_MIN_FRAG_SIZE << le32_to_cpu(es->s_log_frag_size); if (blocksize != sbi->s_frag_size) { - printk(KERN_ERR - "EXT3-fs: fragsize %lu != blocksize %u (unsupported)\n", + ext3_msg(sb, KERN_ERR, + "error: fragsize %lu != blocksize %u (unsupported)", sbi->s_frag_size, blocksize); goto failed_mount; } @@ -1793,31 +1807,31 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) } if (sbi->s_blocks_per_group > blocksize * 8) { - printk (KERN_ERR - "EXT3-fs: #blocks per group too big: %lu\n", + ext3_msg(sb, KERN_ERR, + "#blocks per group too big: %lu", sbi->s_blocks_per_group); goto failed_mount; } if (sbi->s_frags_per_group > blocksize * 8) { - printk (KERN_ERR - "EXT3-fs: #fragments per group too big: %lu\n", + ext3_msg(sb, KERN_ERR, + "error: #fragments per group too big: %lu", sbi->s_frags_per_group); goto failed_mount; } if (sbi->s_inodes_per_group > blocksize * 8) { - printk (KERN_ERR - "EXT3-fs: #inodes per group too big: %lu\n", + ext3_msg(sb, KERN_ERR, + "error: #inodes per group too big: %lu", sbi->s_inodes_per_group); goto failed_mount; } if (le32_to_cpu(es->s_blocks_count) > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) { - printk(KERN_ERR "EXT3-fs: filesystem on %s:" - " too large to mount safely\n", sb->s_id); + ext3_msg(sb, KERN_ERR, + "error: filesystem is too large to mount safely"); if (sizeof(sector_t) < 8) - printk(KERN_WARNING "EXT3-fs: CONFIG_LBDAF not " - "enabled\n"); + ext3_msg(sb, KERN_ERR, + "error: CONFIG_LBDAF not enabled"); goto failed_mount; } @@ -1831,7 +1845,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) sbi->s_group_desc = kmalloc(db_count * sizeof (struct buffer_head *), GFP_KERNEL); if (sbi->s_group_desc == NULL) { - printk (KERN_ERR "EXT3-fs: not enough memory\n"); + ext3_msg(sb, KERN_ERR, + "error: not enough memory"); goto failed_mount; } @@ -1841,14 +1856,15 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) block = 
descriptor_loc(sb, logic_sb_block, i); sbi->s_group_desc[i] = sb_bread(sb, block); if (!sbi->s_group_desc[i]) { - printk (KERN_ERR "EXT3-fs: " - "can't read group descriptor %d\n", i); + ext3_msg(sb, KERN_ERR, + "error: can't read group descriptor %d", i); db_count = i; goto failed_mount2; } } if (!ext3_check_descriptors (sb)) { - printk(KERN_ERR "EXT3-fs: group descriptors corrupted!\n"); + ext3_msg(sb, KERN_ERR, + "error: group descriptors corrupted"); goto failed_mount2; } sbi->s_gdb_count = db_count; @@ -1866,7 +1882,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) ext3_count_dirs(sb)); } if (err) { - printk(KERN_ERR "EXT3-fs: insufficient memory\n"); + ext3_msg(sb, KERN_ERR, "error: insufficient memory"); goto failed_mount3; } @@ -1914,9 +1930,9 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) goto failed_mount3; } else { if (!silent) - printk (KERN_ERR - "ext3: No journal on filesystem on %s\n", - sb->s_id); + ext3_msg(sb, KERN_ERR, + "error: no journal found. " + "mounting ext3 over ext2?"); goto failed_mount3; } @@ -1938,8 +1954,9 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) case EXT3_MOUNT_WRITEBACK_DATA: if (!journal_check_available_features (sbi->s_journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE)) { - printk(KERN_ERR "EXT3-fs: Journal does not support " - "requested data journaling mode\n"); + ext3_msg(sb, KERN_ERR, + "error: journal does not support " + "requested data journaling mode"); goto failed_mount4; } default: @@ -1948,8 +1965,9 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) if (test_opt(sb, NOBH)) { if (!(test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA)) { - printk(KERN_WARNING "EXT3-fs: Ignoring nobh option - " - "its supported only with writeback mode\n"); + ext3_msg(sb, KERN_WARNING, + "warning: ignoring nobh option - " + "it is supported only with writeback mode"); clear_opt(sbi->s_mount_opt, NOBH); } } @@ -1960,18 +1978,18 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) root = ext3_iget(sb, EXT3_ROOT_INO); if (IS_ERR(root)) { - printk(KERN_ERR "EXT3-fs: get root inode failed\n"); + ext3_msg(sb, KERN_ERR, "error: get root inode failed"); ret = PTR_ERR(root); goto failed_mount4; } if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) { iput(root); - printk(KERN_ERR "EXT3-fs: corrupt root inode, run e2fsck\n"); + ext3_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck"); goto failed_mount4; } sb->s_root = d_alloc_root(root); if (!sb->s_root) { - printk(KERN_ERR "EXT3-fs: get root dentry failed\n"); + ext3_msg(sb, KERN_ERR, "error: get root dentry failed"); iput(root); ret = -ENOMEM; goto failed_mount4; @@ -1990,9 +2008,9 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) ext3_orphan_cleanup(sb, es); EXT3_SB(sb)->s_mount_state &= ~EXT3_ORPHAN_FS; if (needs_recovery) - printk (KERN_INFO "EXT3-fs: recovery complete.\n"); + ext3_msg(sb, KERN_INFO, "recovery complete"); ext3_mark_recovery_complete(sb, es); - printk (KERN_INFO "EXT3-fs: mounted filesystem with %s data mode.\n", + ext3_msg(sb, KERN_INFO, "mounted filesystem with %s data mode", test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ? "journal": test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA ? 
"ordered": "writeback"); @@ -2002,7 +2020,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) cantfind_ext3: if (!silent) - printk(KERN_ERR "VFS: Can't find ext3 filesystem on dev %s.\n", + ext3_msg(sb, KERN_INFO, + "error: can't find ext3 filesystem on dev %s.", sb->s_id); goto failed_mount; @@ -2070,27 +2089,27 @@ static journal_t *ext3_get_journal(struct super_block *sb, journal_inode = ext3_iget(sb, journal_inum); if (IS_ERR(journal_inode)) { - printk(KERN_ERR "EXT3-fs: no journal found.\n"); + ext3_msg(sb, KERN_ERR, "error: no journal found"); return NULL; } if (!journal_inode->i_nlink) { make_bad_inode(journal_inode); iput(journal_inode); - printk(KERN_ERR "EXT3-fs: journal inode is deleted.\n"); + ext3_msg(sb, KERN_ERR, "error: journal inode is deleted"); return NULL; } jbd_debug(2, "Journal inode found at %p: %Ld bytes\n", journal_inode, journal_inode->i_size); if (!S_ISREG(journal_inode->i_mode)) { - printk(KERN_ERR "EXT3-fs: invalid journal inode.\n"); + ext3_msg(sb, KERN_ERR, "error: invalid journal inode"); iput(journal_inode); return NULL; } journal = journal_init_inode(journal_inode); if (!journal) { - printk(KERN_ERR "EXT3-fs: Could not load journal inode\n"); + ext3_msg(sb, KERN_ERR, "error: could not load journal inode"); iput(journal_inode); return NULL; } @@ -2112,13 +2131,13 @@ static journal_t *ext3_get_dev_journal(struct super_block *sb, struct ext3_super_block * es; struct block_device *bdev; - bdev = ext3_blkdev_get(j_dev); + bdev = ext3_blkdev_get(j_dev, sb); if (bdev == NULL) return NULL; if (bd_claim(bdev, sb)) { - printk(KERN_ERR - "EXT3: failed to claim external journal device.\n"); + ext3_msg(sb, KERN_ERR, + "error: failed to claim external journal device"); blkdev_put(bdev, FMODE_READ|FMODE_WRITE); return NULL; } @@ -2126,8 +2145,8 @@ static journal_t *ext3_get_dev_journal(struct super_block *sb, blocksize = sb->s_blocksize; hblock = bdev_logical_block_size(bdev); if (blocksize < hblock) { - printk(KERN_ERR - "EXT3-fs: blocksize too small for journal device.\n"); + ext3_msg(sb, KERN_ERR, + "error: blocksize too small for journal device"); goto out_bdev; } @@ -2135,8 +2154,8 @@ static journal_t *ext3_get_dev_journal(struct super_block *sb, offset = EXT3_MIN_BLOCK_SIZE % blocksize; set_blocksize(bdev, blocksize); if (!(bh = __bread(bdev, sb_block, blocksize))) { - printk(KERN_ERR "EXT3-fs: couldn't read superblock of " - "external journal\n"); + ext3_msg(sb, KERN_ERR, "error: couldn't read superblock of " + "external journal"); goto out_bdev; } @@ -2144,14 +2163,14 @@ static journal_t *ext3_get_dev_journal(struct super_block *sb, if ((le16_to_cpu(es->s_magic) != EXT3_SUPER_MAGIC) || !(le32_to_cpu(es->s_feature_incompat) & EXT3_FEATURE_INCOMPAT_JOURNAL_DEV)) { - printk(KERN_ERR "EXT3-fs: external journal has " - "bad superblock\n"); + ext3_msg(sb, KERN_ERR, "error: external journal has " + "bad superblock"); brelse(bh); goto out_bdev; } if (memcmp(EXT3_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) { - printk(KERN_ERR "EXT3-fs: journal UUID does not match\n"); + ext3_msg(sb, KERN_ERR, "error: journal UUID does not match"); brelse(bh); goto out_bdev; } @@ -2163,19 +2182,21 @@ static journal_t *ext3_get_dev_journal(struct super_block *sb, journal = journal_init_dev(bdev, sb->s_bdev, start, len, blocksize); if (!journal) { - printk(KERN_ERR "EXT3-fs: failed to create device journal\n"); + ext3_msg(sb, KERN_ERR, + "error: failed to create device journal"); goto out_bdev; } journal->j_private = sb; ll_rw_block(READ, 1, 
&journal->j_sb_buffer); wait_on_buffer(journal->j_sb_buffer); if (!buffer_uptodate(journal->j_sb_buffer)) { - printk(KERN_ERR "EXT3-fs: I/O error on journal device\n"); + ext3_msg(sb, KERN_ERR, "I/O error on journal device"); goto out_journal; } if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) { - printk(KERN_ERR "EXT3-fs: External journal has more than one " - "user (unsupported) - %d\n", + ext3_msg(sb, KERN_ERR, + "error: external journal has more than one " + "user (unsupported) - %d", be32_to_cpu(journal->j_superblock->s_nr_users)); goto out_journal; } @@ -2201,8 +2222,8 @@ static int ext3_load_journal(struct super_block *sb, if (journal_devnum && journal_devnum != le32_to_cpu(es->s_journal_dev)) { - printk(KERN_INFO "EXT3-fs: external journal device major/minor " - "numbers have changed\n"); + ext3_msg(sb, KERN_INFO, "external journal device major/minor " + "numbers have changed"); journal_dev = new_decode_dev(journal_devnum); } else journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev)); @@ -2217,21 +2238,21 @@ static int ext3_load_journal(struct super_block *sb, if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER)) { if (sb->s_flags & MS_RDONLY) { - printk(KERN_INFO "EXT3-fs: INFO: recovery " - "required on readonly filesystem.\n"); + ext3_msg(sb, KERN_INFO, + "recovery required on readonly filesystem"); if (really_read_only) { - printk(KERN_ERR "EXT3-fs: write access " - "unavailable, cannot proceed.\n"); + ext3_msg(sb, KERN_ERR, "error: write access " + "unavailable, cannot proceed"); return -EROFS; } - printk (KERN_INFO "EXT3-fs: write access will " - "be enabled during recovery.\n"); + ext3_msg(sb, KERN_INFO, + "write access will be enabled during recovery"); } } if (journal_inum && journal_dev) { - printk(KERN_ERR "EXT3-fs: filesystem has both journal " - "and inode journals!\n"); + ext3_msg(sb, KERN_ERR, "error: filesystem has both journal " + "and inode journals"); return -EINVAL; } @@ -2246,7 +2267,7 @@ static int ext3_load_journal(struct super_block *sb, if (!really_read_only && test_opt(sb, UPDATE_JOURNAL)) { err = journal_update_format(journal); if (err) { - printk(KERN_ERR "EXT3-fs: error updating journal.\n"); + ext3_msg(sb, KERN_ERR, "error updating journal"); journal_destroy(journal); return err; } @@ -2258,7 +2279,7 @@ static int ext3_load_journal(struct super_block *sb, err = journal_load(journal); if (err) { - printk(KERN_ERR "EXT3-fs: error loading journal.\n"); + ext3_msg(sb, KERN_ERR, "error loading journal"); journal_destroy(journal); return err; } @@ -2277,16 +2298,17 @@ static int ext3_load_journal(struct super_block *sb, return 0; } -static int ext3_create_journal(struct super_block * sb, - struct ext3_super_block * es, +static int ext3_create_journal(struct super_block *sb, + struct ext3_super_block *es, unsigned int journal_inum) { journal_t *journal; int err; if (sb->s_flags & MS_RDONLY) { - printk(KERN_ERR "EXT3-fs: readonly filesystem when trying to " - "create journal.\n"); + ext3_msg(sb, KERN_ERR, + "error: readonly filesystem when trying to " + "create journal"); return -EROFS; } @@ -2294,12 +2316,12 @@ static int ext3_create_journal(struct super_block * sb, if (!journal) return -EINVAL; - printk(KERN_INFO "EXT3-fs: creating new journal on inode %u\n", + ext3_msg(sb, KERN_INFO, "creating new journal on inode %u", journal_inum); err = journal_create(journal); if (err) { - printk(KERN_ERR "EXT3-fs: error creating journal.\n"); + ext3_msg(sb, KERN_ERR, "error creating journal"); journal_destroy(journal); return -EIO; } @@ 
-2380,8 +2402,8 @@ out: * has recorded an error from a previous lifetime, move that error to the * main filesystem now. */ -static void ext3_clear_journal_err(struct super_block * sb, - struct ext3_super_block * es) +static void ext3_clear_journal_err(struct super_block *sb, + struct ext3_super_block *es) { journal_t *journal; int j_errno; @@ -2572,10 +2594,10 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data) __le32 ret; if ((ret = EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP))) { - printk(KERN_WARNING "EXT3-fs: %s: couldn't " - "remount RDWR because of unsupported " - "optional features (%x).\n", - sb->s_id, le32_to_cpu(ret)); + ext3_msg(sb, KERN_WARNING, + "warning: couldn't remount RDWR " + "because of unsupported optional " + "features (%x)", le32_to_cpu(ret)); err = -EROFS; goto restore_opts; } @@ -2586,11 +2608,10 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data) * require a full umount/remount for now. */ if (es->s_last_orphan) { - printk(KERN_WARNING "EXT3-fs: %s: couldn't " + ext3_msg(sb, KERN_WARNING, "warning: couldn't " "remount RDWR because of unprocessed " "orphan inode list. Please " - "umount/remount instead.\n", - sb->s_id); + "umount/remount instead."); err = -EINVAL; goto restore_opts; } @@ -2839,9 +2860,9 @@ static int ext3_quota_on(struct super_block *sb, int type, int format_id, if (EXT3_SB(sb)->s_qf_names[type]) { /* Quotafile not of fs root? */ if (path.dentry->d_parent != sb->s_root) - printk(KERN_WARNING - "EXT3-fs: Quota file not on filesystem root. " - "Journaled quota will not work.\n"); + ext3_msg(sb, KERN_WARNING, + "warning: Quota file not on filesystem root. " + "Journaled quota will not work."); } /* @@ -2923,8 +2944,9 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type, handle_t *handle = journal_current_handle(); if (!handle) { - printk(KERN_WARNING "EXT3-fs: Quota write (off=%Lu, len=%Lu)" - " cancelled because transaction is not started.\n", + ext3_msg(sb, KERN_WARNING, + "warning: quota write (off=%llu, len=%llu)" + " cancelled because transaction is not started.", (unsigned long long)off, (unsigned long long)len); return -EIO; } diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h index 7499b3667798..6b049030fbe6 100644 --- a/include/linux/ext3_fs.h +++ b/include/linux/ext3_fs.h @@ -918,6 +918,8 @@ extern void ext3_abort (struct super_block *, const char *, const char *, ...) __attribute__ ((format (printf, 3, 4))); extern void ext3_warning (struct super_block *, const char *, const char *, ...) __attribute__ ((format (printf, 3, 4))); +extern void ext3_msg(struct super_block *, const char *, const char *, ...) + __attribute__ ((format (printf, 3, 4))); extern void ext3_update_dynamic_rev (struct super_block *sb); #define ext3_std_error(sb, errno) \ -- cgit v1.2.3 From 30673930051e5203d0b826b8b8f2454cab453b94 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 30 Nov 2009 22:17:41 +0100 Subject: quota: Move definition of QFMT_OCFS2 to linux/quota.h Move definition of this constant to linux/quota.h so that it cannot clash with other format IDs. 
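The clash this guards against is easy to picture: with the ID defined privately inside one filesystem, nothing stops a later addition to the central header from reusing the same number. A hypothetical example, with made-up names and values for illustration only:

/* fs/foofs/quota.h, private copy of the format ID */
#define QFMT_FOOFS	3

/* include/linux/quota.h, extended later without knowing about the above */
#define QFMT_NEWFMT	3	/* same value: the two formats now collide */

Keeping every QFMT_* value in linux/quota.h makes such a collision visible at the point where a new ID is assigned.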
CC: Joel Becker Signed-off-by: Jan Kara --- fs/ocfs2/quota.h | 4 ---- include/linux/quota.h | 1 + 2 files changed, 1 insertion(+), 4 deletions(-) (limited to 'include') diff --git a/fs/ocfs2/quota.h b/fs/ocfs2/quota.h index e5df9d170b0c..123bc520a2c0 100644 --- a/fs/ocfs2/quota.h +++ b/fs/ocfs2/quota.h @@ -17,10 +17,6 @@ #include "ocfs2.h" -/* Common stuff */ -/* id number of quota format */ -#define QFMT_OCFS2 3 - /* * In-memory structures */ diff --git a/include/linux/quota.h b/include/linux/quota.h index f63c9d6ba784..7db3a005483f 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -73,6 +73,7 @@ /* Quota format type IDs */ #define QFMT_VFS_OLD 1 #define QFMT_VFS_V0 2 +#define QFMT_OCFS2 3 /* Size of block in which space limits are passed through the quota * interface */ -- cgit v1.2.3 From 498c60153ebb8889d8944591383c5c12af1127d4 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 16 Nov 2009 18:09:47 +0100 Subject: quota: Implement quota format with 64-bit space and inode limits So far the maximum quota space limit was 4TB. Apparently this isn't enough for Lustre guys anymore. So implement new quota format which raises block limits to 2^64 bytes. Also store number of inodes and inode limits in 64-bit variables as 2^32 files isn't that insanely high anymore. The first version of the patch has been developed by Andrew Perepechko . CC: Andrew.Perepechko@Sun.COM Signed-off-by: Jan Kara --- fs/quota/Kconfig | 8 ++- fs/quota/quota_v2.c | 165 ++++++++++++++++++++++++++++++++++++++++---------- fs/quota/quotaio_v2.h | 19 +++++- include/linux/quota.h | 1 + 4 files changed, 154 insertions(+), 39 deletions(-) (limited to 'include') diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig index 353e78a9ebee..efc02ebb8c70 100644 --- a/fs/quota/Kconfig +++ b/fs/quota/Kconfig @@ -46,12 +46,14 @@ config QFMT_V1 format say Y here. config QFMT_V2 - tristate "Quota format v2 support" + tristate "Quota format vfsv0 and vfsv1 support" depends on QUOTA select QUOTA_TREE help - This quota format allows using quotas with 32-bit UIDs/GIDs. If you - need this functionality say Y here. + This config option enables kernel support for vfsv0 and vfsv1 quota + formats. Both these formats support 32-bit UIDs/GIDs and vfsv1 format + also supports 64-bit inode and block quota limits. If you need this + functionality say Y here. 
config QUOTACTL bool diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c index 01f25eae684d..3dfc23e02135 100644 --- a/fs/quota/quota_v2.c +++ b/fs/quota/quota_v2.c @@ -23,14 +23,23 @@ MODULE_LICENSE("GPL"); #define __QUOTA_V2_PARANOIA -static void v2_mem2diskdqb(void *dp, struct dquot *dquot); -static void v2_disk2memdqb(struct dquot *dquot, void *dp); -static int v2_is_id(void *dp, struct dquot *dquot); - -static struct qtree_fmt_operations v2_qtree_ops = { - .mem2disk_dqblk = v2_mem2diskdqb, - .disk2mem_dqblk = v2_disk2memdqb, - .is_id = v2_is_id, +static void v2r0_mem2diskdqb(void *dp, struct dquot *dquot); +static void v2r0_disk2memdqb(struct dquot *dquot, void *dp); +static int v2r0_is_id(void *dp, struct dquot *dquot); +static void v2r1_mem2diskdqb(void *dp, struct dquot *dquot); +static void v2r1_disk2memdqb(struct dquot *dquot, void *dp); +static int v2r1_is_id(void *dp, struct dquot *dquot); + +static struct qtree_fmt_operations v2r0_qtree_ops = { + .mem2disk_dqblk = v2r0_mem2diskdqb, + .disk2mem_dqblk = v2r0_disk2memdqb, + .is_id = v2r0_is_id, +}; + +static struct qtree_fmt_operations v2r1_qtree_ops = { + .mem2disk_dqblk = v2r1_mem2diskdqb, + .disk2mem_dqblk = v2r1_disk2memdqb, + .is_id = v2r1_is_id, }; #define QUOTABLOCK_BITS 10 @@ -46,23 +55,33 @@ static inline qsize_t v2_qbtos(qsize_t blocks) return blocks << QUOTABLOCK_BITS; } +static int v2_read_header(struct super_block *sb, int type, + struct v2_disk_dqheader *dqhead) +{ + ssize_t size; + + size = sb->s_op->quota_read(sb, type, (char *)dqhead, + sizeof(struct v2_disk_dqheader), 0); + if (size != sizeof(struct v2_disk_dqheader)) { + printk(KERN_WARNING "quota_v2: Failed header read:" + " expected=%zd got=%zd\n", + sizeof(struct v2_disk_dqheader), size); + return 0; + } + return 1; +} + /* Check whether given file is really vfsv0 quotafile */ static int v2_check_quota_file(struct super_block *sb, int type) { struct v2_disk_dqheader dqhead; - ssize_t size; static const uint quota_magics[] = V2_INITQMAGICS; static const uint quota_versions[] = V2_INITQVERSIONS; - size = sb->s_op->quota_read(sb, type, (char *)&dqhead, - sizeof(struct v2_disk_dqheader), 0); - if (size != sizeof(struct v2_disk_dqheader)) { - printk("quota_v2: failed read expected=%zd got=%zd\n", - sizeof(struct v2_disk_dqheader), size); + if (!v2_read_header(sb, type, &dqhead)) return 0; - } if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type] || - le32_to_cpu(dqhead.dqh_version) != quota_versions[type]) + le32_to_cpu(dqhead.dqh_version) > quota_versions[type]) return 0; return 1; } @@ -71,14 +90,20 @@ static int v2_check_quota_file(struct super_block *sb, int type) static int v2_read_file_info(struct super_block *sb, int type) { struct v2_disk_dqinfo dinfo; + struct v2_disk_dqheader dqhead; struct mem_dqinfo *info = sb_dqinfo(sb, type); struct qtree_mem_dqinfo *qinfo; ssize_t size; + unsigned int version; + + if (!v2_read_header(sb, type, &dqhead)) + return 0; + version = le32_to_cpu(dqhead.dqh_version); size = sb->s_op->quota_read(sb, type, (char *)&dinfo, sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF); if (size != sizeof(struct v2_disk_dqinfo)) { - printk(KERN_WARNING "Can't read info structure on device %s.\n", + printk(KERN_WARNING "quota_v2: Can't read info structure on device %s.\n", sb->s_id); return -1; } @@ -89,9 +114,15 @@ static int v2_read_file_info(struct super_block *sb, int type) return -1; } qinfo = info->dqi_priv; - /* limits are stored as unsigned 32-bit data */ - info->dqi_maxblimit = 0xffffffff; - info->dqi_maxilimit = 0xffffffff; + if 
(version == 0) { + /* limits are stored as unsigned 32-bit data */ + info->dqi_maxblimit = 0xffffffff; + info->dqi_maxilimit = 0xffffffff; + } else { + /* used space is stored as unsigned 64-bit value */ + info->dqi_maxblimit = 0xffffffffffffffff; /* 2^64-1 */ + info->dqi_maxilimit = 0xffffffffffffffff; + } info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace); info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace); info->dqi_flags = le32_to_cpu(dinfo.dqi_flags); @@ -103,8 +134,13 @@ static int v2_read_file_info(struct super_block *sb, int type) qinfo->dqi_blocksize_bits = V2_DQBLKSIZE_BITS; qinfo->dqi_usable_bs = 1 << V2_DQBLKSIZE_BITS; qinfo->dqi_qtree_depth = qtree_depth(qinfo); - qinfo->dqi_entry_size = sizeof(struct v2_disk_dqblk); - qinfo->dqi_ops = &v2_qtree_ops; + if (version == 0) { + qinfo->dqi_entry_size = sizeof(struct v2r0_disk_dqblk); + qinfo->dqi_ops = &v2r0_qtree_ops; + } else { + qinfo->dqi_entry_size = sizeof(struct v2r1_disk_dqblk); + qinfo->dqi_ops = &v2r1_qtree_ops; + } return 0; } @@ -135,9 +171,9 @@ static int v2_write_file_info(struct super_block *sb, int type) return 0; } -static void v2_disk2memdqb(struct dquot *dquot, void *dp) +static void v2r0_disk2memdqb(struct dquot *dquot, void *dp) { - struct v2_disk_dqblk *d = dp, empty; + struct v2r0_disk_dqblk *d = dp, empty; struct mem_dqblk *m = &dquot->dq_dqb; m->dqb_ihardlimit = le32_to_cpu(d->dqb_ihardlimit); @@ -149,15 +185,15 @@ static void v2_disk2memdqb(struct dquot *dquot, void *dp) m->dqb_curspace = le64_to_cpu(d->dqb_curspace); m->dqb_btime = le64_to_cpu(d->dqb_btime); /* We need to escape back all-zero structure */ - memset(&empty, 0, sizeof(struct v2_disk_dqblk)); + memset(&empty, 0, sizeof(struct v2r0_disk_dqblk)); empty.dqb_itime = cpu_to_le64(1); - if (!memcmp(&empty, dp, sizeof(struct v2_disk_dqblk))) + if (!memcmp(&empty, dp, sizeof(struct v2r0_disk_dqblk))) m->dqb_itime = 0; } -static void v2_mem2diskdqb(void *dp, struct dquot *dquot) +static void v2r0_mem2diskdqb(void *dp, struct dquot *dquot) { - struct v2_disk_dqblk *d = dp; + struct v2r0_disk_dqblk *d = dp; struct mem_dqblk *m = &dquot->dq_dqb; struct qtree_mem_dqinfo *info = sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; @@ -175,9 +211,60 @@ static void v2_mem2diskdqb(void *dp, struct dquot *dquot) d->dqb_itime = cpu_to_le64(1); } -static int v2_is_id(void *dp, struct dquot *dquot) +static int v2r0_is_id(void *dp, struct dquot *dquot) +{ + struct v2r0_disk_dqblk *d = dp; + struct qtree_mem_dqinfo *info = + sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; + + if (qtree_entry_unused(info, dp)) + return 0; + return le32_to_cpu(d->dqb_id) == dquot->dq_id; +} + +static void v2r1_disk2memdqb(struct dquot *dquot, void *dp) +{ + struct v2r1_disk_dqblk *d = dp, empty; + struct mem_dqblk *m = &dquot->dq_dqb; + + m->dqb_ihardlimit = le64_to_cpu(d->dqb_ihardlimit); + m->dqb_isoftlimit = le64_to_cpu(d->dqb_isoftlimit); + m->dqb_curinodes = le64_to_cpu(d->dqb_curinodes); + m->dqb_itime = le64_to_cpu(d->dqb_itime); + m->dqb_bhardlimit = v2_qbtos(le64_to_cpu(d->dqb_bhardlimit)); + m->dqb_bsoftlimit = v2_qbtos(le64_to_cpu(d->dqb_bsoftlimit)); + m->dqb_curspace = le64_to_cpu(d->dqb_curspace); + m->dqb_btime = le64_to_cpu(d->dqb_btime); + /* We need to escape back all-zero structure */ + memset(&empty, 0, sizeof(struct v2r1_disk_dqblk)); + empty.dqb_itime = cpu_to_le64(1); + if (!memcmp(&empty, dp, sizeof(struct v2r1_disk_dqblk))) + m->dqb_itime = 0; +} + +static void v2r1_mem2diskdqb(void *dp, struct dquot *dquot) +{ + struct v2r1_disk_dqblk *d = dp; + struct mem_dqblk 
*m = &dquot->dq_dqb; + struct qtree_mem_dqinfo *info = + sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; + + d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit); + d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit); + d->dqb_curinodes = cpu_to_le64(m->dqb_curinodes); + d->dqb_itime = cpu_to_le64(m->dqb_itime); + d->dqb_bhardlimit = cpu_to_le64(v2_stoqb(m->dqb_bhardlimit)); + d->dqb_bsoftlimit = cpu_to_le64(v2_stoqb(m->dqb_bsoftlimit)); + d->dqb_curspace = cpu_to_le64(m->dqb_curspace); + d->dqb_btime = cpu_to_le64(m->dqb_btime); + d->dqb_id = cpu_to_le32(dquot->dq_id); + if (qtree_entry_unused(info, dp)) + d->dqb_itime = cpu_to_le64(1); +} + +static int v2r1_is_id(void *dp, struct dquot *dquot) { - struct v2_disk_dqblk *d = dp; + struct v2r1_disk_dqblk *d = dp; struct qtree_mem_dqinfo *info = sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; @@ -217,20 +304,32 @@ static const struct quota_format_ops v2_format_ops = { .release_dqblk = v2_release_dquot, }; -static struct quota_format_type v2_quota_format = { +static struct quota_format_type v2r0_quota_format = { .qf_fmt_id = QFMT_VFS_V0, .qf_ops = &v2_format_ops, .qf_owner = THIS_MODULE }; +static struct quota_format_type v2r1_quota_format = { + .qf_fmt_id = QFMT_VFS_V1, + .qf_ops = &v2_format_ops, + .qf_owner = THIS_MODULE +}; + static int __init init_v2_quota_format(void) { - return register_quota_format(&v2_quota_format); + int ret; + + ret = register_quota_format(&v2r0_quota_format); + if (ret) + return ret; + return register_quota_format(&v2r1_quota_format); } static void __exit exit_v2_quota_format(void) { - unregister_quota_format(&v2_quota_format); + unregister_quota_format(&v2r0_quota_format); + unregister_quota_format(&v2r1_quota_format); } module_init(init_v2_quota_format); diff --git a/fs/quota/quotaio_v2.h b/fs/quota/quotaio_v2.h index 530fe580685c..f1966b42c2fd 100644 --- a/fs/quota/quotaio_v2.h +++ b/fs/quota/quotaio_v2.h @@ -17,8 +17,8 @@ } #define V2_INITQVERSIONS {\ - 0, /* USRQUOTA */\ - 0 /* GRPQUOTA */\ + 1, /* USRQUOTA */\ + 1 /* GRPQUOTA */\ } /* First generic header */ @@ -32,7 +32,7 @@ struct v2_disk_dqheader { * (as it appears on disk) - the file is a radix tree whose leaves point * to blocks of these structures. 
*/ -struct v2_disk_dqblk { +struct v2r0_disk_dqblk { __le32 dqb_id; /* id this quota applies to */ __le32 dqb_ihardlimit; /* absolute limit on allocated inodes */ __le32 dqb_isoftlimit; /* preferred inode limit */ @@ -44,6 +44,19 @@ struct v2_disk_dqblk { __le64 dqb_itime; /* time limit for excessive inode use */ }; +struct v2r1_disk_dqblk { + __le32 dqb_id; /* id this quota applies to */ + __le32 dqb_pad; + __le64 dqb_ihardlimit; /* absolute limit on allocated inodes */ + __le64 dqb_isoftlimit; /* preferred inode limit */ + __le64 dqb_curinodes; /* current # allocated inodes */ + __le64 dqb_bhardlimit; /* absolute limit on disk space (in QUOTABLOCK_SIZE) */ + __le64 dqb_bsoftlimit; /* preferred limit on disk space (in QUOTABLOCK_SIZE) */ + __le64 dqb_curspace; /* current space occupied (in bytes) */ + __le64 dqb_btime; /* time limit for excessive disk use */ + __le64 dqb_itime; /* time limit for excessive inode use */ +}; + /* Header with type and version specific information */ struct v2_disk_dqinfo { __le32 dqi_bgrace; /* Time before block soft limit becomes hard limit */ diff --git a/include/linux/quota.h b/include/linux/quota.h index 7db3a005483f..e70e62194243 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -74,6 +74,7 @@ #define QFMT_VFS_OLD 1 #define QFMT_VFS_V0 2 #define QFMT_OCFS2 3 +#define QFMT_VFS_V1 4 /* Size of block in which space limits are passed through the quota * interface */ -- cgit v1.2.3 From cc9b2e9f6603190c009e5d2629ce8e3f99571346 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Thu, 26 Nov 2009 09:50:20 -0600 Subject: [SCSI] enclosure: fix oops while iterating enclosure_status array Based on patch originally by Jeff Mahoney enclosure_status is expected to be a NULL terminated array of strings but isn't actually NULL terminated. When writing an invalid value to /sys/class/enclosure/.../.../status, it goes off the end of the array and Oopses. Fix by making the assumption true and adding NULL at the end. Reported-by: Artur Wojcik Signed-off-by: James Bottomley --- drivers/misc/enclosure.c | 1 + include/linux/enclosure.h | 2 ++ 2 files changed, 3 insertions(+) (limited to 'include') diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c index e9eae4a78402..1eac626e710a 100644 --- a/drivers/misc/enclosure.c +++ b/drivers/misc/enclosure.c @@ -391,6 +391,7 @@ static const char *const enclosure_status [] = { [ENCLOSURE_STATUS_NOT_INSTALLED] = "not installed", [ENCLOSURE_STATUS_UNKNOWN] = "unknown", [ENCLOSURE_STATUS_UNAVAILABLE] = "unavailable", + [ENCLOSURE_STATUS_MAX] = NULL, }; static const char *const enclosure_type [] = { diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h index 90d1c2184112..9a33c5f7e126 100644 --- a/include/linux/enclosure.h +++ b/include/linux/enclosure.h @@ -42,6 +42,8 @@ enum enclosure_status { ENCLOSURE_STATUS_NOT_INSTALLED, ENCLOSURE_STATUS_UNKNOWN, ENCLOSURE_STATUS_UNAVAILABLE, + /* last element for counting purposes */ + ENCLOSURE_STATUS_MAX }; /* SFF-8485 activity light settings */ -- cgit v1.2.3 From 5d0961fd1f25e117f907f3af3aaa870637049252 Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Tue, 1 Dec 2009 17:36:21 +0200 Subject: [SCSI] libosd: Fix blk_put_request locking again So libosd has decided to sacrifice some code simplicity for the sake of a clean API. One of these things is the possibility for users to call osd_end_request, in any condition at any state. 
This opens up some problems with calling blk_put_request when out-side of the completion callback but calling __blk_put_request when detecting a from-completion state. The current hack was working just fine until exofs decided to operate on all devices in parallel and wait for the sum of the requests, before deallocating all osd-requests at once. There are two new possible cases 1. All request in a group are deallocated as part of the last request's async-done, request_queue is locked. 2. All request in a group where executed asynchronously, but de-allocation was delayed to after the async-done, in the context of another thread. Async execution but request_queue is not locked. The solution I chose was to separate the deallocation of the osd_request which has the information users need, from the deallocation of the internal(2) requests which impose the locking problem. The internal block-requests are freed unconditionally inside the async-done-callback, when we know the queue is always locked. If at osd_end_request time we still have a bock-request, then we know it did not come from within an async-done-callback and we can call the regular blk_put_request. The internal requests were used for carrying error information after execution. This information is now copied to osd_request members for later analysis by user code. The external API and behaviour was unchanged, except now it really supports what was previously advertised. Reported-by: Vineet Agarwal Signed-off-by: Boaz Harrosh Cc: Stable Tree Signed-off-by: James Bottomley --- drivers/scsi/osd/osd_initiator.c | 88 +++++++++++++++++++++------------------- include/scsi/osd_initiator.h | 5 ++- 2 files changed, 50 insertions(+), 43 deletions(-) (limited to 'include') diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index 950202a70bcf..24223473f573 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -432,30 +432,23 @@ static void _osd_free_seg(struct osd_request *or __unused, seg->alloc_size = 0; } -static void _put_request(struct request *rq , bool is_async) +static void _put_request(struct request *rq) { - if (is_async) { - WARN_ON(rq->bio); - __blk_put_request(rq->q, rq); - } else { - /* - * If osd_finalize_request() was called but the request was not - * executed through the block layer, then we must release BIOs. - * TODO: Keep error code in or->async_error. Need to audit all - * code paths. - */ - if (unlikely(rq->bio)) - blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq)); - else - blk_put_request(rq); - } + /* + * If osd_finalize_request() was called but the request was not + * executed through the block layer, then we must release BIOs. + * TODO: Keep error code in or->async_error. Need to audit all + * code paths. 
+ */ + if (unlikely(rq->bio)) + blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq)); + else + blk_put_request(rq); } void osd_end_request(struct osd_request *or) { struct request *rq = or->request; - /* IMPORTANT: make sure this agrees with osd_execute_request_async */ - bool is_async = (or->request->end_io_data == or); _osd_free_seg(or, &or->set_attr); _osd_free_seg(or, &or->enc_get_attr); @@ -463,20 +456,34 @@ void osd_end_request(struct osd_request *or) if (rq) { if (rq->next_rq) { - _put_request(rq->next_rq, is_async); + _put_request(rq->next_rq); rq->next_rq = NULL; } - _put_request(rq, is_async); + _put_request(rq); } _osd_request_free(or); } EXPORT_SYMBOL(osd_end_request); +static void _set_error_resid(struct osd_request *or, struct request *req, + int error) +{ + or->async_error = error; + or->req_errors = req->errors ? : error; + or->sense_len = req->sense_len; + if (or->out.req) + or->out.residual = or->out.req->resid_len; + if (or->in.req) + or->in.residual = or->in.req->resid_len; +} + int osd_execute_request(struct osd_request *or) { - return or->async_error = - blk_execute_rq(or->request->q, NULL, or->request, 0); + int error = blk_execute_rq(or->request->q, NULL, or->request, 0); + + _set_error_resid(or, or->request, error); + return error; } EXPORT_SYMBOL(osd_execute_request); @@ -484,15 +491,17 @@ static void osd_request_async_done(struct request *req, int error) { struct osd_request *or = req->end_io_data; - or->async_error = error; - - if (unlikely(error)) { - OSD_DEBUG("osd_request_async_done error recieved %d " - "errors 0x%x\n", error, req->errors); - if (!req->errors) /* don't miss out on this one */ - req->errors = error; + _set_error_resid(or, req, error); + if (req->next_rq) { + __blk_put_request(req->q, req->next_rq); + req->next_rq = NULL; } + __blk_put_request(req->q, req); + or->request = NULL; + or->in.req = NULL; + or->out.req = NULL; + if (or->async_done) or->async_done(or, or->async_private); else @@ -1489,21 +1498,18 @@ int osd_req_decode_sense_full(struct osd_request *or, #endif int ret; - if (likely(!or->request->errors)) { - osi->out_resid = 0; - osi->in_resid = 0; + if (likely(!or->req_errors)) return 0; - } osi = osi ? 
: &local_osi; memset(osi, 0, sizeof(*osi)); - ssdb = or->request->sense; - sense_len = or->request->sense_len; + ssdb = (typeof(ssdb))or->sense; + sense_len = or->sense_len; if ((sense_len < (int)sizeof(*ssdb) || !ssdb->sense_key)) { OSD_ERR("Block-layer returned error(0x%x) but " "sense_len(%u) || key(%d) is empty\n", - or->request->errors, sense_len, ssdb->sense_key); + or->req_errors, sense_len, ssdb->sense_key); goto analyze; } @@ -1525,7 +1531,7 @@ int osd_req_decode_sense_full(struct osd_request *or, "additional_code=0x%x async_error=%d errors=0x%x\n", osi->key, original_sense_len, sense_len, osi->additional_code, or->async_error, - or->request->errors); + or->req_errors); if (original_sense_len < sense_len) sense_len = original_sense_len; @@ -1695,10 +1701,10 @@ analyze: ret = -EIO; } - if (or->out.req) - osi->out_resid = or->out.req->resid_len ?: or->out.total_bytes; - if (or->in.req) - osi->in_resid = or->in.req->resid_len ?: or->in.total_bytes; + if (!or->out.residual) + or->out.residual = or->out.total_bytes; + if (!or->in.residual) + or->in.residual = or->in.total_bytes; return ret; } diff --git a/include/scsi/osd_initiator.h b/include/scsi/osd_initiator.h index 39d6d1097153..a8f370126632 100644 --- a/include/scsi/osd_initiator.h +++ b/include/scsi/osd_initiator.h @@ -142,6 +142,7 @@ struct osd_request { struct _osd_io_info { struct bio *bio; u64 total_bytes; + u64 residual; struct request *req; struct _osd_req_data_segment *last_seg; u8 *pad_buff; @@ -150,12 +151,14 @@ struct osd_request { gfp_t alloc_flags; unsigned timeout; unsigned retries; + unsigned sense_len; u8 sense[OSD_MAX_SENSE_LEN]; enum osd_attributes_mode attributes_mode; osd_req_done_fn *async_done; void *async_private; int async_error; + int req_errors; }; static inline bool osd_req_is_ver1(struct osd_request *or) @@ -297,8 +300,6 @@ enum osd_err_priority { }; struct osd_sense_info { - u64 out_resid; /* Zero on success otherwise out residual */ - u64 in_resid; /* Zero on success otherwise in residual */ enum osd_err_priority osd_err_pri; int key; /* one of enum scsi_sense_keys */ -- cgit v1.2.3 From b9889ed1ddeca5a3f3569c8de7354e9e97d803ae Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 10 Dec 2009 20:32:39 +0100 Subject: sched: Remove forced2_migrations stats This build warning: kernel/sched.c: In function 'set_task_cpu': kernel/sched.c:2070: warning: unused variable 'old_rq' Made me realize that the forced2_migrations stat looks pretty pointless (and a misnomer) - remove it. 
Cc: Peter Zijlstra Cc: Mike Galbraith LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/sched.h | 1 - kernel/sched.c | 6 ------ kernel/sched_debug.c | 2 -- 3 files changed, 9 deletions(-) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index ee9f200d12d3..87b89a827f0c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1174,7 +1174,6 @@ struct sched_entity { u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; - u64 nr_forced2_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; diff --git a/kernel/sched.c b/kernel/sched.c index 36cc05a76947..bc68037f3199 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -2067,7 +2067,6 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd) void set_task_cpu(struct task_struct *p, unsigned int new_cpu) { int old_cpu = task_cpu(p); - struct rq *old_rq = cpu_rq(old_cpu); struct cfs_rq *old_cfsrq = task_cfs_rq(p), *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu); @@ -2075,10 +2074,6 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) if (old_cpu != new_cpu) { p->se.nr_migrations++; -#ifdef CONFIG_SCHEDSTATS - if (task_hot(p, old_rq->clock, NULL)) - schedstat_inc(p, se.nr_forced2_migrations); -#endif perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0); } @@ -2521,7 +2516,6 @@ static void __sched_fork(struct task_struct *p) p->se.nr_failed_migrations_running = 0; p->se.nr_failed_migrations_hot = 0; p->se.nr_forced_migrations = 0; - p->se.nr_forced2_migrations = 0; p->se.nr_wakeups = 0; p->se.nr_wakeups_sync = 0; diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c index 0fc5287fe80f..5ae24fc65d75 100644 --- a/kernel/sched_debug.c +++ b/kernel/sched_debug.c @@ -432,7 +432,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m) P(se.nr_failed_migrations_running); P(se.nr_failed_migrations_hot); P(se.nr_forced_migrations); - P(se.nr_forced2_migrations); P(se.nr_wakeups); P(se.nr_wakeups_sync); P(se.nr_wakeups_migrate); @@ -508,7 +507,6 @@ void proc_sched_set_task(struct task_struct *p) p->se.nr_failed_migrations_running = 0; p->se.nr_failed_migrations_hot = 0; p->se.nr_forced_migrations = 0; - p->se.nr_forced2_migrations = 0; p->se.nr_wakeups = 0; p->se.nr_wakeups_sync = 0; p->se.nr_wakeups_migrate = 0; -- cgit v1.2.3 From 1f018c8d0d30bd6cc704104cfc50f2e5eee4e2dd Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Wed, 9 Dec 2009 06:53:39 -0500 Subject: asm-generic/gpio.h: add some forward decls of the device struct MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit After the recent commit a4177ee7f, attempting to include asm-generic/gpio.h in otherwise "slim" code results in ugly warnings like so: CC arch/blackfin/kernel/bfin_gpio.o In file included from arch/blackfin/include/asm/gpio.h:278, from arch/blackfin/kernel/bfin_gpio.c:15: include/asm-generic/gpio.h:193: warning: ‘struct device’ declared inside parameter list its scope is only this definition or declaration, which is probably not what you want So add simple C forward decls of the struct device to avoid these. 
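For illustration only, a minimal sketch of the C pattern involved (the function name below is made up; only the forward declaration matters):

struct device;	/* forward declaration: enough for passing a pointer around */

/*
 * Without the declaration above, the first "struct device" the compiler
 * sees is the one inside this parameter list, so it warns that the struct
 * is declared inside a parameter list and scopes the type to this one
 * prototype.
 */
static inline int example_export_link(struct device *dev, unsigned gpio)
{
	return 0;	/* stub body, illustration only */
}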
Signed-off-by: Mike Frysinger Signed-off-by: Arnd Bergmann --- include/asm-generic/gpio.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include') diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h index 66d6106a2067..204bed37e82d 100644 --- a/include/asm-generic/gpio.h +++ b/include/asm-generic/gpio.h @@ -28,6 +28,7 @@ static inline int gpio_is_valid(int number) return ((unsigned)number) < ARCH_NR_GPIOS; } +struct device; struct seq_file; struct module; @@ -181,6 +182,8 @@ static inline void gpio_set_value_cansleep(unsigned gpio, int value) #ifndef CONFIG_GPIO_SYSFS +struct device; + /* sysfs support is only available with gpiolib, where it's optional */ static inline int gpio_export(unsigned gpio, bool direction_may_change) -- cgit v1.2.3 From 9ab87644393d789b950ba984fa360f45c4df02e5 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 10 Dec 2009 22:10:31 +0100 Subject: asm-generic: add sys_accept4 to unistd.h Code review has shown that the generic version of unistd.h is missing a reference to the accept4 system call. This was not noticed before because most architectures handle this through sys_socketcall. Signed-off-by: Arnd Bergmann --- include/asm-generic/unistd.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h index 2869650fb083..c5545da5cb72 100644 --- a/include/asm-generic/unistd.h +++ b/include/asm-generic/unistd.h @@ -622,9 +622,11 @@ __SYSCALL(__NR_move_pages, sys_move_pages) __SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo) #define __NR_perf_event_open 241 __SYSCALL(__NR_perf_event_open, sys_perf_event_open) +#define __NR_accept4 242 +__SYSCALL(__NR_accept4, sys_accept4) #undef __NR_syscalls -#define __NR_syscalls 242 +#define __NR_syscalls 243 /* * All syscalls below here should go away really, -- cgit v1.2.3 From 3d7703870633dd454f6554e6d8d7f70441d0fd2d Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 10 Dec 2009 22:15:20 +0100 Subject: asm-generic: add sys_recvmmsg to unistd.h sys_recvmmsg was recently merged, add it to asm-generic as well so new architectures can use it. Signed-off-by: Arnd Bergmann --- include/asm-generic/unistd.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h index c5545da5cb72..30e38b17703c 100644 --- a/include/asm-generic/unistd.h +++ b/include/asm-generic/unistd.h @@ -624,9 +624,11 @@ __SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo) __SYSCALL(__NR_perf_event_open, sys_perf_event_open) #define __NR_accept4 242 __SYSCALL(__NR_accept4, sys_accept4) +#define __NR_recvmmsg 243 +__SYSCALL(__NR_recvmmsg, sys_recvmmsg) #undef __NR_syscalls -#define __NR_syscalls 243 +#define __NR_syscalls 244 /* * All syscalls below here should go away really, -- cgit v1.2.3 From 637e8a60a7aaf4ef7d46cfdf83bcfac9cf6f0fbd Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Sat, 14 Nov 2009 02:28:05 +0100 Subject: usbdevfs: move compat_ioctl handling to devio.c Half the compat_ioctl handling is in devio.c, the other half is in fs/compat_ioctl.c. This moves everything into one place for consistency. As a positive side-effect, push down the BKL into the ioctl methods. 
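As a hedged sketch of the resulting shape (names simplified, not the actual driver code): one worker handles both the native and the 32-bit commands, and thin wrappers take the BKL and translate the user pointer before calling it.

static long example_do_ioctl(struct file *file, unsigned int cmd,
			     void __user *p)
{
	/* native and USBDEVFS_*32 cases share a single switch here */
	return -ENOTTY;
}

static long example_unlocked_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	long ret;

	lock_kernel();
	ret = example_do_ioctl(file, cmd, (void __user *)arg);
	unlock_kernel();
	return ret;
}

#ifdef CONFIG_COMPAT
static long example_compat_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	long ret;

	lock_kernel();
	ret = example_do_ioctl(file, cmd, compat_ptr(arg));
	unlock_kernel();
	return ret;
}
#endif

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= example_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= example_compat_ioctl,
#endif
};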
Signed-off-by: Arnd Bergmann Acked-by: Greg Kroah-Hartman Cc: Alan Stern Cc: Oliver Neukum Cc: Alon Bar-Lev Cc: David Vrabel Cc: linux-usb@vger.kernel.org --- drivers/usb/core/devio.c | 110 +++++++++++++++++++++++++++++++++++++----- fs/compat_ioctl.c | 112 ------------------------------------------- include/linux/usbdevice_fs.h | 26 ++++++++++ 3 files changed, 125 insertions(+), 123 deletions(-) (limited to 'include') diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 181f78c84105..6e8bcdfd23b4 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -1388,6 +1388,46 @@ static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg) } #ifdef CONFIG_COMPAT +static int proc_control_compat(struct dev_state *ps, + struct usbdevfs_ctrltransfer32 __user *p32) +{ + struct usbdevfs_ctrltransfer __user *p; + __u32 udata; + p = compat_alloc_user_space(sizeof(*p)); + if (copy_in_user(p, p32, (sizeof(*p32) - sizeof(compat_caddr_t))) || + get_user(udata, &p32->data) || + put_user(compat_ptr(udata), &p->data)) + return -EFAULT; + return proc_control(ps, p); +} + +static int proc_bulk_compat(struct dev_state *ps, + struct usbdevfs_bulktransfer32 __user *p32) +{ + struct usbdevfs_bulktransfer __user *p; + compat_uint_t n; + compat_caddr_t addr; + + p = compat_alloc_user_space(sizeof(*p)); + + if (get_user(n, &p32->ep) || put_user(n, &p->ep) || + get_user(n, &p32->len) || put_user(n, &p->len) || + get_user(n, &p32->timeout) || put_user(n, &p->timeout) || + get_user(addr, &p32->data) || put_user(compat_ptr(addr), &p->data)) + return -EFAULT; + + return proc_bulk(ps, p); +} +static int proc_disconnectsignal_compat(struct dev_state *ps, void __user *arg) +{ + struct usbdevfs_disconnectsignal32 ds; + + if (copy_from_user(&ds, arg, sizeof(ds))) + return -EFAULT; + ps->discsignr = ds.signr; + ps->disccontext = compat_ptr(ds.context); + return 0; +} static int get_urb32(struct usbdevfs_urb *kurb, struct usbdevfs_urb32 __user *uurb) @@ -1482,6 +1522,7 @@ static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg) return processcompl_compat(as, (void __user * __user *)arg); } + #endif static int proc_disconnectsignal(struct dev_state *ps, void __user *arg) @@ -1648,12 +1689,12 @@ static int proc_release_port(struct dev_state *ps, void __user *arg) * are assuming that somehow the configuration has been prevented from * changing. But there's no mechanism to ensure that... 
*/ -static int usbdev_ioctl(struct inode *inode, struct file *file, - unsigned int cmd, unsigned long arg) +static long usbdev_do_ioctl(struct file *file, unsigned int cmd, + void __user *p) { struct dev_state *ps = file->private_data; + struct inode *inode = file->f_path.dentry->d_inode; struct usb_device *dev = ps->dev; - void __user *p = (void __user *)arg; int ret = -ENOTTY; if (!(file->f_mode & FMODE_WRITE)) @@ -1726,6 +1767,24 @@ static int usbdev_ioctl(struct inode *inode, struct file *file, break; #ifdef CONFIG_COMPAT + case USBDEVFS_CONTROL32: + snoop(&dev->dev, "%s: CONTROL32\n", __func__); + ret = proc_control_compat(ps, p); + if (ret >= 0) + inode->i_mtime = CURRENT_TIME; + break; + + case USBDEVFS_BULK32: + snoop(&dev->dev, "%s: BULK32\n", __func__); + ret = proc_bulk_compat(ps, p); + if (ret >= 0) + inode->i_mtime = CURRENT_TIME; + break; + + case USBDEVFS_DISCSIGNAL32: + snoop(&dev->dev, "%s: DISCSIGNAL32\n", __func__); + ret = proc_disconnectsignal_compat(ps, p); + break; case USBDEVFS_SUBMITURB32: snoop(&dev->dev, "%s: SUBMITURB32\n", __func__); @@ -1745,7 +1804,7 @@ static int usbdev_ioctl(struct inode *inode, struct file *file, break; case USBDEVFS_IOCTL32: - snoop(&dev->dev, "%s: IOCTL\n", __func__); + snoop(&dev->dev, "%s: IOCTL32\n", __func__); ret = proc_ioctl_compat(ps, ptr_to_compat(p)); break; #endif @@ -1801,6 +1860,32 @@ static int usbdev_ioctl(struct inode *inode, struct file *file, return ret; } +static long usbdev_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int ret; + + lock_kernel(); + ret = usbdev_do_ioctl(file, cmd, (void __user *)arg); + unlock_kernel(); + + return ret; +} + +#ifdef CONFIG_COMPAT +static long usbdev_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int ret; + + lock_kernel(); + ret = usbdev_do_ioctl(file, cmd, compat_ptr(arg)); + unlock_kernel(); + + return ret; +} +#endif + /* No kernel lock - fine */ static unsigned int usbdev_poll(struct file *file, struct poll_table_struct *wait) @@ -1817,13 +1902,16 @@ static unsigned int usbdev_poll(struct file *file, } const struct file_operations usbdev_file_operations = { - .owner = THIS_MODULE, - .llseek = usbdev_lseek, - .read = usbdev_read, - .poll = usbdev_poll, - .ioctl = usbdev_ioctl, - .open = usbdev_open, - .release = usbdev_release, + .owner = THIS_MODULE, + .llseek = usbdev_lseek, + .read = usbdev_read, + .poll = usbdev_poll, + .unlocked_ioctl = usbdev_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = usbdev_compat_ioctl, +#endif + .open = usbdev_open, + .release = usbdev_release, }; static void usbdev_remove(struct usb_device *udev) diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index 598763fd207f..278020d2449c 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -742,94 +742,6 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, return err; } -struct usbdevfs_ctrltransfer32 { - u8 bRequestType; - u8 bRequest; - u16 wValue; - u16 wIndex; - u16 wLength; - u32 timeout; /* in milliseconds */ - compat_caddr_t data; -}; - -#define USBDEVFS_CONTROL32 _IOWR('U', 0, struct usbdevfs_ctrltransfer32) - -static int do_usbdevfs_control(unsigned int fd, unsigned int cmd, - struct usbdevfs_ctrltransfer32 __user *p32) -{ - struct usbdevfs_ctrltransfer __user *p; - __u32 udata; - p = compat_alloc_user_space(sizeof(*p)); - if (copy_in_user(p, p32, (sizeof(*p32) - sizeof(compat_caddr_t))) || - get_user(udata, &p32->data) || - put_user(compat_ptr(udata), &p->data)) - return -EFAULT; - return sys_ioctl(fd, USBDEVFS_CONTROL, (unsigned long)p); 
-} - - -struct usbdevfs_bulktransfer32 { - compat_uint_t ep; - compat_uint_t len; - compat_uint_t timeout; /* in milliseconds */ - compat_caddr_t data; -}; - -#define USBDEVFS_BULK32 _IOWR('U', 2, struct usbdevfs_bulktransfer32) - -static int do_usbdevfs_bulk(unsigned int fd, unsigned int cmd, - struct usbdevfs_bulktransfer32 __user *p32) -{ - struct usbdevfs_bulktransfer __user *p; - compat_uint_t n; - compat_caddr_t addr; - - p = compat_alloc_user_space(sizeof(*p)); - - if (get_user(n, &p32->ep) || put_user(n, &p->ep) || - get_user(n, &p32->len) || put_user(n, &p->len) || - get_user(n, &p32->timeout) || put_user(n, &p->timeout) || - get_user(addr, &p32->data) || put_user(compat_ptr(addr), &p->data)) - return -EFAULT; - - return sys_ioctl(fd, USBDEVFS_BULK, (unsigned long)p); -} - - -/* - * USBDEVFS_SUBMITURB, USBDEVFS_REAPURB and USBDEVFS_REAPURBNDELAY - * are handled in usbdevfs core. -Christopher Li - */ - -struct usbdevfs_disconnectsignal32 { - compat_int_t signr; - compat_caddr_t context; -}; - -#define USBDEVFS_DISCSIGNAL32 _IOR('U', 14, struct usbdevfs_disconnectsignal32) - -static int do_usbdevfs_discsignal(unsigned int fd, unsigned int cmd, - struct usbdevfs_disconnectsignal32 __user *udis) -{ - struct usbdevfs_disconnectsignal kdis; - mm_segment_t old_fs; - u32 uctx; - int err; - - if (get_user(kdis.signr, &udis->signr) || - __get_user(uctx, &udis->context)) - return -EFAULT; - - kdis.context = compat_ptr(uctx); - - old_fs = get_fs(); - set_fs(KERNEL_DS); - err = sys_ioctl(fd, USBDEVFS_DISCSIGNAL, (unsigned long) &kdis); - set_fs(old_fs); - - return err; -} - /* * I2C layer ioctls */ @@ -1471,21 +1383,6 @@ COMPATIBLE_IOCTL(PCIIOC_CONTROLLER) COMPATIBLE_IOCTL(PCIIOC_MMAP_IS_IO) COMPATIBLE_IOCTL(PCIIOC_MMAP_IS_MEM) COMPATIBLE_IOCTL(PCIIOC_WRITE_COMBINE) -/* USB */ -COMPATIBLE_IOCTL(USBDEVFS_RESETEP) -COMPATIBLE_IOCTL(USBDEVFS_SETINTERFACE) -COMPATIBLE_IOCTL(USBDEVFS_SETCONFIGURATION) -COMPATIBLE_IOCTL(USBDEVFS_GETDRIVER) -COMPATIBLE_IOCTL(USBDEVFS_DISCARDURB) -COMPATIBLE_IOCTL(USBDEVFS_CLAIMINTERFACE) -COMPATIBLE_IOCTL(USBDEVFS_RELEASEINTERFACE) -COMPATIBLE_IOCTL(USBDEVFS_CONNECTINFO) -COMPATIBLE_IOCTL(USBDEVFS_HUB_PORTINFO) -COMPATIBLE_IOCTL(USBDEVFS_RESET) -COMPATIBLE_IOCTL(USBDEVFS_SUBMITURB32) -COMPATIBLE_IOCTL(USBDEVFS_REAPURB32) -COMPATIBLE_IOCTL(USBDEVFS_REAPURBNDELAY32) -COMPATIBLE_IOCTL(USBDEVFS_CLEAR_HALT) /* NBD */ COMPATIBLE_IOCTL(NBD_DO_IT) COMPATIBLE_IOCTL(NBD_CLEAR_SOCK) @@ -1604,8 +1501,6 @@ COMPATIBLE_IOCTL(TIOCSLTC) COMPATIBLE_IOCTL(TIOCSTART) COMPATIBLE_IOCTL(TIOCSTOP) #endif -/* Usbdevfs */ -COMPATIBLE_IOCTL(USBDEVFS_IOCTL32) /* fat 'r' ioctls. These are handled by fat with ->compat_ioctl, but we don't want warnings on other file systems. 
So declare @@ -1677,13 +1572,6 @@ static long do_ioctl_trans(int fd, unsigned int cmd, case TIOCGSERIAL: case TIOCSSERIAL: return serial_struct_ioctl(fd, cmd, argp); - /* Usbdevfs */ - case USBDEVFS_CONTROL32: - return do_usbdevfs_control(fd, cmd, argp); - case USBDEVFS_BULK32: - return do_usbdevfs_bulk(fd, cmd, argp); - case USBDEVFS_DISCSIGNAL32: - return do_usbdevfs_discsignal(fd, cmd, argp); /* i2c */ case I2C_FUNCS: return w_long(fd, cmd, argp); diff --git a/include/linux/usbdevice_fs.h b/include/linux/usbdevice_fs.h index b2a7d8ba6ee3..15591d2ea400 100644 --- a/include/linux/usbdevice_fs.h +++ b/include/linux/usbdevice_fs.h @@ -128,6 +128,29 @@ struct usbdevfs_hub_portinfo { #ifdef __KERNEL__ #ifdef CONFIG_COMPAT #include + +struct usbdevfs_ctrltransfer32 { + u8 bRequestType; + u8 bRequest; + u16 wValue; + u16 wIndex; + u16 wLength; + u32 timeout; /* in milliseconds */ + compat_caddr_t data; +}; + +struct usbdevfs_bulktransfer32 { + compat_uint_t ep; + compat_uint_t len; + compat_uint_t timeout; /* in milliseconds */ + compat_caddr_t data; +}; + +struct usbdevfs_disconnectsignal32 { + compat_int_t signr; + compat_caddr_t context; +}; + struct usbdevfs_urb32 { unsigned char type; unsigned char endpoint; @@ -153,7 +176,9 @@ struct usbdevfs_ioctl32 { #endif /* __KERNEL__ */ #define USBDEVFS_CONTROL _IOWR('U', 0, struct usbdevfs_ctrltransfer) +#define USBDEVFS_CONTROL32 _IOWR('U', 0, struct usbdevfs_ctrltransfer32) #define USBDEVFS_BULK _IOWR('U', 2, struct usbdevfs_bulktransfer) +#define USBDEVFS_BULK32 _IOWR('U', 2, struct usbdevfs_bulktransfer32) #define USBDEVFS_RESETEP _IOR('U', 3, unsigned int) #define USBDEVFS_SETINTERFACE _IOR('U', 4, struct usbdevfs_setinterface) #define USBDEVFS_SETCONFIGURATION _IOR('U', 5, unsigned int) @@ -166,6 +191,7 @@ struct usbdevfs_ioctl32 { #define USBDEVFS_REAPURBNDELAY _IOW('U', 13, void *) #define USBDEVFS_REAPURBNDELAY32 _IOW('U', 13, __u32) #define USBDEVFS_DISCSIGNAL _IOR('U', 14, struct usbdevfs_disconnectsignal) +#define USBDEVFS_DISCSIGNAL32 _IOR('U', 14, struct usbdevfs_disconnectsignal32) #define USBDEVFS_CLAIMINTERFACE _IOR('U', 15, unsigned int) #define USBDEVFS_RELEASEINTERFACE _IOR('U', 16, unsigned int) #define USBDEVFS_CONNECTINFO _IOW('U', 17, struct usbdevfs_connectinfo) -- cgit v1.2.3 From 87a8f240e9bcf025ba45e4563c842b0d59c5e8ef Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Thu, 10 Dec 2009 23:52:01 +0000 Subject: dm log: add flush callback fn Introduce a callback pointer from the log to dm-raid1 layer. Before some region is set as "in-sync", we need to flush hardware cache on all the disks. But the log module doesn't have access to the mirror_set structure. So it will use this callback. So far the callback is unused, it will be used in further patches. 
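A hedged sketch of how the hook is meant to be wired up once a caller exists (the callback and helper below are hypothetical; this patch itself still passes NULL):

/* Hypothetical consumer: flush the write caches of every mirror leg
 * before the log is allowed to mark a region in-sync. */
static int example_flush_callback(struct dm_target *ti)
{
	/* issue cache flushes to all mirror devices here */
	return 0;	/* non-zero would have to abort the in-sync transition */
}

static struct dm_dirty_log *example_create_log(struct dm_target *ti,
					       unsigned argc, char **argv)
{
	/* constructor side: hand the callback to the dirty log */
	return dm_dirty_log_create(argv[0], ti, example_flush_callback,
				   argc - 2, argv + 2);
}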
Signed-off-by: Mikulas Patocka Signed-off-by: Alasdair G Kergon --- drivers/md/dm-log.c | 6 ++++-- drivers/md/dm-raid1.c | 2 +- include/linux/dm-dirty-log.h | 6 ++++-- 3 files changed, 9 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c index d779f8c915dd..666a80e3602e 100644 --- a/drivers/md/dm-log.c +++ b/drivers/md/dm-log.c @@ -145,8 +145,9 @@ int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type) EXPORT_SYMBOL(dm_dirty_log_type_unregister); struct dm_dirty_log *dm_dirty_log_create(const char *type_name, - struct dm_target *ti, - unsigned int argc, char **argv) + struct dm_target *ti, + int (*flush_callback_fn)(struct dm_target *ti), + unsigned int argc, char **argv) { struct dm_dirty_log_type *type; struct dm_dirty_log *log; @@ -161,6 +162,7 @@ struct dm_dirty_log *dm_dirty_log_create(const char *type_name, return NULL; } + log->flush_callback_fn = flush_callback_fn; log->type = type; if (type->ctr(log, ti, argc, argv)) { kfree(log); diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 752a29e1855b..d44bc497dad9 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -896,7 +896,7 @@ static struct dm_dirty_log *create_dirty_log(struct dm_target *ti, return NULL; } - dl = dm_dirty_log_create(argv[0], ti, param_count, argv + 2); + dl = dm_dirty_log_create(argv[0], ti, NULL, param_count, argv + 2); if (!dl) { ti->error = "Error creating mirror dirty log"; return NULL; diff --git a/include/linux/dm-dirty-log.h b/include/linux/dm-dirty-log.h index 5e8b11d88f6f..7084503c3405 100644 --- a/include/linux/dm-dirty-log.h +++ b/include/linux/dm-dirty-log.h @@ -21,6 +21,7 @@ struct dm_dirty_log_type; struct dm_dirty_log { struct dm_dirty_log_type *type; + int (*flush_callback_fn)(struct dm_target *ti); void *context; }; @@ -136,8 +137,9 @@ int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type); * type->constructor/destructor() directly. */ struct dm_dirty_log *dm_dirty_log_create(const char *type_name, - struct dm_target *ti, - unsigned argc, char **argv); + struct dm_target *ti, + int (*flush_callback_fn)(struct dm_target *ti), + unsigned argc, char **argv); void dm_dirty_log_destroy(struct dm_dirty_log *log); #endif /* __KERNEL__ */ -- cgit v1.2.3 From c58098be979509a54021e837a47fcad08db31f94 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Thu, 10 Dec 2009 23:52:05 +0000 Subject: dm raid1: remove bio_endio from dm_rh_mark_nosync Move bio completion out of dm_rh_mark_nosync in preparation for the next patch. 
Signed-off-by: Mikulas Patocka Reviewed-by: Takahiro Yasui Tested-by: Takahiro Yasui Signed-off-by: Alasdair G Kergon --- drivers/md/dm-raid1.c | 3 ++- drivers/md/dm-region-hash.c | 6 +----- include/linux/dm-region-hash.h | 3 +-- 3 files changed, 4 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index d1a7f1a4789c..4f466ad75680 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -779,7 +779,8 @@ static void do_failures(struct mirror_set *ms, struct bio_list *failures) hold_bio(ms, bio); else { ms->in_sync = 0; - dm_rh_mark_nosync(ms->rh, bio, bio->bi_size, 0); + dm_rh_mark_nosync(ms->rh, bio); + bio_endio(bio, 0); } } } diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c index 00806b760ccd..5f19ceb6fe91 100644 --- a/drivers/md/dm-region-hash.c +++ b/drivers/md/dm-region-hash.c @@ -383,8 +383,6 @@ static void complete_resync_work(struct dm_region *reg, int success) /* dm_rh_mark_nosync * @ms * @bio - * @done - * @error * * The bio was written on some mirror(s) but failed on other mirror(s). * We can successfully endio the bio but should avoid the region being @@ -392,8 +390,7 @@ static void complete_resync_work(struct dm_region *reg, int success) * * This function is _not_ safe in interrupt context! */ -void dm_rh_mark_nosync(struct dm_region_hash *rh, - struct bio *bio, unsigned done, int error) +void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio) { unsigned long flags; struct dm_dirty_log *log = rh->log; @@ -430,7 +427,6 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, BUG_ON(!list_empty(®->list)); spin_unlock_irqrestore(&rh->region_lock, flags); - bio_endio(bio, error); if (recovering) complete_resync_work(reg, 0); } diff --git a/include/linux/dm-region-hash.h b/include/linux/dm-region-hash.h index a9e652a41373..9e2a7a401df5 100644 --- a/include/linux/dm-region-hash.h +++ b/include/linux/dm-region-hash.h @@ -78,8 +78,7 @@ void dm_rh_dec(struct dm_region_hash *rh, region_t region); /* Delay bios on regions. */ void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio); -void dm_rh_mark_nosync(struct dm_region_hash *rh, - struct bio *bio, unsigned done, int error); +void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio); /* * Region recovery control. -- cgit v1.2.3 From 7c6664114b7342a36f14a6836564e865669b3cea Mon Sep 17 00:00:00 2001 From: Alasdair G Kergon Date: Thu, 10 Dec 2009 23:52:19 +0000 Subject: dm: rename dm_get_table to dm_get_live_table Rename dm_get_table to dm_get_live_table. Signed-off-by: Alasdair G Kergon --- drivers/md/dm-ioctl.c | 14 +++++++------- drivers/md/dm.c | 24 ++++++++++++------------ include/linux/device-mapper.h | 2 +- 3 files changed, 20 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 99de0e4ce831..bf3d19a0fb78 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -235,7 +235,7 @@ static void __hash_remove(struct hash_cell *hc) dm_set_mdptr(hc->md, NULL); mutex_unlock(&dm_hash_cells_mutex); - table = dm_get_table(hc->md); + table = dm_get_live_table(hc->md); if (table) { dm_table_event(table); dm_table_put(table); @@ -338,7 +338,7 @@ static int dm_hash_rename(uint32_t cookie, const char *old, const char *new) /* * Wake up any dm event waiters. 
*/ - table = dm_get_table(hc->md); + table = dm_get_live_table(hc->md); if (table) { dm_table_event(table); dm_table_put(table); @@ -564,7 +564,7 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param) param->event_nr = dm_get_event_nr(md); - table = dm_get_table(md); + table = dm_get_live_table(md); if (table) { param->flags |= DM_ACTIVE_PRESENT_FLAG; param->target_count = dm_table_get_num_targets(table); @@ -993,7 +993,7 @@ static int dev_wait(struct dm_ioctl *param, size_t param_size) if (r) goto out; - table = dm_get_table(md); + table = dm_get_live_table(md); if (table) { retrieve_status(table, param, param_size); dm_table_put(table); @@ -1226,7 +1226,7 @@ static int table_deps(struct dm_ioctl *param, size_t param_size) if (r) goto out; - table = dm_get_table(md); + table = dm_get_live_table(md); if (table) { retrieve_deps(table, param, param_size); dm_table_put(table); @@ -1255,7 +1255,7 @@ static int table_status(struct dm_ioctl *param, size_t param_size) if (r) goto out; - table = dm_get_table(md); + table = dm_get_live_table(md); if (table) { retrieve_status(table, param, param_size); dm_table_put(table); @@ -1299,7 +1299,7 @@ static int target_message(struct dm_ioctl *param, size_t param_size) goto out; } - table = dm_get_table(md); + table = dm_get_live_table(md); if (!table) goto out_argv; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 3de8d6d5b0b8..233a2e9156a3 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -397,7 +397,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { struct mapped_device *md = bdev->bd_disk->private_data; - struct dm_table *map = dm_get_table(md); + struct dm_table *map = dm_get_live_table(md); struct dm_target *tgt; int r = -ENOTTY; @@ -528,7 +528,7 @@ static void queue_io(struct mapped_device *md, struct bio *bio) * function to access the md->map field, and make sure they call * dm_table_put() when finished. 
*/ -struct dm_table *dm_get_table(struct mapped_device *md) +struct dm_table *dm_get_live_table(struct mapped_device *md) { struct dm_table *t; unsigned long flags; @@ -1294,7 +1294,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio) struct clone_info ci; int error = 0; - ci.map = dm_get_table(md); + ci.map = dm_get_live_table(md); if (unlikely(!ci.map)) { if (!bio_rw_flagged(bio, BIO_RW_BARRIER)) bio_io_error(bio); @@ -1335,7 +1335,7 @@ static int dm_merge_bvec(struct request_queue *q, struct bio_vec *biovec) { struct mapped_device *md = q->queuedata; - struct dm_table *map = dm_get_table(md); + struct dm_table *map = dm_get_live_table(md); struct dm_target *ti; sector_t max_sectors; int max_size = 0; @@ -1638,7 +1638,7 @@ static void map_request(struct dm_target *ti, struct request *clone, static void dm_request_fn(struct request_queue *q) { struct mapped_device *md = q->queuedata; - struct dm_table *map = dm_get_table(md); + struct dm_table *map = dm_get_live_table(md); struct dm_target *ti; struct request *rq, *clone; @@ -1697,7 +1697,7 @@ static int dm_lld_busy(struct request_queue *q) { int r; struct mapped_device *md = q->queuedata; - struct dm_table *map = dm_get_table(md); + struct dm_table *map = dm_get_live_table(md); if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) r = 1; @@ -1712,7 +1712,7 @@ static int dm_lld_busy(struct request_queue *q) static void dm_unplug_all(struct request_queue *q) { struct mapped_device *md = q->queuedata; - struct dm_table *map = dm_get_table(md); + struct dm_table *map = dm_get_live_table(md); if (map) { if (dm_request_based(md)) @@ -1730,7 +1730,7 @@ static int dm_any_congested(void *congested_data, int bdi_bits) struct dm_table *map; if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { - map = dm_get_table(md); + map = dm_get_live_table(md); if (map) { /* * Request-based dm cares about only own queue for @@ -2166,7 +2166,7 @@ void dm_put(struct mapped_device *md) BUG_ON(test_bit(DMF_FREEING, &md->flags)); if (atomic_dec_and_lock(&md->holders, &_minor_lock)) { - map = dm_get_table(md); + map = dm_get_live_table(md); idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); set_bit(DMF_FREEING, &md->flags); @@ -2302,7 +2302,7 @@ static void dm_rq_set_flush_nr(struct request *clone, unsigned flush_nr) static int dm_rq_barrier(struct mapped_device *md) { int i, j; - struct dm_table *map = dm_get_table(md); + struct dm_table *map = dm_get_live_table(md); unsigned num_targets = dm_table_get_num_targets(map); struct dm_target *ti; struct request *clone; @@ -2453,7 +2453,7 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) goto out_unlock; } - map = dm_get_table(md); + map = dm_get_live_table(md); /* * DMF_NOFLUSH_SUSPENDING must be set before presuspend. @@ -2558,7 +2558,7 @@ int dm_resume(struct mapped_device *md) if (!dm_suspended(md)) goto out; - map = dm_get_table(md); + map = dm_get_live_table(md); if (!map || !dm_table_get_size(map)) goto out; diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index df7607e6dce8..fc16a351ef7a 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -276,7 +276,7 @@ void dm_table_unplug_all(struct dm_table *t); /* * Table reference counting. 
*/ -struct dm_table *dm_get_table(struct mapped_device *md); +struct dm_table *dm_get_live_table(struct mapped_device *md); void dm_table_get(struct dm_table *t); void dm_table_put(struct dm_table *t); -- cgit v1.2.3 From 1d0f3ce83200edc5d43723c77c62b09ad6560294 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Thu, 10 Dec 2009 23:52:22 +0000 Subject: dm ioctl: retrieve status from inactive table Add the flag DM_QUERY_INACTIVE_TABLE_FLAG to the ioctls to return information about the loaded-but-not-yet-active table instead of the live table. Prior to this patch it was impossible to obtain this information until the device had been 'resumed'. Userspace dmsetup and libdevmapper support the flag as of version 1.02.40. e.g. dmsetup info --inactive vg1-lv1 Signed-off-by: Mike Snitzer Signed-off-by: Alasdair G Kergon --- drivers/md/dm-ioctl.c | 70 +++++++++++++++++++++++++++++++++++++--------- include/linux/dm-ioctl.h | 13 ++++++--- 2 files changed, 67 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index d06dd39856f3..a3d20265ffc1 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -523,8 +523,6 @@ static int list_versions(struct dm_ioctl *param, size_t param_size) return 0; } - - static int check_name(const char *name) { if (strchr(name, '/')) { @@ -535,6 +533,40 @@ static int check_name(const char *name) return 0; } +/* + * On successful return, the caller must not attempt to acquire + * _hash_lock without first calling dm_table_put, because dm_table_destroy + * waits for this dm_table_put and could be called under this lock. + */ +static struct dm_table *dm_get_inactive_table(struct mapped_device *md) +{ + struct hash_cell *hc; + struct dm_table *table = NULL; + + down_read(&_hash_lock); + hc = dm_get_mdptr(md); + if (!hc || hc->md != md) { + DMWARN("device has been removed from the dev hash table."); + goto out; + } + + table = hc->new_map; + if (table) + dm_table_get(table); + +out: + up_read(&_hash_lock); + + return table; +} + +static struct dm_table *dm_get_live_or_inactive_table(struct mapped_device *md, + struct dm_ioctl *param) +{ + return (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) ? + dm_get_inactive_table(md) : dm_get_live_table(md); +} + /* * Fills in a dm_ioctl structure, ready for sending back to * userland.
@@ -559,18 +591,30 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param) */ param->open_count = dm_open_count(md); - if (get_disk_ro(disk)) - param->flags |= DM_READONLY_FLAG; - param->event_nr = dm_get_event_nr(md); + param->target_count = 0; table = dm_get_live_table(md); if (table) { - param->flags |= DM_ACTIVE_PRESENT_FLAG; - param->target_count = dm_table_get_num_targets(table); + if (!(param->flags & DM_QUERY_INACTIVE_TABLE_FLAG)) { + if (get_disk_ro(disk)) + param->flags |= DM_READONLY_FLAG; + param->target_count = dm_table_get_num_targets(table); + } dm_table_put(table); - } else - param->target_count = 0; + + param->flags |= DM_ACTIVE_PRESENT_FLAG; + } + + if (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) { + table = dm_get_inactive_table(md); + if (table) { + if (!(dm_table_get_mode(table) & FMODE_WRITE)) + param->flags |= DM_READONLY_FLAG; + param->target_count = dm_table_get_num_targets(table); + dm_table_put(table); + } + } return 0; } @@ -993,7 +1037,7 @@ static int dev_wait(struct dm_ioctl *param, size_t param_size) if (r) goto out; - table = dm_get_live_table(md); + table = dm_get_live_or_inactive_table(md, param); if (table) { retrieve_status(table, param, param_size); dm_table_put(table); @@ -1226,7 +1270,7 @@ static int table_deps(struct dm_ioctl *param, size_t param_size) if (r) goto out; - table = dm_get_live_table(md); + table = dm_get_live_or_inactive_table(md, param); if (table) { retrieve_deps(table, param, param_size); dm_table_put(table); @@ -1255,13 +1299,13 @@ static int table_status(struct dm_ioctl *param, size_t param_size) if (r) goto out; - table = dm_get_live_table(md); + table = dm_get_live_or_inactive_table(md, param); if (table) { retrieve_status(table, param, param_size); dm_table_put(table); } - out: +out: dm_put(md); return r; } diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h index 2ab84c83c31a..aa95508d2f95 100644 --- a/include/linux/dm-ioctl.h +++ b/include/linux/dm-ioctl.h @@ -1,6 +1,6 @@ /* * Copyright (C) 2001 - 2003 Sistina Software (UK) Limited. - * Copyright (C) 2004 - 2005 Red Hat, Inc. All rights reserved. + * Copyright (C) 2004 - 2009 Red Hat, Inc. All rights reserved. * * This file is released under the LGPL. */ @@ -266,9 +266,9 @@ enum { #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) #define DM_VERSION_MAJOR 4 -#define DM_VERSION_MINOR 15 +#define DM_VERSION_MINOR 16 #define DM_VERSION_PATCHLEVEL 0 -#define DM_VERSION_EXTRA "-ioctl (2009-04-01)" +#define DM_VERSION_EXTRA "-ioctl (2009-11-05)" /* Status bits */ #define DM_READONLY_FLAG (1 << 0) /* In/Out */ @@ -309,4 +309,11 @@ enum { */ #define DM_NOFLUSH_FLAG (1 << 11) /* In */ +/* + * If set, any table information returned will relate to the inactive + * table instead of the live one. Always check DM_INACTIVE_PRESENT_FLAG + * is set before using the data returned. + */ +#define DM_QUERY_INACTIVE_TABLE_FLAG (1 << 12) /* In */ + #endif /* _LINUX_DM_IOCTL_H */ -- cgit v1.2.3 From 042d2a9bcd80fe12d4b0871706aa9dd2231e8238 Mon Sep 17 00:00:00 2001 From: Alasdair G Kergon Date: Thu, 10 Dec 2009 23:52:24 +0000 Subject: dm: keep old table until after resume succeeded When swapping a new table into place, retain the old table until its replacement is in place. An old check for an empty table is removed because this is enforced in populate_table(). __unbind() becomes redundant when followed by __bind(). 
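Sketched for an illustrative caller (error handling abbreviated; the real user is do_resume() in the diff below), the new calling convention looks like this:

static int example_swap(struct mapped_device *md, struct dm_table *new_map)
{
	struct dm_table *old_map;

	old_map = dm_swap_table(md, new_map);
	if (IS_ERR(old_map)) {
		dm_table_destroy(new_map);	/* swap failed, new table unused */
		return PTR_ERR(old_map);
	}

	/* resume the device before letting go of the old table */

	if (old_map)
		dm_table_destroy(old_map);	/* only once the resume has succeeded */
	return 0;
}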
Signed-off-by: Alasdair G Kergon --- drivers/md/dm-ioctl.c | 10 ++++++---- drivers/md/dm.c | 34 +++++++++++++++++----------------- include/linux/device-mapper.h | 4 +++- 3 files changed, 26 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index a3d20265ffc1..63fd4de25bda 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -855,7 +855,7 @@ static int do_resume(struct dm_ioctl *param) unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG; struct hash_cell *hc; struct mapped_device *md; - struct dm_table *new_map; + struct dm_table *new_map, *old_map = NULL; down_write(&_hash_lock); @@ -884,11 +884,11 @@ static int do_resume(struct dm_ioctl *param) if (!dm_suspended(md)) dm_suspend(md, suspend_flags); - r = dm_swap_table(md, new_map); - if (r) { + old_map = dm_swap_table(md, new_map); + if (IS_ERR(old_map)) { dm_table_destroy(new_map); dm_put(md); - return r; + return PTR_ERR(old_map); } if (dm_table_get_mode(new_map) & FMODE_WRITE) @@ -900,6 +900,8 @@ static int do_resume(struct dm_ioctl *param) if (dm_suspended(md)) r = dm_resume(md); + if (old_map) + dm_table_destroy(old_map); if (!r) { dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 255b75c61544..c254c6cf69a1 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2033,9 +2033,13 @@ static void __set_size(struct mapped_device *md, sector_t size) mutex_unlock(&md->bdev->bd_inode->i_mutex); } -static int __bind(struct mapped_device *md, struct dm_table *t, - struct queue_limits *limits) +/* + * Returns old map, which caller must destroy. + */ +static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, + struct queue_limits *limits) { + struct dm_table *old_map; struct request_queue *q = md->queue; sector_t size; unsigned long flags; @@ -2050,11 +2054,6 @@ static int __bind(struct mapped_device *md, struct dm_table *t, __set_size(md, size); - if (!size) { - dm_table_destroy(t); - return 0; - } - dm_table_event_callback(t, event_callback, md); /* @@ -2070,11 +2069,12 @@ static int __bind(struct mapped_device *md, struct dm_table *t, __bind_mempools(md, t); write_lock_irqsave(&md->map_lock, flags); + old_map = md->map; md->map = t; dm_table_set_restrictions(t, q, limits); write_unlock_irqrestore(&md->map_lock, flags); - return 0; + return old_map; } /* @@ -2368,13 +2368,13 @@ static void dm_rq_barrier_work(struct work_struct *work) } /* - * Swap in a new table (destroying old one). + * Swap in a new table, returning the old one for the caller to destroy. 
*/ -int dm_swap_table(struct mapped_device *md, struct dm_table *table) +struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) { - struct dm_table *map; + struct dm_table *map = ERR_PTR(-EINVAL); struct queue_limits limits; - int r = -EINVAL; + int r; mutex_lock(&md->suspend_lock); @@ -2383,8 +2383,10 @@ int dm_swap_table(struct mapped_device *md, struct dm_table *table) goto out; r = dm_calculate_queue_limits(table, &limits); - if (r) + if (r) { + map = ERR_PTR(r); goto out; + } /* cannot change the device type, once a table is bound */ if (md->map && @@ -2393,13 +2395,11 @@ int dm_swap_table(struct mapped_device *md, struct dm_table *table) goto out; } - map = __unbind(md); - r = __bind(md, table, &limits); - dm_table_destroy(map); + map = __bind(md, table, &limits); out: mutex_unlock(&md->suspend_lock); - return r; + return map; } /* diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index fc16a351ef7a..b9c6c8ca11be 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -295,8 +295,10 @@ void dm_table_event(struct dm_table *t); /* * The device must be suspended before calling this method. + * Returns the previous table, which the caller must destroy. */ -int dm_swap_table(struct mapped_device *md, struct dm_table *t); +struct dm_table *dm_swap_table(struct mapped_device *md, + struct dm_table *t); /* * A wrapper around vmalloc. -- cgit v1.2.3 From 4f186f8bbfa92bf1a2b39f7a8674348bfdba9437 Mon Sep 17 00:00:00 2001 From: Kiyoshi Ueda Date: Thu, 10 Dec 2009 23:52:26 +0000 Subject: dm: rename dm_suspended to dm_suspended_md This patch renames dm_suspended() to dm_suspended_md() and keeps it internal to dm. No functional change. Signed-off-by: Kiyoshi Ueda Signed-off-by: Jun'ichi Nomura Cc: Mike Anderson Signed-off-by: Alasdair G Kergon --- drivers/md/dm-ioctl.c | 8 ++++---- drivers/md/dm-sysfs.c | 2 +- drivers/md/dm.c | 12 ++++++------ drivers/md/dm.h | 5 +++++ include/linux/device-mapper.h | 1 - 5 files changed, 16 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 63fd4de25bda..1d669322b27c 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -579,7 +579,7 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param) param->flags &= ~(DM_SUSPEND_FLAG | DM_READONLY_FLAG | DM_ACTIVE_PRESENT_FLAG); - if (dm_suspended(md)) + if (dm_suspended_md(md)) param->flags |= DM_SUSPEND_FLAG; param->dev = huge_encode_dev(disk_devt(disk)); @@ -839,7 +839,7 @@ static int do_suspend(struct dm_ioctl *param) if (param->flags & DM_NOFLUSH_FLAG) suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG; - if (!dm_suspended(md)) + if (!dm_suspended_md(md)) r = dm_suspend(md, suspend_flags); if (!r) @@ -881,7 +881,7 @@ static int do_resume(struct dm_ioctl *param) suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG; if (param->flags & DM_NOFLUSH_FLAG) suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG; - if (!dm_suspended(md)) + if (!dm_suspended_md(md)) dm_suspend(md, suspend_flags); old_map = dm_swap_table(md, new_map); @@ -897,7 +897,7 @@ static int do_resume(struct dm_ioctl *param) set_disk_ro(dm_disk(md), 1); } - if (dm_suspended(md)) + if (dm_suspended_md(md)) r = dm_resume(md); if (old_map) diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c index b000de38c99a..f53392df7b97 100644 --- a/drivers/md/dm-sysfs.c +++ b/drivers/md/dm-sysfs.c @@ -59,7 +59,7 @@ static ssize_t dm_attr_uuid_show(struct mapped_device *md, char *buf) static ssize_t dm_attr_suspended_show(struct 
mapped_device *md, char *buf) { - sprintf(buf, "%d\n", dm_suspended(md)); + sprintf(buf, "%d\n", dm_suspended_md(md)); return strlen(buf); } diff --git a/drivers/md/dm.c b/drivers/md/dm.c index f2b993c43359..e0702bf37935 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -415,7 +415,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, tgt = dm_table_get_target(map, 0); - if (dm_suspended(md)) { + if (dm_suspended_md(md)) { r = -EAGAIN; goto out; } @@ -2182,7 +2182,7 @@ void dm_put(struct mapped_device *md) MINOR(disk_devt(dm_disk(md)))); set_bit(DMF_FREEING, &md->flags); spin_unlock(&_minor_lock); - if (!dm_suspended(md)) { + if (!dm_suspended_md(md)) { dm_table_presuspend_targets(map); dm_table_postsuspend_targets(map); } @@ -2381,7 +2381,7 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) mutex_lock(&md->suspend_lock); /* device must be suspended */ - if (!dm_suspended(md)) + if (!dm_suspended_md(md)) goto out; r = dm_calculate_queue_limits(table, &limits); @@ -2461,7 +2461,7 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) mutex_lock(&md->suspend_lock); - if (dm_suspended(md)) { + if (dm_suspended_md(md)) { r = -EINVAL; goto out_unlock; } @@ -2568,7 +2568,7 @@ int dm_resume(struct mapped_device *md) struct dm_table *map = NULL; mutex_lock(&md->suspend_lock); - if (!dm_suspended(md)) + if (!dm_suspended_md(md)) goto out; map = dm_get_live_table(md); @@ -2679,7 +2679,7 @@ struct mapped_device *dm_get_from_kobject(struct kobject *kobj) return md; } -int dm_suspended(struct mapped_device *md) +int dm_suspended_md(struct mapped_device *md) { return test_bit(DMF_SUSPENDED, &md->flags); } diff --git a/drivers/md/dm.h b/drivers/md/dm.h index 604a5f2a2383..8dadaa5bc396 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -93,6 +93,11 @@ int dm_split_args(int *argc, char ***argvp, char *input); */ int dm_deleting_md(struct mapped_device *md); +/* + * Is this mapped_device suspended? + */ +int dm_suspended_md(struct mapped_device *md); + /* * The device-mapper can be driven through one of two interfaces; * ioctl or filesystem, depending which patch you have applied. diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index b9c6c8ca11be..fca0d31bbf2d 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -235,7 +235,6 @@ void dm_uevent_add(struct mapped_device *md, struct list_head *elist); const char *dm_device_name(struct mapped_device *md); int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid); struct gendisk *dm_disk(struct mapped_device *md); -int dm_suspended(struct mapped_device *md); int dm_noflush_suspending(struct dm_target *ti); union map_info *dm_get_mapinfo(struct bio *bio); union map_info *dm_get_rq_mapinfo(struct request *rq); -- cgit v1.2.3 From 64dbce580d5a7e89e8de20b91f80c7267cdad91d Mon Sep 17 00:00:00 2001 From: Kiyoshi Ueda Date: Thu, 10 Dec 2009 23:52:27 +0000 Subject: dm: export suspended state to targets This patch adds the exported dm_suspended() function so that targets can check whether or not they are suspended. 
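A minimal, hypothetical use from a target's point of view (the message handler name is made up):

static int example_target_message(struct dm_target *ti,
				  unsigned argc, char **argv)
{
	if (dm_suspended(ti))
		return -EAGAIN;	/* device is suspended, retry after resume */

	/* ... act on the message ... */
	return 0;
}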
Signed-off-by: Kiyoshi Ueda Signed-off-by: Jun'ichi Nomura Cc: Mike Anderson Signed-off-by: Alasdair G Kergon --- drivers/md/dm.c | 11 +++++++++++ include/linux/device-mapper.h | 1 + 2 files changed, 12 insertions(+) (limited to 'include') diff --git a/drivers/md/dm.c b/drivers/md/dm.c index e0702bf37935..3167480b532c 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2684,6 +2684,17 @@ int dm_suspended_md(struct mapped_device *md) return test_bit(DMF_SUSPENDED, &md->flags); } +int dm_suspended(struct dm_target *ti) +{ + struct mapped_device *md = dm_table_get_md(ti->table); + int r = dm_suspended_md(md); + + dm_put(md); + + return r; +} +EXPORT_SYMBOL_GPL(dm_suspended); + int dm_noflush_suspending(struct dm_target *ti) { struct mapped_device *md = dm_table_get_md(ti->table); diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index fca0d31bbf2d..d4c9c0b88adc 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -235,6 +235,7 @@ void dm_uevent_add(struct mapped_device *md, struct list_head *elist); const char *dm_device_name(struct mapped_device *md); int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid); struct gendisk *dm_disk(struct mapped_device *md); +int dm_suspended(struct dm_target *ti); int dm_noflush_suspending(struct dm_target *ti); union map_info *dm_get_mapinfo(struct bio *bio); union map_info *dm_get_rq_mapinfo(struct request *rq); -- cgit v1.2.3 From 09855acb1c2e3779f25317ec9a8ffe1b1784a4a8 Mon Sep 17 00:00:00 2001 From: Jerome Glisse Date: Thu, 10 Dec 2009 17:16:27 +0100 Subject: drm/ttm: Convert ttm_buffer_object_init to use ttm_placement Convert ttm_buffer_object_init to use struct ttm_placement and rename to ttm_bo_init for consistency with function naming. This allows more complex placements to be given at buffer creation. For instance, you can ask to allocate a bo in VRAM first, but give system memory as a second possible placement if there is not enough VRAM. It also allows creating a buffer in a specific range. Also rename ttm_buffer_object_validate to ttm_bo_validate.
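A hedged driver-side sketch of the kind of placement the message describes, VRAM preferred with system memory as the fallback (flag names are the stock TTM placement flags; alignment and error handling are simplified):

static uint32_t example_placements[] = {
	TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM,		/* preferred */
	TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM,	/* fallback when VRAM is full */
};

static struct ttm_placement example_placement = {
	.fpfn			= 0,	/* 0/0 means no page range restriction */
	.lpfn			= 0,
	.placement		= example_placements,
	.num_placement		= ARRAY_SIZE(example_placements),
	.busy_placement		= example_placements,
	.num_busy_placement	= ARRAY_SIZE(example_placements),
};

static int example_bo_init(struct ttm_bo_device *bdev,
			   struct ttm_buffer_object *bo,
			   unsigned long size, size_t acc_size)
{
	return ttm_bo_init(bdev, bo, size, ttm_bo_type_device,
			   &example_placement, 0, 0, false,
			   NULL, acc_size, NULL);
}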
Signed-off-by: Jerome Glisse Signed-off-by: Dave Airlie --- drivers/gpu/drm/ttm/ttm_bo.c | 130 +++++++++++++++++++------------------------ include/drm/ttm/ttm_bo_api.h | 63 ++++++++++----------- 2 files changed, 87 insertions(+), 106 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index a835b6fe42a1..fae5c158351c 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1002,9 +1002,9 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement, return -1; } -int ttm_buffer_object_validate(struct ttm_buffer_object *bo, - struct ttm_placement *placement, - bool interruptible, bool no_wait) +int ttm_bo_validate(struct ttm_buffer_object *bo, + struct ttm_placement *placement, + bool interruptible, bool no_wait) { int ret; @@ -1040,55 +1040,57 @@ int ttm_buffer_object_validate(struct ttm_buffer_object *bo, } return 0; } -EXPORT_SYMBOL(ttm_buffer_object_validate); +EXPORT_SYMBOL(ttm_bo_validate); -int -ttm_bo_check_placement(struct ttm_buffer_object *bo, - uint32_t set_flags, uint32_t clr_flags) +int ttm_bo_check_placement(struct ttm_buffer_object *bo, + struct ttm_placement *placement) { - uint32_t new_mask = set_flags | clr_flags; - - if ((bo->type == ttm_bo_type_user) && - (clr_flags & TTM_PL_FLAG_CACHED)) { - printk(KERN_ERR TTM_PFX - "User buffers require cache-coherent memory.\n"); - return -EINVAL; - } + int i; - if (!capable(CAP_SYS_ADMIN)) { - if (new_mask & TTM_PL_FLAG_NO_EVICT) { - printk(KERN_ERR TTM_PFX "Need to be root to modify" - " NO_EVICT status.\n"); + if (placement->fpfn || placement->lpfn) { + if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) { + printk(KERN_ERR TTM_PFX "Page number range to small " + "Need %lu pages, range is [%u, %u]\n", + bo->mem.num_pages, placement->fpfn, + placement->lpfn); return -EINVAL; } - - if ((clr_flags & bo->mem.placement & TTM_PL_MASK_MEMTYPE) && - (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { - printk(KERN_ERR TTM_PFX - "Incompatible memory specification" - " for NO_EVICT buffer.\n"); - return -EINVAL; + } + for (i = 0; i < placement->num_placement; i++) { + if (!capable(CAP_SYS_ADMIN)) { + if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) { + printk(KERN_ERR TTM_PFX "Need to be root to " + "modify NO_EVICT status.\n"); + return -EINVAL; + } + } + } + for (i = 0; i < placement->num_busy_placement; i++) { + if (!capable(CAP_SYS_ADMIN)) { + if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) { + printk(KERN_ERR TTM_PFX "Need to be root to " + "modify NO_EVICT status.\n"); + return -EINVAL; + } } } return 0; } -int ttm_buffer_object_init(struct ttm_bo_device *bdev, - struct ttm_buffer_object *bo, - unsigned long size, - enum ttm_bo_type type, - uint32_t flags, - uint32_t page_alignment, - unsigned long buffer_start, - bool interruptible, - struct file *persistant_swap_storage, - size_t acc_size, - void (*destroy) (struct ttm_buffer_object *)) +int ttm_bo_init(struct ttm_bo_device *bdev, + struct ttm_buffer_object *bo, + unsigned long size, + enum ttm_bo_type type, + struct ttm_placement *placement, + uint32_t page_alignment, + unsigned long buffer_start, + bool interruptible, + struct file *persistant_swap_storage, + size_t acc_size, + void (*destroy) (struct ttm_buffer_object *)) { - int i, c, ret = 0; + int ret = 0; unsigned long num_pages; - uint32_t placements[8]; - struct ttm_placement placement; size += buffer_start & ~PAGE_MASK; num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; @@ -1123,38 +1125,21 @@ int ttm_buffer_object_init(struct 
ttm_bo_device *bdev, bo->acc_size = acc_size; atomic_inc(&bo->glob->bo_count); - ret = ttm_bo_check_placement(bo, flags, 0ULL); + ret = ttm_bo_check_placement(bo, placement); if (unlikely(ret != 0)) goto out_err; - /* - * If no caching attributes are set, accept any form of caching. - */ - - if ((flags & TTM_PL_MASK_CACHING) == 0) - flags |= TTM_PL_MASK_CACHING; - /* * For ttm_bo_type_device buffers, allocate * address space from the device. */ - if (bo->type == ttm_bo_type_device) { ret = ttm_bo_setup_vm(bo); if (ret) goto out_err; } - placement.fpfn = 0; - placement.lpfn = 0; - for (i = 0, c = 0; i <= TTM_PL_PRIV5; i++) - if (flags & (1 << i)) - placements[c++] = (flags & ~TTM_PL_MASK_MEM) | (1 << i); - placement.placement = placements; - placement.num_placement = c; - placement.busy_placement = placements; - placement.num_busy_placement = c; - ret = ttm_buffer_object_validate(bo, &placement, interruptible, false); + ret = ttm_bo_validate(bo, placement, interruptible, false); if (ret) goto out_err; @@ -1167,7 +1152,7 @@ out_err: return ret; } -EXPORT_SYMBOL(ttm_buffer_object_init); +EXPORT_SYMBOL(ttm_bo_init); static inline size_t ttm_bo_size(struct ttm_bo_global *glob, unsigned long num_pages) @@ -1178,15 +1163,15 @@ static inline size_t ttm_bo_size(struct ttm_bo_global *glob, return glob->ttm_bo_size + 2 * page_array_size; } -int ttm_buffer_object_create(struct ttm_bo_device *bdev, - unsigned long size, - enum ttm_bo_type type, - uint32_t flags, - uint32_t page_alignment, - unsigned long buffer_start, - bool interruptible, - struct file *persistant_swap_storage, - struct ttm_buffer_object **p_bo) +int ttm_bo_create(struct ttm_bo_device *bdev, + unsigned long size, + enum ttm_bo_type type, + struct ttm_placement *placement, + uint32_t page_alignment, + unsigned long buffer_start, + bool interruptible, + struct file *persistant_swap_storage, + struct ttm_buffer_object **p_bo) { struct ttm_buffer_object *bo; struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; @@ -1205,10 +1190,9 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev, return -ENOMEM; } - ret = ttm_buffer_object_init(bdev, bo, size, type, flags, - page_alignment, buffer_start, - interruptible, - persistant_swap_storage, acc_size, NULL); + ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment, + buffer_start, interruptible, + persistant_swap_storage, acc_size, NULL); if (likely(ret == 0)) *p_bo = bo; diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 4fd498523ce3..81eb9f45883c 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -308,7 +308,7 @@ ttm_bo_reference(struct ttm_buffer_object *bo) extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, bool interruptible, bool no_wait); /** - * ttm_buffer_object_validate + * ttm_bo_validate * * @bo: The buffer object. * @placement: Proposed placement for the buffer object. @@ -323,9 +323,9 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, * -EBUSY if no_wait is true and buffer busy. * -ERESTARTSYS if interrupted by a signal. 
*/ -extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo, - struct ttm_placement *placement, - bool interruptible, bool no_wait); +extern int ttm_bo_validate(struct ttm_buffer_object *bo, + struct ttm_placement *placement, + bool interruptible, bool no_wait); /** * ttm_bo_unref @@ -362,7 +362,7 @@ ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait); extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo); /** - * ttm_buffer_object_init + * ttm_bo_init * * @bdev: Pointer to a ttm_bo_device struct. * @bo: Pointer to a ttm_buffer_object to be initialized. @@ -393,17 +393,17 @@ extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo); * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources. */ -extern int ttm_buffer_object_init(struct ttm_bo_device *bdev, - struct ttm_buffer_object *bo, - unsigned long size, - enum ttm_bo_type type, - uint32_t flags, - uint32_t page_alignment, - unsigned long buffer_start, - bool interrubtible, - struct file *persistant_swap_storage, - size_t acc_size, - void (*destroy) (struct ttm_buffer_object *)); +extern int ttm_bo_init(struct ttm_bo_device *bdev, + struct ttm_buffer_object *bo, + unsigned long size, + enum ttm_bo_type type, + struct ttm_placement *placement, + uint32_t page_alignment, + unsigned long buffer_start, + bool interrubtible, + struct file *persistant_swap_storage, + size_t acc_size, + void (*destroy) (struct ttm_buffer_object *)); /** * ttm_bo_synccpu_object_init * @@ -424,40 +424,37 @@ extern int ttm_buffer_object_init(struct ttm_bo_device *bdev, * GEM user interface. * @p_bo: On successful completion *p_bo points to the created object. * - * This function allocates a ttm_buffer_object, and then calls - * ttm_buffer_object_init on that object. - * The destroy function is set to kfree(). + * This function allocates a ttm_buffer_object, and then calls ttm_bo_init + * on that object. The destroy function is set to kfree(). * Returns * -ENOMEM: Out of memory. * -EINVAL: Invalid placement flags. * -ERESTARTSYS: Interrupted by signal while waiting for resources. */ -extern int ttm_buffer_object_create(struct ttm_bo_device *bdev, - unsigned long size, - enum ttm_bo_type type, - uint32_t flags, - uint32_t page_alignment, - unsigned long buffer_start, - bool interruptible, - struct file *persistant_swap_storage, - struct ttm_buffer_object **p_bo); +extern int ttm_bo_create(struct ttm_bo_device *bdev, + unsigned long size, + enum ttm_bo_type type, + struct ttm_placement *placement, + uint32_t page_alignment, + unsigned long buffer_start, + bool interruptible, + struct file *persistant_swap_storage, + struct ttm_buffer_object **p_bo); /** * ttm_bo_check_placement * - * @bo: the buffer object. - * @set_flags: placement flags to set. - * @clr_flags: placement flags to clear. + * @bo: the buffer object. + * @placement: placements * * Performs minimal validity checking on an intended change of * placement flags. * Returns * -EINVAL: Intended change is invalid or not allowed. */ - extern int ttm_bo_check_placement(struct ttm_buffer_object *bo, - uint32_t set_flags, uint32_t clr_flags); + struct ttm_placement *placement); /** * ttm_bo_init_mm -- cgit v1.2.3 From a88f6667078412e5eff37ead68a043ee0ec9f1da Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Thu, 10 Dec 2009 18:35:15 +0100 Subject: dmaengine: clarify the meaning of the DMA_CTRL_ACK flag DMA_CTRL_ACK's description applies to its clear state, not to its set state. 
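To make the distinction concrete, a hedged sketch of a client that sets the flag up front so the descriptor may be recycled as soon as it completes (this assumes an engine exposing device_prep_dma_memcpy; otherwise async_tx_ack() can acknowledge the descriptor later):

static struct dma_async_tx_descriptor *
example_prep_copy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
		  size_t len)
{
	/* DMA_CTRL_ACK set: no separate acknowledgement is needed before
	 * the provider may reuse this descriptor. */
	return chan->device->device_prep_dma_memcpy(chan, dst, src, len,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}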
Signed-off-by: Guennadi Liakhovetski Signed-off-by: Dan Williams --- include/linux/dmaengine.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 2b9f2ac7ed60..78784982b33e 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -74,7 +74,7 @@ enum dma_transaction_type { * control completion, and communicate status. * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of * this transaction - * @DMA_CTRL_ACK - the descriptor cannot be reused until the client + * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client * acknowledges receipt, i.e. has has a chance to establish any dependency * chains * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s) -- cgit v1.2.3 From 0f24f1287a86b198c1e4bd4ce45e8565e40ff804 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Fri, 11 Dec 2009 15:45:30 +0800 Subject: tracing, slab: Define kmem_cache_alloc_notrace ifdef CONFIG_TRACING Define kmem_cache_alloc_{,node}_notrace() if CONFIG_TRACING is enabled, otherwise perf-kmem will show wrong stats when CONFIG_KMEMTRACE is not set, because a kmalloc() memory allocation may be traced by both trace_kmalloc() and trace_kmem_cache_alloc(). Signed-off-by: Li Zefan Reviewed-by: Pekka Enberg Cc: Christoph Lameter Cc: Steven Rostedt Cc: Frederic Weisbecker Cc: linux-mm@kvack.org Cc: Eduard - Gabriel Munteanu LKML-Reference: <4B21F89A.7000801@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- include/linux/slab_def.h | 4 ++-- include/linux/slub_def.h | 4 ++-- mm/slab.c | 6 +++--- mm/slub.c | 4 ++-- 4 files changed, 9 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 850d057500de..ca6b2b317991 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -110,7 +110,7 @@ extern struct cache_sizes malloc_sizes[]; void *kmem_cache_alloc(struct kmem_cache *, gfp_t); void *__kmalloc(size_t size, gfp_t flags); -#ifdef CONFIG_KMEMTRACE +#ifdef CONFIG_TRACING extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags); extern size_t slab_buffer_size(struct kmem_cache *cachep); #else @@ -166,7 +166,7 @@ found: extern void *__kmalloc_node(size_t size, gfp_t flags, int node); extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); -#ifdef CONFIG_KMEMTRACE +#ifdef CONFIG_TRACING extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep, gfp_t flags, int nodeid); diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 5ad70a60fd74..1e14beb23f9b 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -217,7 +217,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size) void *kmem_cache_alloc(struct kmem_cache *, gfp_t); void *__kmalloc(size_t size, gfp_t flags); -#ifdef CONFIG_KMEMTRACE +#ifdef CONFIG_TRACING extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags); #else static __always_inline void * @@ -266,7 +266,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags) void *__kmalloc_node(size_t size, gfp_t flags, int node); void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); -#ifdef CONFIG_KMEMTRACE +#ifdef CONFIG_TRACING extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s, gfp_t gfpflags, int node); diff --git a/mm/slab.c b/mm/slab.c index 7dfa481c96ba..9733bb4009d9 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -490,7 +490,7 @@
static void **dbg_userword(struct kmem_cache *cachep, void *objp) #endif -#ifdef CONFIG_KMEMTRACE +#ifdef CONFIG_TRACING size_t slab_buffer_size(struct kmem_cache *cachep) { return cachep->buffer_size; @@ -3558,7 +3558,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) } EXPORT_SYMBOL(kmem_cache_alloc); -#ifdef CONFIG_KMEMTRACE +#ifdef CONFIG_TRACING void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags) { return __cache_alloc(cachep, flags, __builtin_return_address(0)); @@ -3621,7 +3621,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) } EXPORT_SYMBOL(kmem_cache_alloc_node); -#ifdef CONFIG_KMEMTRACE +#ifdef CONFIG_TRACING void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep, gfp_t flags, int nodeid) diff --git a/mm/slub.c b/mm/slub.c index 4996fc719552..4a89c3d231b2 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1754,7 +1754,7 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) } EXPORT_SYMBOL(kmem_cache_alloc); -#ifdef CONFIG_KMEMTRACE +#ifdef CONFIG_TRACING void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags) { return slab_alloc(s, gfpflags, -1, _RET_IP_); @@ -1775,7 +1775,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) EXPORT_SYMBOL(kmem_cache_alloc_node); #endif -#ifdef CONFIG_KMEMTRACE +#ifdef CONFIG_TRACING void *kmem_cache_alloc_node_notrace(struct kmem_cache *s, gfp_t gfpflags, int node) -- cgit v1.2.3 From 99ac64c826e62a07e5818cfde620be4d524f1edf Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 11 Dec 2009 11:58:42 +0100 Subject: hw-breakpoints: Handle bad modify_user_hw_breakpoint off-case return value While converting modify_user_hw_breakpoint() return value, we forgot to handle the off-case. It's not returning a pointer anymore. This solves the build warning reported by Stephen Rothwell against linux-next. Reported-by: Stephen Rothwell Signed-off-by: Frederic Weisbecker Cc: Prasad LKML-Reference: <1260529122-6260-1-git-send-regression-fweisbec@gmail.com> Signed-off-by: Ingo Molnar Cc: Prasad --- include/linux/hw_breakpoint.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h index 69f07a9f1277..41235c93e4e9 100644 --- a/include/linux/hw_breakpoint.h +++ b/include/linux/hw_breakpoint.h @@ -93,7 +93,7 @@ register_user_hw_breakpoint(struct perf_event_attr *attr, struct task_struct *tsk) { return NULL; } static inline int modify_user_hw_breakpoint(struct perf_event *bp, - struct perf_event_attr *attr) { return NULL; } + struct perf_event_attr *attr) { return -ENOSYS; } static inline struct perf_event * register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr, perf_overflow_handler_t triggered, -- cgit v1.2.3 From 6ee738610f41b59733f63718f0bdbcba7d3a3f12 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 11 Dec 2009 19:24:15 +1000 Subject: drm/nouveau: Add DRM driver for NVIDIA GPUs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds a drm/kms staging non-API stable driver for GPUs from NVIDIA. This driver is a KMS-based driver and requires a compatible nouveau userspace libdrm and nouveau X.org driver. This driver requires firmware files not available in this kernel tree, interested parties can find them via the nouveau project git archive. This driver is reverse engineered, and is in no way supported by nVidia. 
Support for nearly the complete range of nvidia hw from nv04->g80 (nv50) is available, and the kms driver should support driving nearly all output types (displayport is under development still) along with supporting suspend/resume. This work is all from the upstream nouveau project found at nouveau.freedesktop.org. The original authors list from nouveau git tree is: Anssi Hannula Ben Skeggs Francisco Jerez Maarten Maathuis Marcin Kościelnicki Matthew Garrett Matt Parnell Patrice Mandin Pekka Paalanen Xavier Chantry along with project founder Stephane Marchesin Signed-off-by: Ben Skeggs Signed-off-by: Dave Airlie --- drivers/gpu/drm/Makefile | 2 + drivers/gpu/drm/i2c/Makefile | 4 + drivers/gpu/drm/i2c/ch7006_drv.c | 531 +++ drivers/gpu/drm/i2c/ch7006_mode.c | 473 +++ drivers/gpu/drm/i2c/ch7006_priv.h | 344 ++ drivers/gpu/drm/nouveau/Kconfig | 44 + drivers/gpu/drm/nouveau/Makefile | 31 + drivers/gpu/drm/nouveau/nouveau_acpi.c | 125 + drivers/gpu/drm/nouveau/nouveau_backlight.c | 155 + drivers/gpu/drm/nouveau/nouveau_bios.c | 6095 +++++++++++++++++++++++++++ drivers/gpu/drm/nouveau/nouveau_bios.h | 289 ++ drivers/gpu/drm/nouveau/nouveau_bo.c | 671 +++ drivers/gpu/drm/nouveau/nouveau_calc.c | 478 +++ drivers/gpu/drm/nouveau/nouveau_channel.c | 468 ++ drivers/gpu/drm/nouveau/nouveau_connector.c | 824 ++++ drivers/gpu/drm/nouveau/nouveau_connector.h | 54 + drivers/gpu/drm/nouveau/nouveau_crtc.h | 95 + drivers/gpu/drm/nouveau/nouveau_debugfs.c | 155 + drivers/gpu/drm/nouveau/nouveau_display.c | 115 + drivers/gpu/drm/nouveau/nouveau_dma.c | 206 + drivers/gpu/drm/nouveau/nouveau_dma.h | 157 + drivers/gpu/drm/nouveau/nouveau_dp.c | 569 +++ drivers/gpu/drm/nouveau/nouveau_drv.c | 405 ++ drivers/gpu/drm/nouveau/nouveau_drv.h | 1286 ++++++ drivers/gpu/drm/nouveau/nouveau_encoder.h | 91 + drivers/gpu/drm/nouveau/nouveau_fb.h | 47 + drivers/gpu/drm/nouveau/nouveau_fbcon.c | 380 ++ drivers/gpu/drm/nouveau/nouveau_fbcon.h | 47 + drivers/gpu/drm/nouveau/nouveau_fence.c | 262 ++ drivers/gpu/drm/nouveau/nouveau_gem.c | 992 +++++ drivers/gpu/drm/nouveau/nouveau_hw.c | 1080 +++++ drivers/gpu/drm/nouveau/nouveau_hw.h | 455 ++ drivers/gpu/drm/nouveau/nouveau_i2c.c | 269 ++ drivers/gpu/drm/nouveau/nouveau_i2c.h | 52 + drivers/gpu/drm/nouveau/nouveau_ioc32.c | 72 + drivers/gpu/drm/nouveau/nouveau_irq.c | 702 +++ drivers/gpu/drm/nouveau/nouveau_mem.c | 568 +++ drivers/gpu/drm/nouveau/nouveau_notifier.c | 196 + drivers/gpu/drm/nouveau/nouveau_object.c | 1294 ++++++ drivers/gpu/drm/nouveau/nouveau_reg.h | 836 ++++ drivers/gpu/drm/nouveau/nouveau_sgdma.c | 321 ++ drivers/gpu/drm/nouveau/nouveau_state.c | 811 ++++ drivers/gpu/drm/nouveau/nouveau_ttm.c | 131 + drivers/gpu/drm/nouveau/nv04_crtc.c | 1002 +++++ drivers/gpu/drm/nouveau/nv04_cursor.c | 70 + drivers/gpu/drm/nouveau/nv04_dac.c | 528 +++ drivers/gpu/drm/nouveau/nv04_dfp.c | 621 +++ drivers/gpu/drm/nouveau/nv04_display.c | 288 ++ drivers/gpu/drm/nouveau/nv04_fb.c | 21 + drivers/gpu/drm/nouveau/nv04_fbcon.c | 316 ++ drivers/gpu/drm/nouveau/nv04_fifo.c | 271 ++ drivers/gpu/drm/nouveau/nv04_graph.c | 579 +++ drivers/gpu/drm/nouveau/nv04_instmem.c | 208 + drivers/gpu/drm/nouveau/nv04_mc.c | 20 + drivers/gpu/drm/nouveau/nv04_timer.c | 51 + drivers/gpu/drm/nouveau/nv04_tv.c | 305 ++ drivers/gpu/drm/nouveau/nv10_fb.c | 24 + drivers/gpu/drm/nouveau/nv10_fifo.c | 260 ++ drivers/gpu/drm/nouveau/nv10_graph.c | 892 ++++ drivers/gpu/drm/nouveau/nv17_gpio.c | 92 + drivers/gpu/drm/nouveau/nv17_tv.c | 681 +++ drivers/gpu/drm/nouveau/nv17_tv.h | 156 +
drivers/gpu/drm/nouveau/nv17_tv_modes.c | 583 +++ drivers/gpu/drm/nouveau/nv20_graph.c | 780 ++++ drivers/gpu/drm/nouveau/nv40_fb.c | 62 + drivers/gpu/drm/nouveau/nv40_fifo.c | 314 ++ drivers/gpu/drm/nouveau/nv40_graph.c | 560 +++ drivers/gpu/drm/nouveau/nv40_mc.c | 38 + drivers/gpu/drm/nouveau/nv50_crtc.c | 769 ++++ drivers/gpu/drm/nouveau/nv50_cursor.c | 156 + drivers/gpu/drm/nouveau/nv50_dac.c | 304 ++ drivers/gpu/drm/nouveau/nv50_display.c | 1015 +++++ drivers/gpu/drm/nouveau/nv50_display.h | 46 + drivers/gpu/drm/nouveau/nv50_evo.h | 113 + drivers/gpu/drm/nouveau/nv50_fbcon.c | 273 ++ drivers/gpu/drm/nouveau/nv50_fifo.c | 494 +++ drivers/gpu/drm/nouveau/nv50_graph.c | 385 ++ drivers/gpu/drm/nouveau/nv50_instmem.c | 509 +++ drivers/gpu/drm/nouveau/nv50_mc.c | 40 + drivers/gpu/drm/nouveau/nv50_sor.c | 309 ++ drivers/gpu/drm/nouveau/nvreg.h | 535 +++ drivers/staging/Kconfig | 2 + include/drm/Kbuild | 1 + include/drm/i2c/ch7006.h | 86 + include/drm/nouveau_drm.h | 220 + 85 files changed, 36161 insertions(+) create mode 100644 drivers/gpu/drm/i2c/Makefile create mode 100644 drivers/gpu/drm/i2c/ch7006_drv.c create mode 100644 drivers/gpu/drm/i2c/ch7006_mode.c create mode 100644 drivers/gpu/drm/i2c/ch7006_priv.h create mode 100644 drivers/gpu/drm/nouveau/Kconfig create mode 100644 drivers/gpu/drm/nouveau/Makefile create mode 100644 drivers/gpu/drm/nouveau/nouveau_acpi.c create mode 100644 drivers/gpu/drm/nouveau/nouveau_backlight.c create mode 100644 drivers/gpu/drm/nouveau/nouveau_bios.c create mode 100644 drivers/gpu/drm/nouveau/nouveau_bios.h create mode 100644 drivers/gpu/drm/nouveau/nouveau_bo.c create mode 100644 drivers/gpu/drm/nouveau/nouveau_calc.c create mode 100644 drivers/gpu/drm/nouveau/nouveau_channel.c create mode 100644 drivers/gpu/drm/nouveau/nouveau_connector.c create mode 100644 drivers/gpu/drm/nouveau/nouveau_connector.h create mode 100644 drivers/gpu/drm/nouveau/nouveau_crtc.h create mode 100644 drivers/gpu/drm/nouveau/nouveau_debugfs.c create mode 100644 drivers/gpu/drm/nouveau/nouveau_display.c create mode 100644 drivers/gpu/drm/nouveau/nouveau_dma.c create mode 100644 drivers/gpu/drm/nouveau/nouveau_dma.h create mode 100644 drivers/gpu/drm/nouveau/nouveau_dp.c create mode 100644 drivers/gpu/drm/nouveau/nouveau_drv.c create mode 100644 drivers/gpu/drm/nouveau/nouveau_drv.h create mode 100644 drivers/gpu/drm/nouveau/nouveau_encoder.h create mode 100644 drivers/gpu/drm/nouveau/nouveau_fb.h create mode 100644 drivers/gpu/drm/nouveau/nouveau_fbcon.c create mode 100644 drivers/gpu/drm/nouveau/nouveau_fbcon.h create mode 100644 drivers/gpu/drm/nouveau/nouveau_fence.c create mode 100644 drivers/gpu/drm/nouveau/nouveau_gem.c create mode 100644 drivers/gpu/drm/nouveau/nouveau_hw.c create mode 100644 drivers/gpu/drm/nouveau/nouveau_hw.h create mode 100644 drivers/gpu/drm/nouveau/nouveau_i2c.c create mode 100644 drivers/gpu/drm/nouveau/nouveau_i2c.h create mode 100644 drivers/gpu/drm/nouveau/nouveau_ioc32.c create mode 100644 drivers/gpu/drm/nouveau/nouveau_irq.c create mode 100644 drivers/gpu/drm/nouveau/nouveau_mem.c create mode 100644 drivers/gpu/drm/nouveau/nouveau_notifier.c create mode 100644 drivers/gpu/drm/nouveau/nouveau_object.c create mode 100644 drivers/gpu/drm/nouveau/nouveau_reg.h create mode 100644 drivers/gpu/drm/nouveau/nouveau_sgdma.c create mode 100644 drivers/gpu/drm/nouveau/nouveau_state.c create mode 100644 drivers/gpu/drm/nouveau/nouveau_ttm.c create mode 100644 drivers/gpu/drm/nouveau/nv04_crtc.c create mode 100644 drivers/gpu/drm/nouveau/nv04_cursor.c 
create mode 100644 drivers/gpu/drm/nouveau/nv04_dac.c create mode 100644 drivers/gpu/drm/nouveau/nv04_dfp.c create mode 100644 drivers/gpu/drm/nouveau/nv04_display.c create mode 100644 drivers/gpu/drm/nouveau/nv04_fb.c create mode 100644 drivers/gpu/drm/nouveau/nv04_fbcon.c create mode 100644 drivers/gpu/drm/nouveau/nv04_fifo.c create mode 100644 drivers/gpu/drm/nouveau/nv04_graph.c create mode 100644 drivers/gpu/drm/nouveau/nv04_instmem.c create mode 100644 drivers/gpu/drm/nouveau/nv04_mc.c create mode 100644 drivers/gpu/drm/nouveau/nv04_timer.c create mode 100644 drivers/gpu/drm/nouveau/nv04_tv.c create mode 100644 drivers/gpu/drm/nouveau/nv10_fb.c create mode 100644 drivers/gpu/drm/nouveau/nv10_fifo.c create mode 100644 drivers/gpu/drm/nouveau/nv10_graph.c create mode 100644 drivers/gpu/drm/nouveau/nv17_gpio.c create mode 100644 drivers/gpu/drm/nouveau/nv17_tv.c create mode 100644 drivers/gpu/drm/nouveau/nv17_tv.h create mode 100644 drivers/gpu/drm/nouveau/nv17_tv_modes.c create mode 100644 drivers/gpu/drm/nouveau/nv20_graph.c create mode 100644 drivers/gpu/drm/nouveau/nv40_fb.c create mode 100644 drivers/gpu/drm/nouveau/nv40_fifo.c create mode 100644 drivers/gpu/drm/nouveau/nv40_graph.c create mode 100644 drivers/gpu/drm/nouveau/nv40_mc.c create mode 100644 drivers/gpu/drm/nouveau/nv50_crtc.c create mode 100644 drivers/gpu/drm/nouveau/nv50_cursor.c create mode 100644 drivers/gpu/drm/nouveau/nv50_dac.c create mode 100644 drivers/gpu/drm/nouveau/nv50_display.c create mode 100644 drivers/gpu/drm/nouveau/nv50_display.h create mode 100644 drivers/gpu/drm/nouveau/nv50_evo.h create mode 100644 drivers/gpu/drm/nouveau/nv50_fbcon.c create mode 100644 drivers/gpu/drm/nouveau/nv50_fifo.c create mode 100644 drivers/gpu/drm/nouveau/nv50_graph.c create mode 100644 drivers/gpu/drm/nouveau/nv50_instmem.c create mode 100644 drivers/gpu/drm/nouveau/nv50_mc.c create mode 100644 drivers/gpu/drm/nouveau/nv50_sor.c create mode 100644 drivers/gpu/drm/nouveau/nvreg.h create mode 100644 include/drm/i2c/ch7006.h create mode 100644 include/drm/nouveau_drm.h (limited to 'include') diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 91567ac806f1..470ef6779db3 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -31,3 +31,5 @@ obj-$(CONFIG_DRM_I915) += i915/ obj-$(CONFIG_DRM_SIS) += sis/ obj-$(CONFIG_DRM_SAVAGE)+= savage/ obj-$(CONFIG_DRM_VIA) +=via/ +obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/ +obj-y += i2c/ diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile new file mode 100644 index 000000000000..6d2abaf35ba2 --- /dev/null +++ b/drivers/gpu/drm/i2c/Makefile @@ -0,0 +1,4 @@ +ccflags-y := -Iinclude/drm + +ch7006-y := ch7006_drv.o ch7006_mode.o +obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c new file mode 100644 index 000000000000..9422a74c8b54 --- /dev/null +++ b/drivers/gpu/drm/i2c/ch7006_drv.c @@ -0,0 +1,531 @@ +/* + * Copyright (C) 2009 Francisco Jerez. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "ch7006_priv.h" + +/* DRM encoder functions */ + +static void ch7006_encoder_set_config(struct drm_encoder *encoder, + void *params) +{ + struct ch7006_priv *priv = to_ch7006_priv(encoder); + + priv->params = params; +} + +static void ch7006_encoder_destroy(struct drm_encoder *encoder) +{ + struct ch7006_priv *priv = to_ch7006_priv(encoder); + + drm_property_destroy(encoder->dev, priv->scale_property); + + kfree(priv); + to_encoder_slave(encoder)->slave_priv = NULL; + + drm_i2c_encoder_destroy(encoder); +} + +static void ch7006_encoder_dpms(struct drm_encoder *encoder, int mode) +{ + struct i2c_client *client = drm_i2c_encoder_get_client(encoder); + struct ch7006_priv *priv = to_ch7006_priv(encoder); + struct ch7006_state *state = &priv->state; + + ch7006_dbg(client, "\n"); + + if (mode == priv->last_dpms) + return; + priv->last_dpms = mode; + + ch7006_setup_power_state(encoder); + + ch7006_load_reg(client, state, CH7006_POWER); +} + +static void ch7006_encoder_save(struct drm_encoder *encoder) +{ + struct i2c_client *client = drm_i2c_encoder_get_client(encoder); + struct ch7006_priv *priv = to_ch7006_priv(encoder); + + ch7006_dbg(client, "\n"); + + ch7006_state_save(client, &priv->saved_state); +} + +static void ch7006_encoder_restore(struct drm_encoder *encoder) +{ + struct i2c_client *client = drm_i2c_encoder_get_client(encoder); + struct ch7006_priv *priv = to_ch7006_priv(encoder); + + ch7006_dbg(client, "\n"); + + ch7006_state_load(client, &priv->saved_state); +} + +static bool ch7006_encoder_mode_fixup(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct ch7006_priv *priv = to_ch7006_priv(encoder); + + /* The ch7006 is painfully picky with the input timings so no + * custom modes for now... 
*/ + + priv->mode = ch7006_lookup_mode(encoder, mode); + + return !!priv->mode; +} + +static int ch7006_encoder_mode_valid(struct drm_encoder *encoder, + struct drm_display_mode *mode) +{ + if (ch7006_lookup_mode(encoder, mode)) + return MODE_OK; + else + return MODE_BAD; +} + +static void ch7006_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *drm_mode, + struct drm_display_mode *adjusted_mode) +{ + struct i2c_client *client = drm_i2c_encoder_get_client(encoder); + struct ch7006_priv *priv = to_ch7006_priv(encoder); + struct ch7006_encoder_params *params = priv->params; + struct ch7006_state *state = &priv->state; + uint8_t *regs = state->regs; + struct ch7006_mode *mode = priv->mode; + struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm]; + int start_active; + + ch7006_dbg(client, "\n"); + + regs[CH7006_DISPMODE] = norm->dispmode | mode->dispmode; + regs[CH7006_BWIDTH] = 0; + regs[CH7006_INPUT_FORMAT] = bitf(CH7006_INPUT_FORMAT_FORMAT, + params->input_format); + + regs[CH7006_CLKMODE] = CH7006_CLKMODE_SUBC_LOCK + | bitf(CH7006_CLKMODE_XCM, params->xcm) + | bitf(CH7006_CLKMODE_PCM, params->pcm); + if (params->clock_mode) + regs[CH7006_CLKMODE] |= CH7006_CLKMODE_MASTER; + if (params->clock_edge) + regs[CH7006_CLKMODE] |= CH7006_CLKMODE_POS_EDGE; + + start_active = (drm_mode->htotal & ~0x7) - (drm_mode->hsync_start & ~0x7); + regs[CH7006_POV] = bitf(CH7006_POV_START_ACTIVE_8, start_active); + regs[CH7006_START_ACTIVE] = bitf(CH7006_START_ACTIVE_0, start_active); + + regs[CH7006_INPUT_SYNC] = 0; + if (params->sync_direction) + regs[CH7006_INPUT_SYNC] |= CH7006_INPUT_SYNC_OUTPUT; + if (params->sync_encoding) + regs[CH7006_INPUT_SYNC] |= CH7006_INPUT_SYNC_EMBEDDED; + if (drm_mode->flags & DRM_MODE_FLAG_PVSYNC) + regs[CH7006_INPUT_SYNC] |= CH7006_INPUT_SYNC_PVSYNC; + if (drm_mode->flags & DRM_MODE_FLAG_PHSYNC) + regs[CH7006_INPUT_SYNC] |= CH7006_INPUT_SYNC_PHSYNC; + + regs[CH7006_DETECT] = 0; + regs[CH7006_BCLKOUT] = 0; + + regs[CH7006_SUBC_INC3] = 0; + if (params->pout_level) + regs[CH7006_SUBC_INC3] |= CH7006_SUBC_INC3_POUT_3_3V; + + regs[CH7006_SUBC_INC4] = 0; + if (params->active_detect) + regs[CH7006_SUBC_INC4] |= CH7006_SUBC_INC4_DS_INPUT; + + regs[CH7006_PLL_CONTROL] = priv->saved_state.regs[CH7006_PLL_CONTROL]; + + ch7006_setup_levels(encoder); + ch7006_setup_subcarrier(encoder); + ch7006_setup_pll(encoder); + ch7006_setup_power_state(encoder); + ch7006_setup_properties(encoder); + + ch7006_state_load(client, state); +} + +static enum drm_connector_status ch7006_encoder_detect(struct drm_encoder *encoder, + struct drm_connector *connector) +{ + struct i2c_client *client = drm_i2c_encoder_get_client(encoder); + struct ch7006_priv *priv = to_ch7006_priv(encoder); + struct ch7006_state *state = &priv->state; + int det; + + ch7006_dbg(client, "\n"); + + ch7006_save_reg(client, state, CH7006_DETECT); + ch7006_save_reg(client, state, CH7006_POWER); + ch7006_save_reg(client, state, CH7006_CLKMODE); + + ch7006_write(client, CH7006_POWER, CH7006_POWER_RESET | + bitfs(CH7006_POWER_LEVEL, NORMAL)); + ch7006_write(client, CH7006_CLKMODE, CH7006_CLKMODE_MASTER); + + ch7006_write(client, CH7006_DETECT, CH7006_DETECT_SENSE); + + ch7006_write(client, CH7006_DETECT, 0); + + det = ch7006_read(client, CH7006_DETECT); + + ch7006_load_reg(client, state, CH7006_CLKMODE); + ch7006_load_reg(client, state, CH7006_POWER); + ch7006_load_reg(client, state, CH7006_DETECT); + + if ((det & (CH7006_DETECT_SVIDEO_Y_TEST| + CH7006_DETECT_SVIDEO_C_TEST| + CH7006_DETECT_CVBS_TEST)) == 0) 
+ priv->subconnector = DRM_MODE_SUBCONNECTOR_SCART; + else if ((det & (CH7006_DETECT_SVIDEO_Y_TEST| + CH7006_DETECT_SVIDEO_C_TEST)) == 0) + priv->subconnector = DRM_MODE_SUBCONNECTOR_SVIDEO; + else if ((det & CH7006_DETECT_CVBS_TEST) == 0) + priv->subconnector = DRM_MODE_SUBCONNECTOR_Composite; + else + priv->subconnector = DRM_MODE_SUBCONNECTOR_Unknown; + + drm_connector_property_set_value(connector, + encoder->dev->mode_config.tv_subconnector_property, + priv->subconnector); + + return priv->subconnector ? connector_status_connected : + connector_status_disconnected; +} + +static int ch7006_encoder_get_modes(struct drm_encoder *encoder, + struct drm_connector *connector) +{ + struct ch7006_priv *priv = to_ch7006_priv(encoder); + struct ch7006_mode *mode; + int n = 0; + + for (mode = ch7006_modes; mode->mode.clock; mode++) { + if (~mode->valid_scales & 1<scale || + ~mode->valid_norms & 1<norm) + continue; + + drm_mode_probed_add(connector, + drm_mode_duplicate(encoder->dev, &mode->mode)); + + n++; + } + + return n; +} + +static int ch7006_encoder_create_resources(struct drm_encoder *encoder, + struct drm_connector *connector) +{ + struct ch7006_priv *priv = to_ch7006_priv(encoder); + struct drm_device *dev = encoder->dev; + struct drm_mode_config *conf = &dev->mode_config; + + drm_mode_create_tv_properties(dev, NUM_TV_NORMS, ch7006_tv_norm_names); + + priv->scale_property = drm_property_create(dev, DRM_MODE_PROP_RANGE, + "scale", 2); + priv->scale_property->values[0] = 0; + priv->scale_property->values[1] = 2; + + drm_connector_attach_property(connector, conf->tv_select_subconnector_property, + priv->select_subconnector); + drm_connector_attach_property(connector, conf->tv_subconnector_property, + priv->subconnector); + drm_connector_attach_property(connector, conf->tv_left_margin_property, + priv->hmargin); + drm_connector_attach_property(connector, conf->tv_bottom_margin_property, + priv->vmargin); + drm_connector_attach_property(connector, conf->tv_mode_property, + priv->norm); + drm_connector_attach_property(connector, conf->tv_brightness_property, + priv->brightness); + drm_connector_attach_property(connector, conf->tv_contrast_property, + priv->contrast); + drm_connector_attach_property(connector, conf->tv_flicker_reduction_property, + priv->flicker); + drm_connector_attach_property(connector, priv->scale_property, + priv->scale); + + return 0; +} + +static int ch7006_encoder_set_property(struct drm_encoder *encoder, + struct drm_connector *connector, + struct drm_property *property, + uint64_t val) +{ + struct i2c_client *client = drm_i2c_encoder_get_client(encoder); + struct ch7006_priv *priv = to_ch7006_priv(encoder); + struct ch7006_state *state = &priv->state; + struct drm_mode_config *conf = &encoder->dev->mode_config; + struct drm_crtc *crtc = encoder->crtc; + bool modes_changed = false; + + ch7006_dbg(client, "\n"); + + if (property == conf->tv_select_subconnector_property) { + priv->select_subconnector = val; + + ch7006_setup_power_state(encoder); + + ch7006_load_reg(client, state, CH7006_POWER); + + } else if (property == conf->tv_left_margin_property) { + priv->hmargin = val; + + ch7006_setup_properties(encoder); + + ch7006_load_reg(client, state, CH7006_POV); + ch7006_load_reg(client, state, CH7006_HPOS); + + } else if (property == conf->tv_bottom_margin_property) { + priv->vmargin = val; + + ch7006_setup_properties(encoder); + + ch7006_load_reg(client, state, CH7006_POV); + ch7006_load_reg(client, state, CH7006_VPOS); + + } else if (property == conf->tv_mode_property) { 
+ if (connector->dpms != DRM_MODE_DPMS_OFF) + return -EINVAL; + + priv->norm = val; + + modes_changed = true; + + } else if (property == conf->tv_brightness_property) { + priv->brightness = val; + + ch7006_setup_levels(encoder); + + ch7006_load_reg(client, state, CH7006_BLACK_LEVEL); + + } else if (property == conf->tv_contrast_property) { + priv->contrast = val; + + ch7006_setup_properties(encoder); + + ch7006_load_reg(client, state, CH7006_CONTRAST); + + } else if (property == conf->tv_flicker_reduction_property) { + priv->flicker = val; + + ch7006_setup_properties(encoder); + + ch7006_load_reg(client, state, CH7006_FFILTER); + + } else if (property == priv->scale_property) { + if (connector->dpms != DRM_MODE_DPMS_OFF) + return -EINVAL; + + priv->scale = val; + + modes_changed = true; + + } else { + return -EINVAL; + } + + if (modes_changed) { + drm_helper_probe_single_connector_modes(connector, 0, 0); + + /* Disable the crtc to ensure a full modeset is + * performed whenever it's turned on again. */ + if (crtc) { + struct drm_mode_set modeset = { + .crtc = crtc, + }; + + crtc->funcs->set_config(&modeset); + } + } + + return 0; +} + +static struct drm_encoder_slave_funcs ch7006_encoder_funcs = { + .set_config = ch7006_encoder_set_config, + .destroy = ch7006_encoder_destroy, + .dpms = ch7006_encoder_dpms, + .save = ch7006_encoder_save, + .restore = ch7006_encoder_restore, + .mode_fixup = ch7006_encoder_mode_fixup, + .mode_valid = ch7006_encoder_mode_valid, + .mode_set = ch7006_encoder_mode_set, + .detect = ch7006_encoder_detect, + .get_modes = ch7006_encoder_get_modes, + .create_resources = ch7006_encoder_create_resources, + .set_property = ch7006_encoder_set_property, +}; + + +/* I2C driver functions */ + +static int ch7006_probe(struct i2c_client *client, const struct i2c_device_id *id) +{ + uint8_t addr = CH7006_VERSION_ID; + uint8_t val; + int ret; + + ch7006_dbg(client, "\n"); + + ret = i2c_master_send(client, &addr, sizeof(addr)); + if (ret < 0) + goto fail; + + ret = i2c_master_recv(client, &val, sizeof(val)); + if (ret < 0) + goto fail; + + ch7006_info(client, "Detected version ID: %x\n", val); + + return 0; + +fail: + ch7006_err(client, "Error %d reading version ID\n", ret); + + return -ENODEV; +} + +static int ch7006_remove(struct i2c_client *client) +{ + ch7006_dbg(client, "\n"); + + return 0; +} + +static int ch7006_encoder_init(struct i2c_client *client, + struct drm_device *dev, + struct drm_encoder_slave *encoder) +{ + struct ch7006_priv *priv; + int i; + + ch7006_dbg(client, "\n"); + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + encoder->slave_priv = priv; + encoder->slave_funcs = &ch7006_encoder_funcs; + + priv->norm = TV_NORM_PAL; + priv->select_subconnector = DRM_MODE_SUBCONNECTOR_Automatic; + priv->subconnector = DRM_MODE_SUBCONNECTOR_Unknown; + priv->scale = 1; + priv->contrast = 50; + priv->brightness = 50; + priv->flicker = 50; + priv->hmargin = 50; + priv->vmargin = 50; + priv->last_dpms = -1; + + if (ch7006_tv_norm) { + for (i = 0; i < NUM_TV_NORMS; i++) { + if (!strcmp(ch7006_tv_norm_names[i], ch7006_tv_norm)) { + priv->norm = i; + break; + } + } + + if (i == NUM_TV_NORMS) + ch7006_err(client, "Invalid TV norm setting \"%s\".\n", + ch7006_tv_norm); + } + + if (ch7006_scale >= 0 && ch7006_scale <= 2) + priv->scale = ch7006_scale; + else + ch7006_err(client, "Invalid scale setting \"%d\".\n", + ch7006_scale); + + return 0; +} + +static struct i2c_device_id ch7006_ids[] = { + { "ch7006", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, 
ch7006_ids); + +static struct drm_i2c_encoder_driver ch7006_driver = { + .i2c_driver = { + .probe = ch7006_probe, + .remove = ch7006_remove, + + .driver = { + .name = "ch7006", + }, + + .id_table = ch7006_ids, + }, + + .encoder_init = ch7006_encoder_init, +}; + + +/* Module initialization */ + +static int __init ch7006_init(void) +{ + return drm_i2c_encoder_register(THIS_MODULE, &ch7006_driver); +} + +static void __exit ch7006_exit(void) +{ + drm_i2c_encoder_unregister(&ch7006_driver); +} + +int ch7006_debug; +module_param_named(debug, ch7006_debug, int, 0600); +MODULE_PARM_DESC(debug, "Enable debug output."); + +char *ch7006_tv_norm; +module_param_named(tv_norm, ch7006_tv_norm, charp, 0600); +MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" + "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, PAL-60, NTSC-M, NTSC-J.\n" + "\t\tDefault: PAL"); + +int ch7006_scale = 1; +module_param_named(scale, ch7006_scale, int, 0600); +MODULE_PARM_DESC(scale, "Default scale.\n" + "\t\tSupported: 0 -> Select video modes with a higher blanking ratio.\n" + "\t\t\t1 -> Select default video modes.\n" + "\t\t\t2 -> Select video modes with a lower blanking ratio."); + +MODULE_AUTHOR("Francisco Jerez "); +MODULE_DESCRIPTION("Chrontel ch7006 TV encoder driver"); +MODULE_LICENSE("GPL and additional rights"); + +module_init(ch7006_init); +module_exit(ch7006_exit); diff --git a/drivers/gpu/drm/i2c/ch7006_mode.c b/drivers/gpu/drm/i2c/ch7006_mode.c new file mode 100644 index 000000000000..87f5445092e8 --- /dev/null +++ b/drivers/gpu/drm/i2c/ch7006_mode.c @@ -0,0 +1,473 @@ +/* + * Copyright (C) 2009 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "ch7006_priv.h" + +char *ch7006_tv_norm_names[] = { + [TV_NORM_PAL] = "PAL", + [TV_NORM_PAL_M] = "PAL-M", + [TV_NORM_PAL_N] = "PAL-N", + [TV_NORM_PAL_NC] = "PAL-Nc", + [TV_NORM_PAL_60] = "PAL-60", + [TV_NORM_NTSC_M] = "NTSC-M", + [TV_NORM_NTSC_J] = "NTSC-J", +}; + +#define NTSC_LIKE_TIMINGS .vrefresh = 60 * fixed1/1.001, \ + .vdisplay = 480, \ + .vtotal = 525, \ + .hvirtual = 660 + +#define PAL_LIKE_TIMINGS .vrefresh = 50 * fixed1, \ + .vdisplay = 576, \ + .vtotal = 625, \ + .hvirtual = 810 + +struct ch7006_tv_norm_info ch7006_tv_norms[] = { + [TV_NORM_NTSC_M] = { + NTSC_LIKE_TIMINGS, + .black_level = 0.339 * fixed1, + .subc_freq = 3579545 * fixed1, + .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, NTSC), + .voffset = 0, + }, + [TV_NORM_NTSC_J] = { + NTSC_LIKE_TIMINGS, + .black_level = 0.286 * fixed1, + .subc_freq = 3579545 * fixed1, + .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, NTSC_J), + .voffset = 0, + }, + [TV_NORM_PAL] = { + PAL_LIKE_TIMINGS, + .black_level = 0.3 * fixed1, + .subc_freq = 4433618.75 * fixed1, + .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL), + .voffset = 0, + }, + [TV_NORM_PAL_M] = { + NTSC_LIKE_TIMINGS, + .black_level = 0.339 * fixed1, + .subc_freq = 3575611.433 * fixed1, + .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL_M), + .voffset = 16, + }, + + /* The following modes seem to work right but they're + * undocumented */ + + [TV_NORM_PAL_N] = { + PAL_LIKE_TIMINGS, + .black_level = 0.339 * fixed1, + .subc_freq = 4433618.75 * fixed1, + .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL), + .voffset = 0, + }, + [TV_NORM_PAL_NC] = { + PAL_LIKE_TIMINGS, + .black_level = 0.3 * fixed1, + .subc_freq = 3582056.25 * fixed1, + .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL), + .voffset = 0, + }, + [TV_NORM_PAL_60] = { + NTSC_LIKE_TIMINGS, + .black_level = 0.3 * fixed1, + .subc_freq = 4433618.75 * fixed1, + .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL_M), + .voffset = 16, + }, +}; + +#define __MODE(f, hd, vd, ht, vt, hsynp, vsynp, \ + subc, scale, scale_mask, norm_mask, e_hd, e_vd) { \ + .mode = { \ + .name = #hd "x" #vd, \ + .status = 0, \ + .type = DRM_MODE_TYPE_DRIVER, \ + .clock = f, \ + .hdisplay = hd, \ + .hsync_start = e_hd + 16, \ + .hsync_end = e_hd + 80, \ + .htotal = ht, \ + .hskew = 0, \ + .vdisplay = vd, \ + .vsync_start = vd + 10, \ + .vsync_end = vd + 26, \ + .vtotal = vt, \ + .vscan = 0, \ + .flags = DRM_MODE_FLAG_##hsynp##HSYNC | \ + DRM_MODE_FLAG_##vsynp##VSYNC, \ + .vrefresh = 0, \ + }, \ + .enc_hdisp = e_hd, \ + .enc_vdisp = e_vd, \ + .subc_coeff = subc * fixed1, \ + .dispmode = bitfs(CH7006_DISPMODE_SCALING_RATIO, scale) | \ + bitfs(CH7006_DISPMODE_INPUT_RES, e_hd##x##e_vd), \ + .valid_scales = scale_mask, \ + .valid_norms = norm_mask \ + } + +#define MODE(f, hd, vd, ht, vt, hsynp, vsynp, \ + subc, scale, scale_mask, norm_mask) \ + __MODE(f, hd, vd, ht, vt, hsynp, vsynp, subc, scale, \ + scale_mask, norm_mask, hd, vd) + +#define NTSC_LIKE (1 << TV_NORM_NTSC_M | 1 << TV_NORM_NTSC_J | \ + 1 << TV_NORM_PAL_M | 1 << TV_NORM_PAL_60) + +#define PAL_LIKE (1 << TV_NORM_PAL | 1 << TV_NORM_PAL_N | 1 << TV_NORM_PAL_NC) + +struct ch7006_mode ch7006_modes[] = { + MODE(21000, 512, 384, 840, 500, N, N, 181.797557582, 5_4, 0x6, PAL_LIKE), + MODE(26250, 512, 384, 840, 625, N, N, 145.438046066, 1_1, 0x1, PAL_LIKE), + MODE(20140, 512, 384, 800, 420, N, N, 213.257083791, 5_4, 0x4, NTSC_LIKE), + MODE(24671, 512, 384, 784, 525, N, N, 174.0874153, 1_1, 0x3, NTSC_LIKE), + MODE(28125, 720, 400, 1125, 500, N, N, 135.742176298, 5_4, 0x6, PAL_LIKE), + 
MODE(34875, 720, 400, 1116, 625, N, N, 109.469496898, 1_1, 0x1, PAL_LIKE), + MODE(23790, 720, 400, 945, 420, N, N, 160.475642016, 5_4, 0x4, NTSC_LIKE), + MODE(29455, 720, 400, 936, 525, N, N, 129.614941843, 1_1, 0x3, NTSC_LIKE), + MODE(25000, 640, 400, 1000, 500, N, N, 152.709948279, 5_4, 0x6, PAL_LIKE), + MODE(31500, 640, 400, 1008, 625, N, N, 121.198371646, 1_1, 0x1, PAL_LIKE), + MODE(21147, 640, 400, 840, 420, N, N, 180.535097338, 5_4, 0x4, NTSC_LIKE), + MODE(26434, 640, 400, 840, 525, N, N, 144.42807787, 1_1, 0x2, NTSC_LIKE), + MODE(30210, 640, 400, 840, 600, N, N, 126.374568276, 7_8, 0x1, NTSC_LIKE), + MODE(21000, 640, 480, 840, 500, N, N, 181.797557582, 5_4, 0x4, PAL_LIKE), + MODE(26250, 640, 480, 840, 625, N, N, 145.438046066, 1_1, 0x2, PAL_LIKE), + MODE(31500, 640, 480, 840, 750, N, N, 121.198371646, 5_6, 0x1, PAL_LIKE), + MODE(24671, 640, 480, 784, 525, N, N, 174.0874153, 1_1, 0x4, NTSC_LIKE), + MODE(28196, 640, 480, 784, 600, N, N, 152.326488422, 7_8, 0x2, NTSC_LIKE), + MODE(30210, 640, 480, 800, 630, N, N, 142.171389101, 5_6, 0x1, NTSC_LIKE), + __MODE(29500, 720, 576, 944, 625, P, P, 145.592111636, 1_1, 0x7, PAL_LIKE, 800, 600), + MODE(36000, 800, 600, 960, 750, P, P, 119.304647022, 5_6, 0x6, PAL_LIKE), + MODE(39000, 800, 600, 936, 836, P, P, 110.127366499, 3_4, 0x1, PAL_LIKE), + MODE(39273, 800, 600, 1040, 630, P, P, 145.816809399, 5_6, 0x4, NTSC_LIKE), + MODE(43636, 800, 600, 1040, 700, P, P, 131.235128487, 3_4, 0x2, NTSC_LIKE), + MODE(47832, 800, 600, 1064, 750, P, P, 119.723275165, 7_10, 0x1, NTSC_LIKE), + {} +}; + +struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder, + struct drm_display_mode *drm_mode) +{ + struct ch7006_priv *priv = to_ch7006_priv(encoder); + struct ch7006_mode *mode; + + for (mode = ch7006_modes; mode->mode.clock; mode++) { + + if (~mode->valid_norms & 1<norm) + continue; + + if (mode->mode.hdisplay != drm_mode->hdisplay || + mode->mode.vdisplay != drm_mode->vdisplay || + mode->mode.vtotal != drm_mode->vtotal || + mode->mode.htotal != drm_mode->htotal || + mode->mode.clock != drm_mode->clock) + continue; + + return mode; + } + + return NULL; +} + +/* Some common HW state calculation code */ + +void ch7006_setup_levels(struct drm_encoder *encoder) +{ + struct i2c_client *client = drm_i2c_encoder_get_client(encoder); + struct ch7006_priv *priv = to_ch7006_priv(encoder); + uint8_t *regs = priv->state.regs; + struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm]; + int gain; + int black_level; + + /* Set DAC_GAIN if the voltage drop between white and black is + * high enough. */ + if (norm->black_level < 339*fixed1/1000) { + gain = 76; + + regs[CH7006_INPUT_FORMAT] |= CH7006_INPUT_FORMAT_DAC_GAIN; + } else { + gain = 71; + + regs[CH7006_INPUT_FORMAT] &= ~CH7006_INPUT_FORMAT_DAC_GAIN; + } + + black_level = round_fixed(norm->black_level*26625)/gain; + + /* Correct it with the specified brightness. 
*/ + black_level = interpolate(90, black_level, 208, priv->brightness); + + regs[CH7006_BLACK_LEVEL] = bitf(CH7006_BLACK_LEVEL_0, black_level); + + ch7006_dbg(client, "black level: %d\n", black_level); +} + +void ch7006_setup_subcarrier(struct drm_encoder *encoder) +{ + struct i2c_client *client = drm_i2c_encoder_get_client(encoder); + struct ch7006_priv *priv = to_ch7006_priv(encoder); + struct ch7006_state *state = &priv->state; + struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm]; + struct ch7006_mode *mode = priv->mode; + uint32_t subc_inc; + + subc_inc = round_fixed((mode->subc_coeff >> 8) + * (norm->subc_freq >> 24)); + + setbitf(state, CH7006_SUBC_INC0, 28, subc_inc); + setbitf(state, CH7006_SUBC_INC1, 24, subc_inc); + setbitf(state, CH7006_SUBC_INC2, 20, subc_inc); + setbitf(state, CH7006_SUBC_INC3, 16, subc_inc); + setbitf(state, CH7006_SUBC_INC4, 12, subc_inc); + setbitf(state, CH7006_SUBC_INC5, 8, subc_inc); + setbitf(state, CH7006_SUBC_INC6, 4, subc_inc); + setbitf(state, CH7006_SUBC_INC7, 0, subc_inc); + + ch7006_dbg(client, "subcarrier inc: %u\n", subc_inc); +} + +void ch7006_setup_pll(struct drm_encoder *encoder) +{ + struct i2c_client *client = drm_i2c_encoder_get_client(encoder); + struct ch7006_priv *priv = to_ch7006_priv(encoder); + uint8_t *regs = priv->state.regs; + struct ch7006_mode *mode = priv->mode; + int n, best_n = 0; + int m, best_m = 0; + int freq, best_freq = 0; + + for (n = 0; n < CH7006_MAXN; n++) { + for (m = 0; m < CH7006_MAXM; m++) { + freq = CH7006_FREQ0*(n+2)/(m+2); + + if (abs(freq - mode->mode.clock) < + abs(best_freq - mode->mode.clock)) { + best_freq = freq; + best_n = n; + best_m = m; + } + } + } + + regs[CH7006_PLLOV] = bitf(CH7006_PLLOV_N_8, best_n) | + bitf(CH7006_PLLOV_M_8, best_m); + + regs[CH7006_PLLM] = bitf(CH7006_PLLM_0, best_m); + regs[CH7006_PLLN] = bitf(CH7006_PLLN_0, best_n); + + if (best_n < 108) + regs[CH7006_PLL_CONTROL] |= CH7006_PLL_CONTROL_CAPACITOR; + else + regs[CH7006_PLL_CONTROL] &= ~CH7006_PLL_CONTROL_CAPACITOR; + + ch7006_dbg(client, "n=%d m=%d f=%d c=%d\n", + best_n, best_m, best_freq, best_n < 108); +} + +void ch7006_setup_power_state(struct drm_encoder *encoder) +{ + struct ch7006_priv *priv = to_ch7006_priv(encoder); + uint8_t *power = &priv->state.regs[CH7006_POWER]; + int subconnector; + + subconnector = priv->select_subconnector ? 
priv->select_subconnector : + priv->subconnector; + + *power = CH7006_POWER_RESET; + + if (priv->last_dpms == DRM_MODE_DPMS_ON) { + switch (subconnector) { + case DRM_MODE_SUBCONNECTOR_SVIDEO: + *power |= bitfs(CH7006_POWER_LEVEL, CVBS_OFF); + break; + case DRM_MODE_SUBCONNECTOR_Composite: + *power |= bitfs(CH7006_POWER_LEVEL, SVIDEO_OFF); + break; + case DRM_MODE_SUBCONNECTOR_SCART: + *power |= bitfs(CH7006_POWER_LEVEL, NORMAL) | + CH7006_POWER_SCART; + break; + } + + } else { + *power |= bitfs(CH7006_POWER_LEVEL, FULL_POWER_OFF); + } +} + +void ch7006_setup_properties(struct drm_encoder *encoder) +{ + struct i2c_client *client = drm_i2c_encoder_get_client(encoder); + struct ch7006_priv *priv = to_ch7006_priv(encoder); + struct ch7006_state *state = &priv->state; + struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm]; + struct ch7006_mode *ch_mode = priv->mode; + struct drm_display_mode *mode = &ch_mode->mode; + uint8_t *regs = state->regs; + int flicker, contrast, hpos, vpos; + uint64_t scale, aspect; + + flicker = interpolate(0, 2, 3, priv->flicker); + regs[CH7006_FFILTER] = bitf(CH7006_FFILTER_TEXT, flicker) | + bitf(CH7006_FFILTER_LUMA, flicker) | + bitf(CH7006_FFILTER_CHROMA, 1); + + contrast = interpolate(0, 5, 7, priv->contrast); + regs[CH7006_CONTRAST] = bitf(CH7006_CONTRAST_0, contrast); + + scale = norm->vtotal*fixed1; + do_div(scale, mode->vtotal); + + aspect = ch_mode->enc_hdisp*fixed1; + do_div(aspect, ch_mode->enc_vdisp); + + hpos = round_fixed((norm->hvirtual * aspect - mode->hdisplay * scale) + * priv->hmargin * mode->vtotal) / norm->vtotal / 100 / 4; + + setbitf(state, CH7006_POV, HPOS_8, hpos); + setbitf(state, CH7006_HPOS, 0, hpos); + + vpos = max(0, norm->vdisplay - round_fixed(mode->vdisplay*scale) + + norm->voffset) * priv->vmargin / 100 / 2; + + setbitf(state, CH7006_POV, VPOS_8, vpos); + setbitf(state, CH7006_VPOS, 0, vpos); + + ch7006_dbg(client, "hpos: %d, vpos: %d\n", hpos, vpos); +} + +/* HW access functions */ + +void ch7006_write(struct i2c_client *client, uint8_t addr, uint8_t val) +{ + uint8_t buf[] = {addr, val}; + int ret; + + ret = i2c_master_send(client, buf, ARRAY_SIZE(buf)); + if (ret < 0) + ch7006_err(client, "Error %d writing to subaddress 0x%x\n", + ret, addr); +} + +uint8_t ch7006_read(struct i2c_client *client, uint8_t addr) +{ + uint8_t val; + int ret; + + ret = i2c_master_send(client, &addr, sizeof(addr)); + if (ret < 0) + goto fail; + + ret = i2c_master_recv(client, &val, sizeof(val)); + if (ret < 0) + goto fail; + + return val; + +fail: + ch7006_err(client, "Error %d reading from subaddress 0x%x\n", + ret, addr); + return 0; +} + +void ch7006_state_load(struct i2c_client *client, + struct ch7006_state *state) +{ + ch7006_load_reg(client, state, CH7006_POWER); + + ch7006_load_reg(client, state, CH7006_DISPMODE); + ch7006_load_reg(client, state, CH7006_FFILTER); + ch7006_load_reg(client, state, CH7006_BWIDTH); + ch7006_load_reg(client, state, CH7006_INPUT_FORMAT); + ch7006_load_reg(client, state, CH7006_CLKMODE); + ch7006_load_reg(client, state, CH7006_START_ACTIVE); + ch7006_load_reg(client, state, CH7006_POV); + ch7006_load_reg(client, state, CH7006_BLACK_LEVEL); + ch7006_load_reg(client, state, CH7006_HPOS); + ch7006_load_reg(client, state, CH7006_VPOS); + ch7006_load_reg(client, state, CH7006_INPUT_SYNC); + ch7006_load_reg(client, state, CH7006_DETECT); + ch7006_load_reg(client, state, CH7006_CONTRAST); + ch7006_load_reg(client, state, CH7006_PLLOV); + ch7006_load_reg(client, state, CH7006_PLLM); + ch7006_load_reg(client, state, 
CH7006_PLLN); + ch7006_load_reg(client, state, CH7006_BCLKOUT); + ch7006_load_reg(client, state, CH7006_SUBC_INC0); + ch7006_load_reg(client, state, CH7006_SUBC_INC1); + ch7006_load_reg(client, state, CH7006_SUBC_INC2); + ch7006_load_reg(client, state, CH7006_SUBC_INC3); + ch7006_load_reg(client, state, CH7006_SUBC_INC4); + ch7006_load_reg(client, state, CH7006_SUBC_INC5); + ch7006_load_reg(client, state, CH7006_SUBC_INC6); + ch7006_load_reg(client, state, CH7006_SUBC_INC7); + ch7006_load_reg(client, state, CH7006_PLL_CONTROL); + ch7006_load_reg(client, state, CH7006_CALC_SUBC_INC0); + + /* I don't know what this is for, but otherwise I get no + * signal. + */ + ch7006_write(client, 0x3d, 0x0); +} + +void ch7006_state_save(struct i2c_client *client, + struct ch7006_state *state) +{ + ch7006_save_reg(client, state, CH7006_POWER); + + ch7006_save_reg(client, state, CH7006_DISPMODE); + ch7006_save_reg(client, state, CH7006_FFILTER); + ch7006_save_reg(client, state, CH7006_BWIDTH); + ch7006_save_reg(client, state, CH7006_INPUT_FORMAT); + ch7006_save_reg(client, state, CH7006_CLKMODE); + ch7006_save_reg(client, state, CH7006_START_ACTIVE); + ch7006_save_reg(client, state, CH7006_POV); + ch7006_save_reg(client, state, CH7006_BLACK_LEVEL); + ch7006_save_reg(client, state, CH7006_HPOS); + ch7006_save_reg(client, state, CH7006_VPOS); + ch7006_save_reg(client, state, CH7006_INPUT_SYNC); + ch7006_save_reg(client, state, CH7006_DETECT); + ch7006_save_reg(client, state, CH7006_CONTRAST); + ch7006_save_reg(client, state, CH7006_PLLOV); + ch7006_save_reg(client, state, CH7006_PLLM); + ch7006_save_reg(client, state, CH7006_PLLN); + ch7006_save_reg(client, state, CH7006_BCLKOUT); + ch7006_save_reg(client, state, CH7006_SUBC_INC0); + ch7006_save_reg(client, state, CH7006_SUBC_INC1); + ch7006_save_reg(client, state, CH7006_SUBC_INC2); + ch7006_save_reg(client, state, CH7006_SUBC_INC3); + ch7006_save_reg(client, state, CH7006_SUBC_INC4); + ch7006_save_reg(client, state, CH7006_SUBC_INC5); + ch7006_save_reg(client, state, CH7006_SUBC_INC6); + ch7006_save_reg(client, state, CH7006_SUBC_INC7); + ch7006_save_reg(client, state, CH7006_PLL_CONTROL); + ch7006_save_reg(client, state, CH7006_CALC_SUBC_INC0); + + state->regs[CH7006_FFILTER] = (state->regs[CH7006_FFILTER] & 0xf0) | + (state->regs[CH7006_FFILTER] & 0x0c) >> 2 | + (state->regs[CH7006_FFILTER] & 0x03) << 2; +} diff --git a/drivers/gpu/drm/i2c/ch7006_priv.h b/drivers/gpu/drm/i2c/ch7006_priv.h new file mode 100644 index 000000000000..b06d3d93d8ac --- /dev/null +++ b/drivers/gpu/drm/i2c/ch7006_priv.h @@ -0,0 +1,344 @@ +/* + * Copyright (C) 2009 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __DRM_I2C_CH7006_PRIV_H__ +#define __DRM_I2C_CH7006_PRIV_H__ + +#include "drmP.h" +#include "drm_crtc_helper.h" +#include "drm_encoder_slave.h" +#include "i2c/ch7006.h" + +typedef int64_t fixed; +#define fixed1 (1LL << 32) + +enum ch7006_tv_norm { + TV_NORM_PAL, + TV_NORM_PAL_M, + TV_NORM_PAL_N, + TV_NORM_PAL_NC, + TV_NORM_PAL_60, + TV_NORM_NTSC_M, + TV_NORM_NTSC_J, + NUM_TV_NORMS +}; + +struct ch7006_tv_norm_info { + fixed vrefresh; + int vdisplay; + int vtotal; + int hvirtual; + + fixed subc_freq; + fixed black_level; + + uint32_t dispmode; + int voffset; +}; + +struct ch7006_mode { + struct drm_display_mode mode; + + int enc_hdisp; + int enc_vdisp; + + fixed subc_coeff; + uint32_t dispmode; + + uint32_t valid_scales; + uint32_t valid_norms; +}; + +struct ch7006_state { + uint8_t regs[0x26]; +}; + +struct ch7006_priv { + struct ch7006_encoder_params *params; + struct ch7006_mode *mode; + + struct ch7006_state state; + struct ch7006_state saved_state; + + struct drm_property *scale_property; + + int select_subconnector; + int subconnector; + int hmargin; + int vmargin; + enum ch7006_tv_norm norm; + int brightness; + int contrast; + int flicker; + int scale; + + int last_dpms; +}; + +#define to_ch7006_priv(x) \ + ((struct ch7006_priv *)to_encoder_slave(x)->slave_priv) + +extern int ch7006_debug; +extern char *ch7006_tv_norm; +extern int ch7006_scale; + +extern char *ch7006_tv_norm_names[]; +extern struct ch7006_tv_norm_info ch7006_tv_norms[]; +extern struct ch7006_mode ch7006_modes[]; + +struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder, + struct drm_display_mode *drm_mode); + +void ch7006_setup_levels(struct drm_encoder *encoder); +void ch7006_setup_subcarrier(struct drm_encoder *encoder); +void ch7006_setup_pll(struct drm_encoder *encoder); +void ch7006_setup_power_state(struct drm_encoder *encoder); +void ch7006_setup_properties(struct drm_encoder *encoder); + +void ch7006_write(struct i2c_client *client, uint8_t addr, uint8_t val); +uint8_t ch7006_read(struct i2c_client *client, uint8_t addr); + +void ch7006_state_load(struct i2c_client *client, + struct ch7006_state *state); +void ch7006_state_save(struct i2c_client *client, + struct ch7006_state *state); + +/* Some helper macros */ + +#define ch7006_dbg(client, format, ...) do { \ + if (ch7006_debug) \ + dev_printk(KERN_DEBUG, &client->dev, \ + "%s: " format, __func__, ## __VA_ARGS__); \ + } while (0) +#define ch7006_info(client, format, ...) \ + dev_info(&client->dev, format, __VA_ARGS__) +#define ch7006_err(client, format, ...) \ + dev_err(&client->dev, format, __VA_ARGS__) + +#define __mask(src, bitfield) \ + (((2 << (1 ? bitfield)) - 1) & ~((1 << (0 ? bitfield)) - 1)) +#define mask(bitfield) __mask(bitfield) + +#define __bitf(src, bitfield, x) \ + (((x) >> (src) << (0 ? bitfield)) & __mask(src, bitfield)) +#define bitf(bitfield, x) __bitf(bitfield, x) +#define bitfs(bitfield, s) __bitf(bitfield, bitfield##_##s) +#define setbitf(state, reg, bitfield, x) \ + state->regs[reg] = (state->regs[reg] & ~mask(reg##_##bitfield)) \ + | bitf(reg##_##bitfield, x) + +#define __unbitf(src, bitfield, x) \ + ((x & __mask(src, bitfield)) >> (0 ? 
bitfield) << (src)) +#define unbitf(bitfield, x) __unbitf(bitfield, x) + +static inline int interpolate(int y0, int y1, int y2, int x) +{ + return y1 + (x < 50 ? y1 - y0 : y2 - y1) * (x - 50) / 50; +} + +static inline int32_t round_fixed(fixed x) +{ + return (x + fixed1/2) >> 32; +} + +#define ch7006_load_reg(client, state, reg) ch7006_write(client, reg, state->regs[reg]) +#define ch7006_save_reg(client, state, reg) state->regs[reg] = ch7006_read(client, reg) + +/* Fixed hardware specs */ + +#define CH7006_FREQ0 14318 +#define CH7006_MAXN 650 +#define CH7006_MAXM 315 + +/* Register definitions */ + +#define CH7006_DISPMODE 0x00 +#define CH7006_DISPMODE_INPUT_RES 0, 7:5 +#define CH7006_DISPMODE_INPUT_RES_512x384 0x0 +#define CH7006_DISPMODE_INPUT_RES_720x400 0x1 +#define CH7006_DISPMODE_INPUT_RES_640x400 0x2 +#define CH7006_DISPMODE_INPUT_RES_640x480 0x3 +#define CH7006_DISPMODE_INPUT_RES_800x600 0x4 +#define CH7006_DISPMODE_INPUT_RES_NATIVE 0x5 +#define CH7006_DISPMODE_OUTPUT_STD 0, 4:3 +#define CH7006_DISPMODE_OUTPUT_STD_PAL 0x0 +#define CH7006_DISPMODE_OUTPUT_STD_NTSC 0x1 +#define CH7006_DISPMODE_OUTPUT_STD_PAL_M 0x2 +#define CH7006_DISPMODE_OUTPUT_STD_NTSC_J 0x3 +#define CH7006_DISPMODE_SCALING_RATIO 0, 2:0 +#define CH7006_DISPMODE_SCALING_RATIO_5_4 0x0 +#define CH7006_DISPMODE_SCALING_RATIO_1_1 0x1 +#define CH7006_DISPMODE_SCALING_RATIO_7_8 0x2 +#define CH7006_DISPMODE_SCALING_RATIO_5_6 0x3 +#define CH7006_DISPMODE_SCALING_RATIO_3_4 0x4 +#define CH7006_DISPMODE_SCALING_RATIO_7_10 0x5 + +#define CH7006_FFILTER 0x01 +#define CH7006_FFILTER_TEXT 0, 5:4 +#define CH7006_FFILTER_LUMA 0, 3:2 +#define CH7006_FFILTER_CHROMA 0, 1:0 +#define CH7006_FFILTER_CHROMA_NO_DCRAWL 0x3 + +#define CH7006_BWIDTH 0x03 +#define CH7006_BWIDTH_5L_FFILER (1 << 7) +#define CH7006_BWIDTH_CVBS_NO_CHROMA (1 << 6) +#define CH7006_BWIDTH_CHROMA 0, 5:4 +#define CH7006_BWIDTH_SVIDEO_YPEAK (1 << 3) +#define CH7006_BWIDTH_SVIDEO_LUMA 0, 2:1 +#define CH7006_BWIDTH_CVBS_LUMA 0, 0:0 + +#define CH7006_INPUT_FORMAT 0x04 +#define CH7006_INPUT_FORMAT_DAC_GAIN (1 << 6) +#define CH7006_INPUT_FORMAT_RGB_PASS_THROUGH (1 << 5) +#define CH7006_INPUT_FORMAT_FORMAT 0, 3:0 +#define CH7006_INPUT_FORMAT_FORMAT_RGB16 0x0 +#define CH7006_INPUT_FORMAT_FORMAT_YCrCb24m16 0x1 +#define CH7006_INPUT_FORMAT_FORMAT_RGB24m16 0x2 +#define CH7006_INPUT_FORMAT_FORMAT_RGB15 0x3 +#define CH7006_INPUT_FORMAT_FORMAT_RGB24m12C 0x4 +#define CH7006_INPUT_FORMAT_FORMAT_RGB24m12I 0x5 +#define CH7006_INPUT_FORMAT_FORMAT_RGB24m8 0x6 +#define CH7006_INPUT_FORMAT_FORMAT_RGB16m8 0x7 +#define CH7006_INPUT_FORMAT_FORMAT_RGB15m8 0x8 +#define CH7006_INPUT_FORMAT_FORMAT_YCrCb24m8 0x9 + +#define CH7006_CLKMODE 0x06 +#define CH7006_CLKMODE_SUBC_LOCK (1 << 7) +#define CH7006_CLKMODE_MASTER (1 << 6) +#define CH7006_CLKMODE_POS_EDGE (1 << 4) +#define CH7006_CLKMODE_XCM 0, 3:2 +#define CH7006_CLKMODE_PCM 0, 1:0 + +#define CH7006_START_ACTIVE 0x07 +#define CH7006_START_ACTIVE_0 0, 7:0 + +#define CH7006_POV 0x08 +#define CH7006_POV_START_ACTIVE_8 8, 2:2 +#define CH7006_POV_HPOS_8 8, 1:1 +#define CH7006_POV_VPOS_8 8, 0:0 + +#define CH7006_BLACK_LEVEL 0x09 +#define CH7006_BLACK_LEVEL_0 0, 7:0 + +#define CH7006_HPOS 0x0a +#define CH7006_HPOS_0 0, 7:0 + +#define CH7006_VPOS 0x0b +#define CH7006_VPOS_0 0, 7:0 + +#define CH7006_INPUT_SYNC 0x0d +#define CH7006_INPUT_SYNC_EMBEDDED (1 << 3) +#define CH7006_INPUT_SYNC_OUTPUT (1 << 2) +#define CH7006_INPUT_SYNC_PVSYNC (1 << 1) +#define CH7006_INPUT_SYNC_PHSYNC (1 << 0) + +#define CH7006_POWER 0x0e +#define CH7006_POWER_SCART (1 << 4) 
+#define CH7006_POWER_RESET (1 << 3) +#define CH7006_POWER_LEVEL 0, 2:0 +#define CH7006_POWER_LEVEL_CVBS_OFF 0x0 +#define CH7006_POWER_LEVEL_POWER_OFF 0x1 +#define CH7006_POWER_LEVEL_SVIDEO_OFF 0x2 +#define CH7006_POWER_LEVEL_NORMAL 0x3 +#define CH7006_POWER_LEVEL_FULL_POWER_OFF 0x4 + +#define CH7006_DETECT 0x10 +#define CH7006_DETECT_SVIDEO_Y_TEST (1 << 3) +#define CH7006_DETECT_SVIDEO_C_TEST (1 << 2) +#define CH7006_DETECT_CVBS_TEST (1 << 1) +#define CH7006_DETECT_SENSE (1 << 0) + +#define CH7006_CONTRAST 0x11 +#define CH7006_CONTRAST_0 0, 2:0 + +#define CH7006_PLLOV 0x13 +#define CH7006_PLLOV_N_8 8, 2:1 +#define CH7006_PLLOV_M_8 8, 0:0 + +#define CH7006_PLLM 0x14 +#define CH7006_PLLM_0 0, 7:0 + +#define CH7006_PLLN 0x15 +#define CH7006_PLLN_0 0, 7:0 + +#define CH7006_BCLKOUT 0x17 + +#define CH7006_SUBC_INC0 0x18 +#define CH7006_SUBC_INC0_28 28, 3:0 + +#define CH7006_SUBC_INC1 0x19 +#define CH7006_SUBC_INC1_24 24, 3:0 + +#define CH7006_SUBC_INC2 0x1a +#define CH7006_SUBC_INC2_20 20, 3:0 + +#define CH7006_SUBC_INC3 0x1b +#define CH7006_SUBC_INC3_GPIO1_VAL (1 << 7) +#define CH7006_SUBC_INC3_GPIO0_VAL (1 << 6) +#define CH7006_SUBC_INC3_POUT_3_3V (1 << 5) +#define CH7006_SUBC_INC3_POUT_INV (1 << 4) +#define CH7006_SUBC_INC3_16 16, 3:0 + +#define CH7006_SUBC_INC4 0x1c +#define CH7006_SUBC_INC4_GPIO1_IN (1 << 7) +#define CH7006_SUBC_INC4_GPIO0_IN (1 << 6) +#define CH7006_SUBC_INC4_DS_INPUT (1 << 4) +#define CH7006_SUBC_INC4_12 12, 3:0 + +#define CH7006_SUBC_INC5 0x1d +#define CH7006_SUBC_INC5_8 8, 3:0 + +#define CH7006_SUBC_INC6 0x1e +#define CH7006_SUBC_INC6_4 4, 3:0 + +#define CH7006_SUBC_INC7 0x1f +#define CH7006_SUBC_INC7_0 0, 3:0 + +#define CH7006_PLL_CONTROL 0x20 +#define CH7006_PLL_CONTROL_CPI (1 << 5) +#define CH7006_PLL_CONTROL_CAPACITOR (1 << 4) +#define CH7006_PLL_CONTROL_7STAGES (1 << 3) +#define CH7006_PLL_CONTROL_DIGITAL_5V (1 << 2) +#define CH7006_PLL_CONTROL_ANALOG_5V (1 << 1) +#define CH7006_PLL_CONTROL_MEMORY_5V (1 << 0) + +#define CH7006_CALC_SUBC_INC0 0x21 +#define CH7006_CALC_SUBC_INC0_24 24, 4:3 +#define CH7006_CALC_SUBC_INC0_HYST 0, 2:1 +#define CH7006_CALC_SUBC_INC0_AUTO (1 << 0) + +#define CH7006_CALC_SUBC_INC1 0x22 +#define CH7006_CALC_SUBC_INC1_16 16, 7:0 + +#define CH7006_CALC_SUBC_INC2 0x23 +#define CH7006_CALC_SUBC_INC2_8 8, 7:0 + +#define CH7006_CALC_SUBC_INC3 0x24 +#define CH7006_CALC_SUBC_INC3_0 0, 7:0 + +#define CH7006_VERSION_ID 0x25 + +#endif diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig new file mode 100644 index 000000000000..d823e6319516 --- /dev/null +++ b/drivers/gpu/drm/nouveau/Kconfig @@ -0,0 +1,44 @@ +config DRM_NOUVEAU + tristate "Nouveau (nVidia) cards" + depends on DRM + select FW_LOADER + select DRM_KMS_HELPER + select DRM_TTM + select FB_CFB_FILLRECT + select FB_CFB_COPYAREA + select FB_CFB_IMAGEBLIT + select FB + select FRAMEBUFFER_CONSOLE if !EMBEDDED + select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT + help + Choose this option for open-source nVidia support. + +config DRM_NOUVEAU_BACKLIGHT + bool "Support for backlight control" + depends on DRM_NOUVEAU + default y + help + Say Y here if you want to control the backlight of your display + (e.g. a laptop panel). + +config DRM_NOUVEAU_DEBUG + bool "Build in Nouveau's debugfs support" + depends on DRM_NOUVEAU && DEBUG_FS + default y + help + Say Y here if you want Nouveau to output debugging information + via debugfs. 
+ +menu "I2C encoder or helper chips" + depends on DRM + +config DRM_I2C_CH7006 + tristate "Chrontel ch7006 TV encoder" + default m if DRM_NOUVEAU + help + Support for Chrontel ch7006 and similar TV encoders, found + on some nVidia video cards. + + This driver is currently only useful if you're also using + the nouveau driver. +endmenu diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile new file mode 100644 index 000000000000..1d90d4d0144f --- /dev/null +++ b/drivers/gpu/drm/nouveau/Makefile @@ -0,0 +1,31 @@ +# +# Makefile for the drm device driver. This driver provides support for the +# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. + +ccflags-y := -Iinclude/drm +nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ + nouveau_object.o nouveau_irq.o nouveau_notifier.o \ + nouveau_sgdma.o nouveau_dma.o \ + nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \ + nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \ + nouveau_display.o nouveau_connector.o nouveau_fbcon.o \ + nouveau_dp.o \ + nv04_timer.o \ + nv04_mc.o nv40_mc.o nv50_mc.o \ + nv04_fb.o nv10_fb.o nv40_fb.o \ + nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \ + nv04_graph.o nv10_graph.o nv20_graph.o \ + nv40_graph.o nv50_graph.o \ + nv04_instmem.o nv50_instmem.o \ + nv50_crtc.o nv50_dac.o nv50_sor.o \ + nv50_cursor.o nv50_display.o nv50_fbcon.o \ + nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \ + nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \ + nv17_gpio.o + +nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o +nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o +nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o +nouveau-$(CONFIG_ACPI) += nouveau_acpi.o + +obj-$(CONFIG_DRM_NOUVEAU)+= nouveau.o diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c new file mode 100644 index 000000000000..1cf488247a16 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -0,0 +1,125 @@ +#include +#include +#include +#include + +#include "drmP.h" +#include "drm.h" +#include "drm_sarea.h" +#include "drm_crtc_helper.h" +#include "nouveau_drv.h" +#include "nouveau_drm.h" +#include "nv50_display.h" + +#define NOUVEAU_DSM_SUPPORTED 0x00 +#define NOUVEAU_DSM_SUPPORTED_FUNCTIONS 0x00 + +#define NOUVEAU_DSM_ACTIVE 0x01 +#define NOUVEAU_DSM_ACTIVE_QUERY 0x00 + +#define NOUVEAU_DSM_LED 0x02 +#define NOUVEAU_DSM_LED_STATE 0x00 +#define NOUVEAU_DSM_LED_OFF 0x10 +#define NOUVEAU_DSM_LED_STAMINA 0x11 +#define NOUVEAU_DSM_LED_SPEED 0x12 + +#define NOUVEAU_DSM_POWER 0x03 +#define NOUVEAU_DSM_POWER_STATE 0x00 +#define NOUVEAU_DSM_POWER_SPEED 0x01 +#define NOUVEAU_DSM_POWER_STAMINA 0x02 + +static int nouveau_dsm(struct drm_device *dev, int func, int arg, int *result) +{ + static char muid[] = { + 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D, + 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4, + }; + + struct pci_dev *pdev = dev->pdev; + struct acpi_handle *handle; + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_object_list input; + union acpi_object params[4]; + union acpi_object *obj; + int err; + + handle = DEVICE_ACPI_HANDLE(&pdev->dev); + + if (!handle) + return -ENODEV; + + input.count = 4; + input.pointer = params; + params[0].type = ACPI_TYPE_BUFFER; + params[0].buffer.length = sizeof(muid); + params[0].buffer.pointer = (char *)muid; + params[1].type = ACPI_TYPE_INTEGER; + params[1].integer.value = 0x00000102; + params[2].type = ACPI_TYPE_INTEGER; + 
params[2].integer.value = func; + params[3].type = ACPI_TYPE_INTEGER; + params[3].integer.value = arg; + + err = acpi_evaluate_object(handle, "_DSM", &input, &output); + if (err) { + NV_INFO(dev, "failed to evaluate _DSM: %d\n", err); + return err; + } + + obj = (union acpi_object *)output.pointer; + + if (obj->type == ACPI_TYPE_INTEGER) + if (obj->integer.value == 0x80000002) + return -ENODEV; + + if (obj->type == ACPI_TYPE_BUFFER) { + if (obj->buffer.length == 4 && result) { + *result = 0; + *result |= obj->buffer.pointer[0]; + *result |= (obj->buffer.pointer[1] << 8); + *result |= (obj->buffer.pointer[2] << 16); + *result |= (obj->buffer.pointer[3] << 24); + } + } + + kfree(output.pointer); + return 0; +} + +int nouveau_hybrid_setup(struct drm_device *dev) +{ + int result; + + if (nouveau_dsm(dev, NOUVEAU_DSM_ACTIVE, NOUVEAU_DSM_ACTIVE_QUERY, + &result)) + return -ENODEV; + + NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result); + + if (result & 0x1) { /* Stamina mode - disable the external GPU */ + nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA, + NULL); + nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA, + NULL); + } else { /* Ensure that the external GPU is enabled */ + nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL); + nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED, + NULL); + } + + return 0; +} + +bool nouveau_dsm_probe(struct drm_device *dev) +{ + int support = 0; + + if (nouveau_dsm(dev, NOUVEAU_DSM_SUPPORTED, + NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &support)) + return false; + + if (!support) + return false; + + return true; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c new file mode 100644 index 000000000000..20564f8cb0ec --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c @@ -0,0 +1,155 @@ +/* + * Copyright (C) 2009 Red Hat + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +/* + * Authors: + * Matthew Garrett + * + * Register locations derived from NVClock by Roderick Colenbrander + */ + +#include + +#include "drmP.h" +#include "nouveau_drv.h" +#include "nouveau_drm.h" +#include "nouveau_reg.h" + +static int nv40_get_intensity(struct backlight_device *bd) +{ + struct drm_device *dev = bl_get_data(bd); + int val = (nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK) + >> 16; + + return val; +} + +static int nv40_set_intensity(struct backlight_device *bd) +{ + struct drm_device *dev = bl_get_data(bd); + int val = bd->props.brightness; + int reg = nv_rd32(dev, NV40_PMC_BACKLIGHT); + + nv_wr32(dev, NV40_PMC_BACKLIGHT, + (val << 16) | (reg & ~NV40_PMC_BACKLIGHT_MASK)); + + return 0; +} + +static struct backlight_ops nv40_bl_ops = { + .options = BL_CORE_SUSPENDRESUME, + .get_brightness = nv40_get_intensity, + .update_status = nv40_set_intensity, +}; + +static int nv50_get_intensity(struct backlight_device *bd) +{ + struct drm_device *dev = bl_get_data(bd); + + return nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT); +} + +static int nv50_set_intensity(struct backlight_device *bd) +{ + struct drm_device *dev = bl_get_data(bd); + int val = bd->props.brightness; + + nv_wr32(dev, NV50_PDISPLAY_SOR_BACKLIGHT, + val | NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE); + return 0; +} + +static struct backlight_ops nv50_bl_ops = { + .options = BL_CORE_SUSPENDRESUME, + .get_brightness = nv50_get_intensity, + .update_status = nv50_set_intensity, +}; + +static int nouveau_nv40_backlight_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct backlight_device *bd; + + if (!(nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK)) + return 0; + + bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev, + &nv40_bl_ops); + if (IS_ERR(bd)) + return PTR_ERR(bd); + + dev_priv->backlight = bd; + bd->props.max_brightness = 31; + bd->props.brightness = nv40_get_intensity(bd); + backlight_update_status(bd); + + return 0; +} + +static int nouveau_nv50_backlight_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct backlight_device *bd; + + if (!nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT)) + return 0; + + bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev, + &nv50_bl_ops); + if (IS_ERR(bd)) + return PTR_ERR(bd); + + dev_priv->backlight = bd; + bd->props.max_brightness = 1025; + bd->props.brightness = nv50_get_intensity(bd); + backlight_update_status(bd); + return 0; +} + +int nouveau_backlight_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + switch (dev_priv->card_type) { + case NV_40: + return nouveau_nv40_backlight_init(dev); + case NV_50: + return nouveau_nv50_backlight_init(dev); + default: + break; + } + + return 0; +} + +void nouveau_backlight_exit(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (dev_priv->backlight) { + backlight_device_unregister(dev_priv->backlight); + dev_priv->backlight = NULL; + } +} diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c new file mode 100644 index 000000000000..5eec5ed69489 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -0,0 +1,6095 @@ +/* + * Copyright 2005-2006 Erik Waling + * Copyright 2006 Stephane Marchesin + * Copyright 2007-2009 Stuart Bennett + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation 
files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF + * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "drmP.h" +#define NV_DEBUG_NOTRACE +#include "nouveau_drv.h" +#include "nouveau_hw.h" + +/* these defines are made up */ +#define NV_CIO_CRE_44_HEADA 0x0 +#define NV_CIO_CRE_44_HEADB 0x3 +#define FEATURE_MOBILE 0x10 /* also FEATURE_QUADRO for BMP */ +#define LEGACY_I2C_CRT 0x80 +#define LEGACY_I2C_PANEL 0x81 +#define LEGACY_I2C_TV 0x82 + +#define EDID1_LEN 128 + +#define BIOSLOG(sip, fmt, arg...) NV_DEBUG(sip->dev, fmt, ##arg) +#define LOG_OLD_VALUE(x) + +#define ROM16(x) le16_to_cpu(*(uint16_t *)&(x)) +#define ROM32(x) le32_to_cpu(*(uint32_t *)&(x)) + +struct init_exec { + bool execute; + bool repeat; +}; + +static bool nv_cksum(const uint8_t *data, unsigned int length) +{ + /* + * There's a few checksums in the BIOS, so here's a generic checking + * function. + */ + int i; + uint8_t sum = 0; + + for (i = 0; i < length; i++) + sum += data[i]; + + if (sum) + return true; + + return false; +} + +static int +score_vbios(struct drm_device *dev, const uint8_t *data, const bool writeable) +{ + if (!(data[0] == 0x55 && data[1] == 0xAA)) { + NV_TRACEWARN(dev, "... BIOS signature not found\n"); + return 0; + } + + if (nv_cksum(data, data[2] * 512)) { + NV_TRACEWARN(dev, "... BIOS checksum invalid\n"); + /* if a ro image is somewhat bad, it's probably all rubbish */ + return writeable ? 2 : 1; + } else + NV_TRACE(dev, "... appears to be valid\n"); + + return 3; +} + +static void load_vbios_prom(struct drm_device *dev, uint8_t *data) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t pci_nv_20, save_pci_nv_20; + int pcir_ptr; + int i; + + if (dev_priv->card_type >= NV_50) + pci_nv_20 = 0x88050; + else + pci_nv_20 = NV_PBUS_PCI_NV_20; + + /* enable ROM access */ + save_pci_nv_20 = nvReadMC(dev, pci_nv_20); + nvWriteMC(dev, pci_nv_20, + save_pci_nv_20 & ~NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED); + + /* bail if no rom signature */ + if (nv_rd08(dev, NV_PROM_OFFSET) != 0x55 || + nv_rd08(dev, NV_PROM_OFFSET + 1) != 0xaa) + goto out; + + /* additional check (see note below) - read PCI record header */ + pcir_ptr = nv_rd08(dev, NV_PROM_OFFSET + 0x18) | + nv_rd08(dev, NV_PROM_OFFSET + 0x19) << 8; + if (nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr) != 'P' || + nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 1) != 'C' || + nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 2) != 'I' || + nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 3) != 'R') + goto out; + + /* on some 6600GT/6800LE prom reads are messed up. nvclock alleges a + * a good read may be obtained by waiting or re-reading (cargocult: 5x) + * each byte. 
we'll hope pramin has something usable instead + */ + for (i = 0; i < NV_PROM_SIZE; i++) + data[i] = nv_rd08(dev, NV_PROM_OFFSET + i); + +out: + /* disable ROM access */ + nvWriteMC(dev, pci_nv_20, + save_pci_nv_20 | NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED); +} + +static void load_vbios_pramin(struct drm_device *dev, uint8_t *data) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t old_bar0_pramin = 0; + int i; + + if (dev_priv->card_type >= NV_50) { + uint32_t vbios_vram = (nv_rd32(dev, 0x619f04) & ~0xff) << 8; + + if (!vbios_vram) + vbios_vram = (nv_rd32(dev, 0x1700) << 16) + 0xf0000; + + old_bar0_pramin = nv_rd32(dev, 0x1700); + nv_wr32(dev, 0x1700, vbios_vram >> 16); + } + + /* bail if no rom signature */ + if (nv_rd08(dev, NV_PRAMIN_OFFSET) != 0x55 || + nv_rd08(dev, NV_PRAMIN_OFFSET + 1) != 0xaa) + goto out; + + for (i = 0; i < NV_PROM_SIZE; i++) + data[i] = nv_rd08(dev, NV_PRAMIN_OFFSET + i); + +out: + if (dev_priv->card_type >= NV_50) + nv_wr32(dev, 0x1700, old_bar0_pramin); +} + +static void load_vbios_pci(struct drm_device *dev, uint8_t *data) +{ + void __iomem *rom = NULL; + size_t rom_len; + int ret; + + ret = pci_enable_rom(dev->pdev); + if (ret) + return; + + rom = pci_map_rom(dev->pdev, &rom_len); + if (!rom) + goto out; + memcpy_fromio(data, rom, rom_len); + pci_unmap_rom(dev->pdev, rom); + +out: + pci_disable_rom(dev->pdev); +} + +struct methods { + const char desc[8]; + void (*loadbios)(struct drm_device *, uint8_t *); + const bool rw; + int score; +}; + +static struct methods nv04_methods[] = { + { "PROM", load_vbios_prom, false }, + { "PRAMIN", load_vbios_pramin, true }, + { "PCIROM", load_vbios_pci, true }, + { } +}; + +static struct methods nv50_methods[] = { + { "PRAMIN", load_vbios_pramin, true }, + { "PROM", load_vbios_prom, false }, + { "PCIROM", load_vbios_pci, true }, + { } +}; + +static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct methods *methods, *method; + int testscore = 3; + + if (nouveau_vbios) { + method = nv04_methods; + while (method->loadbios) { + if (!strcasecmp(nouveau_vbios, method->desc)) + break; + method++; + } + + if (method->loadbios) { + NV_INFO(dev, "Attempting to use BIOS image from %s\n", + method->desc); + + method->loadbios(dev, data); + if (score_vbios(dev, data, method->rw)) + return true; + } + + NV_ERROR(dev, "VBIOS source \'%s\' invalid\n", nouveau_vbios); + } + + if (dev_priv->card_type < NV_50) + methods = nv04_methods; + else + methods = nv50_methods; + + method = methods; + while (method->loadbios) { + NV_TRACE(dev, "Attempting to load BIOS image from %s\n", + method->desc); + data[0] = data[1] = 0; /* avoid reuse of previous image */ + method->loadbios(dev, data); + method->score = score_vbios(dev, data, method->rw); + if (method->score == testscore) + return true; + method++; + } + + while (--testscore > 0) { + method = methods; + while (method->loadbios) { + if (method->score == testscore) { + NV_TRACE(dev, "Using BIOS image from %s\n", + method->desc); + method->loadbios(dev, data); + return true; + } + method++; + } + } + + NV_ERROR(dev, "No valid BIOS image found\n"); + return false; +} + +struct init_tbl_entry { + char *name; + uint8_t id; + int length; + int length_offset; + int length_multiplier; + bool (*handler)(struct nvbios *, uint16_t, struct init_exec *); +}; + +struct bit_entry { + uint8_t id[2]; + uint16_t length; + uint16_t offset; +}; + +static int parse_init_table(struct nvbios *, unsigned int, struct 
init_exec *); + +#define MACRO_INDEX_SIZE 2 +#define MACRO_SIZE 8 +#define CONDITION_SIZE 12 +#define IO_FLAG_CONDITION_SIZE 9 +#define IO_CONDITION_SIZE 5 +#define MEM_INIT_SIZE 66 + +static void still_alive(void) +{ +#if 0 + sync(); + msleep(2); +#endif +} + +static uint32_t +munge_reg(struct nvbios *bios, uint32_t reg) +{ + struct drm_nouveau_private *dev_priv = bios->dev->dev_private; + struct dcb_entry *dcbent = bios->display.output; + + if (dev_priv->card_type < NV_50) + return reg; + + if (reg & 0x40000000) { + BUG_ON(!dcbent); + + reg += (ffs(dcbent->or) - 1) * 0x800; + if ((reg & 0x20000000) && !(dcbent->sorconf.link & 1)) + reg += 0x00000080; + } + + reg &= ~0x60000000; + return reg; +} + +static int +valid_reg(struct nvbios *bios, uint32_t reg) +{ + struct drm_nouveau_private *dev_priv = bios->dev->dev_private; + struct drm_device *dev = bios->dev; + + /* C51 has misaligned regs on purpose. Marvellous */ + if (reg & 0x2 || (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51)) { + NV_ERROR(dev, "========== misaligned reg 0x%08X ==========\n", + reg); + return 0; + } + /* + * Warn on C51 regs that have not been verified accessible in + * mmiotracing + */ + if (reg & 0x1 && dev_priv->VBIOS.pub.chip_version == 0x51 && + reg != 0x130d && reg != 0x1311 && reg != 0x60081d) + NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n", + reg); + + /* Trust the init scripts on G80 */ + if (dev_priv->card_type >= NV_50) + return 1; + + #define WITHIN(x, y, z) ((x >= y) && (x < y + z)) + if (WITHIN(reg, NV_PMC_OFFSET, NV_PMC_SIZE)) + return 1; + if (WITHIN(reg, NV_PBUS_OFFSET, NV_PBUS_SIZE)) + return 1; + if (WITHIN(reg, NV_PFIFO_OFFSET, NV_PFIFO_SIZE)) + return 1; + if (dev_priv->VBIOS.pub.chip_version >= 0x30 && + (WITHIN(reg, 0x4000, 0x600) || reg == 0x00004600)) + return 1; + if (dev_priv->VBIOS.pub.chip_version >= 0x40 && + WITHIN(reg, 0xc000, 0x48)) + return 1; + if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0000d204) + return 1; + if (dev_priv->VBIOS.pub.chip_version >= 0x40) { + if (reg == 0x00011014 || reg == 0x00020328) + return 1; + if (WITHIN(reg, 0x88000, NV_PBUS_SIZE)) /* new PBUS */ + return 1; + } + if (WITHIN(reg, NV_PFB_OFFSET, NV_PFB_SIZE)) + return 1; + if (WITHIN(reg, NV_PEXTDEV_OFFSET, NV_PEXTDEV_SIZE)) + return 1; + if (WITHIN(reg, NV_PCRTC0_OFFSET, NV_PCRTC0_SIZE * 2)) + return 1; + if (WITHIN(reg, NV_PRAMDAC0_OFFSET, NV_PRAMDAC0_SIZE * 2)) + return 1; + if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0070fff0) + return 1; + if (dev_priv->VBIOS.pub.chip_version == 0x51 && + WITHIN(reg, NV_PRAMIN_OFFSET, NV_PRAMIN_SIZE)) + return 1; + #undef WITHIN + + NV_ERROR(dev, "========== unknown reg 0x%08X ==========\n", reg); + + return 0; +} + +static bool +valid_idx_port(struct nvbios *bios, uint16_t port) +{ + struct drm_nouveau_private *dev_priv = bios->dev->dev_private; + struct drm_device *dev = bios->dev; + + /* + * If adding more ports here, the read/write functions below will need + * updating so that the correct mmio range (PRMCIO, PRMDIO, PRMVIO) is + * used for the port in question + */ + if (dev_priv->card_type < NV_50) { + if (port == NV_CIO_CRX__COLOR) + return true; + if (port == NV_VIO_SRX) + return true; + } else { + if (port == NV_CIO_CRX__COLOR) + return true; + } + + NV_ERROR(dev, "========== unknown indexed io port 0x%04X ==========\n", + port); + + return false; +} + +static bool +valid_port(struct nvbios *bios, uint16_t port) +{ + struct drm_device *dev = bios->dev; + + /* + * If adding more ports here, the read/write 
functions below will need + * updating so that the correct mmio range (PRMCIO, PRMDIO, PRMVIO) is + * used for the port in question + */ + if (port == NV_VIO_VSE2) + return true; + + NV_ERROR(dev, "========== unknown io port 0x%04X ==========\n", port); + + return false; +} + +static uint32_t +bios_rd32(struct nvbios *bios, uint32_t reg) +{ + uint32_t data; + + reg = munge_reg(bios, reg); + if (!valid_reg(bios, reg)) + return 0; + + /* + * C51 sometimes uses regs with bit0 set in the address. For these + * cases there should exist a translation in a BIOS table to an IO + * port address which the BIOS uses for accessing the reg + * + * These only seem to appear for the power control regs to a flat panel, + * and the GPIO regs at 0x60081*. In C51 mmio traces the normal regs + * for 0x1308 and 0x1310 are used - hence the mask below. An S3 + * suspend-resume mmio trace from a C51 will be required to see if this + * is true for the power microcode in 0x14.., or whether the direct IO + * port access method is needed + */ + if (reg & 0x1) + reg &= ~0x1; + + data = nv_rd32(bios->dev, reg); + + BIOSLOG(bios, " Read: Reg: 0x%08X, Data: 0x%08X\n", reg, data); + + return data; +} + +static void +bios_wr32(struct nvbios *bios, uint32_t reg, uint32_t data) +{ + struct drm_nouveau_private *dev_priv = bios->dev->dev_private; + + reg = munge_reg(bios, reg); + if (!valid_reg(bios, reg)) + return; + + /* see note in bios_rd32 */ + if (reg & 0x1) + reg &= 0xfffffffe; + + LOG_OLD_VALUE(bios_rd32(bios, reg)); + BIOSLOG(bios, " Write: Reg: 0x%08X, Data: 0x%08X\n", reg, data); + + if (dev_priv->VBIOS.execute) { + still_alive(); + nv_wr32(bios->dev, reg, data); + } +} + +static uint8_t +bios_idxprt_rd(struct nvbios *bios, uint16_t port, uint8_t index) +{ + struct drm_nouveau_private *dev_priv = bios->dev->dev_private; + struct drm_device *dev = bios->dev; + uint8_t data; + + if (!valid_idx_port(bios, port)) + return 0; + + if (dev_priv->card_type < NV_50) { + if (port == NV_VIO_SRX) + data = NVReadVgaSeq(dev, bios->state.crtchead, index); + else /* assume NV_CIO_CRX__COLOR */ + data = NVReadVgaCrtc(dev, bios->state.crtchead, index); + } else { + uint32_t data32; + + data32 = bios_rd32(bios, NV50_PDISPLAY_VGACRTC(index & ~3)); + data = (data32 >> ((index & 3) << 3)) & 0xff; + } + + BIOSLOG(bios, " Indexed IO read: Port: 0x%04X, Index: 0x%02X, " + "Head: 0x%02X, Data: 0x%02X\n", + port, index, bios->state.crtchead, data); + return data; +} + +static void +bios_idxprt_wr(struct nvbios *bios, uint16_t port, uint8_t index, uint8_t data) +{ + struct drm_nouveau_private *dev_priv = bios->dev->dev_private; + struct drm_device *dev = bios->dev; + + if (!valid_idx_port(bios, port)) + return; + + /* + * The current head is maintained in the nvbios member state.crtchead. + * We trap changes to CR44 and update the head variable and hence the + * register set written. 
+ * As CR44 only exists on CRTC0, we update crtchead to head0 in advance + * of the write, and to head1 after the write + */ + if (port == NV_CIO_CRX__COLOR && index == NV_CIO_CRE_44 && + data != NV_CIO_CRE_44_HEADB) + bios->state.crtchead = 0; + + LOG_OLD_VALUE(bios_idxprt_rd(bios, port, index)); + BIOSLOG(bios, " Indexed IO write: Port: 0x%04X, Index: 0x%02X, " + "Head: 0x%02X, Data: 0x%02X\n", + port, index, bios->state.crtchead, data); + + if (bios->execute && dev_priv->card_type < NV_50) { + still_alive(); + if (port == NV_VIO_SRX) + NVWriteVgaSeq(dev, bios->state.crtchead, index, data); + else /* assume NV_CIO_CRX__COLOR */ + NVWriteVgaCrtc(dev, bios->state.crtchead, index, data); + } else + if (bios->execute) { + uint32_t data32, shift = (index & 3) << 3; + + still_alive(); + + data32 = bios_rd32(bios, NV50_PDISPLAY_VGACRTC(index & ~3)); + data32 &= ~(0xff << shift); + data32 |= (data << shift); + bios_wr32(bios, NV50_PDISPLAY_VGACRTC(index & ~3), data32); + } + + if (port == NV_CIO_CRX__COLOR && + index == NV_CIO_CRE_44 && data == NV_CIO_CRE_44_HEADB) + bios->state.crtchead = 1; +} + +static uint8_t +bios_port_rd(struct nvbios *bios, uint16_t port) +{ + uint8_t data, head = bios->state.crtchead; + + if (!valid_port(bios, port)) + return 0; + + data = NVReadPRMVIO(bios->dev, head, NV_PRMVIO0_OFFSET + port); + + BIOSLOG(bios, " IO read: Port: 0x%04X, Head: 0x%02X, Data: 0x%02X\n", + port, head, data); + + return data; +} + +static void +bios_port_wr(struct nvbios *bios, uint16_t port, uint8_t data) +{ + int head = bios->state.crtchead; + + if (!valid_port(bios, port)) + return; + + LOG_OLD_VALUE(bios_port_rd(bios, port)); + BIOSLOG(bios, " IO write: Port: 0x%04X, Head: 0x%02X, Data: 0x%02X\n", + port, head, data); + + if (!bios->execute) + return; + + still_alive(); + NVWritePRMVIO(bios->dev, head, NV_PRMVIO0_OFFSET + port, data); +} + +static bool +io_flag_condition_met(struct nvbios *bios, uint16_t offset, uint8_t cond) +{ + /* + * The IO flag condition entry has 2 bytes for the CRTC port; 1 byte + * for the CRTC index; 1 byte for the mask to apply to the value + * retrieved from the CRTC; 1 byte for the shift right to apply to the + * masked CRTC value; 2 bytes for the offset to the flag array, to + * which the shifted value is added; 1 byte for the mask applied to the + * value read from the flag array; and 1 byte for the value to compare + * against the masked byte from the flag table. 
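+ *
+ * Entry layout (IO_FLAG_CONDITION_SIZE == 9 bytes): +0 (16 bit) CRTC
+ * port, +2 CRTC index, +3 mask, +4 shift, +5 (16 bit) flag array
+ * offset, +7 flag array mask, +8 compare value.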
+ */ + + uint16_t condptr = bios->io_flag_condition_tbl_ptr + cond * IO_FLAG_CONDITION_SIZE; + uint16_t crtcport = ROM16(bios->data[condptr]); + uint8_t crtcindex = bios->data[condptr + 2]; + uint8_t mask = bios->data[condptr + 3]; + uint8_t shift = bios->data[condptr + 4]; + uint16_t flagarray = ROM16(bios->data[condptr + 5]); + uint8_t flagarraymask = bios->data[condptr + 7]; + uint8_t cmpval = bios->data[condptr + 8]; + uint8_t data; + + BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, " + "Shift: 0x%02X, FlagArray: 0x%04X, FAMask: 0x%02X, " + "Cmpval: 0x%02X\n", + offset, crtcport, crtcindex, mask, shift, flagarray, flagarraymask, cmpval); + + data = bios_idxprt_rd(bios, crtcport, crtcindex); + + data = bios->data[flagarray + ((data & mask) >> shift)]; + data &= flagarraymask; + + BIOSLOG(bios, "0x%04X: Checking if 0x%02X equals 0x%02X\n", + offset, data, cmpval); + + return (data == cmpval); +} + +static bool +bios_condition_met(struct nvbios *bios, uint16_t offset, uint8_t cond) +{ + /* + * The condition table entry has 4 bytes for the address of the + * register to check, 4 bytes for a mask to apply to the register and + * 4 for a test comparison value + */ + + uint16_t condptr = bios->condition_tbl_ptr + cond * CONDITION_SIZE; + uint32_t reg = ROM32(bios->data[condptr]); + uint32_t mask = ROM32(bios->data[condptr + 4]); + uint32_t cmpval = ROM32(bios->data[condptr + 8]); + uint32_t data; + + BIOSLOG(bios, "0x%04X: Cond: 0x%02X, Reg: 0x%08X, Mask: 0x%08X\n", + offset, cond, reg, mask); + + data = bios_rd32(bios, reg) & mask; + + BIOSLOG(bios, "0x%04X: Checking if 0x%08X equals 0x%08X\n", + offset, data, cmpval); + + return (data == cmpval); +} + +static bool +io_condition_met(struct nvbios *bios, uint16_t offset, uint8_t cond) +{ + /* + * The IO condition entry has 2 bytes for the IO port address; 1 byte + * for the index to write to io_port; 1 byte for the mask to apply to + * the byte read from io_port+1; and 1 byte for the value to compare + * against the masked byte. 
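+ *
+ * Entry layout (IO_CONDITION_SIZE == 5 bytes): +0 (16 bit) IO port,
+ * +2 index, +3 mask, +4 compare value.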
+ */ + + uint16_t condptr = bios->io_condition_tbl_ptr + cond * IO_CONDITION_SIZE; + uint16_t io_port = ROM16(bios->data[condptr]); + uint8_t port_index = bios->data[condptr + 2]; + uint8_t mask = bios->data[condptr + 3]; + uint8_t cmpval = bios->data[condptr + 4]; + + uint8_t data = bios_idxprt_rd(bios, io_port, port_index) & mask; + + BIOSLOG(bios, "0x%04X: Checking if 0x%02X equals 0x%02X\n", + offset, data, cmpval); + + return (data == cmpval); +} + +static int +nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t reg0 = nv_rd32(dev, reg + 0); + uint32_t reg1 = nv_rd32(dev, reg + 4); + struct nouveau_pll_vals pll; + struct pll_lims pll_limits; + int ret; + + ret = get_pll_limits(dev, reg, &pll_limits); + if (ret) + return ret; + + clk = nouveau_calc_pll_mnp(dev, &pll_limits, clk, &pll); + if (!clk) + return -ERANGE; + + reg0 = (reg0 & 0xfff8ffff) | (pll.log2P << 16); + reg1 = (reg1 & 0xffff0000) | (pll.N1 << 8) | pll.M1; + + if (dev_priv->VBIOS.execute) { + still_alive(); + nv_wr32(dev, reg + 4, reg1); + nv_wr32(dev, reg + 0, reg0); + } + + return 0; +} + +static int +setPLL(struct nvbios *bios, uint32_t reg, uint32_t clk) +{ + struct drm_device *dev = bios->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + /* clk in kHz */ + struct pll_lims pll_lim; + struct nouveau_pll_vals pllvals; + int ret; + + if (dev_priv->card_type >= NV_50) + return nv50_pll_set(dev, reg, clk); + + /* high regs (such as in the mac g5 table) are not -= 4 */ + ret = get_pll_limits(dev, reg > 0x405c ? reg : reg - 4, &pll_lim); + if (ret) + return ret; + + clk = nouveau_calc_pll_mnp(dev, &pll_lim, clk, &pllvals); + if (!clk) + return -ERANGE; + + if (bios->execute) { + still_alive(); + nouveau_hw_setpll(dev, reg, &pllvals); + } + + return 0; +} + +static int dcb_entry_idx_from_crtchead(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvbios *bios = &dev_priv->VBIOS; + + /* + * For the results of this function to be correct, CR44 must have been + * set (using bios_idxprt_wr to set crtchead), CR58 set for CR57 = 0, + * and the DCB table parsed, before the script calling the function is + * run. run_digital_op_script is example of how to do such setup + */ + + uint8_t dcb_entry = NVReadVgaCrtc5758(dev, bios->state.crtchead, 0); + + if (dcb_entry > bios->bdcb.dcb.entries) { + NV_ERROR(dev, "CR58 doesn't have a valid DCB entry currently " + "(%02X)\n", dcb_entry); + dcb_entry = 0x7f; /* unused / invalid marker */ + } + + return dcb_entry; +} + +static struct nouveau_i2c_chan * +init_i2c_device_find(struct drm_device *dev, int i2c_index) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct bios_parsed_dcb *bdcb = &dev_priv->VBIOS.bdcb; + + if (i2c_index == 0xff) { + /* note: dcb_entry_idx_from_crtchead needs pre-script set-up */ + int idx = dcb_entry_idx_from_crtchead(dev), shift = 0; + int default_indices = bdcb->i2c_default_indices; + + if (idx != 0x7f && bdcb->dcb.entry[idx].i2c_upper_default) + shift = 4; + + i2c_index = (default_indices >> shift) & 0xf; + } + if (i2c_index == 0x80) /* g80+ */ + i2c_index = bdcb->i2c_default_indices & 0xf; + + return nouveau_i2c_find(dev, i2c_index); +} + +static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv) +{ + /* + * For mlv < 0x80, it is an index into a table of TMDS base addresses. 
+ * For mlv == 0x80 use the "or" value of the dcb_entry indexed by + * CR58 for CR57 = 0 to index a table of offsets to the basic + * 0x6808b0 address. + * For mlv == 0x81 use the "or" value of the dcb_entry indexed by + * CR58 for CR57 = 0 to index a table of offsets to the basic + * 0x6808b0 address, and then flip the offset by 8. + */ + + struct drm_nouveau_private *dev_priv = dev->dev_private; + const int pramdac_offset[13] = { + 0, 0, 0x8, 0, 0x2000, 0, 0, 0, 0x2008, 0, 0, 0, 0x2000 }; + const uint32_t pramdac_table[4] = { + 0x6808b0, 0x6808b8, 0x6828b0, 0x6828b8 }; + + if (mlv >= 0x80) { + int dcb_entry, dacoffset; + + /* note: dcb_entry_idx_from_crtchead needs pre-script set-up */ + dcb_entry = dcb_entry_idx_from_crtchead(dev); + if (dcb_entry == 0x7f) + return 0; + dacoffset = pramdac_offset[ + dev_priv->VBIOS.bdcb.dcb.entry[dcb_entry].or]; + if (mlv == 0x81) + dacoffset ^= 8; + return 0x6808b0 + dacoffset; + } else { + if (mlv > ARRAY_SIZE(pramdac_table)) { + NV_ERROR(dev, "Magic Lookup Value too big (%02X)\n", + mlv); + return 0; + } + return pramdac_table[mlv]; + } +} + +static bool +init_io_restrict_prog(struct nvbios *bios, uint16_t offset, + struct init_exec *iexec) +{ + /* + * INIT_IO_RESTRICT_PROG opcode: 0x32 ('2') + * + * offset (8 bit): opcode + * offset + 1 (16 bit): CRTC port + * offset + 3 (8 bit): CRTC index + * offset + 4 (8 bit): mask + * offset + 5 (8 bit): shift + * offset + 6 (8 bit): count + * offset + 7 (32 bit): register + * offset + 11 (32 bit): configuration 1 + * ... + * + * Starting at offset + 11 there are "count" 32 bit values. + * To find out which value to use read index "CRTC index" on "CRTC + * port", AND this value with "mask" and then bit shift right "shift" + * bits. Read the appropriate value using this index and write to + * "register" + */ + + uint16_t crtcport = ROM16(bios->data[offset + 1]); + uint8_t crtcindex = bios->data[offset + 3]; + uint8_t mask = bios->data[offset + 4]; + uint8_t shift = bios->data[offset + 5]; + uint8_t count = bios->data[offset + 6]; + uint32_t reg = ROM32(bios->data[offset + 7]); + uint8_t config; + uint32_t configval; + + if (!iexec->execute) + return true; + + BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, " + "Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n", + offset, crtcport, crtcindex, mask, shift, count, reg); + + config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift; + if (config > count) { + NV_ERROR(bios->dev, + "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n", + offset, config, count); + return false; + } + + configval = ROM32(bios->data[offset + 11 + config * 4]); + + BIOSLOG(bios, "0x%04X: Writing config %02X\n", offset, config); + + bios_wr32(bios, reg, configval); + + return true; +} + +static bool +init_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_REPEAT opcode: 0x33 ('3') + * + * offset (8 bit): opcode + * offset + 1 (8 bit): count + * + * Execute script following this opcode up to INIT_REPEAT_END + * "count" times + */ + + uint8_t count = bios->data[offset + 1]; + uint8_t i; + + /* no iexec->execute check by design */ + + BIOSLOG(bios, "0x%04X: Repeating following segment %d times\n", + offset, count); + + iexec->repeat = true; + + /* + * count - 1, as the script block will execute once when we leave this + * opcode -- this is compatible with bios behaviour as: + * a) the block is always executed at least once, even if count == 0 + * b) the bios interpreter skips to the op following INIT_END_REPEAT, + * while we don't + 
*/ + for (i = 0; i < count - 1; i++) + parse_init_table(bios, offset + 2, iexec); + + iexec->repeat = false; + + return true; +} + +static bool +init_io_restrict_pll(struct nvbios *bios, uint16_t offset, + struct init_exec *iexec) +{ + /* + * INIT_IO_RESTRICT_PLL opcode: 0x34 ('4') + * + * offset (8 bit): opcode + * offset + 1 (16 bit): CRTC port + * offset + 3 (8 bit): CRTC index + * offset + 4 (8 bit): mask + * offset + 5 (8 bit): shift + * offset + 6 (8 bit): IO flag condition index + * offset + 7 (8 bit): count + * offset + 8 (32 bit): register + * offset + 12 (16 bit): frequency 1 + * ... + * + * Starting at offset + 12 there are "count" 16 bit frequencies (10kHz). + * Set PLL register "register" to coefficients for frequency n, + * selected by reading index "CRTC index" of "CRTC port" ANDed with + * "mask" and shifted right by "shift". + * + * If "IO flag condition index" > 0, and condition met, double + * frequency before setting it. + */ + + uint16_t crtcport = ROM16(bios->data[offset + 1]); + uint8_t crtcindex = bios->data[offset + 3]; + uint8_t mask = bios->data[offset + 4]; + uint8_t shift = bios->data[offset + 5]; + int8_t io_flag_condition_idx = bios->data[offset + 6]; + uint8_t count = bios->data[offset + 7]; + uint32_t reg = ROM32(bios->data[offset + 8]); + uint8_t config; + uint16_t freq; + + if (!iexec->execute) + return true; + + BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, " + "Shift: 0x%02X, IO Flag Condition: 0x%02X, " + "Count: 0x%02X, Reg: 0x%08X\n", + offset, crtcport, crtcindex, mask, shift, + io_flag_condition_idx, count, reg); + + config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift; + if (config > count) { + NV_ERROR(bios->dev, + "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n", + offset, config, count); + return false; + } + + freq = ROM16(bios->data[offset + 12 + config * 2]); + + if (io_flag_condition_idx > 0) { + if (io_flag_condition_met(bios, offset, io_flag_condition_idx)) { + BIOSLOG(bios, "0x%04X: Condition fulfilled -- " + "frequency doubled\n", offset); + freq *= 2; + } else + BIOSLOG(bios, "0x%04X: Condition not fulfilled -- " + "frequency unchanged\n", offset); + } + + BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Config: 0x%02X, Freq: %d0kHz\n", + offset, reg, config, freq); + + setPLL(bios, reg, freq * 10); + + return true; +} + +static bool +init_end_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_END_REPEAT opcode: 0x36 ('6') + * + * offset (8 bit): opcode + * + * Marks the end of the block for INIT_REPEAT to repeat + */ + + /* no iexec->execute check by design */ + + /* + * iexec->repeat flag necessary to go past INIT_END_REPEAT opcode when + * we're not in repeat mode + */ + if (iexec->repeat) + return false; + + return true; +} + +static bool +init_copy(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_COPY opcode: 0x37 ('7') + * + * offset (8 bit): opcode + * offset + 1 (32 bit): register + * offset + 5 (8 bit): shift + * offset + 6 (8 bit): srcmask + * offset + 7 (16 bit): CRTC port + * offset + 9 (8 bit): CRTC index + * offset + 10 (8 bit): mask + * + * Read index "CRTC index" on "CRTC port", AND with "mask", OR with + * (REGVAL("register") >> "shift" & "srcmask") and write-back to CRTC + * port + */ + + uint32_t reg = ROM32(bios->data[offset + 1]); + uint8_t shift = bios->data[offset + 5]; + uint8_t srcmask = bios->data[offset + 6]; + uint16_t crtcport = ROM16(bios->data[offset + 7]); + uint8_t crtcindex = bios->data[offset + 9]; + 
uint8_t mask = bios->data[offset + 10]; + uint32_t data; + uint8_t crtcdata; + + if (!iexec->execute) + return true; + + BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%02X, " + "Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X\n", + offset, reg, shift, srcmask, crtcport, crtcindex, mask); + + data = bios_rd32(bios, reg); + + if (shift < 0x80) + data >>= shift; + else + data <<= (0x100 - shift); + + data &= srcmask; + + crtcdata = bios_idxprt_rd(bios, crtcport, crtcindex) & mask; + crtcdata |= (uint8_t)data; + bios_idxprt_wr(bios, crtcport, crtcindex, crtcdata); + + return true; +} + +static bool +init_not(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_NOT opcode: 0x38 ('8') + * + * offset (8 bit): opcode + * + * Invert the current execute / no-execute condition (i.e. "else") + */ + if (iexec->execute) + BIOSLOG(bios, "0x%04X: ------ Skipping following commands ------\n", offset); + else + BIOSLOG(bios, "0x%04X: ------ Executing following commands ------\n", offset); + + iexec->execute = !iexec->execute; + return true; +} + +static bool +init_io_flag_condition(struct nvbios *bios, uint16_t offset, + struct init_exec *iexec) +{ + /* + * INIT_IO_FLAG_CONDITION opcode: 0x39 ('9') + * + * offset (8 bit): opcode + * offset + 1 (8 bit): condition number + * + * Check condition "condition number" in the IO flag condition table. + * If condition not met skip subsequent opcodes until condition is + * inverted (INIT_NOT), or we hit INIT_RESUME + */ + + uint8_t cond = bios->data[offset + 1]; + + if (!iexec->execute) + return true; + + if (io_flag_condition_met(bios, offset, cond)) + BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset); + else { + BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset); + iexec->execute = false; + } + + return true; +} + +static bool +init_idx_addr_latched(struct nvbios *bios, uint16_t offset, + struct init_exec *iexec) +{ + /* + * INIT_INDEX_ADDRESS_LATCHED opcode: 0x49 ('I') + * + * offset (8 bit): opcode + * offset + 1 (32 bit): control register + * offset + 5 (32 bit): data register + * offset + 9 (32 bit): mask + * offset + 13 (32 bit): data + * offset + 17 (8 bit): count + * offset + 18 (8 bit): address 1 + * offset + 19 (8 bit): data 1 + * ... 
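+ * (in general: address n at offset + 16 + 2*n, data n at offset + 17 + 2*n)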
+ * + * For each of "count" address and data pairs, write "data n" to + * "data register", read the current value of "control register", + * and write it back once ANDed with "mask", ORed with "data", + * and ORed with "address n" + */ + + uint32_t controlreg = ROM32(bios->data[offset + 1]); + uint32_t datareg = ROM32(bios->data[offset + 5]); + uint32_t mask = ROM32(bios->data[offset + 9]); + uint32_t data = ROM32(bios->data[offset + 13]); + uint8_t count = bios->data[offset + 17]; + uint32_t value; + int i; + + if (!iexec->execute) + return true; + + BIOSLOG(bios, "0x%04X: ControlReg: 0x%08X, DataReg: 0x%08X, " + "Mask: 0x%08X, Data: 0x%08X, Count: 0x%02X\n", + offset, controlreg, datareg, mask, data, count); + + for (i = 0; i < count; i++) { + uint8_t instaddress = bios->data[offset + 18 + i * 2]; + uint8_t instdata = bios->data[offset + 19 + i * 2]; + + BIOSLOG(bios, "0x%04X: Address: 0x%02X, Data: 0x%02X\n", + offset, instaddress, instdata); + + bios_wr32(bios, datareg, instdata); + value = bios_rd32(bios, controlreg) & mask; + value |= data; + value |= instaddress; + bios_wr32(bios, controlreg, value); + } + + return true; +} + +static bool +init_io_restrict_pll2(struct nvbios *bios, uint16_t offset, + struct init_exec *iexec) +{ + /* + * INIT_IO_RESTRICT_PLL2 opcode: 0x4A ('J') + * + * offset (8 bit): opcode + * offset + 1 (16 bit): CRTC port + * offset + 3 (8 bit): CRTC index + * offset + 4 (8 bit): mask + * offset + 5 (8 bit): shift + * offset + 6 (8 bit): count + * offset + 7 (32 bit): register + * offset + 11 (32 bit): frequency 1 + * ... + * + * Starting at offset + 11 there are "count" 32 bit frequencies (kHz). + * Set PLL register "register" to coefficients for frequency n, + * selected by reading index "CRTC index" of "CRTC port" ANDed with + * "mask" and shifted right by "shift". 
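+ *
+ * Unlike INIT_IO_RESTRICT_PLL (0x34), the frequencies here are full
+ * 32 bit values in kHz and there is no IO flag condition / frequency
+ * doubling step.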
+ */ + + uint16_t crtcport = ROM16(bios->data[offset + 1]); + uint8_t crtcindex = bios->data[offset + 3]; + uint8_t mask = bios->data[offset + 4]; + uint8_t shift = bios->data[offset + 5]; + uint8_t count = bios->data[offset + 6]; + uint32_t reg = ROM32(bios->data[offset + 7]); + uint8_t config; + uint32_t freq; + + if (!iexec->execute) + return true; + + BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, " + "Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n", + offset, crtcport, crtcindex, mask, shift, count, reg); + + if (!reg) + return true; + + config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift; + if (config > count) { + NV_ERROR(bios->dev, + "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n", + offset, config, count); + return false; + } + + freq = ROM32(bios->data[offset + 11 + config * 4]); + + BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Config: 0x%02X, Freq: %dkHz\n", + offset, reg, config, freq); + + setPLL(bios, reg, freq); + + return true; +} + +static bool +init_pll2(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_PLL2 opcode: 0x4B ('K') + * + * offset (8 bit): opcode + * offset + 1 (32 bit): register + * offset + 5 (32 bit): freq + * + * Set PLL register "register" to coefficients for frequency "freq" + */ + + uint32_t reg = ROM32(bios->data[offset + 1]); + uint32_t freq = ROM32(bios->data[offset + 5]); + + if (!iexec->execute) + return true; + + BIOSLOG(bios, "0x%04X: Reg: 0x%04X, Freq: %dkHz\n", + offset, reg, freq); + + setPLL(bios, reg, freq); + return true; +} + +static bool +init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_I2C_BYTE opcode: 0x4C ('L') + * + * offset (8 bit): opcode + * offset + 1 (8 bit): DCB I2C table entry index + * offset + 2 (8 bit): I2C slave address + * offset + 3 (8 bit): count + * offset + 4 (8 bit): I2C register 1 + * offset + 5 (8 bit): mask 1 + * offset + 6 (8 bit): data 1 + * ... 
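+ * (in general: I2C register n at offset + 1 + 3*n, mask n at
+ * offset + 2 + 3*n, data n at offset + 3 + 3*n)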
+ * + * For each of "count" registers given by "I2C register n" on the device + * addressed by "I2C slave address" on the I2C bus given by + * "DCB I2C table entry index", read the register, AND the result with + * "mask n" and OR it with "data n" before writing it back to the device + */ + + uint8_t i2c_index = bios->data[offset + 1]; + uint8_t i2c_address = bios->data[offset + 2]; + uint8_t count = bios->data[offset + 3]; + struct nouveau_i2c_chan *chan; + struct i2c_msg msg; + int i; + + if (!iexec->execute) + return true; + + BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, " + "Count: 0x%02X\n", + offset, i2c_index, i2c_address, count); + + chan = init_i2c_device_find(bios->dev, i2c_index); + if (!chan) + return false; + + for (i = 0; i < count; i++) { + uint8_t i2c_reg = bios->data[offset + 4 + i * 3]; + uint8_t mask = bios->data[offset + 5 + i * 3]; + uint8_t data = bios->data[offset + 6 + i * 3]; + uint8_t value; + + msg.addr = i2c_address; + msg.flags = I2C_M_RD; + msg.len = 1; + msg.buf = &value; + if (i2c_transfer(&chan->adapter, &msg, 1) != 1) + return false; + + BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, " + "Mask: 0x%02X, Data: 0x%02X\n", + offset, i2c_reg, value, mask, data); + + value = (value & mask) | data; + + if (bios->execute) { + msg.addr = i2c_address; + msg.flags = 0; + msg.len = 1; + msg.buf = &value; + if (i2c_transfer(&chan->adapter, &msg, 1) != 1) + return false; + } + } + + return true; +} + +static bool +init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_ZM_I2C_BYTE opcode: 0x4D ('M') + * + * offset (8 bit): opcode + * offset + 1 (8 bit): DCB I2C table entry index + * offset + 2 (8 bit): I2C slave address + * offset + 3 (8 bit): count + * offset + 4 (8 bit): I2C register 1 + * offset + 5 (8 bit): data 1 + * ... + * + * For each of "count" registers given by "I2C register n" on the device + * addressed by "I2C slave address" on the I2C bus given by + * "DCB I2C table entry index", set the register to "data n" + */ + + uint8_t i2c_index = bios->data[offset + 1]; + uint8_t i2c_address = bios->data[offset + 2]; + uint8_t count = bios->data[offset + 3]; + struct nouveau_i2c_chan *chan; + struct i2c_msg msg; + int i; + + if (!iexec->execute) + return true; + + BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, " + "Count: 0x%02X\n", + offset, i2c_index, i2c_address, count); + + chan = init_i2c_device_find(bios->dev, i2c_index); + if (!chan) + return false; + + for (i = 0; i < count; i++) { + uint8_t i2c_reg = bios->data[offset + 4 + i * 2]; + uint8_t data = bios->data[offset + 5 + i * 2]; + + BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Data: 0x%02X\n", + offset, i2c_reg, data); + + if (bios->execute) { + msg.addr = i2c_address; + msg.flags = 0; + msg.len = 1; + msg.buf = &data; + if (i2c_transfer(&chan->adapter, &msg, 1) != 1) + return false; + } + } + + return true; +} + +static bool +init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_ZM_I2C opcode: 0x4E ('N') + * + * offset (8 bit): opcode + * offset + 1 (8 bit): DCB I2C table entry index + * offset + 2 (8 bit): I2C slave address + * offset + 3 (8 bit): count + * offset + 4 (8 bit): data 1 + * ... 
+ * + * Send "count" bytes ("data n") to the device addressed by "I2C slave + * address" on the I2C bus given by "DCB I2C table entry index" + */ + + uint8_t i2c_index = bios->data[offset + 1]; + uint8_t i2c_address = bios->data[offset + 2]; + uint8_t count = bios->data[offset + 3]; + struct nouveau_i2c_chan *chan; + struct i2c_msg msg; + uint8_t data[256]; + int i; + + if (!iexec->execute) + return true; + + BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, " + "Count: 0x%02X\n", + offset, i2c_index, i2c_address, count); + + chan = init_i2c_device_find(bios->dev, i2c_index); + if (!chan) + return false; + + for (i = 0; i < count; i++) { + data[i] = bios->data[offset + 4 + i]; + + BIOSLOG(bios, "0x%04X: Data: 0x%02X\n", offset, data[i]); + } + + if (bios->execute) { + msg.addr = i2c_address; + msg.flags = 0; + msg.len = count; + msg.buf = data; + if (i2c_transfer(&chan->adapter, &msg, 1) != 1) + return false; + } + + return true; +} + +static bool +init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_TMDS opcode: 0x4F ('O') (non-canon name) + * + * offset (8 bit): opcode + * offset + 1 (8 bit): magic lookup value + * offset + 2 (8 bit): TMDS address + * offset + 3 (8 bit): mask + * offset + 4 (8 bit): data + * + * Read the data reg for TMDS address "TMDS address", AND it with mask + * and OR it with data, then write it back + * "magic lookup value" determines which TMDS base address register is + * used -- see get_tmds_index_reg() + */ + + uint8_t mlv = bios->data[offset + 1]; + uint32_t tmdsaddr = bios->data[offset + 2]; + uint8_t mask = bios->data[offset + 3]; + uint8_t data = bios->data[offset + 4]; + uint32_t reg, value; + + if (!iexec->execute) + return true; + + BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, TMDSAddr: 0x%02X, " + "Mask: 0x%02X, Data: 0x%02X\n", + offset, mlv, tmdsaddr, mask, data); + + reg = get_tmds_index_reg(bios->dev, mlv); + if (!reg) + return false; + + bios_wr32(bios, reg, + tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE); + value = (bios_rd32(bios, reg + 4) & mask) | data; + bios_wr32(bios, reg + 4, value); + bios_wr32(bios, reg, tmdsaddr); + + return true; +} + +static bool +init_zm_tmds_group(struct nvbios *bios, uint16_t offset, + struct init_exec *iexec) +{ + /* + * INIT_ZM_TMDS_GROUP opcode: 0x50 ('P') (non-canon name) + * + * offset (8 bit): opcode + * offset + 1 (8 bit): magic lookup value + * offset + 2 (8 bit): count + * offset + 3 (8 bit): addr 1 + * offset + 4 (8 bit): data 1 + * ... + * + * For each of "count" TMDS address and data pairs write "data n" to + * "addr n". 
"magic lookup value" determines which TMDS base address + * register is used -- see get_tmds_index_reg() + */ + + uint8_t mlv = bios->data[offset + 1]; + uint8_t count = bios->data[offset + 2]; + uint32_t reg; + int i; + + if (!iexec->execute) + return true; + + BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, Count: 0x%02X\n", + offset, mlv, count); + + reg = get_tmds_index_reg(bios->dev, mlv); + if (!reg) + return false; + + for (i = 0; i < count; i++) { + uint8_t tmdsaddr = bios->data[offset + 3 + i * 2]; + uint8_t tmdsdata = bios->data[offset + 4 + i * 2]; + + bios_wr32(bios, reg + 4, tmdsdata); + bios_wr32(bios, reg, tmdsaddr); + } + + return true; +} + +static bool +init_cr_idx_adr_latch(struct nvbios *bios, uint16_t offset, + struct init_exec *iexec) +{ + /* + * INIT_CR_INDEX_ADDRESS_LATCHED opcode: 0x51 ('Q') + * + * offset (8 bit): opcode + * offset + 1 (8 bit): CRTC index1 + * offset + 2 (8 bit): CRTC index2 + * offset + 3 (8 bit): baseaddr + * offset + 4 (8 bit): count + * offset + 5 (8 bit): data 1 + * ... + * + * For each of "count" address and data pairs, write "baseaddr + n" to + * "CRTC index1" and "data n" to "CRTC index2" + * Once complete, restore initial value read from "CRTC index1" + */ + uint8_t crtcindex1 = bios->data[offset + 1]; + uint8_t crtcindex2 = bios->data[offset + 2]; + uint8_t baseaddr = bios->data[offset + 3]; + uint8_t count = bios->data[offset + 4]; + uint8_t oldaddr, data; + int i; + + if (!iexec->execute) + return true; + + BIOSLOG(bios, "0x%04X: Index1: 0x%02X, Index2: 0x%02X, " + "BaseAddr: 0x%02X, Count: 0x%02X\n", + offset, crtcindex1, crtcindex2, baseaddr, count); + + oldaddr = bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, crtcindex1); + + for (i = 0; i < count; i++) { + bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex1, + baseaddr + i); + data = bios->data[offset + 5 + i]; + bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex2, data); + } + + bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex1, oldaddr); + + return true; +} + +static bool +init_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_CR opcode: 0x52 ('R') + * + * offset (8 bit): opcode + * offset + 1 (8 bit): CRTC index + * offset + 2 (8 bit): mask + * offset + 3 (8 bit): data + * + * Assign the value of at "CRTC index" ANDed with mask and ORed with + * data back to "CRTC index" + */ + + uint8_t crtcindex = bios->data[offset + 1]; + uint8_t mask = bios->data[offset + 2]; + uint8_t data = bios->data[offset + 3]; + uint8_t value; + + if (!iexec->execute) + return true; + + BIOSLOG(bios, "0x%04X: Index: 0x%02X, Mask: 0x%02X, Data: 0x%02X\n", + offset, crtcindex, mask, data); + + value = bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, crtcindex) & mask; + value |= data; + bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, value); + + return true; +} + +static bool +init_zm_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_ZM_CR opcode: 0x53 ('S') + * + * offset (8 bit): opcode + * offset + 1 (8 bit): CRTC index + * offset + 2 (8 bit): value + * + * Assign "value" to CRTC register with index "CRTC index". 
+ */ + + uint8_t crtcindex = ROM32(bios->data[offset + 1]); + uint8_t data = bios->data[offset + 2]; + + if (!iexec->execute) + return true; + + bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, data); + + return true; +} + +static bool +init_zm_cr_group(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_ZM_CR_GROUP opcode: 0x54 ('T') + * + * offset (8 bit): opcode + * offset + 1 (8 bit): count + * offset + 2 (8 bit): CRTC index 1 + * offset + 3 (8 bit): value 1 + * ... + * + * For "count", assign "value n" to CRTC register with index + * "CRTC index n". + */ + + uint8_t count = bios->data[offset + 1]; + int i; + + if (!iexec->execute) + return true; + + for (i = 0; i < count; i++) + init_zm_cr(bios, offset + 2 + 2 * i - 1, iexec); + + return true; +} + +static bool +init_condition_time(struct nvbios *bios, uint16_t offset, + struct init_exec *iexec) +{ + /* + * INIT_CONDITION_TIME opcode: 0x56 ('V') + * + * offset (8 bit): opcode + * offset + 1 (8 bit): condition number + * offset + 2 (8 bit): retries / 50 + * + * Check condition "condition number" in the condition table. + * Bios code then sleeps for 2ms if the condition is not met, and + * repeats up to "retries" times, but on one C51 this has proved + * insufficient. In mmiotraces the driver sleeps for 20ms, so we do + * this, and bail after "retries" times, or 2s, whichever is less. + * If still not met after retries, clear execution flag for this table. + */ + + uint8_t cond = bios->data[offset + 1]; + uint16_t retries = bios->data[offset + 2] * 50; + unsigned cnt; + + if (!iexec->execute) + return true; + + if (retries > 100) + retries = 100; + + BIOSLOG(bios, "0x%04X: Condition: 0x%02X, Retries: 0x%02X\n", + offset, cond, retries); + + if (!bios->execute) /* avoid 2s delays when "faking" execution */ + retries = 1; + + for (cnt = 0; cnt < retries; cnt++) { + if (bios_condition_met(bios, offset, cond)) { + BIOSLOG(bios, "0x%04X: Condition met, continuing\n", + offset); + break; + } else { + BIOSLOG(bios, "0x%04X: " + "Condition not met, sleeping for 20ms\n", + offset); + msleep(20); + } + } + + if (!bios_condition_met(bios, offset, cond)) { + NV_WARN(bios->dev, + "0x%04X: Condition still not met after %dms, " + "skipping following opcodes\n", offset, 20 * retries); + iexec->execute = false; + } + + return true; +} + +static bool +init_zm_reg_sequence(struct nvbios *bios, uint16_t offset, + struct init_exec *iexec) +{ + /* + * INIT_ZM_REG_SEQUENCE opcode: 0x58 ('X') + * + * offset (8 bit): opcode + * offset + 1 (32 bit): base register + * offset + 5 (8 bit): count + * offset + 6 (32 bit): value 1 + * ... + * + * Starting at offset + 6 there are "count" 32 bit values. + * For "count" iterations set "base register" + 4 * current_iteration + * to "value current_iteration" + */ + + uint32_t basereg = ROM32(bios->data[offset + 1]); + uint32_t count = bios->data[offset + 5]; + int i; + + if (!iexec->execute) + return true; + + BIOSLOG(bios, "0x%04X: BaseReg: 0x%08X, Count: 0x%02X\n", + offset, basereg, count); + + for (i = 0; i < count; i++) { + uint32_t reg = basereg + i * 4; + uint32_t data = ROM32(bios->data[offset + 6 + i * 4]); + + bios_wr32(bios, reg, data); + } + + return true; +} + +static bool +init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_SUB_DIRECT opcode: 0x5B ('[') + * + * offset (8 bit): opcode + * offset + 1 (16 bit): subroutine offset (in bios) + * + * Calls a subroutine that will execute commands until INIT_DONE + * is found. 
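+ *
+ * The subroutine is parsed with the caller's init_exec state, so any
+ * execute/repeat changes it makes persist once it returns.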
+ */ + + uint16_t sub_offset = ROM16(bios->data[offset + 1]); + + if (!iexec->execute) + return true; + + BIOSLOG(bios, "0x%04X: Executing subroutine at 0x%04X\n", + offset, sub_offset); + + parse_init_table(bios, sub_offset, iexec); + + BIOSLOG(bios, "0x%04X: End of 0x%04X subroutine\n", offset, sub_offset); + + return true; +} + +static bool +init_copy_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_COPY_NV_REG opcode: 0x5F ('_') + * + * offset (8 bit): opcode + * offset + 1 (32 bit): src reg + * offset + 5 (8 bit): shift + * offset + 6 (32 bit): src mask + * offset + 10 (32 bit): xor + * offset + 14 (32 bit): dst reg + * offset + 18 (32 bit): dst mask + * + * Shift REGVAL("src reg") right by (signed) "shift", AND result with + * "src mask", then XOR with "xor". Write this OR'd with + * (REGVAL("dst reg") AND'd with "dst mask") to "dst reg" + */ + + uint32_t srcreg = *((uint32_t *)(&bios->data[offset + 1])); + uint8_t shift = bios->data[offset + 5]; + uint32_t srcmask = *((uint32_t *)(&bios->data[offset + 6])); + uint32_t xor = *((uint32_t *)(&bios->data[offset + 10])); + uint32_t dstreg = *((uint32_t *)(&bios->data[offset + 14])); + uint32_t dstmask = *((uint32_t *)(&bios->data[offset + 18])); + uint32_t srcvalue, dstvalue; + + if (!iexec->execute) + return true; + + BIOSLOG(bios, "0x%04X: SrcReg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%08X, " + "Xor: 0x%08X, DstReg: 0x%08X, DstMask: 0x%08X\n", + offset, srcreg, shift, srcmask, xor, dstreg, dstmask); + + srcvalue = bios_rd32(bios, srcreg); + + if (shift < 0x80) + srcvalue >>= shift; + else + srcvalue <<= (0x100 - shift); + + srcvalue = (srcvalue & srcmask) ^ xor; + + dstvalue = bios_rd32(bios, dstreg) & dstmask; + + bios_wr32(bios, dstreg, dstvalue | srcvalue); + + return true; +} + +static bool +init_zm_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_ZM_INDEX_IO opcode: 0x62 ('b') + * + * offset (8 bit): opcode + * offset + 1 (16 bit): CRTC port + * offset + 3 (8 bit): CRTC index + * offset + 4 (8 bit): data + * + * Write "data" to index "CRTC index" of "CRTC port" + */ + uint16_t crtcport = ROM16(bios->data[offset + 1]); + uint8_t crtcindex = bios->data[offset + 3]; + uint8_t data = bios->data[offset + 4]; + + if (!iexec->execute) + return true; + + bios_idxprt_wr(bios, crtcport, crtcindex, data); + + return true; +} + +static bool +init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_COMPUTE_MEM opcode: 0x63 ('c') + * + * offset (8 bit): opcode + * + * This opcode is meant to set NV_PFB_CFG0 (0x100200) appropriately so + * that the hardware can correctly calculate how much VRAM it has + * (and subsequently report that value in NV_PFB_CSTATUS (0x10020C)) + * + * The implementation of this opcode in general consists of two parts: + * 1) determination of the memory bus width + * 2) determination of how many of the card's RAM pads have ICs attached + * + * 1) is done by a cunning combination of writes to offsets 0x1c and + * 0x3c in the framebuffer, and seeing whether the written values are + * read back correctly. This then affects bits 4-7 of NV_PFB_CFG0 + * + * 2) is done by a cunning combination of writes to an offset slightly + * less than the maximum memory reported by NV_PFB_CSTATUS, then seeing + * if the test pattern can be read back. 
This then affects bits 12-15 of + * NV_PFB_CFG0 + * + * In this context a "cunning combination" may include multiple reads + * and writes to varying locations, often alternating the test pattern + * and 0, doubtless to make sure buffers are filled, residual charges + * on tracks are removed etc. + * + * Unfortunately, the "cunning combination"s mentioned above, and the + * changes to the bits in NV_PFB_CFG0 differ with nearly every bios + * trace I have. + * + * Therefore, we cheat and assume the value of NV_PFB_CFG0 with which + * we started was correct, and use that instead + */ + + /* no iexec->execute check by design */ + + /* + * This appears to be a NOP on G8x chipsets, both io logs of the VBIOS + * and kmmio traces of the binary driver POSTing the card show nothing + * being done for this opcode. why is it still listed in the table?! + */ + + struct drm_nouveau_private *dev_priv = bios->dev->dev_private; + + if (dev_priv->card_type >= NV_50) + return true; + + /* + * On every card I've seen, this step gets done for us earlier in + * the init scripts + uint8_t crdata = bios_idxprt_rd(dev, NV_VIO_SRX, 0x01); + bios_idxprt_wr(dev, NV_VIO_SRX, 0x01, crdata | 0x20); + */ + + /* + * This also has probably been done in the scripts, but an mmio trace of + * s3 resume shows nvidia doing it anyway (unlike the NV_VIO_SRX write) + */ + bios_wr32(bios, NV_PFB_REFCTRL, NV_PFB_REFCTRL_VALID_1); + + /* write back the saved configuration value */ + bios_wr32(bios, NV_PFB_CFG0, bios->state.saved_nv_pfb_cfg0); + + return true; +} + +static bool +init_reset(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_RESET opcode: 0x65 ('e') + * + * offset (8 bit): opcode + * offset + 1 (32 bit): register + * offset + 5 (32 bit): value1 + * offset + 9 (32 bit): value2 + * + * Assign "value1" to "register", then assign "value2" to "register" + */ + + uint32_t reg = ROM32(bios->data[offset + 1]); + uint32_t value1 = ROM32(bios->data[offset + 5]); + uint32_t value2 = ROM32(bios->data[offset + 9]); + uint32_t pci_nv_19, pci_nv_20; + + /* no iexec->execute check by design */ + + pci_nv_19 = bios_rd32(bios, NV_PBUS_PCI_NV_19); + bios_wr32(bios, NV_PBUS_PCI_NV_19, 0); + bios_wr32(bios, reg, value1); + + udelay(10); + + bios_wr32(bios, reg, value2); + bios_wr32(bios, NV_PBUS_PCI_NV_19, pci_nv_19); + + pci_nv_20 = bios_rd32(bios, NV_PBUS_PCI_NV_20); + pci_nv_20 &= ~NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED; /* 0xfffffffe */ + bios_wr32(bios, NV_PBUS_PCI_NV_20, pci_nv_20); + + return true; +} + +static bool +init_configure_mem(struct nvbios *bios, uint16_t offset, + struct init_exec *iexec) +{ + /* + * INIT_CONFIGURE_MEM opcode: 0x66 ('f') + * + * offset (8 bit): opcode + * + * Equivalent to INIT_DONE on bios version 3 or greater. 
+ * For early bios versions, sets up the memory registers, using values + * taken from the memory init table + */ + + /* no iexec->execute check by design */ + + uint16_t meminitoffs = bios->legacy.mem_init_tbl_ptr + MEM_INIT_SIZE * (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_SCRATCH4__INDEX) >> 4); + uint16_t seqtbloffs = bios->legacy.sdr_seq_tbl_ptr, meminitdata = meminitoffs + 6; + uint32_t reg, data; + + if (bios->major_version > 2) + return false; + + bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd( + bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 0x20); + + if (bios->data[meminitoffs] & 1) + seqtbloffs = bios->legacy.ddr_seq_tbl_ptr; + + for (reg = ROM32(bios->data[seqtbloffs]); + reg != 0xffffffff; + reg = ROM32(bios->data[seqtbloffs += 4])) { + + switch (reg) { + case NV_PFB_PRE: + data = NV_PFB_PRE_CMD_PRECHARGE; + break; + case NV_PFB_PAD: + data = NV_PFB_PAD_CKE_NORMAL; + break; + case NV_PFB_REF: + data = NV_PFB_REF_CMD_REFRESH; + break; + default: + data = ROM32(bios->data[meminitdata]); + meminitdata += 4; + if (data == 0xffffffff) + continue; + } + + bios_wr32(bios, reg, data); + } + + return true; +} + +static bool +init_configure_clk(struct nvbios *bios, uint16_t offset, + struct init_exec *iexec) +{ + /* + * INIT_CONFIGURE_CLK opcode: 0x67 ('g') + * + * offset (8 bit): opcode + * + * Equivalent to INIT_DONE on bios version 3 or greater. + * For early bios versions, sets up the NVClk and MClk PLLs, using + * values taken from the memory init table + */ + + /* no iexec->execute check by design */ + + uint16_t meminitoffs = bios->legacy.mem_init_tbl_ptr + MEM_INIT_SIZE * (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_SCRATCH4__INDEX) >> 4); + int clock; + + if (bios->major_version > 2) + return false; + + clock = ROM16(bios->data[meminitoffs + 4]) * 10; + setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock); + + clock = ROM16(bios->data[meminitoffs + 2]) * 10; + if (bios->data[meminitoffs] & 1) /* DDR */ + clock *= 2; + setPLL(bios, NV_PRAMDAC_MPLL_COEFF, clock); + + return true; +} + +static bool +init_configure_preinit(struct nvbios *bios, uint16_t offset, + struct init_exec *iexec) +{ + /* + * INIT_CONFIGURE_PREINIT opcode: 0x68 ('h') + * + * offset (8 bit): opcode + * + * Equivalent to INIT_DONE on bios version 3 or greater. + * For early bios versions, does early init, loading ram and crystal + * configuration from straps into CR3C + */ + + /* no iexec->execute check by design */ + + uint32_t straps = bios_rd32(bios, NV_PEXTDEV_BOOT_0); + uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & (1 << 6)); + + if (bios->major_version > 2) + return false; + + bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, + NV_CIO_CRE_SCRATCH4__INDEX, cr3c); + + return true; +} + +static bool +init_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_IO opcode: 0x69 ('i') + * + * offset (8 bit): opcode + * offset + 1 (16 bit): CRTC port + * offset + 3 (8 bit): mask + * offset + 4 (8 bit): data + * + * Assign ((IOVAL("crtc port") & "mask") | "data") to "crtc port" + */ + + struct drm_nouveau_private *dev_priv = bios->dev->dev_private; + uint16_t crtcport = ROM16(bios->data[offset + 1]); + uint8_t mask = bios->data[offset + 3]; + uint8_t data = bios->data[offset + 4]; + + if (!iexec->execute) + return true; + + BIOSLOG(bios, "0x%04X: Port: 0x%04X, Mask: 0x%02X, Data: 0x%02X\n", + offset, crtcport, mask, data); + + /* + * I have no idea what this does, but NVIDIA do this magic sequence + * in the places where this INIT_IO happens.. 
+ */ + if (dev_priv->card_type >= NV_50 && crtcport == 0x3c3 && data == 1) { + int i; + + bios_wr32(bios, 0x614100, (bios_rd32( + bios, 0x614100) & 0x0fffffff) | 0x00800000); + + bios_wr32(bios, 0x00e18c, bios_rd32( + bios, 0x00e18c) | 0x00020000); + + bios_wr32(bios, 0x614900, (bios_rd32( + bios, 0x614900) & 0x0fffffff) | 0x00800000); + + bios_wr32(bios, 0x000200, bios_rd32( + bios, 0x000200) & ~0x40000000); + + mdelay(10); + + bios_wr32(bios, 0x00e18c, bios_rd32( + bios, 0x00e18c) & ~0x00020000); + + bios_wr32(bios, 0x000200, bios_rd32( + bios, 0x000200) | 0x40000000); + + bios_wr32(bios, 0x614100, 0x00800018); + bios_wr32(bios, 0x614900, 0x00800018); + + mdelay(10); + + bios_wr32(bios, 0x614100, 0x10000018); + bios_wr32(bios, 0x614900, 0x10000018); + + for (i = 0; i < 3; i++) + bios_wr32(bios, 0x614280 + (i*0x800), bios_rd32( + bios, 0x614280 + (i*0x800)) & 0xf0f0f0f0); + + for (i = 0; i < 2; i++) + bios_wr32(bios, 0x614300 + (i*0x800), bios_rd32( + bios, 0x614300 + (i*0x800)) & 0xfffff0f0); + + for (i = 0; i < 3; i++) + bios_wr32(bios, 0x614380 + (i*0x800), bios_rd32( + bios, 0x614380 + (i*0x800)) & 0xfffff0f0); + + for (i = 0; i < 2; i++) + bios_wr32(bios, 0x614200 + (i*0x800), bios_rd32( + bios, 0x614200 + (i*0x800)) & 0xfffffff0); + + for (i = 0; i < 2; i++) + bios_wr32(bios, 0x614108 + (i*0x800), bios_rd32( + bios, 0x614108 + (i*0x800)) & 0x0fffffff); + return true; + } + + bios_port_wr(bios, crtcport, (bios_port_rd(bios, crtcport) & mask) | + data); + return true; +} + +static bool +init_sub(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_SUB opcode: 0x6B ('k') + * + * offset (8 bit): opcode + * offset + 1 (8 bit): script number + * + * Execute script number "script number", as a subroutine + */ + + uint8_t sub = bios->data[offset + 1]; + + if (!iexec->execute) + return true; + + BIOSLOG(bios, "0x%04X: Calling script %d\n", offset, sub); + + parse_init_table(bios, + ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]), + iexec); + + BIOSLOG(bios, "0x%04X: End of script %d\n", offset, sub); + + return true; +} + +static bool +init_ram_condition(struct nvbios *bios, uint16_t offset, + struct init_exec *iexec) +{ + /* + * INIT_RAM_CONDITION opcode: 0x6D ('m') + * + * offset (8 bit): opcode + * offset + 1 (8 bit): mask + * offset + 2 (8 bit): cmpval + * + * Test if (NV_PFB_BOOT_0 & "mask") equals "cmpval". 
+ * If condition not met skip subsequent opcodes until condition is + * inverted (INIT_NOT), or we hit INIT_RESUME + */ + + uint8_t mask = bios->data[offset + 1]; + uint8_t cmpval = bios->data[offset + 2]; + uint8_t data; + + if (!iexec->execute) + return true; + + data = bios_rd32(bios, NV_PFB_BOOT_0) & mask; + + BIOSLOG(bios, "0x%04X: Checking if 0x%08X equals 0x%08X\n", + offset, data, cmpval); + + if (data == cmpval) + BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset); + else { + BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset); + iexec->execute = false; + } + + return true; +} + +static bool +init_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_NV_REG opcode: 0x6E ('n') + * + * offset (8 bit): opcode + * offset + 1 (32 bit): register + * offset + 5 (32 bit): mask + * offset + 9 (32 bit): data + * + * Assign ((REGVAL("register") & "mask") | "data") to "register" + */ + + uint32_t reg = ROM32(bios->data[offset + 1]); + uint32_t mask = ROM32(bios->data[offset + 5]); + uint32_t data = ROM32(bios->data[offset + 9]); + + if (!iexec->execute) + return true; + + BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Mask: 0x%08X, Data: 0x%08X\n", + offset, reg, mask, data); + + bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | data); + + return true; +} + +static bool +init_macro(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_MACRO opcode: 0x6F ('o') + * + * offset (8 bit): opcode + * offset + 1 (8 bit): macro number + * + * Look up macro index "macro number" in the macro index table. + * The macro index table entry has 1 byte for the index in the macro + * table, and 1 byte for the number of times to repeat the macro. + * The macro table entry has 4 bytes for the register address and + * 4 bytes for the value to write to that register + */ + + uint8_t macro_index_tbl_idx = bios->data[offset + 1]; + uint16_t tmp = bios->macro_index_tbl_ptr + (macro_index_tbl_idx * MACRO_INDEX_SIZE); + uint8_t macro_tbl_idx = bios->data[tmp]; + uint8_t count = bios->data[tmp + 1]; + uint32_t reg, data; + int i; + + if (!iexec->execute) + return true; + + BIOSLOG(bios, "0x%04X: Macro: 0x%02X, MacroTableIndex: 0x%02X, " + "Count: 0x%02X\n", + offset, macro_index_tbl_idx, macro_tbl_idx, count); + + for (i = 0; i < count; i++) { + uint16_t macroentryptr = bios->macro_tbl_ptr + (macro_tbl_idx + i) * MACRO_SIZE; + + reg = ROM32(bios->data[macroentryptr]); + data = ROM32(bios->data[macroentryptr + 4]); + + bios_wr32(bios, reg, data); + } + + return true; +} + +static bool +init_done(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_DONE opcode: 0x71 ('q') + * + * offset (8 bit): opcode + * + * End the current script + */ + + /* mild retval abuse to stop parsing this table */ + return false; +} + +static bool +init_resume(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_RESUME opcode: 0x72 ('r') + * + * offset (8 bit): opcode + * + * End the current execute / no-execute condition + */ + + if (iexec->execute) + return true; + + iexec->execute = true; + BIOSLOG(bios, "0x%04X: ---- Executing following commands ----\n", offset); + + return true; +} + +static bool +init_time(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_TIME opcode: 0x74 ('t') + * + * offset (8 bit): opcode + * offset + 1 (16 bit): time + * + * Sleep for "time" microseconds. 
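+ * For instance, a "time" operand of 500 gives a 500us udelay(); values of 1000 or more are converted to milliseconds and passed to msleep() (see the handler below).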
+ */ + + unsigned time = ROM16(bios->data[offset + 1]); + + if (!iexec->execute) + return true; + + BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X microseconds\n", + offset, time); + + if (time < 1000) + udelay(time); + else + msleep((time + 900) / 1000); + + return true; +} + +static bool +init_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_CONDITION opcode: 0x75 ('u') + * + * offset (8 bit): opcode + * offset + 1 (8 bit): condition number + * + * Check condition "condition number" in the condition table. + * If condition not met skip subsequent opcodes until condition is + * inverted (INIT_NOT), or we hit INIT_RESUME + */ + + uint8_t cond = bios->data[offset + 1]; + + if (!iexec->execute) + return true; + + BIOSLOG(bios, "0x%04X: Condition: 0x%02X\n", offset, cond); + + if (bios_condition_met(bios, offset, cond)) + BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset); + else { + BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset); + iexec->execute = false; + } + + return true; +} + +static bool +init_io_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_IO_CONDITION opcode: 0x76 + * + * offset (8 bit): opcode + * offset + 1 (8 bit): condition number + * + * Check condition "condition number" in the io condition table. + * If condition not met skip subsequent opcodes until condition is + * inverted (INIT_NOT), or we hit INIT_RESUME + */ + + uint8_t cond = bios->data[offset + 1]; + + if (!iexec->execute) + return true; + + BIOSLOG(bios, "0x%04X: IO condition: 0x%02X\n", offset, cond); + + if (io_condition_met(bios, offset, cond)) + BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset); + else { + BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset); + iexec->execute = false; + } + + return true; +} + +static bool +init_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_INDEX_IO opcode: 0x78 ('x') + * + * offset (8 bit): opcode + * offset + 1 (16 bit): CRTC port + * offset + 3 (8 bit): CRTC index + * offset + 4 (8 bit): mask + * offset + 5 (8 bit): data + * + * Read value at index "CRTC index" on "CRTC port", AND with "mask", + * OR with "data", write-back + */ + + uint16_t crtcport = ROM16(bios->data[offset + 1]); + uint8_t crtcindex = bios->data[offset + 3]; + uint8_t mask = bios->data[offset + 4]; + uint8_t data = bios->data[offset + 5]; + uint8_t value; + + if (!iexec->execute) + return true; + + BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, " + "Data: 0x%02X\n", + offset, crtcport, crtcindex, mask, data); + + value = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) | data; + bios_idxprt_wr(bios, crtcport, crtcindex, value); + + return true; +} + +static bool +init_pll(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_PLL opcode: 0x79 ('y') + * + * offset (8 bit): opcode + * offset + 1 (32 bit): register + * offset + 5 (16 bit): freq + * + * Set PLL register "register" to coefficients for frequency (10kHz) + * "freq" + */ + + uint32_t reg = ROM32(bios->data[offset + 1]); + uint16_t freq = ROM16(bios->data[offset + 5]); + + if (!iexec->execute) + return true; + + BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Freq: %d0kHz\n", offset, reg, freq); + + setPLL(bios, reg, freq * 10); + + return true; +} + +static bool +init_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * 
INIT_ZM_REG opcode: 0x7A ('z') + * + * offset (8 bit): opcode + * offset + 1 (32 bit): register + * offset + 5 (32 bit): value + * + * Assign "value" to "register" + */ + + uint32_t reg = ROM32(bios->data[offset + 1]); + uint32_t value = ROM32(bios->data[offset + 5]); + + if (!iexec->execute) + return true; + + if (reg == 0x000200) + value |= 1; + + bios_wr32(bios, reg, value); + + return true; +} + +static bool +init_ram_restrict_pll(struct nvbios *bios, uint16_t offset, + struct init_exec *iexec) +{ + /* + * INIT_RAM_RESTRICT_PLL opcode: 0x87 ('') + * + * offset (8 bit): opcode + * offset + 1 (8 bit): PLL type + * offset + 2 (32 bit): frequency 0 + * + * Uses the RAMCFG strap of PEXTDEV_BOOT as an index into the table at + * ram_restrict_table_ptr. The value read from there is used to select + * a frequency from the table starting at 'frequency 0' to be + * programmed into the PLL corresponding to 'type'. + * + * The PLL limits table on cards using this opcode has a mapping of + * 'type' to the relevant registers. + */ + + struct drm_device *dev = bios->dev; + uint32_t strap = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) & 0x0000003c) >> 2; + uint8_t index = bios->data[bios->ram_restrict_tbl_ptr + strap]; + uint8_t type = bios->data[offset + 1]; + uint32_t freq = ROM32(bios->data[offset + 2 + (index * 4)]); + uint8_t *pll_limits = &bios->data[bios->pll_limit_tbl_ptr], *entry; + int i; + + if (!iexec->execute) + return true; + + if (!bios->pll_limit_tbl_ptr || (pll_limits[0] & 0xf0) != 0x30) { + NV_ERROR(dev, "PLL limits table not version 3.x\n"); + return true; /* deliberate, allow default clocks to remain */ + } + + entry = pll_limits + pll_limits[1]; + for (i = 0; i < pll_limits[3]; i++, entry += pll_limits[2]) { + if (entry[0] == type) { + uint32_t reg = ROM32(entry[3]); + + BIOSLOG(bios, "0x%04X: " + "Type %02x Reg 0x%08x Freq %dKHz\n", + offset, type, reg, freq); + + setPLL(bios, reg, freq); + return true; + } + } + + NV_ERROR(dev, "PLL type 0x%02x not found in PLL limits table", type); + return true; +} + +static bool +init_8c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_8C opcode: 0x8C ('') + * + * NOP so far.... + * + */ + + return true; +} + +static bool +init_8d(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_8D opcode: 0x8D ('') + * + * NOP so far.... 
+ * + */ + + return true; +} + +static bool +init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_GPIO opcode: 0x8E ('') + * + * offset (8 bit): opcode + * + * Loop over all entries in the DCB GPIO table, and initialise + * each GPIO according to various values listed in each entry + */ + + const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; + const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c }; + const uint8_t *gpio_table = &bios->data[bios->bdcb.gpio_table_ptr]; + const uint8_t *gpio_entry; + int i; + + if (bios->bdcb.version != 0x40) { + NV_ERROR(bios->dev, "DCB table not version 4.0\n"); + return false; + } + + if (!bios->bdcb.gpio_table_ptr) { + NV_WARN(bios->dev, "Invalid pointer to INIT_8E table\n"); + return false; + } + + gpio_entry = gpio_table + gpio_table[1]; + for (i = 0; i < gpio_table[2]; i++, gpio_entry += gpio_table[3]) { + uint32_t entry = ROM32(gpio_entry[0]), r, s, v; + int line = (entry & 0x0000001f); + + BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, entry); + + if ((entry & 0x0000ff00) == 0x0000ff00) + continue; + + r = nv50_gpio_reg[line >> 3]; + s = (line & 0x07) << 2; + v = bios_rd32(bios, r) & ~(0x00000003 << s); + if (entry & 0x01000000) + v |= (((entry & 0x60000000) >> 29) ^ 2) << s; + else + v |= (((entry & 0x18000000) >> 27) ^ 2) << s; + bios_wr32(bios, r, v); + + r = nv50_gpio_ctl[line >> 4]; + s = (line & 0x0f); + v = bios_rd32(bios, r) & ~(0x00010001 << s); + switch ((entry & 0x06000000) >> 25) { + case 1: + v |= (0x00000001 << s); + break; + case 2: + v |= (0x00010000 << s); + break; + default: + break; + } + bios_wr32(bios, r, v); + } + + return true; +} + +/* hack to avoid moving the itbl_entry array before this function */ +int init_ram_restrict_zm_reg_group_blocklen; + +static bool +init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset, + struct init_exec *iexec) +{ + /* + * INIT_RAM_RESTRICT_ZM_REG_GROUP opcode: 0x8F ('') + * + * offset (8 bit): opcode + * offset + 1 (32 bit): reg + * offset + 5 (8 bit): regincrement + * offset + 6 (8 bit): count + * offset + 7 (32 bit): value 1,1 + * ... + * + * Use the RAMCFG strap of PEXTDEV_BOOT as an index into the table at + * ram_restrict_table_ptr. The value read from here is 'n', and + * "value 1,n" gets written to "reg". This repeats "count" times and on + * each iteration 'm', "reg" increases by "regincrement" and + * "value m,n" is used. 
The extent of n is limited by a number read + * from the 'M' BIT table, herein called "blocklen" + */ + + uint32_t reg = ROM32(bios->data[offset + 1]); + uint8_t regincrement = bios->data[offset + 5]; + uint8_t count = bios->data[offset + 6]; + uint32_t strap_ramcfg, data; + uint16_t blocklen; + uint8_t index; + int i; + + /* previously set by 'M' BIT table */ + blocklen = init_ram_restrict_zm_reg_group_blocklen; + + if (!iexec->execute) + return true; + + if (!blocklen) { + NV_ERROR(bios->dev, + "0x%04X: Zero block length - has the M table " + "been parsed?\n", offset); + return false; + } + + strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf; + index = bios->data[bios->ram_restrict_tbl_ptr + strap_ramcfg]; + + BIOSLOG(bios, "0x%04X: Reg: 0x%08X, RegIncrement: 0x%02X, " + "Count: 0x%02X, StrapRamCfg: 0x%02X, Index: 0x%02X\n", + offset, reg, regincrement, count, strap_ramcfg, index); + + for (i = 0; i < count; i++) { + data = ROM32(bios->data[offset + 7 + index * 4 + blocklen * i]); + + bios_wr32(bios, reg, data); + + reg += regincrement; + } + + return true; +} + +static bool +init_copy_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_COPY_ZM_REG opcode: 0x90 ('') + * + * offset (8 bit): opcode + * offset + 1 (32 bit): src reg + * offset + 5 (32 bit): dst reg + * + * Put contents of "src reg" into "dst reg" + */ + + uint32_t srcreg = ROM32(bios->data[offset + 1]); + uint32_t dstreg = ROM32(bios->data[offset + 5]); + + if (!iexec->execute) + return true; + + bios_wr32(bios, dstreg, bios_rd32(bios, srcreg)); + + return true; +} + +static bool +init_zm_reg_group_addr_latched(struct nvbios *bios, uint16_t offset, + struct init_exec *iexec) +{ + /* + * INIT_ZM_REG_GROUP_ADDRESS_LATCHED opcode: 0x91 ('') + * + * offset (8 bit): opcode + * offset + 1 (32 bit): dst reg + * offset + 5 (8 bit): count + * offset + 6 (32 bit): data 1 + * ... 
+ * + * For each of "count" values write "data n" to "dst reg" + */ + + uint32_t reg = ROM32(bios->data[offset + 1]); + uint8_t count = bios->data[offset + 5]; + int i; + + if (!iexec->execute) + return true; + + for (i = 0; i < count; i++) { + uint32_t data = ROM32(bios->data[offset + 6 + 4 * i]); + bios_wr32(bios, reg, data); + } + + return true; +} + +static bool +init_reserved(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_RESERVED opcode: 0x92 ('') + * + * offset (8 bit): opcode + * + * Seemingly does nothing + */ + + return true; +} + +static bool +init_96(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_96 opcode: 0x96 ('') + * + * offset (8 bit): opcode + * offset + 1 (32 bit): sreg + * offset + 5 (8 bit): sshift + * offset + 6 (8 bit): smask + * offset + 7 (8 bit): index + * offset + 8 (32 bit): reg + * offset + 12 (32 bit): mask + * offset + 16 (8 bit): shift + * + */ + + uint16_t xlatptr = bios->init96_tbl_ptr + (bios->data[offset + 7] * 2); + uint32_t reg = ROM32(bios->data[offset + 8]); + uint32_t mask = ROM32(bios->data[offset + 12]); + uint32_t val; + + val = bios_rd32(bios, ROM32(bios->data[offset + 1])); + if (bios->data[offset + 5] < 0x80) + val >>= bios->data[offset + 5]; + else + val <<= (0x100 - bios->data[offset + 5]); + val &= bios->data[offset + 6]; + + val = bios->data[ROM16(bios->data[xlatptr]) + val]; + val <<= bios->data[offset + 16]; + + if (!iexec->execute) + return true; + + bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | val); + return true; +} + +static bool +init_97(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_97 opcode: 0x97 ('') + * + * offset (8 bit): opcode + * offset + 1 (32 bit): register + * offset + 5 (32 bit): mask + * offset + 9 (32 bit): value + * + * Adds "value" to "register" preserving the fields specified + * by "mask" + */ + + uint32_t reg = ROM32(bios->data[offset + 1]); + uint32_t mask = ROM32(bios->data[offset + 5]); + uint32_t add = ROM32(bios->data[offset + 9]); + uint32_t val; + + val = bios_rd32(bios, reg); + val = (val & mask) | ((val + add) & ~mask); + + if (!iexec->execute) + return true; + + bios_wr32(bios, reg, val); + return true; +} + +static bool +init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_AUXCH opcode: 0x98 ('') + * + * offset (8 bit): opcode + * offset + 1 (32 bit): address + * offset + 5 (8 bit): count + * offset + 6 (8 bit): mask 0 + * offset + 7 (8 bit): data 0 + * ... 
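+ * For each of "count" mask/data byte pairs, a byte is read from aux address "address" on the active output's aux channel, ANDed with "mask n", ORed with "data n", and written back (descriptive comment, summarising the loop below).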
+ * + */ + + struct drm_device *dev = bios->dev; + struct nouveau_i2c_chan *auxch; + uint32_t addr = ROM32(bios->data[offset + 1]); + uint8_t len = bios->data[offset + 5]; + int ret, i; + + if (!bios->display.output) { + NV_ERROR(dev, "INIT_AUXCH: no active output\n"); + return false; + } + + auxch = init_i2c_device_find(dev, bios->display.output->i2c_index); + if (!auxch) { + NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n", + bios->display.output->i2c_index); + return false; + } + + if (!iexec->execute) + return true; + + offset += 6; + for (i = 0; i < len; i++, offset += 2) { + uint8_t data; + + ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1); + if (ret) { + NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret); + return false; + } + + data &= bios->data[offset + 0]; + data |= bios->data[offset + 1]; + + ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1); + if (ret) { + NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret); + return false; + } + } + + return true; +} + +static bool +init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_ZM_AUXCH opcode: 0x99 ('') + * + * offset (8 bit): opcode + * offset + 1 (32 bit): address + * offset + 5 (8 bit): count + * offset + 6 (8 bit): data 0 + * ... + * + */ + + struct drm_device *dev = bios->dev; + struct nouveau_i2c_chan *auxch; + uint32_t addr = ROM32(bios->data[offset + 1]); + uint8_t len = bios->data[offset + 5]; + int ret, i; + + if (!bios->display.output) { + NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n"); + return false; + } + + auxch = init_i2c_device_find(dev, bios->display.output->i2c_index); + if (!auxch) { + NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n", + bios->display.output->i2c_index); + return false; + } + + if (!iexec->execute) + return true; + + offset += 6; + for (i = 0; i < len; i++, offset++) { + ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1); + if (ret) { + NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", ret); + return false; + } + } + + return true; +} + +static struct init_tbl_entry itbl_entry[] = { + /* command name , id , length , offset , mult , command handler */ + /* INIT_PROG (0x31, 15, 10, 4) removed due to no example of use */ + { "INIT_IO_RESTRICT_PROG" , 0x32, 11 , 6 , 4 , init_io_restrict_prog }, + { "INIT_REPEAT" , 0x33, 2 , 0 , 0 , init_repeat }, + { "INIT_IO_RESTRICT_PLL" , 0x34, 12 , 7 , 2 , init_io_restrict_pll }, + { "INIT_END_REPEAT" , 0x36, 1 , 0 , 0 , init_end_repeat }, + { "INIT_COPY" , 0x37, 11 , 0 , 0 , init_copy }, + { "INIT_NOT" , 0x38, 1 , 0 , 0 , init_not }, + { "INIT_IO_FLAG_CONDITION" , 0x39, 2 , 0 , 0 , init_io_flag_condition }, + { "INIT_INDEX_ADDRESS_LATCHED" , 0x49, 18 , 17 , 2 , init_idx_addr_latched }, + { "INIT_IO_RESTRICT_PLL2" , 0x4A, 11 , 6 , 4 , init_io_restrict_pll2 }, + { "INIT_PLL2" , 0x4B, 9 , 0 , 0 , init_pll2 }, + { "INIT_I2C_BYTE" , 0x4C, 4 , 3 , 3 , init_i2c_byte }, + { "INIT_ZM_I2C_BYTE" , 0x4D, 4 , 3 , 2 , init_zm_i2c_byte }, + { "INIT_ZM_I2C" , 0x4E, 4 , 3 , 1 , init_zm_i2c }, + { "INIT_TMDS" , 0x4F, 5 , 0 , 0 , init_tmds }, + { "INIT_ZM_TMDS_GROUP" , 0x50, 3 , 2 , 2 , init_zm_tmds_group }, + { "INIT_CR_INDEX_ADDRESS_LATCHED" , 0x51, 5 , 4 , 1 , init_cr_idx_adr_latch }, + { "INIT_CR" , 0x52, 4 , 0 , 0 , init_cr }, + { "INIT_ZM_CR" , 0x53, 3 , 0 , 0 , init_zm_cr }, + { "INIT_ZM_CR_GROUP" , 0x54, 2 , 1 , 2 , init_zm_cr_group }, + { "INIT_CONDITION_TIME" , 0x56, 3 , 0 , 0 , init_condition_time }, + { "INIT_ZM_REG_SEQUENCE" , 0x58, 6 , 5 , 4 , init_zm_reg_sequence }, + /* INIT_INDIRECT_REG (0x5A, 7, 
0, 0) removed due to no example of use */ + { "INIT_SUB_DIRECT" , 0x5B, 3 , 0 , 0 , init_sub_direct }, + { "INIT_COPY_NV_REG" , 0x5F, 22 , 0 , 0 , init_copy_nv_reg }, + { "INIT_ZM_INDEX_IO" , 0x62, 5 , 0 , 0 , init_zm_index_io }, + { "INIT_COMPUTE_MEM" , 0x63, 1 , 0 , 0 , init_compute_mem }, + { "INIT_RESET" , 0x65, 13 , 0 , 0 , init_reset }, + { "INIT_CONFIGURE_MEM" , 0x66, 1 , 0 , 0 , init_configure_mem }, + { "INIT_CONFIGURE_CLK" , 0x67, 1 , 0 , 0 , init_configure_clk }, + { "INIT_CONFIGURE_PREINIT" , 0x68, 1 , 0 , 0 , init_configure_preinit }, + { "INIT_IO" , 0x69, 5 , 0 , 0 , init_io }, + { "INIT_SUB" , 0x6B, 2 , 0 , 0 , init_sub }, + { "INIT_RAM_CONDITION" , 0x6D, 3 , 0 , 0 , init_ram_condition }, + { "INIT_NV_REG" , 0x6E, 13 , 0 , 0 , init_nv_reg }, + { "INIT_MACRO" , 0x6F, 2 , 0 , 0 , init_macro }, + { "INIT_DONE" , 0x71, 1 , 0 , 0 , init_done }, + { "INIT_RESUME" , 0x72, 1 , 0 , 0 , init_resume }, + /* INIT_RAM_CONDITION2 (0x73, 9, 0, 0) removed due to no example of use */ + { "INIT_TIME" , 0x74, 3 , 0 , 0 , init_time }, + { "INIT_CONDITION" , 0x75, 2 , 0 , 0 , init_condition }, + { "INIT_IO_CONDITION" , 0x76, 2 , 0 , 0 , init_io_condition }, + { "INIT_INDEX_IO" , 0x78, 6 , 0 , 0 , init_index_io }, + { "INIT_PLL" , 0x79, 7 , 0 , 0 , init_pll }, + { "INIT_ZM_REG" , 0x7A, 9 , 0 , 0 , init_zm_reg }, + /* INIT_RAM_RESTRICT_PLL's length is adjusted by the BIT M table */ + { "INIT_RAM_RESTRICT_PLL" , 0x87, 2 , 0 , 0 , init_ram_restrict_pll }, + { "INIT_8C" , 0x8C, 1 , 0 , 0 , init_8c }, + { "INIT_8D" , 0x8D, 1 , 0 , 0 , init_8d }, + { "INIT_GPIO" , 0x8E, 1 , 0 , 0 , init_gpio }, + /* INIT_RAM_RESTRICT_ZM_REG_GROUP's mult is loaded by M table in BIT */ + { "INIT_RAM_RESTRICT_ZM_REG_GROUP" , 0x8F, 7 , 6 , 0 , init_ram_restrict_zm_reg_group }, + { "INIT_COPY_ZM_REG" , 0x90, 9 , 0 , 0 , init_copy_zm_reg }, + { "INIT_ZM_REG_GROUP_ADDRESS_LATCHED" , 0x91, 6 , 5 , 4 , init_zm_reg_group_addr_latched }, + { "INIT_RESERVED" , 0x92, 1 , 0 , 0 , init_reserved }, + { "INIT_96" , 0x96, 17 , 0 , 0 , init_96 }, + { "INIT_97" , 0x97, 13 , 0 , 0 , init_97 }, + { "INIT_AUXCH" , 0x98, 6 , 5 , 2 , init_auxch }, + { "INIT_ZM_AUXCH" , 0x99, 6 , 5 , 1 , init_zm_auxch }, + { NULL , 0 , 0 , 0 , 0 , NULL } +}; + +static unsigned int get_init_table_entry_length(struct nvbios *bios, unsigned int offset, int i) +{ + /* Calculates the length of a given init table entry. */ + return itbl_entry[i].length + bios->data[offset + itbl_entry[i].length_offset]*itbl_entry[i].length_multiplier; +} + +#define MAX_TABLE_OPS 1000 + +static int +parse_init_table(struct nvbios *bios, unsigned int offset, + struct init_exec *iexec) +{ + /* + * Parses all commands in an init table. + * + * We start out executing all commands found in the init table. Some + * opcodes may change the status of iexec->execute to SKIP, which will + * cause the following opcodes to perform no operation until the value + * is changed back to EXECUTE. + */ + + int count = 0, i; + uint8_t id; + + /* + * Loop until INIT_DONE causes us to break out of the loop + * (or until offset > bios length just in case... ) + * (and no more than MAX_TABLE_OPS iterations, just in case... 
) + */ + while ((offset < bios->length) && (count++ < MAX_TABLE_OPS)) { + id = bios->data[offset]; + + /* Find matching id in itbl_entry */ + for (i = 0; itbl_entry[i].name && (itbl_entry[i].id != id); i++) + ; + + if (itbl_entry[i].name) { + BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n", + offset, itbl_entry[i].id, itbl_entry[i].name); + + /* execute eventual command handler */ + if (itbl_entry[i].handler) + if (!(*itbl_entry[i].handler)(bios, offset, iexec)) + break; + } else { + NV_ERROR(bios->dev, + "0x%04X: Init table command not found: " + "0x%02X\n", offset, id); + return -ENOENT; + } + + /* + * Add the offset of the current command including all data + * of that command. The offset will then be pointing on the + * next op code. + */ + offset += get_init_table_entry_length(bios, offset, i); + } + + if (offset >= bios->length) + NV_WARN(bios->dev, + "Offset 0x%04X greater than known bios image length. " + "Corrupt image?\n", offset); + if (count >= MAX_TABLE_OPS) + NV_WARN(bios->dev, + "More than %d opcodes to a table is unlikely, " + "is the bios image corrupt?\n", MAX_TABLE_OPS); + + return 0; +} + +static void +parse_init_tables(struct nvbios *bios) +{ + /* Loops and calls parse_init_table() for each present table. */ + + int i = 0; + uint16_t table; + struct init_exec iexec = {true, false}; + + if (bios->old_style_init) { + if (bios->init_script_tbls_ptr) + parse_init_table(bios, bios->init_script_tbls_ptr, &iexec); + if (bios->extra_init_script_tbl_ptr) + parse_init_table(bios, bios->extra_init_script_tbl_ptr, &iexec); + + return; + } + + while ((table = ROM16(bios->data[bios->init_script_tbls_ptr + i]))) { + NV_INFO(bios->dev, + "Parsing VBIOS init table %d at offset 0x%04X\n", + i / 2, table); + BIOSLOG(bios, "0x%04X: ------ Executing following commands ------\n", table); + + parse_init_table(bios, table, &iexec); + i += 2; + } +} + +static uint16_t clkcmptable(struct nvbios *bios, uint16_t clktable, int pxclk) +{ + int compare_record_len, i = 0; + uint16_t compareclk, scriptptr = 0; + + if (bios->major_version < 5) /* pre BIT */ + compare_record_len = 3; + else + compare_record_len = 4; + + do { + compareclk = ROM16(bios->data[clktable + compare_record_len * i]); + if (pxclk >= compareclk * 10) { + if (bios->major_version < 5) { + uint8_t tmdssub = bios->data[clktable + 2 + compare_record_len * i]; + scriptptr = ROM16(bios->data[bios->init_script_tbls_ptr + tmdssub * 2]); + } else + scriptptr = ROM16(bios->data[clktable + 2 + compare_record_len * i]); + break; + } + i++; + } while (compareclk); + + return scriptptr; +} + +static void +run_digital_op_script(struct drm_device *dev, uint16_t scriptptr, + struct dcb_entry *dcbent, int head, bool dl) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvbios *bios = &dev_priv->VBIOS; + struct init_exec iexec = {true, false}; + + NV_TRACE(dev, "0x%04X: Parsing digital output script table\n", + scriptptr); + bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_44, + head ? 
NV_CIO_CRE_44_HEADB : NV_CIO_CRE_44_HEADA); + /* note: if dcb entries have been merged, index may be misleading */ + NVWriteVgaCrtc5758(dev, head, 0, dcbent->index); + parse_init_table(bios, scriptptr, &iexec); + + nv04_dfp_bind_head(dev, dcbent, head, dl); +} + +static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvbios *bios = &dev_priv->VBIOS; + uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & OUTPUT_C ? 1 : 0); + uint16_t scriptofs = ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]); + + if (!bios->fp.xlated_entry || !sub || !scriptofs) + return -EINVAL; + + run_digital_op_script(dev, scriptofs, dcbent, head, bios->fp.dual_link); + + if (script == LVDS_PANEL_OFF) { + /* off-on delay in ms */ + msleep(ROM16(bios->data[bios->fp.xlated_entry + 7])); + } +#ifdef __powerpc__ + /* Powerbook specific quirks */ + if (script == LVDS_RESET && ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0329)) + nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72); + if ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0189 || (dev->pci_device & 0xffff) == 0x0329) { + if (script == LVDS_PANEL_ON) { + bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) | (1 << 31)); + bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1); + } + if (script == LVDS_PANEL_OFF) { + bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) & ~(1 << 31)); + bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3); + } + } +#endif + + return 0; +} + +static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script, int pxclk) +{ + /* + * The BIT LVDS table's header has the information to setup the + * necessary registers. Following the standard 4 byte header are: + * A bitmask byte and a dual-link transition pxclk value for use in + * selecting the init script when not using straps; 4 script pointers + * for panel power, selected by output and on/off; and 8 table pointers + * for panel init, the needed one determined by output, and bits in the + * conf byte. These tables are similar to the TMDS tables, consisting + * of a list of pxclks and script pointers. + */ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvbios *bios = &dev_priv->VBIOS; + unsigned int outputset = (dcbent->or == 4) ? 1 : 0; + uint16_t scriptptr = 0, clktable; + uint8_t clktableptr = 0; + + /* + * For now we assume version 3.0 table - g80 support will need some + * changes + */ + + switch (script) { + case LVDS_INIT: + return -ENOSYS; + case LVDS_BACKLIGHT_ON: + case LVDS_PANEL_ON: + scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 7 + outputset * 2]); + break; + case LVDS_BACKLIGHT_OFF: + case LVDS_PANEL_OFF: + scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 11 + outputset * 2]); + break; + case LVDS_RESET: + if (dcbent->lvdsconf.use_straps_for_mode) { + if (bios->fp.dual_link) + clktableptr += 2; + if (bios->fp.BITbit1) + clktableptr++; + } else { + /* using EDID */ + uint8_t fallback = bios->data[bios->fp.lvdsmanufacturerpointer + 4]; + int fallbackcmpval = (dcbent->or == 4) ? 
4 : 1; + + if (bios->fp.dual_link) { + clktableptr += 2; + fallbackcmpval *= 2; + } + if (fallbackcmpval & fallback) + clktableptr++; + } + + /* adding outputset * 8 may not be correct */ + clktable = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 15 + clktableptr * 2 + outputset * 8]); + if (!clktable) { + NV_ERROR(dev, "Pixel clock comparison table not found\n"); + return -ENOENT; + } + scriptptr = clkcmptable(bios, clktable, pxclk); + } + + if (!scriptptr) { + NV_ERROR(dev, "LVDS output init script not found\n"); + return -ENOENT; + } + run_digital_op_script(dev, scriptptr, dcbent, head, bios->fp.dual_link); + + return 0; +} + +int call_lvds_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script, int pxclk) +{ + /* + * LVDS operations are multiplexed in an effort to present a single API + * which works with two vastly differing underlying structures. + * This acts as the demux + */ + + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvbios *bios = &dev_priv->VBIOS; + uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer]; + uint32_t sel_clk_binding, sel_clk; + int ret; + + if (bios->fp.last_script_invoc == (script << 1 | head) || !lvds_ver || + (lvds_ver >= 0x30 && script == LVDS_INIT)) + return 0; + + if (!bios->fp.lvds_init_run) { + bios->fp.lvds_init_run = true; + call_lvds_script(dev, dcbent, head, LVDS_INIT, pxclk); + } + + if (script == LVDS_PANEL_ON && bios->fp.reset_after_pclk_change) + call_lvds_script(dev, dcbent, head, LVDS_RESET, pxclk); + if (script == LVDS_RESET && bios->fp.power_off_for_reset) + call_lvds_script(dev, dcbent, head, LVDS_PANEL_OFF, pxclk); + + NV_TRACE(dev, "Calling LVDS script %d:\n", script); + + /* don't let script change pll->head binding */ + sel_clk_binding = bios_rd32(bios, NV_PRAMDAC_SEL_CLK) & 0x50000; + + if (lvds_ver < 0x30) + ret = call_lvds_manufacturer_script(dev, dcbent, head, script); + else + ret = run_lvds_table(dev, dcbent, head, script, pxclk); + + bios->fp.last_script_invoc = (script << 1 | head); + + sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000; + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding); + /* some scripts set a value in NV_PBUS_POWERCTRL_2 and break video overlay */ + nvWriteMC(dev, NV_PBUS_POWERCTRL_2, 0); + + return ret; +} + +struct lvdstableheader { + uint8_t lvds_ver, headerlen, recordlen; +}; + +static int parse_lvds_manufacturer_table_header(struct drm_device *dev, struct nvbios *bios, struct lvdstableheader *lth) +{ + /* + * BMP version (0xa) LVDS table has a simple header of version and + * record length. The BIT LVDS table has the typical BIT table header: + * version byte, header length byte, record length byte, and a byte for + * the maximum number of records that can be held in the table. 
+ */ + + uint8_t lvds_ver, headerlen, recordlen; + + memset(lth, 0, sizeof(struct lvdstableheader)); + + if (bios->fp.lvdsmanufacturerpointer == 0x0) { + NV_ERROR(dev, "Pointer to LVDS manufacturer table invalid\n"); + return -EINVAL; + } + + lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer]; + + switch (lvds_ver) { + case 0x0a: /* pre NV40 */ + headerlen = 2; + recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1]; + break; + case 0x30: /* NV4x */ + headerlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1]; + if (headerlen < 0x1f) { + NV_ERROR(dev, "LVDS table header not understood\n"); + return -EINVAL; + } + recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 2]; + break; + case 0x40: /* G80/G90 */ + headerlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1]; + if (headerlen < 0x7) { + NV_ERROR(dev, "LVDS table header not understood\n"); + return -EINVAL; + } + recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 2]; + break; + default: + NV_ERROR(dev, + "LVDS table revision %d.%d not currently supported\n", + lvds_ver >> 4, lvds_ver & 0xf); + return -ENOSYS; + } + + lth->lvds_ver = lvds_ver; + lth->headerlen = headerlen; + lth->recordlen = recordlen; + + return 0; +} + +static int +get_fp_strap(struct drm_device *dev, struct nvbios *bios) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + /* + * The fp strap is normally dictated by the "User Strap" in + * PEXTDEV_BOOT_0[20:16], but on BMP cards when bit 2 of the + * Internal_Flags struct at 0x48 is set, the user strap gets overriden + * by the PCI subsystem ID during POST, but not before the previous user + * strap has been committed to CR58 for CR57=0xf on head A, which may be + * read and used instead + */ + + if (bios->major_version < 5 && bios->data[0x48] & 0x4) + return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf; + + if (dev_priv->card_type >= NV_50) + return (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 24) & 0xf; + else + return (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 16) & 0xf; +} + +static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios) +{ + uint8_t *fptable; + uint8_t fptable_ver, headerlen = 0, recordlen, fpentries = 0xf, fpindex; + int ret, ofs, fpstrapping; + struct lvdstableheader lth; + + if (bios->fp.fptablepointer == 0x0) { + /* Apple cards don't have the fp table; the laptops use DDC */ + /* The table is also missing on some x86 IGPs */ +#ifndef __powerpc__ + NV_ERROR(dev, "Pointer to flat panel table invalid\n"); +#endif + bios->pub.digital_min_front_porch = 0x4b; + return 0; + } + + fptable = &bios->data[bios->fp.fptablepointer]; + fptable_ver = fptable[0]; + + switch (fptable_ver) { + /* + * BMP version 0x5.0x11 BIOSen have version 1 like tables, but no + * version field, and miss one of the spread spectrum/PWM bytes. + * This could affect early GF2Go parts (not seen any appropriate ROMs + * though). Here we assume that a version of 0x05 matches this case + * (combining with a BMP version check would be better), as the + * common case for the panel type field is 0x0005, and that is in + * fact what we are reading the first byte of. 
+ */ + case 0x05: /* some NV10, 11, 15, 16 */ + recordlen = 42; + ofs = -1; + break; + case 0x10: /* some NV15/16, and NV11+ */ + recordlen = 44; + ofs = 0; + break; + case 0x20: /* NV40+ */ + headerlen = fptable[1]; + recordlen = fptable[2]; + fpentries = fptable[3]; + /* + * fptable[4] is the minimum + * RAMDAC_FP_HCRTC -> RAMDAC_FP_HSYNC_START gap + */ + bios->pub.digital_min_front_porch = fptable[4]; + ofs = -7; + break; + default: + NV_ERROR(dev, + "FP table revision %d.%d not currently supported\n", + fptable_ver >> 4, fptable_ver & 0xf); + return -ENOSYS; + } + + if (!bios->is_mobile) /* !mobile only needs digital_min_front_porch */ + return 0; + + ret = parse_lvds_manufacturer_table_header(dev, bios, <h); + if (ret) + return ret; + + if (lth.lvds_ver == 0x30 || lth.lvds_ver == 0x40) { + bios->fp.fpxlatetableptr = bios->fp.lvdsmanufacturerpointer + + lth.headerlen + 1; + bios->fp.xlatwidth = lth.recordlen; + } + if (bios->fp.fpxlatetableptr == 0x0) { + NV_ERROR(dev, "Pointer to flat panel xlat table invalid\n"); + return -EINVAL; + } + + fpstrapping = get_fp_strap(dev, bios); + + fpindex = bios->data[bios->fp.fpxlatetableptr + + fpstrapping * bios->fp.xlatwidth]; + + if (fpindex > fpentries) { + NV_ERROR(dev, "Bad flat panel table index\n"); + return -ENOENT; + } + + /* nv4x cards need both a strap value and fpindex of 0xf to use DDC */ + if (lth.lvds_ver > 0x10) + bios->pub.fp_no_ddc = fpstrapping != 0xf || fpindex != 0xf; + + /* + * If either the strap or xlated fpindex value are 0xf there is no + * panel using a strap-derived bios mode present. this condition + * includes, but is different from, the DDC panel indicator above + */ + if (fpstrapping == 0xf || fpindex == 0xf) + return 0; + + bios->fp.mode_ptr = bios->fp.fptablepointer + headerlen + + recordlen * fpindex + ofs; + + NV_TRACE(dev, "BIOS FP mode: %dx%d (%dkHz pixel clock)\n", + ROM16(bios->data[bios->fp.mode_ptr + 11]) + 1, + ROM16(bios->data[bios->fp.mode_ptr + 25]) + 1, + ROM16(bios->data[bios->fp.mode_ptr + 7]) * 10); + + return 0; +} + +bool nouveau_bios_fp_mode(struct drm_device *dev, struct drm_display_mode *mode) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvbios *bios = &dev_priv->VBIOS; + uint8_t *mode_entry = &bios->data[bios->fp.mode_ptr]; + + if (!mode) /* just checking whether we can produce a mode */ + return bios->fp.mode_ptr; + + memset(mode, 0, sizeof(struct drm_display_mode)); + /* + * For version 1.0 (version in byte 0): + * bytes 1-2 are "panel type", including bits on whether Colour/mono, + * single/dual link, and type (TFT etc.) + * bytes 3-6 are bits per colour in RGBX + */ + mode->clock = ROM16(mode_entry[7]) * 10; + /* bytes 9-10 is HActive */ + mode->hdisplay = ROM16(mode_entry[11]) + 1; + /* + * bytes 13-14 is HValid Start + * bytes 15-16 is HValid End + */ + mode->hsync_start = ROM16(mode_entry[17]) + 1; + mode->hsync_end = ROM16(mode_entry[19]) + 1; + mode->htotal = ROM16(mode_entry[21]) + 1; + /* bytes 23-24, 27-30 similarly, but vertical */ + mode->vdisplay = ROM16(mode_entry[25]) + 1; + mode->vsync_start = ROM16(mode_entry[31]) + 1; + mode->vsync_end = ROM16(mode_entry[33]) + 1; + mode->vtotal = ROM16(mode_entry[35]) + 1; + mode->flags |= (mode_entry[37] & 0x10) ? + DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC; + mode->flags |= (mode_entry[37] & 0x1) ? 
+ DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC; + /* + * bytes 38-39 relate to spread spectrum settings + * bytes 40-43 are something to do with PWM + */ + + mode->status = MODE_OK; + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + drm_mode_set_name(mode); + return bios->fp.mode_ptr; +} + +int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, bool *if_is_24bit) +{ + /* + * The LVDS table header is (mostly) described in + * parse_lvds_manufacturer_table_header(): the BIT header additionally + * contains the dual-link transition pxclk (in 10s kHz), at byte 5 - if + * straps are not being used for the panel, this specifies the frequency + * at which modes should be set up in the dual link style. + * + * Following the header, the BMP (ver 0xa) table has several records, + * indexed by a seperate xlat table, indexed in turn by the fp strap in + * EXTDEV_BOOT. Each record had a config byte, followed by 6 script + * numbers for use by INIT_SUB which controlled panel init and power, + * and finally a dword of ms to sleep between power off and on + * operations. + * + * In the BIT versions, the table following the header serves as an + * integrated config and xlat table: the records in the table are + * indexed by the FP strap nibble in EXTDEV_BOOT, and each record has + * two bytes - the first as a config byte, the second for indexing the + * fp mode table pointed to by the BIT 'D' table + * + * DDC is not used until after card init, so selecting the correct table + * entry and setting the dual link flag for EDID equipped panels, + * requiring tests against the native-mode pixel clock, cannot be done + * until later, when this function should be called with non-zero pxclk + */ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvbios *bios = &dev_priv->VBIOS; + int fpstrapping = get_fp_strap(dev, bios), lvdsmanufacturerindex = 0; + struct lvdstableheader lth; + uint16_t lvdsofs; + int ret, chip_version = bios->pub.chip_version; + + ret = parse_lvds_manufacturer_table_header(dev, bios, <h); + if (ret) + return ret; + + switch (lth.lvds_ver) { + case 0x0a: /* pre NV40 */ + lvdsmanufacturerindex = bios->data[ + bios->fp.fpxlatemanufacturertableptr + + fpstrapping]; + + /* we're done if this isn't the EDID panel case */ + if (!pxclk) + break; + + if (chip_version < 0x25) { + /* nv17 behaviour + * + * It seems the old style lvds script pointer is reused + * to select 18/24 bit colour depth for EDID panels. + */ + lvdsmanufacturerindex = + (bios->legacy.lvds_single_a_script_ptr & 1) ? 
+ 2 : 0; + if (pxclk >= bios->fp.duallink_transition_clk) + lvdsmanufacturerindex++; + } else if (chip_version < 0x30) { + /* nv28 behaviour (off-chip encoder) + * + * nv28 does a complex dance of first using byte 121 of + * the EDID to choose the lvdsmanufacturerindex, then + * later attempting to match the EDID manufacturer and + * product IDs in a table (signature 'pidt' (panel id + * table?)), setting an lvdsmanufacturerindex of 0 and + * an fp strap of the match index (or 0xf if none) + */ + lvdsmanufacturerindex = 0; + } else { + /* nv31, nv34 behaviour */ + lvdsmanufacturerindex = 0; + if (pxclk >= bios->fp.duallink_transition_clk) + lvdsmanufacturerindex = 2; + if (pxclk >= 140000) + lvdsmanufacturerindex = 3; + } + + /* + * nvidia set the high nibble of (cr57=f, cr58) to + * lvdsmanufacturerindex in this case; we don't + */ + break; + case 0x30: /* NV4x */ + case 0x40: /* G80/G90 */ + lvdsmanufacturerindex = fpstrapping; + break; + default: + NV_ERROR(dev, "LVDS table revision not currently supported\n"); + return -ENOSYS; + } + + lvdsofs = bios->fp.xlated_entry = bios->fp.lvdsmanufacturerpointer + lth.headerlen + lth.recordlen * lvdsmanufacturerindex; + switch (lth.lvds_ver) { + case 0x0a: + bios->fp.power_off_for_reset = bios->data[lvdsofs] & 1; + bios->fp.reset_after_pclk_change = bios->data[lvdsofs] & 2; + bios->fp.dual_link = bios->data[lvdsofs] & 4; + bios->fp.link_c_increment = bios->data[lvdsofs] & 8; + *if_is_24bit = bios->data[lvdsofs] & 16; + break; + case 0x30: + /* + * My money would be on there being a 24 bit interface bit in + * this table, but I have no example of a laptop bios with a + * 24 bit panel to confirm that. Hence we shout loudly if any + * bit other than bit 0 is set (I've not even seen bit 1) + */ + if (bios->data[lvdsofs] > 1) + NV_ERROR(dev, + "You have a very unusual laptop display; please report it\n"); + /* + * No sign of the "power off for reset" or "reset for panel + * on" bits, but it's safer to assume we should + */ + bios->fp.power_off_for_reset = true; + bios->fp.reset_after_pclk_change = true; + /* + * It's ok lvdsofs is wrong for nv4x edid case; dual_link is + * over-written, and BITbit1 isn't used + */ + bios->fp.dual_link = bios->data[lvdsofs] & 1; + bios->fp.BITbit1 = bios->data[lvdsofs] & 2; + bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10; + break; + case 0x40: + bios->fp.dual_link = bios->data[lvdsofs] & 1; + bios->fp.if_is_24bit = bios->data[lvdsofs] & 2; + bios->fp.strapless_is_24bit = bios->data[bios->fp.lvdsmanufacturerpointer + 4]; + bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10; + break; + } + + /* set dual_link flag for EDID case */ + if (pxclk && (chip_version < 0x25 || chip_version > 0x28)) + bios->fp.dual_link = (pxclk >= bios->fp.duallink_transition_clk); + + *dl = bios->fp.dual_link; + + return 0; +} + +static uint8_t * +bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent, + uint16_t record, int record_len, int record_nr) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvbios *bios = &dev_priv->VBIOS; + uint32_t entry; + uint16_t table; + int i, v; + + for (i = 0; i < record_nr; i++, record += record_len) { + table = ROM16(bios->data[record]); + if (!table) + continue; + entry = ROM32(bios->data[table]); + + v = (entry & 0x000f0000) >> 16; + if (!(v & dcbent->or)) + continue; + + v = (entry & 0x000000f0) >> 4; + if (v != dcbent->location) + continue; + + v = (entry & 0x0000000f); + 
if (v != dcbent->type) + continue; + + return &bios->data[table]; + } + + return NULL; +} + +void * +nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent, + int *length) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvbios *bios = &dev_priv->VBIOS; + uint8_t *table; + + if (!bios->display.dp_table_ptr) { + NV_ERROR(dev, "No pointer to DisplayPort table\n"); + return NULL; + } + table = &bios->data[bios->display.dp_table_ptr]; + + if (table[0] != 0x21) { + NV_ERROR(dev, "DisplayPort table version 0x%02x unknown\n", + table[0]); + return NULL; + } + + *length = table[4]; + return bios_output_config_match(dev, dcbent, + bios->display.dp_table_ptr + table[1], + table[2], table[3]); +} + +int +nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, + uint32_t sub, int pxclk) +{ + /* + * The display script table is located by the BIT 'U' table. + * + * It contains an array of pointers to various tables describing + * a particular output type. The first 32-bits of the output + * tables contains similar information to a DCB entry, and is + * used to decide whether that particular table is suitable for + * the output you want to access. + * + * The "record header length" field here seems to indicate the + * offset of the first configuration entry in the output tables. + * This is 10 on most cards I've seen, but 12 has been witnessed + * on DP cards, and there's another script pointer within the + * header. + * + * offset + 0 ( 8 bits): version + * offset + 1 ( 8 bits): header length + * offset + 2 ( 8 bits): record length + * offset + 3 ( 8 bits): number of records + * offset + 4 ( 8 bits): record header length + * offset + 5 (16 bits): pointer to first output script table + */ + + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct init_exec iexec = {true, false}; + struct nvbios *bios = &dev_priv->VBIOS; + uint8_t *table = &bios->data[bios->display.script_table_ptr]; + uint8_t *otable = NULL; + uint16_t script; + int i = 0; + + if (!bios->display.script_table_ptr) { + NV_ERROR(dev, "No pointer to output script table\n"); + return 1; + } + + /* + * Nothing useful has been in any of the pre-2.0 tables I've seen, + * so until they are, we really don't need to care. + */ + if (table[0] < 0x20) + return 1; + + if (table[0] != 0x20 && table[0] != 0x21) { + NV_ERROR(dev, "Output script table version 0x%02x unknown\n", + table[0]); + return 1; + } + + /* + * The output script tables describing a particular output type + * look as follows: + * + * offset + 0 (32 bits): output this table matches (hash of DCB) + * offset + 4 ( 8 bits): unknown + * offset + 5 ( 8 bits): number of configurations + * offset + 6 (16 bits): pointer to some script + * offset + 8 (16 bits): pointer to some script + * + * headerlen == 10 + * offset + 10 : configuration 0 + * + * headerlen == 12 + * offset + 10 : pointer to some script + * offset + 12 : configuration 0 + * + * Each config entry is as follows: + * + * offset + 0 (16 bits): unknown, assumed to be a match value + * offset + 2 (16 bits): pointer to script table (clock set?) + * offset + 4 (16 bits): pointer to script table (reset?) + * + * There doesn't appear to be a count value to say how many + * entries exist in each script table, instead, a 0 value in + * the first 16-bit word seems to indicate both the end of the + * list and the default entry. The second 16-bit word in the + * script tables is a pointer to the script to execute. 
+ */ + + NV_DEBUG(dev, "Searching for output entry for %d %d %d\n", + dcbent->type, dcbent->location, dcbent->or); + otable = bios_output_config_match(dev, dcbent, table[1] + + bios->display.script_table_ptr, + table[2], table[3]); + if (!otable) { + NV_ERROR(dev, "Couldn't find matching output script table\n"); + return 1; + } + + if (pxclk < -2 || pxclk > 0) { + /* Try to find matching script table entry */ + for (i = 0; i < otable[5]; i++) { + if (ROM16(otable[table[4] + i*6]) == sub) + break; + } + + if (i == otable[5]) { + NV_ERROR(dev, "Table 0x%04x not found for %d/%d, " + "using first\n", + sub, dcbent->type, dcbent->or); + i = 0; + } + } + + bios->display.output = dcbent; + + if (pxclk == 0) { + script = ROM16(otable[6]); + if (!script) { + NV_DEBUG(dev, "output script 0 not found\n"); + return 1; + } + + NV_TRACE(dev, "0x%04X: parsing output script 0\n", script); + parse_init_table(bios, script, &iexec); + } else + if (pxclk == -1) { + script = ROM16(otable[8]); + if (!script) { + NV_DEBUG(dev, "output script 1 not found\n"); + return 1; + } + + NV_TRACE(dev, "0x%04X: parsing output script 1\n", script); + parse_init_table(bios, script, &iexec); + } else + if (pxclk == -2) { + if (table[4] >= 12) + script = ROM16(otable[10]); + else + script = 0; + if (!script) { + NV_DEBUG(dev, "output script 2 not found\n"); + return 1; + } + + NV_TRACE(dev, "0x%04X: parsing output script 2\n", script); + parse_init_table(bios, script, &iexec); + } else + if (pxclk > 0) { + script = ROM16(otable[table[4] + i*6 + 2]); + if (script) + script = clkcmptable(bios, script, pxclk); + if (!script) { + NV_ERROR(dev, "clock script 0 not found\n"); + return 1; + } + + NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script); + parse_init_table(bios, script, &iexec); + } else + if (pxclk < 0) { + script = ROM16(otable[table[4] + i*6 + 4]); + if (script) + script = clkcmptable(bios, script, -pxclk); + if (!script) { + NV_DEBUG(dev, "clock script 1 not found\n"); + return 1; + } + + NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script); + parse_init_table(bios, script, &iexec); + } + + return 0; +} + + +int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, int pxclk) +{ + /* + * the pxclk parameter is in kHz + * + * This runs the TMDS regs setting code found on BIT bios cards + * + * For ffs(or) == 1 use the first table, for ffs(or) == 2 and + * ffs(or) == 3, use the second. 
+ */ + + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvbios *bios = &dev_priv->VBIOS; + int cv = bios->pub.chip_version; + uint16_t clktable = 0, scriptptr; + uint32_t sel_clk_binding, sel_clk; + + /* pre-nv17 off-chip tmds uses scripts, post nv17 doesn't */ + if (cv >= 0x17 && cv != 0x1a && cv != 0x20 && + dcbent->location != DCB_LOC_ON_CHIP) + return 0; + + switch (ffs(dcbent->or)) { + case 1: + clktable = bios->tmds.output0_script_ptr; + break; + case 2: + case 3: + clktable = bios->tmds.output1_script_ptr; + break; + } + + if (!clktable) { + NV_ERROR(dev, "Pixel clock comparison table not found\n"); + return -EINVAL; + } + + scriptptr = clkcmptable(bios, clktable, pxclk); + + if (!scriptptr) { + NV_ERROR(dev, "TMDS output init script not found\n"); + return -ENOENT; + } + + /* don't let script change pll->head binding */ + sel_clk_binding = bios_rd32(bios, NV_PRAMDAC_SEL_CLK) & 0x50000; + run_digital_op_script(dev, scriptptr, dcbent, head, pxclk >= 165000); + sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000; + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding); + + return 0; +} + +int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims *pll_lim) +{ + /* + * PLL limits table + * + * Version 0x10: NV30, NV31 + * One byte header (version), one record of 24 bytes + * Version 0x11: NV36 - Not implemented + * Seems to have same record style as 0x10, but 3 records rather than 1 + * Version 0x20: Found on Geforce 6 cards + * Trivial 4 byte BIT header. 31 (0x1f) byte record length + * Version 0x21: Found on Geforce 7, 8 and some Geforce 6 cards + * 5 byte header, fifth byte of unknown purpose. 35 (0x23) byte record + * length in general, some (integrated) have an extra configuration byte + * Version 0x30: Found on Geforce 8, separates the register mapping + * from the limits tables. + */ + + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvbios *bios = &dev_priv->VBIOS; + int cv = bios->pub.chip_version, pllindex = 0; + uint8_t pll_lim_ver = 0, headerlen = 0, recordlen = 0, entries = 0; + uint32_t crystal_strap_mask, crystal_straps; + + if (!bios->pll_limit_tbl_ptr) { + if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 || + cv >= 0x40) { + NV_ERROR(dev, "Pointer to PLL limits table invalid\n"); + return -EINVAL; + } + } else + pll_lim_ver = bios->data[bios->pll_limit_tbl_ptr]; + + crystal_strap_mask = 1 << 6; + /* open coded dev->twoHeads test */ + if (cv > 0x10 && cv != 0x15 && cv != 0x1a && cv != 0x20) + crystal_strap_mask |= 1 << 22; + crystal_straps = nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) & + crystal_strap_mask; + + switch (pll_lim_ver) { + /* + * We use version 0 to indicate a pre limit table bios (single stage + * pll) and load the hard coded limits instead. + */ + case 0: + break; + case 0x10: + case 0x11: + /* + * Strictly v0x11 has 3 entries, but the last two don't seem + * to get used. 
+ */ + headerlen = 1; + recordlen = 0x18; + entries = 1; + pllindex = 0; + break; + case 0x20: + case 0x21: + case 0x30: + case 0x40: + headerlen = bios->data[bios->pll_limit_tbl_ptr + 1]; + recordlen = bios->data[bios->pll_limit_tbl_ptr + 2]; + entries = bios->data[bios->pll_limit_tbl_ptr + 3]; + break; + default: + NV_ERROR(dev, "PLL limits table revision 0x%X not currently " + "supported\n", pll_lim_ver); + return -ENOSYS; + } + + /* initialize all members to zero */ + memset(pll_lim, 0, sizeof(struct pll_lims)); + + if (pll_lim_ver == 0x10 || pll_lim_ver == 0x11) { + uint8_t *pll_rec = &bios->data[bios->pll_limit_tbl_ptr + headerlen + recordlen * pllindex]; + + pll_lim->vco1.minfreq = ROM32(pll_rec[0]); + pll_lim->vco1.maxfreq = ROM32(pll_rec[4]); + pll_lim->vco2.minfreq = ROM32(pll_rec[8]); + pll_lim->vco2.maxfreq = ROM32(pll_rec[12]); + pll_lim->vco1.min_inputfreq = ROM32(pll_rec[16]); + pll_lim->vco2.min_inputfreq = ROM32(pll_rec[20]); + pll_lim->vco1.max_inputfreq = pll_lim->vco2.max_inputfreq = INT_MAX; + + /* these values taken from nv30/31/36 */ + pll_lim->vco1.min_n = 0x1; + if (cv == 0x36) + pll_lim->vco1.min_n = 0x5; + pll_lim->vco1.max_n = 0xff; + pll_lim->vco1.min_m = 0x1; + pll_lim->vco1.max_m = 0xd; + pll_lim->vco2.min_n = 0x4; + /* + * On nv30, 31, 36 (i.e. all cards with two stage PLLs with this + * table version (apart from nv35)), N2 is compared to + * maxN2 (0x46) and 10 * maxM2 (0x4), so set maxN2 to 0x28 and + * save a comparison + */ + pll_lim->vco2.max_n = 0x28; + if (cv == 0x30 || cv == 0x35) + /* only 5 bits available for N2 on nv30/35 */ + pll_lim->vco2.max_n = 0x1f; + pll_lim->vco2.min_m = 0x1; + pll_lim->vco2.max_m = 0x4; + pll_lim->max_log2p = 0x7; + pll_lim->max_usable_log2p = 0x6; + } else if (pll_lim_ver == 0x20 || pll_lim_ver == 0x21) { + uint16_t plloffs = bios->pll_limit_tbl_ptr + headerlen; + uint32_t reg = 0; /* default match */ + uint8_t *pll_rec; + int i; + + /* + * First entry is default match, if nothing better. warn if + * reg field nonzero + */ + if (ROM32(bios->data[plloffs])) + NV_WARN(dev, "Default PLL limit entry has non-zero " + "register field\n"); + + if (limit_match > MAX_PLL_TYPES) + /* we've been passed a reg as the match */ + reg = limit_match; + else /* limit match is a pll type */ + for (i = 1; i < entries && !reg; i++) { + uint32_t cmpreg = ROM32(bios->data[plloffs + recordlen * i]); + + if (limit_match == NVPLL && + (cmpreg == NV_PRAMDAC_NVPLL_COEFF || cmpreg == 0x4000)) + reg = cmpreg; + if (limit_match == MPLL && + (cmpreg == NV_PRAMDAC_MPLL_COEFF || cmpreg == 0x4020)) + reg = cmpreg; + if (limit_match == VPLL1 && + (cmpreg == NV_PRAMDAC_VPLL_COEFF || cmpreg == 0x4010)) + reg = cmpreg; + if (limit_match == VPLL2 && + (cmpreg == NV_RAMDAC_VPLL2 || cmpreg == 0x4018)) + reg = cmpreg; + } + + for (i = 1; i < entries; i++) + if (ROM32(bios->data[plloffs + recordlen * i]) == reg) { + pllindex = i; + break; + } + + pll_rec = &bios->data[plloffs + recordlen * pllindex]; + + BIOSLOG(bios, "Loading PLL limits for reg 0x%08x\n", + pllindex ? reg : 0); + + /* + * Frequencies are stored in tables in MHz, kHz are more + * useful, so we convert. + */ + + /* What output frequencies can each VCO generate? */ + pll_lim->vco1.minfreq = ROM16(pll_rec[4]) * 1000; + pll_lim->vco1.maxfreq = ROM16(pll_rec[6]) * 1000; + pll_lim->vco2.minfreq = ROM16(pll_rec[8]) * 1000; + pll_lim->vco2.maxfreq = ROM16(pll_rec[10]) * 1000; + + /* What input frequencies they accept (past the m-divider)? 
*/ + pll_lim->vco1.min_inputfreq = ROM16(pll_rec[12]) * 1000; + pll_lim->vco2.min_inputfreq = ROM16(pll_rec[14]) * 1000; + pll_lim->vco1.max_inputfreq = ROM16(pll_rec[16]) * 1000; + pll_lim->vco2.max_inputfreq = ROM16(pll_rec[18]) * 1000; + + /* What values are accepted as multiplier and divider? */ + pll_lim->vco1.min_n = pll_rec[20]; + pll_lim->vco1.max_n = pll_rec[21]; + pll_lim->vco1.min_m = pll_rec[22]; + pll_lim->vco1.max_m = pll_rec[23]; + pll_lim->vco2.min_n = pll_rec[24]; + pll_lim->vco2.max_n = pll_rec[25]; + pll_lim->vco2.min_m = pll_rec[26]; + pll_lim->vco2.max_m = pll_rec[27]; + + pll_lim->max_usable_log2p = pll_lim->max_log2p = pll_rec[29]; + if (pll_lim->max_log2p > 0x7) + /* pll decoding in nv_hw.c assumes never > 7 */ + NV_WARN(dev, "Max log2 P value greater than 7 (%d)\n", + pll_lim->max_log2p); + if (cv < 0x60) + pll_lim->max_usable_log2p = 0x6; + pll_lim->log2p_bias = pll_rec[30]; + + if (recordlen > 0x22) + pll_lim->refclk = ROM32(pll_rec[31]); + + if (recordlen > 0x23 && pll_rec[35]) + NV_WARN(dev, + "Bits set in PLL configuration byte (%x)\n", + pll_rec[35]); + + /* C51 special not seen elsewhere */ + if (cv == 0x51 && !pll_lim->refclk) { + uint32_t sel_clk = bios_rd32(bios, NV_PRAMDAC_SEL_CLK); + + if (((limit_match == NV_PRAMDAC_VPLL_COEFF || limit_match == VPLL1) && sel_clk & 0x20) || + ((limit_match == NV_RAMDAC_VPLL2 || limit_match == VPLL2) && sel_clk & 0x80)) { + if (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_CHIP_ID_INDEX) < 0xa3) + pll_lim->refclk = 200000; + else + pll_lim->refclk = 25000; + } + } + } else if (pll_lim_ver == 0x30) { /* ver 0x30 */ + uint8_t *entry = &bios->data[bios->pll_limit_tbl_ptr + headerlen]; + uint8_t *record = NULL; + int i; + + BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n", + limit_match); + + for (i = 0; i < entries; i++, entry += recordlen) { + if (ROM32(entry[3]) == limit_match) { + record = &bios->data[ROM16(entry[1])]; + break; + } + } + + if (!record) { + NV_ERROR(dev, "Register 0x%08x not found in PLL " + "limits table", limit_match); + return -ENOENT; + } + + pll_lim->vco1.minfreq = ROM16(record[0]) * 1000; + pll_lim->vco1.maxfreq = ROM16(record[2]) * 1000; + pll_lim->vco2.minfreq = ROM16(record[4]) * 1000; + pll_lim->vco2.maxfreq = ROM16(record[6]) * 1000; + pll_lim->vco1.min_inputfreq = ROM16(record[8]) * 1000; + pll_lim->vco2.min_inputfreq = ROM16(record[10]) * 1000; + pll_lim->vco1.max_inputfreq = ROM16(record[12]) * 1000; + pll_lim->vco2.max_inputfreq = ROM16(record[14]) * 1000; + pll_lim->vco1.min_n = record[16]; + pll_lim->vco1.max_n = record[17]; + pll_lim->vco1.min_m = record[18]; + pll_lim->vco1.max_m = record[19]; + pll_lim->vco2.min_n = record[20]; + pll_lim->vco2.max_n = record[21]; + pll_lim->vco2.min_m = record[22]; + pll_lim->vco2.max_m = record[23]; + pll_lim->max_usable_log2p = pll_lim->max_log2p = record[25]; + pll_lim->log2p_bias = record[27]; + pll_lim->refclk = ROM32(record[28]); + } else if (pll_lim_ver) { /* ver 0x40 */ + uint8_t *entry = &bios->data[bios->pll_limit_tbl_ptr + headerlen]; + uint8_t *record = NULL; + int i; + + BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n", + limit_match); + + for (i = 0; i < entries; i++, entry += recordlen) { + if (ROM32(entry[3]) == limit_match) { + record = &bios->data[ROM16(entry[1])]; + break; + } + } + + if (!record) { + NV_ERROR(dev, "Register 0x%08x not found in PLL " + "limits table", limit_match); + return -ENOENT; + } + + pll_lim->vco1.minfreq = ROM16(record[0]) * 1000; + pll_lim->vco1.maxfreq = ROM16(record[2]) * 1000; + 
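+		/*
+		 * Version 0x40 records seem to describe a single VCO only, so
+		 * just the vco1 members get filled in; as with the older table
+		 * versions the frequencies are stored in MHz and converted to
+		 * kHz here.
+		 */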
pll_lim->vco1.min_inputfreq = ROM16(record[4]) * 1000; + pll_lim->vco1.max_inputfreq = ROM16(record[6]) * 1000; + pll_lim->vco1.min_m = record[8]; + pll_lim->vco1.max_m = record[9]; + pll_lim->vco1.min_n = record[10]; + pll_lim->vco1.max_n = record[11]; + pll_lim->min_p = record[12]; + pll_lim->max_p = record[13]; + /* where did this go to?? */ + if (limit_match == 0x00614100 || limit_match == 0x00614900) + pll_lim->refclk = 27000; + else + pll_lim->refclk = 100000; + } + + /* + * By now any valid limit table ought to have set a max frequency for + * vco1, so if it's zero it's either a pre limit table bios, or one + * with an empty limit table (seen on nv18) + */ + if (!pll_lim->vco1.maxfreq) { + pll_lim->vco1.minfreq = bios->fminvco; + pll_lim->vco1.maxfreq = bios->fmaxvco; + pll_lim->vco1.min_inputfreq = 0; + pll_lim->vco1.max_inputfreq = INT_MAX; + pll_lim->vco1.min_n = 0x1; + pll_lim->vco1.max_n = 0xff; + pll_lim->vco1.min_m = 0x1; + if (crystal_straps == 0) { + /* nv05 does this, nv11 doesn't, nv10 unknown */ + if (cv < 0x11) + pll_lim->vco1.min_m = 0x7; + pll_lim->vco1.max_m = 0xd; + } else { + if (cv < 0x11) + pll_lim->vco1.min_m = 0x8; + pll_lim->vco1.max_m = 0xe; + } + if (cv < 0x17 || cv == 0x1a || cv == 0x20) + pll_lim->max_log2p = 4; + else + pll_lim->max_log2p = 5; + pll_lim->max_usable_log2p = pll_lim->max_log2p; + } + + if (!pll_lim->refclk) + switch (crystal_straps) { + case 0: + pll_lim->refclk = 13500; + break; + case (1 << 6): + pll_lim->refclk = 14318; + break; + case (1 << 22): + pll_lim->refclk = 27000; + break; + case (1 << 22 | 1 << 6): + pll_lim->refclk = 25000; + break; + } + +#if 0 /* for easy debugging */ + ErrorF("pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq); + ErrorF("pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq); + ErrorF("pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq); + ErrorF("pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq); + + ErrorF("pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq); + ErrorF("pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq); + ErrorF("pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq); + ErrorF("pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq); + + ErrorF("pll.vco1.min_n: %d\n", pll_lim->vco1.min_n); + ErrorF("pll.vco1.max_n: %d\n", pll_lim->vco1.max_n); + ErrorF("pll.vco1.min_m: %d\n", pll_lim->vco1.min_m); + ErrorF("pll.vco1.max_m: %d\n", pll_lim->vco1.max_m); + ErrorF("pll.vco2.min_n: %d\n", pll_lim->vco2.min_n); + ErrorF("pll.vco2.max_n: %d\n", pll_lim->vco2.max_n); + ErrorF("pll.vco2.min_m: %d\n", pll_lim->vco2.min_m); + ErrorF("pll.vco2.max_m: %d\n", pll_lim->vco2.max_m); + + ErrorF("pll.max_log2p: %d\n", pll_lim->max_log2p); + ErrorF("pll.log2p_bias: %d\n", pll_lim->log2p_bias); + + ErrorF("pll.refclk: %d\n", pll_lim->refclk); +#endif + + return 0; +} + +static void parse_bios_version(struct drm_device *dev, struct nvbios *bios, uint16_t offset) +{ + /* + * offset + 0 (8 bits): Micro version + * offset + 1 (8 bits): Minor version + * offset + 2 (8 bits): Chip version + * offset + 3 (8 bits): Major version + */ + + bios->major_version = bios->data[offset + 3]; + bios->pub.chip_version = bios->data[offset + 2]; + NV_TRACE(dev, "Bios version %02x.%02x.%02x.%02x\n", + bios->data[offset + 3], bios->data[offset + 2], + bios->data[offset + 1], bios->data[offset]); +} + +static void parse_script_table_pointers(struct nvbios *bios, uint16_t offset) +{ + /* + * Parses the init table segment for pointers used in script execution. 
+ * + * offset + 0 (16 bits): init script tables pointer + * offset + 2 (16 bits): macro index table pointer + * offset + 4 (16 bits): macro table pointer + * offset + 6 (16 bits): condition table pointer + * offset + 8 (16 bits): io condition table pointer + * offset + 10 (16 bits): io flag condition table pointer + * offset + 12 (16 bits): init function table pointer + */ + + bios->init_script_tbls_ptr = ROM16(bios->data[offset]); + bios->macro_index_tbl_ptr = ROM16(bios->data[offset + 2]); + bios->macro_tbl_ptr = ROM16(bios->data[offset + 4]); + bios->condition_tbl_ptr = ROM16(bios->data[offset + 6]); + bios->io_condition_tbl_ptr = ROM16(bios->data[offset + 8]); + bios->io_flag_condition_tbl_ptr = ROM16(bios->data[offset + 10]); + bios->init_function_tbl_ptr = ROM16(bios->data[offset + 12]); +} + +static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry) +{ + /* + * Parses the load detect values for g80 cards. + * + * offset + 0 (16 bits): loadval table pointer + */ + + uint16_t load_table_ptr; + uint8_t version, headerlen, entrylen, num_entries; + + if (bitentry->length != 3) { + NV_ERROR(dev, "Do not understand BIT A table\n"); + return -EINVAL; + } + + load_table_ptr = ROM16(bios->data[bitentry->offset]); + + if (load_table_ptr == 0x0) { + NV_ERROR(dev, "Pointer to BIT loadval table invalid\n"); + return -EINVAL; + } + + version = bios->data[load_table_ptr]; + + if (version != 0x10) { + NV_ERROR(dev, "BIT loadval table version %d.%d not supported\n", + version >> 4, version & 0xF); + return -ENOSYS; + } + + headerlen = bios->data[load_table_ptr + 1]; + entrylen = bios->data[load_table_ptr + 2]; + num_entries = bios->data[load_table_ptr + 3]; + + if (headerlen != 4 || entrylen != 4 || num_entries != 2) { + NV_ERROR(dev, "Do not understand BIT loadval table\n"); + return -EINVAL; + } + + /* First entry is normal dac, 2nd tv-out perhaps? */ + bios->pub.dactestval = ROM32(bios->data[load_table_ptr + headerlen]) & 0x3ff; + + return 0; +} + +static int parse_bit_C_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry) +{ + /* + * offset + 8 (16 bits): PLL limits table pointer + * + * There's more in here, but that's unknown. + */ + + if (bitentry->length < 10) { + NV_ERROR(dev, "Do not understand BIT C table\n"); + return -EINVAL; + } + + bios->pll_limit_tbl_ptr = ROM16(bios->data[bitentry->offset + 8]); + + return 0; +} + +static int parse_bit_display_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry) +{ + /* + * Parses the flat panel table segment that the bit entry points to. + * Starting at bitentry->offset: + * + * offset + 0 (16 bits): ??? table pointer - seems to have 18 byte + * records beginning with a freq. + * offset + 2 (16 bits): mode table pointer + */ + + if (bitentry->length != 4) { + NV_ERROR(dev, "Do not understand BIT display table\n"); + return -EINVAL; + } + + bios->fp.fptablepointer = ROM16(bios->data[bitentry->offset + 2]); + + return 0; +} + +static int parse_bit_init_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry) +{ + /* + * Parses the init table segment that the bit entry points to. 
+ * + * See parse_script_table_pointers for layout + */ + + if (bitentry->length < 14) { + NV_ERROR(dev, "Do not understand init table\n"); + return -EINVAL; + } + + parse_script_table_pointers(bios, bitentry->offset); + + if (bitentry->length >= 16) + bios->some_script_ptr = ROM16(bios->data[bitentry->offset + 14]); + if (bitentry->length >= 18) + bios->init96_tbl_ptr = ROM16(bios->data[bitentry->offset + 16]); + + return 0; +} + +static int parse_bit_i_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry) +{ + /* + * BIT 'i' (info?) table + * + * offset + 0 (32 bits): BIOS version dword (as in B table) + * offset + 5 (8 bits): BIOS feature byte (same as for BMP?) + * offset + 13 (16 bits): pointer to table containing DAC load + * detection comparison values + * + * There's other things in the table, purpose unknown + */ + + uint16_t daccmpoffset; + uint8_t dacver, dacheaderlen; + + if (bitentry->length < 6) { + NV_ERROR(dev, "BIT i table too short for needed information\n"); + return -EINVAL; + } + + parse_bios_version(dev, bios, bitentry->offset); + + /* + * bit 4 seems to indicate a mobile bios (doesn't suffer from BMP's + * Quadro identity crisis), other bits possibly as for BMP feature byte + */ + bios->feature_byte = bios->data[bitentry->offset + 5]; + bios->is_mobile = bios->feature_byte & FEATURE_MOBILE; + + if (bitentry->length < 15) { + NV_WARN(dev, "BIT i table not long enough for DAC load " + "detection comparison table\n"); + return -EINVAL; + } + + daccmpoffset = ROM16(bios->data[bitentry->offset + 13]); + + /* doesn't exist on g80 */ + if (!daccmpoffset) + return 0; + + /* + * The first value in the table, following the header, is the + * comparison value, the second entry is a comparison value for + * TV load detection. + */ + + dacver = bios->data[daccmpoffset]; + dacheaderlen = bios->data[daccmpoffset + 1]; + + if (dacver != 0x00 && dacver != 0x10) { + NV_WARN(dev, "DAC load detection comparison table version " + "%d.%d not known\n", dacver >> 4, dacver & 0xf); + return -ENOSYS; + } + + bios->pub.dactestval = ROM32(bios->data[daccmpoffset + dacheaderlen]); + bios->pub.tvdactestval = ROM32(bios->data[daccmpoffset + dacheaderlen + 4]); + + return 0; +} + +static int parse_bit_lvds_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry) +{ + /* + * Parses the LVDS table segment that the bit entry points to. + * Starting at bitentry->offset: + * + * offset + 0 (16 bits): LVDS strap xlate table pointer + */ + + if (bitentry->length != 2) { + NV_ERROR(dev, "Do not understand BIT LVDS table\n"); + return -EINVAL; + } + + /* + * No idea if it's still called the LVDS manufacturer table, but + * the concept's close enough. 
+ */ + bios->fp.lvdsmanufacturerpointer = ROM16(bios->data[bitentry->offset]); + + return 0; +} + +static int +parse_bit_M_tbl_entry(struct drm_device *dev, struct nvbios *bios, + struct bit_entry *bitentry) +{ + /* + * offset + 2 (8 bits): number of options in an + * INIT_RAM_RESTRICT_ZM_REG_GROUP opcode option set + * offset + 3 (16 bits): pointer to strap xlate table for RAM + * restrict option selection + * + * There's a bunch of bits in this table other than the RAM restrict + * stuff that we don't use - their use currently unknown + */ + + uint16_t rr_strap_xlat; + uint8_t rr_group_count; + int i; + + /* + * Older bios versions don't have a sufficiently long table for + * what we want + */ + if (bitentry->length < 0x5) + return 0; + + if (bitentry->id[1] < 2) { + rr_group_count = bios->data[bitentry->offset + 2]; + rr_strap_xlat = ROM16(bios->data[bitentry->offset + 3]); + } else { + rr_group_count = bios->data[bitentry->offset + 0]; + rr_strap_xlat = ROM16(bios->data[bitentry->offset + 1]); + } + + /* adjust length of INIT_87 */ + for (i = 0; itbl_entry[i].name && (itbl_entry[i].id != 0x87); i++); + itbl_entry[i].length += rr_group_count * 4; + + /* set up multiplier for INIT_RAM_RESTRICT_ZM_REG_GROUP */ + for (; itbl_entry[i].name && (itbl_entry[i].id != 0x8f); i++); + itbl_entry[i].length_multiplier = rr_group_count * 4; + + init_ram_restrict_zm_reg_group_blocklen = itbl_entry[i].length_multiplier; + bios->ram_restrict_tbl_ptr = rr_strap_xlat; + + return 0; +} + +static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry) +{ + /* + * Parses the pointer to the TMDS table + * + * Starting at bitentry->offset: + * + * offset + 0 (16 bits): TMDS table pointer + * + * The TMDS table is typically found just before the DCB table, with a + * characteristic signature of 0x11,0x13 (1.1 being version, 0x13 being + * length?) + * + * At offset +7 is a pointer to a script, which I don't know how to + * run yet. + * At offset +9 is a pointer to another script, likewise + * Offset +11 has a pointer to a table where the first word is a pxclk + * frequency and the second word a pointer to a script, which should be + * run if the comparison pxclk frequency is less than the pxclk desired. + * This repeats for decreasing comparison frequencies + * Offset +13 has a pointer to a similar table + * The selection of table (and possibly +7/+9 script) is dictated by + * "or" from the DCB. + */ + + uint16_t tmdstableptr, script1, script2; + + if (bitentry->length != 2) { + NV_ERROR(dev, "Do not understand BIT TMDS table\n"); + return -EINVAL; + } + + tmdstableptr = ROM16(bios->data[bitentry->offset]); + + if (tmdstableptr == 0x0) { + NV_ERROR(dev, "Pointer to TMDS table invalid\n"); + return -EINVAL; + } + + /* nv50+ has v2.0, but we don't parse it atm */ + if (bios->data[tmdstableptr] != 0x11) { + NV_WARN(dev, + "TMDS table revision %d.%d not currently supported\n", + bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf); + return -ENOSYS; + } + + /* + * These two scripts are odd: they don't seem to get run even when + * they are not stubbed. 
+ */ + script1 = ROM16(bios->data[tmdstableptr + 7]); + script2 = ROM16(bios->data[tmdstableptr + 9]); + if (bios->data[script1] != 'q' || bios->data[script2] != 'q') + NV_WARN(dev, "TMDS table script pointers not stubbed\n"); + + bios->tmds.output0_script_ptr = ROM16(bios->data[tmdstableptr + 11]); + bios->tmds.output1_script_ptr = ROM16(bios->data[tmdstableptr + 13]); + + return 0; +} + +static int +parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios, + struct bit_entry *bitentry) +{ + /* + * Parses the pointer to the G80 output script tables + * + * Starting at bitentry->offset: + * + * offset + 0 (16 bits): output script table pointer + */ + + uint16_t outputscripttableptr; + + if (bitentry->length != 3) { + NV_ERROR(dev, "Do not understand BIT U table\n"); + return -EINVAL; + } + + outputscripttableptr = ROM16(bios->data[bitentry->offset]); + bios->display.script_table_ptr = outputscripttableptr; + return 0; +} + +static int +parse_bit_displayport_tbl_entry(struct drm_device *dev, struct nvbios *bios, + struct bit_entry *bitentry) +{ + bios->display.dp_table_ptr = ROM16(bios->data[bitentry->offset]); + return 0; +} + +struct bit_table { + const char id; + int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *); +}; + +#define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry }) + +static int +parse_bit_table(struct nvbios *bios, const uint16_t bitoffset, + struct bit_table *table) +{ + struct drm_device *dev = bios->dev; + uint8_t maxentries = bios->data[bitoffset + 4]; + int i, offset; + struct bit_entry bitentry; + + for (i = 0, offset = bitoffset + 6; i < maxentries; i++, offset += 6) { + bitentry.id[0] = bios->data[offset]; + + if (bitentry.id[0] != table->id) + continue; + + bitentry.id[1] = bios->data[offset + 1]; + bitentry.length = ROM16(bios->data[offset + 2]); + bitentry.offset = ROM16(bios->data[offset + 4]); + + return table->parse_fn(dev, bios, &bitentry); + } + + NV_INFO(dev, "BIT table '%c' not found\n", table->id); + return -ENOSYS; +} + +static int +parse_bit_structure(struct nvbios *bios, const uint16_t bitoffset) +{ + int ret; + + /* + * The only restriction on parsing order currently is having 'i' first + * for use of bios->*_version or bios->feature_byte while parsing; + * functions shouldn't be actually *doing* anything apart from pulling + * data from the image into the bios struct, thus no interdependencies + */ + ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('i', i)); + if (ret) /* info? */ + return ret; + if (bios->major_version >= 0x60) /* g80+ */ + parse_bit_table(bios, bitoffset, &BIT_TABLE('A', A)); + ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('C', C)); + if (ret) + return ret; + parse_bit_table(bios, bitoffset, &BIT_TABLE('D', display)); + ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('I', init)); + if (ret) + return ret; + parse_bit_table(bios, bitoffset, &BIT_TABLE('M', M)); /* memory? 
*/ + parse_bit_table(bios, bitoffset, &BIT_TABLE('L', lvds)); + parse_bit_table(bios, bitoffset, &BIT_TABLE('T', tmds)); + parse_bit_table(bios, bitoffset, &BIT_TABLE('U', U)); + parse_bit_table(bios, bitoffset, &BIT_TABLE('d', displayport)); + + return 0; +} + +static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsigned int offset) +{ + /* + * Parses the BMP structure for useful things, but does not act on them + * + * offset + 5: BMP major version + * offset + 6: BMP minor version + * offset + 9: BMP feature byte + * offset + 10: BCD encoded BIOS version + * + * offset + 18: init script table pointer (for bios versions < 5.10h) + * offset + 20: extra init script table pointer (for bios + * versions < 5.10h) + * + * offset + 24: memory init table pointer (used on early bios versions) + * offset + 26: SDR memory sequencing setup data table + * offset + 28: DDR memory sequencing setup data table + * + * offset + 54: index of I2C CRTC pair to use for CRT output + * offset + 55: index of I2C CRTC pair to use for TV output + * offset + 56: index of I2C CRTC pair to use for flat panel output + * offset + 58: write CRTC index for I2C pair 0 + * offset + 59: read CRTC index for I2C pair 0 + * offset + 60: write CRTC index for I2C pair 1 + * offset + 61: read CRTC index for I2C pair 1 + * + * offset + 67: maximum internal PLL frequency (single stage PLL) + * offset + 71: minimum internal PLL frequency (single stage PLL) + * + * offset + 75: script table pointers, as described in + * parse_script_table_pointers + * + * offset + 89: TMDS single link output A table pointer + * offset + 91: TMDS single link output B table pointer + * offset + 95: LVDS single link output A table pointer + * offset + 105: flat panel timings table pointer + * offset + 107: flat panel strapping translation table pointer + * offset + 117: LVDS manufacturer panel config table pointer + * offset + 119: LVDS manufacturer strapping translation table pointer + * + * offset + 142: PLL limits table pointer + * + * offset + 156: minimum pixel clock for LVDS dual link + */ + + uint8_t *bmp = &bios->data[offset], bmp_version_major, bmp_version_minor; + uint16_t bmplength; + uint16_t legacy_scripts_offset, legacy_i2c_offset; + + /* load needed defaults in case we can't parse this info */ + bios->bdcb.dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX; + bios->bdcb.dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX; + bios->bdcb.dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX; + bios->bdcb.dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX; + bios->pub.digital_min_front_porch = 0x4b; + bios->fmaxvco = 256000; + bios->fminvco = 128000; + bios->fp.duallink_transition_clk = 90000; + + bmp_version_major = bmp[5]; + bmp_version_minor = bmp[6]; + + NV_TRACE(dev, "BMP version %d.%d\n", + bmp_version_major, bmp_version_minor); + + /* + * Make sure that 0x36 is blank and can't be mistaken for a DCB + * pointer on early versions + */ + if (bmp_version_major < 5) + *(uint16_t *)&bios->data[0x36] = 0; + + /* + * Seems that the minor version was 1 for all major versions prior + * to 5. Version 6 could theoretically exist, but I suspect BIT + * happened instead. + */ + if ((bmp_version_major < 5 && bmp_version_minor != 1) || bmp_version_major > 5) { + NV_ERROR(dev, "You have an unsupported BMP version. 
" + "Please send in your bios\n"); + return -ENOSYS; + } + + if (bmp_version_major == 0) + /* nothing that's currently useful in this version */ + return 0; + else if (bmp_version_major == 1) + bmplength = 44; /* exact for 1.01 */ + else if (bmp_version_major == 2) + bmplength = 48; /* exact for 2.01 */ + else if (bmp_version_major == 3) + bmplength = 54; + /* guessed - mem init tables added in this version */ + else if (bmp_version_major == 4 || bmp_version_minor < 0x1) + /* don't know if 5.0 exists... */ + bmplength = 62; + /* guessed - BMP I2C indices added in version 4*/ + else if (bmp_version_minor < 0x6) + bmplength = 67; /* exact for 5.01 */ + else if (bmp_version_minor < 0x10) + bmplength = 75; /* exact for 5.06 */ + else if (bmp_version_minor == 0x10) + bmplength = 89; /* exact for 5.10h */ + else if (bmp_version_minor < 0x14) + bmplength = 118; /* exact for 5.11h */ + else if (bmp_version_minor < 0x24) + /* + * Not sure of version where pll limits came in; + * certainly exist by 0x24 though. + */ + /* length not exact: this is long enough to get lvds members */ + bmplength = 123; + else if (bmp_version_minor < 0x27) + /* + * Length not exact: this is long enough to get pll limit + * member + */ + bmplength = 144; + else + /* + * Length not exact: this is long enough to get dual link + * transition clock. + */ + bmplength = 158; + + /* checksum */ + if (nv_cksum(bmp, 8)) { + NV_ERROR(dev, "Bad BMP checksum\n"); + return -EINVAL; + } + + /* + * Bit 4 seems to indicate either a mobile bios or a quadro card -- + * mobile behaviour consistent (nv11+), quadro only seen nv18gl-nv36gl + * (not nv10gl), bit 5 that the flat panel tables are present, and + * bit 6 a tv bios. + */ + bios->feature_byte = bmp[9]; + + parse_bios_version(dev, bios, offset + 10); + + if (bmp_version_major < 5 || bmp_version_minor < 0x10) + bios->old_style_init = true; + legacy_scripts_offset = 18; + if (bmp_version_major < 2) + legacy_scripts_offset -= 4; + bios->init_script_tbls_ptr = ROM16(bmp[legacy_scripts_offset]); + bios->extra_init_script_tbl_ptr = ROM16(bmp[legacy_scripts_offset + 2]); + + if (bmp_version_major > 2) { /* appears in BMP 3 */ + bios->legacy.mem_init_tbl_ptr = ROM16(bmp[24]); + bios->legacy.sdr_seq_tbl_ptr = ROM16(bmp[26]); + bios->legacy.ddr_seq_tbl_ptr = ROM16(bmp[28]); + } + + legacy_i2c_offset = 0x48; /* BMP version 2 & 3 */ + if (bmplength > 61) + legacy_i2c_offset = offset + 54; + bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset]; + bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1]; + bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2]; + bios->bdcb.dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4]; + bios->bdcb.dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5]; + bios->bdcb.dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6]; + bios->bdcb.dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7]; + + if (bmplength > 74) { + bios->fmaxvco = ROM32(bmp[67]); + bios->fminvco = ROM32(bmp[71]); + } + if (bmplength > 88) + parse_script_table_pointers(bios, offset + 75); + if (bmplength > 94) { + bios->tmds.output0_script_ptr = ROM16(bmp[89]); + bios->tmds.output1_script_ptr = ROM16(bmp[91]); + /* + * Never observed in use with lvds scripts, but is reused for + * 18/24 bit panel interface default for EDID equipped panels + * (if_is_24bit not set directly to avoid any oscillation). 
+ */ + bios->legacy.lvds_single_a_script_ptr = ROM16(bmp[95]); + } + if (bmplength > 108) { + bios->fp.fptablepointer = ROM16(bmp[105]); + bios->fp.fpxlatetableptr = ROM16(bmp[107]); + bios->fp.xlatwidth = 1; + } + if (bmplength > 120) { + bios->fp.lvdsmanufacturerpointer = ROM16(bmp[117]); + bios->fp.fpxlatemanufacturertableptr = ROM16(bmp[119]); + } + if (bmplength > 143) + bios->pll_limit_tbl_ptr = ROM16(bmp[142]); + + if (bmplength > 157) + bios->fp.duallink_transition_clk = ROM16(bmp[156]) * 10; + + return 0; +} + +static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len) +{ + int i, j; + + for (i = 0; i <= (n - len); i++) { + for (j = 0; j < len; j++) + if (data[i + j] != str[j]) + break; + if (j == len) + return i; + } + + return 0; +} + +static int +read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c) +{ + uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4; + int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES; + int recordoffset = 0, rdofs = 1, wrofs = 0; + uint8_t port_type = 0; + + if (!i2ctable) + return -EINVAL; + + if (dcb_version >= 0x30) { + if (i2ctable[0] != dcb_version) /* necessary? */ + NV_WARN(dev, + "DCB I2C table version mismatch (%02X vs %02X)\n", + i2ctable[0], dcb_version); + dcb_i2c_ver = i2ctable[0]; + headerlen = i2ctable[1]; + if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES) + i2c_entries = i2ctable[2]; + else + NV_WARN(dev, + "DCB I2C table has more entries than indexable " + "(%d entries, max index 15)\n", i2ctable[2]); + entry_len = i2ctable[3]; + /* [4] is i2c_default_indices, read in parse_dcb_table() */ + } + /* + * It's your own fault if you call this function on a DCB 1.1 BIOS -- + * the test below is for DCB 1.2 + */ + if (dcb_version < 0x14) { + recordoffset = 2; + rdofs = 0; + wrofs = 1; + } + + if (index == 0xf) + return 0; + if (index > i2c_entries) { + NV_ERROR(dev, "DCB I2C index too big (%d > %d)\n", + index, i2ctable[2]); + return -ENOENT; + } + if (i2ctable[headerlen + entry_len * index + 3] == 0xff) { + NV_ERROR(dev, "DCB I2C entry invalid\n"); + return -EINVAL; + } + + if (dcb_i2c_ver >= 0x30) { + port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index]; + + /* + * Fixup for chips using same address offset for read and + * write. 
+ */ + if (port_type == 4) /* seen on C51 */ + rdofs = wrofs = 1; + if (port_type >= 5) /* G80+ */ + rdofs = wrofs = 0; + } + + if (dcb_i2c_ver >= 0x40 && port_type != 5 && port_type != 6) + NV_WARN(dev, "DCB I2C table has port type %d\n", port_type); + + i2c->port_type = port_type; + i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index]; + i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index]; + + return 0; +} + +static struct dcb_gpio_entry * +new_gpio_entry(struct nvbios *bios) +{ + struct parsed_dcb_gpio *gpio = &bios->bdcb.gpio; + + return &gpio->entry[gpio->entries++]; +} + +struct dcb_gpio_entry * +nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvbios *bios = &dev_priv->VBIOS; + int i; + + for (i = 0; i < bios->bdcb.gpio.entries; i++) { + if (bios->bdcb.gpio.entry[i].tag != tag) + continue; + + return &bios->bdcb.gpio.entry[i]; + } + + return NULL; +} + +static void +parse_dcb30_gpio_entry(struct nvbios *bios, uint16_t offset) +{ + struct dcb_gpio_entry *gpio; + uint16_t ent = ROM16(bios->data[offset]); + uint8_t line = ent & 0x1f, + tag = ent >> 5 & 0x3f, + flags = ent >> 11 & 0x1f; + + if (tag == 0x3f) + return; + + gpio = new_gpio_entry(bios); + + gpio->tag = tag; + gpio->line = line; + gpio->invert = flags != 4; +} + +static void +parse_dcb40_gpio_entry(struct nvbios *bios, uint16_t offset) +{ + struct dcb_gpio_entry *gpio; + uint32_t ent = ROM32(bios->data[offset]); + uint8_t line = ent & 0x1f, + tag = ent >> 8 & 0xff; + + if (tag == 0xff) + return; + + gpio = new_gpio_entry(bios); + + /* Currently unused, we may need more fields parsed at some + * point. */ + gpio->tag = tag; + gpio->line = line; +} + +static void +parse_dcb_gpio_table(struct nvbios *bios) +{ + struct drm_device *dev = bios->dev; + uint16_t gpio_table_ptr = bios->bdcb.gpio_table_ptr; + uint8_t *gpio_table = &bios->data[gpio_table_ptr]; + int header_len = gpio_table[1], + entries = gpio_table[2], + entry_len = gpio_table[3]; + void (*parse_entry)(struct nvbios *, uint16_t) = NULL; + int i; + + if (bios->bdcb.version >= 0x40) { + if (gpio_table_ptr && entry_len != 4) { + NV_WARN(dev, "Invalid DCB GPIO table entry length.\n"); + return; + } + + parse_entry = parse_dcb40_gpio_entry; + + } else if (bios->bdcb.version >= 0x30) { + if (gpio_table_ptr && entry_len != 2) { + NV_WARN(dev, "Invalid DCB GPIO table entry length.\n"); + return; + } + + parse_entry = parse_dcb30_gpio_entry; + + } else if (bios->bdcb.version >= 0x22) { + /* + * DCBs older than v3.0 don't really have a GPIO + * table, instead they keep some GPIO info at fixed + * locations. 
+ */ + uint16_t dcbptr = ROM16(bios->data[0x36]); + uint8_t *tvdac_gpio = &bios->data[dcbptr - 5]; + + if (tvdac_gpio[0] & 1) { + struct dcb_gpio_entry *gpio = new_gpio_entry(bios); + + gpio->tag = DCB_GPIO_TVDAC0; + gpio->line = tvdac_gpio[1] >> 4; + gpio->invert = tvdac_gpio[0] & 2; + } + } + + if (!gpio_table_ptr) + return; + + if (entries > DCB_MAX_NUM_GPIO_ENTRIES) { + NV_WARN(dev, "Too many entries in the DCB GPIO table.\n"); + entries = DCB_MAX_NUM_GPIO_ENTRIES; + } + + for (i = 0; i < entries; i++) + parse_entry(bios, gpio_table_ptr + header_len + entry_len * i); +} + +struct dcb_connector_table_entry * +nouveau_bios_connector_entry(struct drm_device *dev, int index) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvbios *bios = &dev_priv->VBIOS; + struct dcb_connector_table_entry *cte; + + if (index >= bios->bdcb.connector.entries) + return NULL; + + cte = &bios->bdcb.connector.entry[index]; + if (cte->type == 0xff) + return NULL; + + return cte; +} + +static void +parse_dcb_connector_table(struct nvbios *bios) +{ + struct drm_device *dev = bios->dev; + struct dcb_connector_table *ct = &bios->bdcb.connector; + struct dcb_connector_table_entry *cte; + uint8_t *conntab = &bios->data[bios->bdcb.connector_table_ptr]; + uint8_t *entry; + int i; + + if (!bios->bdcb.connector_table_ptr) { + NV_DEBUG(dev, "No DCB connector table present\n"); + return; + } + + NV_INFO(dev, "DCB connector table: VHER 0x%02x %d %d %d\n", + conntab[0], conntab[1], conntab[2], conntab[3]); + if ((conntab[0] != 0x30 && conntab[0] != 0x40) || + (conntab[3] != 2 && conntab[3] != 4)) { + NV_ERROR(dev, " Unknown! Please report.\n"); + return; + } + + ct->entries = conntab[2]; + + entry = conntab + conntab[1]; + cte = &ct->entry[0]; + for (i = 0; i < conntab[2]; i++, entry += conntab[3], cte++) { + if (conntab[3] == 2) + cte->entry = ROM16(entry[0]); + else + cte->entry = ROM32(entry[0]); + cte->type = (cte->entry & 0x000000ff) >> 0; + cte->index = (cte->entry & 0x00000f00) >> 8; + switch (cte->entry & 0x00033000) { + case 0x00001000: + cte->gpio_tag = 0x07; + break; + case 0x00002000: + cte->gpio_tag = 0x08; + break; + case 0x00010000: + cte->gpio_tag = 0x51; + break; + case 0x00020000: + cte->gpio_tag = 0x52; + break; + default: + cte->gpio_tag = 0xff; + break; + } + + if (cte->type == 0xff) + continue; + + NV_INFO(dev, " %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n", + i, cte->entry, cte->type, cte->index, cte->gpio_tag); + } +} + +static struct dcb_entry *new_dcb_entry(struct parsed_dcb *dcb) +{ + struct dcb_entry *entry = &dcb->entry[dcb->entries]; + + memset(entry, 0, sizeof(struct dcb_entry)); + entry->index = dcb->entries++; + + return entry; +} + +static void fabricate_vga_output(struct parsed_dcb *dcb, int i2c, int heads) +{ + struct dcb_entry *entry = new_dcb_entry(dcb); + + entry->type = 0; + entry->i2c_index = i2c; + entry->heads = heads; + entry->location = DCB_LOC_ON_CHIP; + /* "or" mostly unused in early gen crt modesetting, 0 is fine */ +} + +static void fabricate_dvi_i_output(struct parsed_dcb *dcb, bool twoHeads) +{ + struct dcb_entry *entry = new_dcb_entry(dcb); + + entry->type = 2; + entry->i2c_index = LEGACY_I2C_PANEL; + entry->heads = twoHeads ? 3 : 1; + entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */ + entry->or = 1; /* means |0x10 gets set on CRE_LCD__INDEX */ + entry->duallink_possible = false; /* SiI164 and co. are single link */ + +#if 0 + /* + * For dvi-a either crtc probably works, but my card appears to only + * support dvi-d. 
"nvidia" still attempts to program it for dvi-a, + * doing the full fp output setup (program 0x6808.. fp dimension regs, + * setting 0x680848 to 0x10000111 to enable, maybe setting 0x680880); + * the monitor picks up the mode res ok and lights up, but no pixel + * data appears, so the board manufacturer probably connected up the + * sync lines, but missed the video traces / components + * + * with this introduction, dvi-a left as an exercise for the reader. + */ + fabricate_vga_output(dcb, LEGACY_I2C_PANEL, entry->heads); +#endif +} + +static void fabricate_tv_output(struct parsed_dcb *dcb, bool twoHeads) +{ + struct dcb_entry *entry = new_dcb_entry(dcb); + + entry->type = 1; + entry->i2c_index = LEGACY_I2C_TV; + entry->heads = twoHeads ? 3 : 1; + entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */ +} + +static bool +parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb, + uint32_t conn, uint32_t conf, struct dcb_entry *entry) +{ + entry->type = conn & 0xf; + entry->i2c_index = (conn >> 4) & 0xf; + entry->heads = (conn >> 8) & 0xf; + if (bdcb->version >= 0x40) + entry->connector = (conn >> 12) & 0xf; + entry->bus = (conn >> 16) & 0xf; + entry->location = (conn >> 20) & 0x3; + entry->or = (conn >> 24) & 0xf; + /* + * Normal entries consist of a single bit, but dual link has the + * next most significant bit set too + */ + entry->duallink_possible = + ((1 << (ffs(entry->or) - 1)) * 3 == entry->or); + + switch (entry->type) { + case OUTPUT_ANALOG: + /* + * Although the rest of a CRT conf dword is usually + * zeros, mac biosen have stuff there so we must mask + */ + entry->crtconf.maxfreq = (bdcb->version < 0x30) ? + (conf & 0xffff) * 10 : + (conf & 0xff) * 10000; + break; + case OUTPUT_LVDS: + { + uint32_t mask; + if (conf & 0x1) + entry->lvdsconf.use_straps_for_mode = true; + if (bdcb->version < 0x22) { + mask = ~0xd; + /* + * The laptop in bug 14567 lies and claims to not use + * straps when it does, so assume all DCB 2.0 laptops + * use straps, until a broken EDID using one is produced + */ + entry->lvdsconf.use_straps_for_mode = true; + /* + * Both 0x4 and 0x8 show up in v2.0 tables; assume they + * mean the same thing (probably wrong, but might work) + */ + if (conf & 0x4 || conf & 0x8) + entry->lvdsconf.use_power_scripts = true; + } else { + mask = ~0x5; + if (conf & 0x4) + entry->lvdsconf.use_power_scripts = true; + } + if (conf & mask) { + /* + * Until we even try to use these on G8x, it's + * useless reporting unknown bits. They all are. + */ + if (bdcb->version >= 0x40) + break; + + NV_ERROR(dev, "Unknown LVDS configuration bits, " + "please report\n"); + } + break; + } + case OUTPUT_TV: + { + if (bdcb->version >= 0x30) + entry->tvconf.has_component_output = conf & (0x8 << 4); + else + entry->tvconf.has_component_output = false; + + break; + } + case OUTPUT_DP: + entry->dpconf.sor.link = (conf & 0x00000030) >> 4; + entry->dpconf.link_bw = (conf & 0x00e00000) >> 21; + switch ((conf & 0x0f000000) >> 24) { + case 0xf: + entry->dpconf.link_nr = 4; + break; + case 0x3: + entry->dpconf.link_nr = 2; + break; + default: + entry->dpconf.link_nr = 1; + break; + } + break; + case OUTPUT_TMDS: + entry->tmdsconf.sor.link = (conf & 0x00000030) >> 4; + break; + case 0xe: + /* weird g80 mobile type that "nv" treats as a terminator */ + bdcb->dcb.entries--; + return false; + } + + /* unsure what DCB version introduces this, 3.0? 
*/ + if (conf & 0x100000) + entry->i2c_upper_default = true; + + return true; +} + +static bool +parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb, + uint32_t conn, uint32_t conf, struct dcb_entry *entry) +{ + if (conn != 0xf0003f00 && conn != 0xf2247f10 && conn != 0xf2204001 && + conn != 0xf2204301 && conn != 0xf2204311 && conn != 0xf2208001 && + conn != 0xf2244001 && conn != 0xf2244301 && conn != 0xf2244311 && + conn != 0xf4204011 && conn != 0xf4208011 && conn != 0xf4248011 && + conn != 0xf2045ff2 && conn != 0xf2045f14 && conn != 0xf207df14 && + conn != 0xf2205004 && conn != 0xf2209004) { + NV_ERROR(dev, "Unknown DCB 1.5 entry, please report\n"); + + /* cause output setting to fail for !TV, so message is seen */ + if ((conn & 0xf) != 0x1) + dcb->entries = 0; + + return false; + } + /* most of the below is a "best guess" atm */ + entry->type = conn & 0xf; + if (entry->type == 2) + /* another way of specifying straps based lvds... */ + entry->type = OUTPUT_LVDS; + if (entry->type == 4) { /* digital */ + if (conn & 0x10) + entry->type = OUTPUT_LVDS; + else + entry->type = OUTPUT_TMDS; + } + /* what's in bits 5-13? could be some encoder maker thing, in tv case */ + entry->i2c_index = (conn >> 14) & 0xf; + /* raw heads field is in range 0-1, so move to 1-2 */ + entry->heads = ((conn >> 18) & 0x7) + 1; + entry->location = (conn >> 21) & 0xf; + /* unused: entry->bus = (conn >> 25) & 0x7; */ + /* set or to be same as heads -- hopefully safe enough */ + entry->or = entry->heads; + entry->duallink_possible = false; + + switch (entry->type) { + case OUTPUT_ANALOG: + entry->crtconf.maxfreq = (conf & 0xffff) * 10; + break; + case OUTPUT_LVDS: + /* + * This is probably buried in conn's unknown bits. + * This will upset EDID-ful models, if they exist + */ + entry->lvdsconf.use_straps_for_mode = true; + entry->lvdsconf.use_power_scripts = true; + break; + case OUTPUT_TMDS: + /* + * Invent a DVI-A output, by copying the fields of the DVI-D + * output; reported to work by math_b on an NV20(!). + */ + fabricate_vga_output(dcb, entry->i2c_index, entry->heads); + break; + case OUTPUT_TV: + entry->tvconf.has_component_output = false; + break; + } + + return true; +} + +static bool parse_dcb_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb, + uint32_t conn, uint32_t conf) +{ + struct dcb_entry *entry = new_dcb_entry(&bdcb->dcb); + bool ret; + + if (bdcb->version >= 0x20) + ret = parse_dcb20_entry(dev, bdcb, conn, conf, entry); + else + ret = parse_dcb15_entry(dev, &bdcb->dcb, conn, conf, entry); + if (!ret) + return ret; + + read_dcb_i2c_entry(dev, bdcb->version, bdcb->i2c_table, + entry->i2c_index, &bdcb->dcb.i2c[entry->i2c_index]); + + return true; +} + +static +void merge_like_dcb_entries(struct drm_device *dev, struct parsed_dcb *dcb) +{ + /* + * DCB v2.0 lists each output combination separately. 
+ * Here we merge compatible entries to have fewer outputs, with + * more options + */ + + int i, newentries = 0; + + for (i = 0; i < dcb->entries; i++) { + struct dcb_entry *ient = &dcb->entry[i]; + int j; + + for (j = i + 1; j < dcb->entries; j++) { + struct dcb_entry *jent = &dcb->entry[j]; + + if (jent->type == 100) /* already merged entry */ + continue; + + /* merge heads field when all other fields the same */ + if (jent->i2c_index == ient->i2c_index && + jent->type == ient->type && + jent->location == ient->location && + jent->or == ient->or) { + NV_TRACE(dev, "Merging DCB entries %d and %d\n", + i, j); + ient->heads |= jent->heads; + jent->type = 100; /* dummy value */ + } + } + } + + /* Compact entries merged into others out of dcb */ + for (i = 0; i < dcb->entries; i++) { + if (dcb->entry[i].type == 100) + continue; + + if (newentries != i) { + dcb->entry[newentries] = dcb->entry[i]; + dcb->entry[newentries].index = newentries; + } + newentries++; + } + + dcb->entries = newentries; +} + +static int parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) +{ + struct bios_parsed_dcb *bdcb = &bios->bdcb; + struct parsed_dcb *dcb; + uint16_t dcbptr, i2ctabptr = 0; + uint8_t *dcbtable; + uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES; + bool configblock = true; + int recordlength = 8, confofs = 4; + int i; + + dcb = bios->pub.dcb = &bdcb->dcb; + dcb->entries = 0; + + /* get the offset from 0x36 */ + dcbptr = ROM16(bios->data[0x36]); + + if (dcbptr == 0x0) { + NV_WARN(dev, "No output data (DCB) found in BIOS, " + "assuming a CRT output exists\n"); + /* this situation likely means a really old card, pre DCB */ + fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1); + + if (nv04_tv_identify(dev, + bios->legacy.i2c_indices.tv) >= 0) + fabricate_tv_output(dcb, twoHeads); + + return 0; + } + + dcbtable = &bios->data[dcbptr]; + + /* get DCB version */ + bdcb->version = dcbtable[0]; + NV_TRACE(dev, "Found Display Configuration Block version %d.%d\n", + bdcb->version >> 4, bdcb->version & 0xf); + + if (bdcb->version >= 0x20) { /* NV17+ */ + uint32_t sig; + + if (bdcb->version >= 0x30) { /* NV40+ */ + headerlen = dcbtable[1]; + entries = dcbtable[2]; + recordlength = dcbtable[3]; + i2ctabptr = ROM16(dcbtable[4]); + sig = ROM32(dcbtable[6]); + bdcb->gpio_table_ptr = ROM16(dcbtable[10]); + bdcb->connector_table_ptr = ROM16(dcbtable[20]); + } else { + i2ctabptr = ROM16(dcbtable[2]); + sig = ROM32(dcbtable[4]); + headerlen = 8; + } + + if (sig != 0x4edcbdcb) { + NV_ERROR(dev, "Bad Display Configuration Block " + "signature (%08X)\n", sig); + return -EINVAL; + } + } else if (bdcb->version >= 0x15) { /* some NV11 and NV20 */ + char sig[8] = { 0 }; + + strncpy(sig, (char *)&dcbtable[-7], 7); + i2ctabptr = ROM16(dcbtable[2]); + recordlength = 10; + confofs = 6; + + if (strcmp(sig, "DEV_REC")) { + NV_ERROR(dev, "Bad Display Configuration Block " + "signature (%s)\n", sig); + return -EINVAL; + } + } else { + /* + * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but always + * has the same single (crt) entry, even when tv-out present, so + * the conclusion is this version cannot really be used. + * v1.2 tables (some NV6/10, and NV15+) normally have the same + * 5 entries, which are not specific to the card and so no use. + * v1.2 does have an I2C table that read_dcb_i2c_table can + * handle, but cards exist (nv11 in #14821) with a bad i2c table + * pointer, so use the indices parsed in parse_bmp_structure. 
+ * v1.1 (NV5+, maybe some NV4) is entirely unhelpful + */ + NV_TRACEWARN(dev, "No useful information in BIOS output table; " + "adding all possible outputs\n"); + fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1); + + /* + * Attempt to detect TV before DVI because the test + * for the former is more accurate and it rules the + * latter out. + */ + if (nv04_tv_identify(dev, + bios->legacy.i2c_indices.tv) >= 0) + fabricate_tv_output(dcb, twoHeads); + + else if (bios->tmds.output0_script_ptr || + bios->tmds.output1_script_ptr) + fabricate_dvi_i_output(dcb, twoHeads); + + return 0; + } + + if (!i2ctabptr) + NV_WARN(dev, "No pointer to DCB I2C port table\n"); + else { + bdcb->i2c_table = &bios->data[i2ctabptr]; + if (bdcb->version >= 0x30) + bdcb->i2c_default_indices = bdcb->i2c_table[4]; + } + + parse_dcb_gpio_table(bios); + parse_dcb_connector_table(bios); + + if (entries > DCB_MAX_NUM_ENTRIES) + entries = DCB_MAX_NUM_ENTRIES; + + for (i = 0; i < entries; i++) { + uint32_t connection, config = 0; + + connection = ROM32(dcbtable[headerlen + recordlength * i]); + if (configblock) + config = ROM32(dcbtable[headerlen + confofs + recordlength * i]); + + /* seen on an NV11 with DCB v1.5 */ + if (connection == 0x00000000) + break; + + /* seen on an NV17 with DCB v2.0 */ + if (connection == 0xffffffff) + break; + + if ((connection & 0x0000000f) == 0x0000000f) + continue; + + NV_TRACEWARN(dev, "Raw DCB entry %d: %08x %08x\n", + dcb->entries, connection, config); + + if (!parse_dcb_entry(dev, bdcb, connection, config)) + break; + } + + /* + * apart for v2.1+ not being known for requiring merging, this + * guarantees dcbent->index is the index of the entry in the rom image + */ + if (bdcb->version < 0x21) + merge_like_dcb_entries(dev, dcb); + + return dcb->entries ? 0 : -ENXIO; +} + +static void +fixup_legacy_connector(struct nvbios *bios) +{ + struct bios_parsed_dcb *bdcb = &bios->bdcb; + struct parsed_dcb *dcb = &bdcb->dcb; + int high = 0, i; + + /* + * DCB 3.0 also has the table in most cases, but there are some cards + * where the table is filled with stub entries, and the DCB entriy + * indices are all 0. We don't need the connector indices on pre-G80 + * chips (yet?) so limit the use to DCB 4.0 and above. + */ + if (bdcb->version >= 0x40) + return; + + /* + * No known connector info before v3.0, so make it up. the rule here + * is: anything on the same i2c bus is considered to be on the same + * connector. any output without an associated i2c bus is assigned + * its own unique connector index. + */ + for (i = 0; i < dcb->entries; i++) { + if (dcb->entry[i].i2c_index == 0xf) + continue; + + /* + * Ignore the I2C index for on-chip TV-out, as there + * are cards with bogus values (nv31m in bug 23212), + * and it's otherwise useless. 
+ */ + if (dcb->entry[i].type == OUTPUT_TV && + dcb->entry[i].location == DCB_LOC_ON_CHIP) { + dcb->entry[i].i2c_index = 0xf; + continue; + } + + dcb->entry[i].connector = dcb->entry[i].i2c_index; + if (dcb->entry[i].connector > high) + high = dcb->entry[i].connector; + } + + for (i = 0; i < dcb->entries; i++) { + if (dcb->entry[i].i2c_index != 0xf) + continue; + + dcb->entry[i].connector = ++high; + } +} + +static void +fixup_legacy_i2c(struct nvbios *bios) +{ + struct parsed_dcb *dcb = &bios->bdcb.dcb; + int i; + + for (i = 0; i < dcb->entries; i++) { + if (dcb->entry[i].i2c_index == LEGACY_I2C_CRT) + dcb->entry[i].i2c_index = bios->legacy.i2c_indices.crt; + if (dcb->entry[i].i2c_index == LEGACY_I2C_PANEL) + dcb->entry[i].i2c_index = bios->legacy.i2c_indices.panel; + if (dcb->entry[i].i2c_index == LEGACY_I2C_TV) + dcb->entry[i].i2c_index = bios->legacy.i2c_indices.tv; + } +} + +static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bios, uint16_t hwsq_offset, int entry) +{ + /* + * The header following the "HWSQ" signature has the number of entries, + * and the entry size + * + * An entry consists of a dword to write to the sequencer control reg + * (0x00001304), followed by the ucode bytes, written sequentially, + * starting at reg 0x00001400 + */ + + uint8_t bytes_to_write; + uint16_t hwsq_entry_offset; + int i; + + if (bios->data[hwsq_offset] <= entry) { + NV_ERROR(dev, "Too few entries in HW sequencer table for " + "requested entry\n"); + return -ENOENT; + } + + bytes_to_write = bios->data[hwsq_offset + 1]; + + if (bytes_to_write != 36) { + NV_ERROR(dev, "Unknown HW sequencer entry size\n"); + return -EINVAL; + } + + NV_TRACE(dev, "Loading NV17 power sequencing microcode\n"); + + hwsq_entry_offset = hwsq_offset + 2 + entry * bytes_to_write; + + /* set sequencer control */ + bios_wr32(bios, 0x00001304, ROM32(bios->data[hwsq_entry_offset])); + bytes_to_write -= 4; + + /* write ucode */ + for (i = 0; i < bytes_to_write; i += 4) + bios_wr32(bios, 0x00001400 + i, ROM32(bios->data[hwsq_entry_offset + i + 4])); + + /* twiddle NV_PBUS_DEBUG_4 */ + bios_wr32(bios, NV_PBUS_DEBUG_4, bios_rd32(bios, NV_PBUS_DEBUG_4) | 0x18); + + return 0; +} + +static int load_nv17_hw_sequencer_ucode(struct drm_device *dev, + struct nvbios *bios) +{ + /* + * BMP based cards, from NV17, need a microcode loading to correctly + * control the GPIO etc for LVDS panels + * + * BIT based cards seem to do this directly in the init scripts + * + * The microcode entries are found by the "HWSQ" signature. + */ + + const uint8_t hwsq_signature[] = { 'H', 'W', 'S', 'Q' }; + const int sz = sizeof(hwsq_signature); + int hwsq_offset; + + hwsq_offset = findstr(bios->data, bios->length, hwsq_signature, sz); + if (!hwsq_offset) + return 0; + + /* always use entry 0? 
*/ + return load_nv17_hwsq_ucode_entry(dev, bios, hwsq_offset + sz, 0); +} + +uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvbios *bios = &dev_priv->VBIOS; + const uint8_t edid_sig[] = { + 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; + uint16_t offset = 0; + uint16_t newoffset; + int searchlen = NV_PROM_SIZE; + + if (bios->fp.edid) + return bios->fp.edid; + + while (searchlen) { + newoffset = findstr(&bios->data[offset], searchlen, + edid_sig, 8); + if (!newoffset) + return NULL; + offset += newoffset; + if (!nv_cksum(&bios->data[offset], EDID1_LEN)) + break; + + searchlen -= offset; + offset++; + } + + NV_TRACE(dev, "Found EDID in BIOS\n"); + + return bios->fp.edid = &bios->data[offset]; +} + +void +nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table, + struct dcb_entry *dcbent) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvbios *bios = &dev_priv->VBIOS; + struct init_exec iexec = { true, false }; + + bios->display.output = dcbent; + parse_init_table(bios, table, &iexec); + bios->display.output = NULL; +} + +static bool NVInitVBIOS(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvbios *bios = &dev_priv->VBIOS; + + memset(bios, 0, sizeof(struct nvbios)); + bios->dev = dev; + + if (!NVShadowVBIOS(dev, bios->data)) + return false; + + bios->length = NV_PROM_SIZE; + return true; +} + +static int nouveau_parse_vbios_struct(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvbios *bios = &dev_priv->VBIOS; + const uint8_t bit_signature[] = { 0xff, 0xb8, 'B', 'I', 'T' }; + const uint8_t bmp_signature[] = { 0xff, 0x7f, 'N', 'V', 0x0 }; + int offset; + + offset = findstr(bios->data, bios->length, + bit_signature, sizeof(bit_signature)); + if (offset) { + NV_TRACE(dev, "BIT BIOS found\n"); + return parse_bit_structure(bios, offset + 6); + } + + offset = findstr(bios->data, bios->length, + bmp_signature, sizeof(bmp_signature)); + if (offset) { + NV_TRACE(dev, "BMP BIOS found\n"); + return parse_bmp_structure(dev, bios, offset); + } + + NV_ERROR(dev, "No known BIOS signature found\n"); + return -ENODEV; +} + +int +nouveau_run_vbios_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvbios *bios = &dev_priv->VBIOS; + int i, ret = 0; + + NVLockVgaCrtcs(dev, false); + if (nv_two_heads(dev)) + NVSetOwner(dev, bios->state.crtchead); + + if (bios->major_version < 5) /* BMP only */ + load_nv17_hw_sequencer_ucode(dev, bios); + + if (bios->execute) { + bios->fp.last_script_invoc = 0; + bios->fp.lvds_init_run = false; + } + + parse_init_tables(bios); + + /* + * Runs some additional script seen on G8x VBIOSen. The VBIOS' + * parser will run this right after the init tables, the binary + * driver appears to run it at some point later. 
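nouveau_bios_embedded_edid() above scans the shadowed ROM for the standard 8-byte EDID header and accepts the first hit whose block checksum passes; an EDID 1.x block is 128 bytes whose bytes sum to 0 modulo 256. The same scan, detached from any driver types, looks like the standalone sketch below; the buffer planted in main() is purely illustrative.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EDID_BLOCK_LEN 128

static const uint8_t edid_sig[8] = {
	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};

/* EDID blocks are valid when all 128 bytes sum to 0 (mod 256). */
static int edid_checksum_ok(const uint8_t *blk)
{
	uint8_t sum = 0;
	int i;

	for (i = 0; i < EDID_BLOCK_LEN; i++)
		sum += blk[i];
	return sum == 0;
}

/* Return the offset of the first valid EDID block in buf, or -1. */
static long find_edid(const uint8_t *buf, size_t len)
{
	size_t off;

	for (off = 0; off + EDID_BLOCK_LEN <= len; off++) {
		if (memcmp(buf + off, edid_sig, sizeof(edid_sig)) == 0 &&
		    edid_checksum_ok(buf + off))
			return (long)off;
	}
	return -1;
}

int main(void)
{
	uint8_t rom[256] = { 0 };

	/* Plant a header at offset 64 and fix up the checksum byte. */
	memcpy(rom + 64, edid_sig, sizeof(edid_sig));
	rom[64 + 127] = 6;	/* 6 * 0xff + 6 == 0 (mod 256) */

	printf("EDID found at offset %ld\n", find_edid(rom, sizeof(rom)));
	return 0;
}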
+ */ + if (bios->some_script_ptr) { + struct init_exec iexec = {true, false}; + + NV_INFO(dev, "Parsing VBIOS init table at offset 0x%04X\n", + bios->some_script_ptr); + parse_init_table(bios, bios->some_script_ptr, &iexec); + } + + if (dev_priv->card_type >= NV_50) { + for (i = 0; i < bios->bdcb.dcb.entries; i++) { + nouveau_bios_run_display_table(dev, + &bios->bdcb.dcb.entry[i], + 0, 0); + } + } + + NVLockVgaCrtcs(dev, true); + + return ret; +} + +static void +nouveau_bios_i2c_devices_takedown(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvbios *bios = &dev_priv->VBIOS; + struct dcb_i2c_entry *entry; + int i; + + entry = &bios->bdcb.dcb.i2c[0]; + for (i = 0; i < DCB_MAX_NUM_I2C_ENTRIES; i++, entry++) + nouveau_i2c_fini(dev, entry); +} + +int +nouveau_bios_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvbios *bios = &dev_priv->VBIOS; + uint32_t saved_nv_pextdev_boot_0; + bool was_locked; + int ret; + + dev_priv->vbios = &bios->pub; + + if (!NVInitVBIOS(dev)) + return -ENODEV; + + ret = nouveau_parse_vbios_struct(dev); + if (ret) + return ret; + + ret = parse_dcb_table(dev, bios, nv_two_heads(dev)); + if (ret) + return ret; + + fixup_legacy_i2c(bios); + fixup_legacy_connector(bios); + + if (!bios->major_version) /* we don't run version 0 bios */ + return 0; + + /* these will need remembering across a suspend */ + saved_nv_pextdev_boot_0 = bios_rd32(bios, NV_PEXTDEV_BOOT_0); + bios->state.saved_nv_pfb_cfg0 = bios_rd32(bios, NV_PFB_CFG0); + + /* init script execution disabled */ + bios->execute = false; + + /* ... unless card isn't POSTed already */ + if (dev_priv->card_type >= NV_10 && + NVReadVgaCrtc(dev, 0, 0x00) == 0 && + NVReadVgaCrtc(dev, 0, 0x1a) == 0) { + NV_INFO(dev, "Adaptor not initialised\n"); + if (dev_priv->card_type < NV_50) { + NV_ERROR(dev, "Unable to POST this chipset\n"); + return -ENODEV; + } + + NV_INFO(dev, "Running VBIOS init tables\n"); + bios->execute = true; + } + + bios_wr32(bios, NV_PEXTDEV_BOOT_0, saved_nv_pextdev_boot_0); + + ret = nouveau_run_vbios_init(dev); + if (ret) { + dev_priv->vbios = NULL; + return ret; + } + + /* feature_byte on BMP is poor, but init always sets CR4B */ + was_locked = NVLockVgaCrtcs(dev, false); + if (bios->major_version < 5) + bios->is_mobile = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_4B) & 0x40; + + /* all BIT systems need p_f_m_t for digital_min_front_porch */ + if (bios->is_mobile || bios->major_version >= 5) + ret = parse_fp_mode_table(dev, bios); + NVLockVgaCrtcs(dev, was_locked); + + /* allow subsequent scripts to execute */ + bios->execute = true; + + return 0; +} + +void +nouveau_bios_takedown(struct drm_device *dev) +{ + nouveau_bios_i2c_devices_takedown(dev); +} diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h new file mode 100644 index 000000000000..1d5f10bd78ed --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_bios.h @@ -0,0 +1,289 @@ +/* + * Copyright 2007-2008 Nouveau Project + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission 
notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NOUVEAU_BIOS_H__ +#define __NOUVEAU_BIOS_H__ + +#include "nvreg.h" +#include "nouveau_i2c.h" + +#define DCB_MAX_NUM_ENTRIES 16 +#define DCB_MAX_NUM_I2C_ENTRIES 16 +#define DCB_MAX_NUM_GPIO_ENTRIES 32 +#define DCB_MAX_NUM_CONNECTOR_ENTRIES 16 + +#define DCB_LOC_ON_CHIP 0 + +struct dcb_entry { + int index; /* may not be raw dcb index if merging has happened */ + uint8_t type; + uint8_t i2c_index; + uint8_t heads; + uint8_t connector; + uint8_t bus; + uint8_t location; + uint8_t or; + bool duallink_possible; + union { + struct sor_conf { + int link; + } sorconf; + struct { + int maxfreq; + } crtconf; + struct { + struct sor_conf sor; + bool use_straps_for_mode; + bool use_power_scripts; + } lvdsconf; + struct { + bool has_component_output; + } tvconf; + struct { + struct sor_conf sor; + int link_nr; + int link_bw; + } dpconf; + struct { + struct sor_conf sor; + } tmdsconf; + }; + bool i2c_upper_default; +}; + +struct dcb_i2c_entry { + uint8_t port_type; + uint8_t read, write; + struct nouveau_i2c_chan *chan; +}; + +struct parsed_dcb { + int entries; + struct dcb_entry entry[DCB_MAX_NUM_ENTRIES]; + struct dcb_i2c_entry i2c[DCB_MAX_NUM_I2C_ENTRIES]; +}; + +enum dcb_gpio_tag { + DCB_GPIO_TVDAC0 = 0xc, + DCB_GPIO_TVDAC1 = 0x2d, +}; + +struct dcb_gpio_entry { + enum dcb_gpio_tag tag; + int line; + bool invert; +}; + +struct parsed_dcb_gpio { + int entries; + struct dcb_gpio_entry entry[DCB_MAX_NUM_GPIO_ENTRIES]; +}; + +struct dcb_connector_table_entry { + uint32_t entry; + uint8_t type; + uint8_t index; + uint8_t gpio_tag; +}; + +struct dcb_connector_table { + int entries; + struct dcb_connector_table_entry entry[DCB_MAX_NUM_CONNECTOR_ENTRIES]; +}; + +struct bios_parsed_dcb { + uint8_t version; + + struct parsed_dcb dcb; + + uint8_t *i2c_table; + uint8_t i2c_default_indices; + + uint16_t gpio_table_ptr; + struct parsed_dcb_gpio gpio; + uint16_t connector_table_ptr; + struct dcb_connector_table connector; +}; + +enum nouveau_encoder_type { + OUTPUT_ANALOG = 0, + OUTPUT_TV = 1, + OUTPUT_TMDS = 2, + OUTPUT_LVDS = 3, + OUTPUT_DP = 6, + OUTPUT_ANY = -1 +}; + +enum nouveau_or { + OUTPUT_A = (1 << 0), + OUTPUT_B = (1 << 1), + OUTPUT_C = (1 << 2) +}; + +enum LVDS_script { + /* Order *does* matter here */ + LVDS_INIT = 1, + LVDS_RESET, + LVDS_BACKLIGHT_ON, + LVDS_BACKLIGHT_OFF, + LVDS_PANEL_ON, + LVDS_PANEL_OFF +}; + +/* changing these requires matching changes to reg tables in nv_get_clock */ +#define MAX_PLL_TYPES 4 +enum pll_types { + NVPLL, + MPLL, + VPLL1, + VPLL2 +}; + +struct pll_lims { + struct { + int minfreq; + int maxfreq; + int min_inputfreq; + int max_inputfreq; + + uint8_t min_m; + uint8_t max_m; + uint8_t min_n; + uint8_t max_n; + } vco1, vco2; + + uint8_t max_log2p; + /* + * for most pre nv50 cards setting a log2P of 7 (the common max_log2p + * value) is no different to 6 (at least for vplls) so allowing the MNP + * calc to use 7 causes the generated clock to be out by a 
factor of 2. + * however, max_log2p cannot be fixed-up during parsing as the + * unmodified max_log2p value is still needed for setting mplls, hence + * an additional max_usable_log2p member + */ + uint8_t max_usable_log2p; + uint8_t log2p_bias; + + uint8_t min_p; + uint8_t max_p; + + int refclk; +}; + +struct nouveau_bios_info { + struct parsed_dcb *dcb; + + uint8_t chip_version; + + uint32_t dactestval; + uint32_t tvdactestval; + uint8_t digital_min_front_porch; + bool fp_no_ddc; +}; + +struct nvbios { + struct drm_device *dev; + struct nouveau_bios_info pub; + + uint8_t data[NV_PROM_SIZE]; + unsigned int length; + bool execute; + + uint8_t major_version; + uint8_t feature_byte; + bool is_mobile; + + uint32_t fmaxvco, fminvco; + + bool old_style_init; + uint16_t init_script_tbls_ptr; + uint16_t extra_init_script_tbl_ptr; + uint16_t macro_index_tbl_ptr; + uint16_t macro_tbl_ptr; + uint16_t condition_tbl_ptr; + uint16_t io_condition_tbl_ptr; + uint16_t io_flag_condition_tbl_ptr; + uint16_t init_function_tbl_ptr; + + uint16_t pll_limit_tbl_ptr; + uint16_t ram_restrict_tbl_ptr; + + uint16_t some_script_ptr; /* BIT I + 14 */ + uint16_t init96_tbl_ptr; /* BIT I + 16 */ + + struct bios_parsed_dcb bdcb; + + struct { + int crtchead; + /* these need remembering across suspend */ + uint32_t saved_nv_pfb_cfg0; + } state; + + struct { + struct dcb_entry *output; + uint16_t script_table_ptr; + uint16_t dp_table_ptr; + } display; + + struct { + uint16_t fptablepointer; /* also used by tmds */ + uint16_t fpxlatetableptr; + int xlatwidth; + uint16_t lvdsmanufacturerpointer; + uint16_t fpxlatemanufacturertableptr; + uint16_t mode_ptr; + uint16_t xlated_entry; + bool power_off_for_reset; + bool reset_after_pclk_change; + bool dual_link; + bool link_c_increment; + bool BITbit1; + bool if_is_24bit; + int duallink_transition_clk; + uint8_t strapless_is_24bit; + uint8_t *edid; + + /* will need resetting after suspend */ + int last_script_invoc; + bool lvds_init_run; + } fp; + + struct { + uint16_t output0_script_ptr; + uint16_t output1_script_ptr; + } tmds; + + struct { + uint16_t mem_init_tbl_ptr; + uint16_t sdr_seq_tbl_ptr; + uint16_t ddr_seq_tbl_ptr; + + struct { + uint8_t crt, tv, panel; + } i2c_indices; + + uint16_t lvds_single_a_script_ptr; + } legacy; +}; + +#endif diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c new file mode 100644 index 000000000000..320a14bceb99 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -0,0 +1,671 @@ +/* + * Copyright 2007 Dave Airlied + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +/* + * Authors: Dave Airlied + * Ben Skeggs + * Jeremy Kolb + */ + +#include "drmP.h" + +#include "nouveau_drm.h" +#include "nouveau_drv.h" +#include "nouveau_dma.h" + +static void +nouveau_bo_del_ttm(struct ttm_buffer_object *bo) +{ + struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); + struct nouveau_bo *nvbo = nouveau_bo(bo); + + ttm_bo_kunmap(&nvbo->kmap); + + if (unlikely(nvbo->gem)) + DRM_ERROR("bo %p still attached to GEM object\n", bo); + + spin_lock(&dev_priv->ttm.bo_list_lock); + list_del(&nvbo->head); + spin_unlock(&dev_priv->ttm.bo_list_lock); + kfree(nvbo); +} + +int +nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, + int size, int align, uint32_t flags, uint32_t tile_mode, + uint32_t tile_flags, bool no_vm, bool mappable, + struct nouveau_bo **pnvbo) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_bo *nvbo; + int ret, n = 0; + + nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL); + if (!nvbo) + return -ENOMEM; + INIT_LIST_HEAD(&nvbo->head); + INIT_LIST_HEAD(&nvbo->entry); + nvbo->mappable = mappable; + nvbo->no_vm = no_vm; + nvbo->tile_mode = tile_mode; + nvbo->tile_flags = tile_flags; + + /* + * Some of the tile_flags have a periodic structure of N*4096 bytes, + * align to to that as well as the page size. Overallocate memory to + * avoid corruption of other buffer objects. + */ + switch (tile_flags) { + case 0x1800: + case 0x2800: + case 0x4800: + case 0x7a00: + if (dev_priv->chipset >= 0xA0) { + /* This is based on high end cards with 448 bits + * memory bus, could be different elsewhere.*/ + size += 6 * 28672; + /* 8 * 28672 is the actual alignment requirement, + * but we must also align to page size. */ + align = 2 * 8 * 28672; + } else if (dev_priv->chipset >= 0x90) { + size += 3 * 16384; + align = 12 * 16834; + } else { + size += 3 * 8192; + /* 12 * 8192 is the actual alignment requirement, + * but we must also align to page size. */ + align = 2 * 12 * 8192; + } + break; + default: + break; + } + + align >>= PAGE_SHIFT; + + size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); + if (dev_priv->card_type == NV_50) { + size = (size + 65535) & ~65535; + if (align < (65536 / PAGE_SIZE)) + align = (65536 / PAGE_SIZE); + } + + if (flags & TTM_PL_FLAG_VRAM) + nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING; + if (flags & TTM_PL_FLAG_TT) + nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; + nvbo->placement.fpfn = 0; + nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0; + nvbo->placement.placement = nvbo->placements; + nvbo->placement.busy_placement = nvbo->placements; + nvbo->placement.num_placement = n; + nvbo->placement.num_busy_placement = n; + + nvbo->channel = chan; + nouveau_bo_placement_set(nvbo, flags); + ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size, + ttm_bo_type_device, &nvbo->placement, align, 0, + false, NULL, size, nouveau_bo_del_ttm); + nvbo->channel = NULL; + if (ret) { + /* ttm will call nouveau_bo_del_ttm if it fails.. 
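The size rounding in nouveau_bo_new() above is the usual power-of-two round-up: first to the page size, then, for NV50-class VRAM objects, to 64 KiB. A quick standalone illustration follows; the 4 KiB page and the 300000-byte request are assumptions made only for the example.

#include <stdint.h>
#include <stdio.h>

/* Round v up to the next multiple of the power-of-two a. */
static uint64_t round_up_pow2(uint64_t v, uint64_t a)
{
	return (v + (a - 1)) & ~(a - 1);
}

int main(void)
{
	const uint64_t page = 4096;   /* assumed page size */
	uint64_t size = 300000;       /* arbitrary request */

	/* Page-align first, as every TTM object must be. */
	size = round_up_pow2(size, page);
	printf("page aligned:  %llu\n", (unsigned long long)size);

	/* NV50-class VRAM objects are additionally rounded to 64 KiB. */
	size = round_up_pow2(size, 65536);
	printf("64KiB aligned: %llu\n", (unsigned long long)size);
	return 0;
}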
*/ + return ret; + } + + spin_lock(&dev_priv->ttm.bo_list_lock); + list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list); + spin_unlock(&dev_priv->ttm.bo_list_lock); + *pnvbo = nvbo; + return 0; +} + +void +nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t memtype) +{ + int n = 0; + + if (memtype & TTM_PL_FLAG_VRAM) + nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING; + if (memtype & TTM_PL_FLAG_TT) + nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; + if (memtype & TTM_PL_FLAG_SYSTEM) + nvbo->placements[n++] = TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING; + nvbo->placement.placement = nvbo->placements; + nvbo->placement.busy_placement = nvbo->placements; + nvbo->placement.num_placement = n; + nvbo->placement.num_busy_placement = n; +} + +int +nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype) +{ + struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); + struct ttm_buffer_object *bo = &nvbo->bo; + int ret, i; + + if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) { + NV_ERROR(nouveau_bdev(bo->bdev)->dev, + "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo, + 1 << bo->mem.mem_type, memtype); + return -EINVAL; + } + + if (nvbo->pin_refcnt++) + return 0; + + ret = ttm_bo_reserve(bo, false, false, false, 0); + if (ret) + goto out; + + nouveau_bo_placement_set(nvbo, memtype); + for (i = 0; i < nvbo->placement.num_placement; i++) + nvbo->placements[i] |= TTM_PL_FLAG_NO_EVICT; + + ret = ttm_bo_validate(bo, &nvbo->placement, false, false); + if (ret == 0) { + switch (bo->mem.mem_type) { + case TTM_PL_VRAM: + dev_priv->fb_aper_free -= bo->mem.size; + break; + case TTM_PL_TT: + dev_priv->gart_info.aper_free -= bo->mem.size; + break; + default: + break; + } + } + ttm_bo_unreserve(bo); +out: + if (unlikely(ret)) + nvbo->pin_refcnt--; + return ret; +} + +int +nouveau_bo_unpin(struct nouveau_bo *nvbo) +{ + struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); + struct ttm_buffer_object *bo = &nvbo->bo; + int ret, i; + + if (--nvbo->pin_refcnt) + return 0; + + ret = ttm_bo_reserve(bo, false, false, false, 0); + if (ret) + return ret; + + for (i = 0; i < nvbo->placement.num_placement; i++) + nvbo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; + + ret = ttm_bo_validate(bo, &nvbo->placement, false, false); + if (ret == 0) { + switch (bo->mem.mem_type) { + case TTM_PL_VRAM: + dev_priv->fb_aper_free += bo->mem.size; + break; + case TTM_PL_TT: + dev_priv->gart_info.aper_free += bo->mem.size; + break; + default: + break; + } + } + + ttm_bo_unreserve(bo); + return ret; +} + +int +nouveau_bo_map(struct nouveau_bo *nvbo) +{ + int ret; + + ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0); + if (ret) + return ret; + + ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap); + ttm_bo_unreserve(&nvbo->bo); + return ret; +} + +void +nouveau_bo_unmap(struct nouveau_bo *nvbo) +{ + ttm_bo_kunmap(&nvbo->kmap); +} + +u16 +nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index) +{ + bool is_iomem; + u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); + mem = &mem[index]; + if (is_iomem) + return ioread16_native((void __force __iomem *)mem); + else + return *mem; +} + +void +nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val) +{ + bool is_iomem; + u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); + mem = &mem[index]; + if (is_iomem) + iowrite16_native(val, (void __force __iomem *)mem); + else + *mem = val; +} + +u32 +nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index) +{ + bool is_iomem; + u32 *mem = 
ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); + mem = &mem[index]; + if (is_iomem) + return ioread32_native((void __force __iomem *)mem); + else + return *mem; +} + +void +nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val) +{ + bool is_iomem; + u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); + mem = &mem[index]; + if (is_iomem) + iowrite32_native(val, (void __force __iomem *)mem); + else + *mem = val; +} + +static struct ttm_backend * +nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev) +{ + struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); + struct drm_device *dev = dev_priv->dev; + + switch (dev_priv->gart_info.type) { + case NOUVEAU_GART_AGP: + return ttm_agp_backend_init(bdev, dev->agp->bridge); + case NOUVEAU_GART_SGDMA: + return nouveau_sgdma_init_ttm(dev); + default: + NV_ERROR(dev, "Unknown GART type %d\n", + dev_priv->gart_info.type); + break; + } + + return NULL; +} + +static int +nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) +{ + /* We'll do this from user space. */ + return 0; +} + +static int +nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, + struct ttm_mem_type_manager *man) +{ + struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); + struct drm_device *dev = dev_priv->dev; + + switch (type) { + case TTM_PL_SYSTEM: + man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; + man->available_caching = TTM_PL_MASK_CACHING; + man->default_caching = TTM_PL_FLAG_CACHED; + break; + case TTM_PL_VRAM: + man->flags = TTM_MEMTYPE_FLAG_FIXED | + TTM_MEMTYPE_FLAG_MAPPABLE | + TTM_MEMTYPE_FLAG_NEEDS_IOREMAP; + man->available_caching = TTM_PL_FLAG_UNCACHED | + TTM_PL_FLAG_WC; + man->default_caching = TTM_PL_FLAG_WC; + + man->io_addr = NULL; + man->io_offset = drm_get_resource_start(dev, 1); + man->io_size = drm_get_resource_len(dev, 1); + if (man->io_size > nouveau_mem_fb_amount(dev)) + man->io_size = nouveau_mem_fb_amount(dev); + + man->gpu_offset = dev_priv->vm_vram_base; + break; + case TTM_PL_TT: + switch (dev_priv->gart_info.type) { + case NOUVEAU_GART_AGP: + man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | + TTM_MEMTYPE_FLAG_NEEDS_IOREMAP; + man->available_caching = TTM_PL_FLAG_UNCACHED; + man->default_caching = TTM_PL_FLAG_UNCACHED; + break; + case NOUVEAU_GART_SGDMA: + man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | + TTM_MEMTYPE_FLAG_CMA; + man->available_caching = TTM_PL_MASK_CACHING; + man->default_caching = TTM_PL_FLAG_CACHED; + break; + default: + NV_ERROR(dev, "Unknown GART type: %d\n", + dev_priv->gart_info.type); + return -EINVAL; + } + + man->io_offset = dev_priv->gart_info.aper_base; + man->io_size = dev_priv->gart_info.aper_size; + man->io_addr = NULL; + man->gpu_offset = dev_priv->vm_gart_base; + break; + default: + NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type); + return -EINVAL; + } + return 0; +} + +static void +nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) +{ + struct nouveau_bo *nvbo = nouveau_bo(bo); + + switch (bo->mem.mem_type) { + default: + nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM); + break; + } +} + + +/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access + * TTM_PL_{VRAM,TT} directly. 
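The copy loop coming up below pushes the transfer in bursts of at most 2047 lines of one page each, advancing the source and destination offsets as it goes. Stripped of the ring programming, the chunking amounts to the sketch below; the page size, starting offsets and page count are illustrative values only.

#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_SIZE 4096u
#define MAX_LINES_PER_BURST 2047u   /* per-burst limit used by the loop below */

static void chunked_copy(uint64_t src, uint64_t dst, unsigned page_count)
{
	while (page_count) {
		unsigned lines = page_count > MAX_LINES_PER_BURST ?
				 MAX_LINES_PER_BURST : page_count;

		/* One M2MF burst: 'lines' lines of TOY_PAGE_SIZE bytes each. */
		printf("copy %u pages: 0x%010llx -> 0x%010llx\n",
		       lines, (unsigned long long)src, (unsigned long long)dst);

		page_count -= lines;
		src += (uint64_t)lines * TOY_PAGE_SIZE;
		dst += (uint64_t)lines * TOY_PAGE_SIZE;
	}
}

int main(void)
{
	/* 5000 pages forces the loop to split: 2047 + 2047 + 906. */
	chunked_copy(0x10000000ull, 0x20000000ull, 5000);
	return 0;
}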
+ */ +static int +nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, + struct nouveau_bo *nvbo, bool evict, bool no_wait, + struct ttm_mem_reg *new_mem) +{ + struct nouveau_fence *fence = NULL; + int ret; + + ret = nouveau_fence_new(chan, &fence, true); + if (ret) + return ret; + + ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, + evict, no_wait, new_mem); + nouveau_fence_unref((void *)&fence); + return ret; +} + +static inline uint32_t +nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan, + struct ttm_mem_reg *mem) +{ + if (chan == nouveau_bdev(nvbo->bo.bdev)->channel) { + if (mem->mem_type == TTM_PL_TT) + return NvDmaGART; + return NvDmaVRAM; + } + + if (mem->mem_type == TTM_PL_TT) + return chan->gart_handle; + return chan->vram_handle; +} + +static int +nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, int no_wait, + struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) +{ + struct nouveau_bo *nvbo = nouveau_bo(bo); + struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); + struct nouveau_channel *chan; + uint64_t src_offset, dst_offset; + uint32_t page_count; + int ret; + + chan = nvbo->channel; + if (!chan || nvbo->tile_flags || nvbo->no_vm) { + chan = dev_priv->channel; + if (!chan) + return -EINVAL; + } + + src_offset = old_mem->mm_node->start << PAGE_SHIFT; + dst_offset = new_mem->mm_node->start << PAGE_SHIFT; + if (chan != dev_priv->channel) { + if (old_mem->mem_type == TTM_PL_TT) + src_offset += dev_priv->vm_gart_base; + else + src_offset += dev_priv->vm_vram_base; + + if (new_mem->mem_type == TTM_PL_TT) + dst_offset += dev_priv->vm_gart_base; + else + dst_offset += dev_priv->vm_vram_base; + } + + ret = RING_SPACE(chan, 3); + if (ret) + return ret; + BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2); + OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, old_mem)); + OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, new_mem)); + + if (dev_priv->card_type >= NV_50) { + ret = RING_SPACE(chan, 4); + if (ret) + return ret; + BEGIN_RING(chan, NvSubM2MF, 0x0200, 1); + OUT_RING(chan, 1); + BEGIN_RING(chan, NvSubM2MF, 0x021c, 1); + OUT_RING(chan, 1); + } + + page_count = new_mem->num_pages; + while (page_count) { + int line_count = (page_count > 2047) ? 
2047 : page_count; + + if (dev_priv->card_type >= NV_50) { + ret = RING_SPACE(chan, 3); + if (ret) + return ret; + BEGIN_RING(chan, NvSubM2MF, 0x0238, 2); + OUT_RING(chan, upper_32_bits(src_offset)); + OUT_RING(chan, upper_32_bits(dst_offset)); + } + ret = RING_SPACE(chan, 11); + if (ret) + return ret; + BEGIN_RING(chan, NvSubM2MF, + NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); + OUT_RING(chan, lower_32_bits(src_offset)); + OUT_RING(chan, lower_32_bits(dst_offset)); + OUT_RING(chan, PAGE_SIZE); /* src_pitch */ + OUT_RING(chan, PAGE_SIZE); /* dst_pitch */ + OUT_RING(chan, PAGE_SIZE); /* line_length */ + OUT_RING(chan, line_count); + OUT_RING(chan, (1<<8)|(1<<0)); + OUT_RING(chan, 0); + BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1); + OUT_RING(chan, 0); + + page_count -= line_count; + src_offset += (PAGE_SIZE * line_count); + dst_offset += (PAGE_SIZE * line_count); + } + + return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem); +} + +static int +nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, + bool no_wait, struct ttm_mem_reg *new_mem) +{ + u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; + struct ttm_placement placement; + struct ttm_mem_reg tmp_mem; + int ret; + + placement.fpfn = placement.lpfn = 0; + placement.num_placement = placement.num_busy_placement = 1; + placement.placement = &placement_memtype; + + tmp_mem = *new_mem; + tmp_mem.mm_node = NULL; + ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait); + if (ret) + return ret; + + ret = ttm_tt_bind(bo->ttm, &tmp_mem); + if (ret) + goto out; + + ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, &tmp_mem); + if (ret) + goto out; + + ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem); +out: + if (tmp_mem.mm_node) { + spin_lock(&bo->bdev->glob->lru_lock); + drm_mm_put_block(tmp_mem.mm_node); + spin_unlock(&bo->bdev->glob->lru_lock); + } + + return ret; +} + +static int +nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, + bool no_wait, struct ttm_mem_reg *new_mem) +{ + u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; + struct ttm_placement placement; + struct ttm_mem_reg tmp_mem; + int ret; + + placement.fpfn = placement.lpfn = 0; + placement.num_placement = placement.num_busy_placement = 1; + placement.placement = &placement_memtype; + + tmp_mem = *new_mem; + tmp_mem.mm_node = NULL; + ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait); + if (ret) + return ret; + + ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem); + if (ret) + goto out; + + ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, new_mem); + if (ret) + goto out; + +out: + if (tmp_mem.mm_node) { + spin_lock(&bo->bdev->glob->lru_lock); + drm_mm_put_block(tmp_mem.mm_node); + spin_unlock(&bo->bdev->glob->lru_lock); + } + + return ret; +} + +static int +nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, + bool no_wait, struct ttm_mem_reg *new_mem) +{ + struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); + struct nouveau_bo *nvbo = nouveau_bo(bo); + struct drm_device *dev = dev_priv->dev; + struct ttm_mem_reg *old_mem = &bo->mem; + int ret; + + if (dev_priv->card_type == NV_50 && new_mem->mem_type == TTM_PL_VRAM && + !nvbo->no_vm) { + uint64_t offset = new_mem->mm_node->start << PAGE_SHIFT; + + ret = nv50_mem_vm_bind_linear(dev, + offset + dev_priv->vm_vram_base, + new_mem->size, nvbo->tile_flags, + offset); + if (ret) + return ret; + } + + if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE) + return 
ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); + + if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) { + BUG_ON(bo->mem.mm_node != NULL); + bo->mem = *new_mem; + new_mem->mm_node = NULL; + return 0; + } + + if (new_mem->mem_type == TTM_PL_SYSTEM) { + if (old_mem->mem_type == TTM_PL_SYSTEM) + return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); + if (nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem)) + return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); + } else if (old_mem->mem_type == TTM_PL_SYSTEM) { + if (nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem)) + return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); + } else { + if (nouveau_bo_move_m2mf(bo, evict, no_wait, old_mem, new_mem)) + return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); + } + + return 0; +} + +static int +nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) +{ + return 0; +} + +struct ttm_bo_driver nouveau_bo_driver = { + .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry, + .invalidate_caches = nouveau_bo_invalidate_caches, + .init_mem_type = nouveau_bo_init_mem_type, + .evict_flags = nouveau_bo_evict_flags, + .move = nouveau_bo_move, + .verify_access = nouveau_bo_verify_access, + .sync_obj_signaled = nouveau_fence_signalled, + .sync_obj_wait = nouveau_fence_wait, + .sync_obj_flush = nouveau_fence_flush, + .sync_obj_unref = nouveau_fence_unref, + .sync_obj_ref = nouveau_fence_ref, +}; + diff --git a/drivers/gpu/drm/nouveau/nouveau_calc.c b/drivers/gpu/drm/nouveau/nouveau_calc.c new file mode 100644 index 000000000000..ee2b84504d05 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_calc.c @@ -0,0 +1,478 @@ +/* + * Copyright 1993-2003 NVIDIA, Corporation + * Copyright 2007-2009 Stuart Bennett + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF + * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "drmP.h" +#include "nouveau_drv.h" +#include "nouveau_hw.h" + +/****************************************************************************\ +* * +* The video arbitration routines calculate some "magic" numbers. Fixes * +* the snow seen when accessing the framebuffer without it. * +* It just works (I hope). 
* +* * +\****************************************************************************/ + +struct nv_fifo_info { + int lwm; + int burst; +}; + +struct nv_sim_state { + int pclk_khz; + int mclk_khz; + int nvclk_khz; + int bpp; + int mem_page_miss; + int mem_latency; + int memory_type; + int memory_width; + int two_heads; +}; + +static void +nv04_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb) +{ + int pagemiss, cas, width, bpp; + int nvclks, mclks, pclks, crtpagemiss; + int found, mclk_extra, mclk_loop, cbs, m1, p1; + int mclk_freq, pclk_freq, nvclk_freq; + int us_m, us_n, us_p, crtc_drain_rate; + int cpm_us, us_crt, clwm; + + pclk_freq = arb->pclk_khz; + mclk_freq = arb->mclk_khz; + nvclk_freq = arb->nvclk_khz; + pagemiss = arb->mem_page_miss; + cas = arb->mem_latency; + width = arb->memory_width >> 6; + bpp = arb->bpp; + cbs = 128; + + pclks = 2; + nvclks = 10; + mclks = 13 + cas; + mclk_extra = 3; + found = 0; + + while (!found) { + found = 1; + + mclk_loop = mclks + mclk_extra; + us_m = mclk_loop * 1000 * 1000 / mclk_freq; + us_n = nvclks * 1000 * 1000 / nvclk_freq; + us_p = nvclks * 1000 * 1000 / pclk_freq; + + crtc_drain_rate = pclk_freq * bpp / 8; + crtpagemiss = 2; + crtpagemiss += 1; + cpm_us = crtpagemiss * pagemiss * 1000 * 1000 / mclk_freq; + us_crt = cpm_us + us_m + us_n + us_p; + clwm = us_crt * crtc_drain_rate / (1000 * 1000); + clwm++; + + m1 = clwm + cbs - 512; + p1 = m1 * pclk_freq / mclk_freq; + p1 = p1 * bpp / 8; + if ((p1 < m1 && m1 > 0) || clwm > 519) { + found = !mclk_extra; + mclk_extra--; + } + if (clwm < 384) + clwm = 384; + + fifo->lwm = clwm; + fifo->burst = cbs; + } +} + +static void +nv10_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb) +{ + int fill_rate, drain_rate; + int pclks, nvclks, mclks, xclks; + int pclk_freq, nvclk_freq, mclk_freq; + int fill_lat, extra_lat; + int max_burst_o, max_burst_l; + int fifo_len, min_lwm, max_lwm; + const int burst_lat = 80; /* Maximum allowable latency due + * to the CRTC FIFO burst. (ns) */ + + pclk_freq = arb->pclk_khz; + nvclk_freq = arb->nvclk_khz; + mclk_freq = arb->mclk_khz; + + fill_rate = mclk_freq * arb->memory_width / 8; /* kB/s */ + drain_rate = pclk_freq * arb->bpp / 8; /* kB/s */ + + fifo_len = arb->two_heads ? 1536 : 1024; /* B */ + + /* Fixed FIFO refill latency. */ + + pclks = 4; /* lwm detect. */ + + nvclks = 3 /* lwm -> sync. */ + + 2 /* fbi bus cycles (1 req + 1 busy) */ + + 1 /* 2 edge sync. may be very close to edge so + * just put one. */ + + 1 /* fbi_d_rdv_n */ + + 1 /* Fbi_d_rdata */ + + 1; /* crtfifo load */ + + mclks = 1 /* 2 edge sync. may be very close to edge so + * just put one. */ + + 1 /* arb_hp_req */ + + 5 /* tiling pipeline */ + + 2 /* latency fifo */ + + 2 /* memory request to fbio block */ + + 7; /* data returned from fbio block */ + + /* Need to accumulate 256 bits for read */ + mclks += (arb->memory_type == 0 ? 2 : 1) + * arb->memory_width / 32; + + fill_lat = mclks * 1000 * 1000 / mclk_freq /* minimum mclk latency */ + + nvclks * 1000 * 1000 / nvclk_freq /* nvclk latency */ + + pclks * 1000 * 1000 / pclk_freq; /* pclk latency */ + + /* Conditional FIFO refill latency. */ + + xclks = 2 * arb->mem_page_miss + mclks /* Extra latency due to + * the overlay. */ + + 2 * arb->mem_page_miss /* Extra pagemiss latency. */ + + (arb->bpp == 32 ? 8 : 4); /* Margin of error. */ + + extra_lat = xclks * 1000 * 1000 / mclk_freq; + + if (arb->two_heads) + /* Account for another CRTC. 
*/ + extra_lat += fill_lat + extra_lat + burst_lat; + + /* FIFO burst */ + + /* Max burst not leading to overflows. */ + max_burst_o = (1 + fifo_len - extra_lat * drain_rate / (1000 * 1000)) + * (fill_rate / 1000) / ((fill_rate - drain_rate) / 1000); + fifo->burst = min(max_burst_o, 1024); + + /* Max burst value with an acceptable latency. */ + max_burst_l = burst_lat * fill_rate / (1000 * 1000); + fifo->burst = min(max_burst_l, fifo->burst); + + fifo->burst = rounddown_pow_of_two(fifo->burst); + + /* FIFO low watermark */ + + min_lwm = (fill_lat + extra_lat) * drain_rate / (1000 * 1000) + 1; + max_lwm = fifo_len - fifo->burst + + fill_lat * drain_rate / (1000 * 1000) + + fifo->burst * drain_rate / fill_rate; + + fifo->lwm = min_lwm + 10 * (max_lwm - min_lwm) / 100; /* Empirical. */ +} + +static void +nv04_update_arb(struct drm_device *dev, int VClk, int bpp, + int *burst, int *lwm) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nv_fifo_info fifo_data; + struct nv_sim_state sim_data; + int MClk = nouveau_hw_get_clock(dev, MPLL); + int NVClk = nouveau_hw_get_clock(dev, NVPLL); + uint32_t cfg1 = nvReadFB(dev, NV_PFB_CFG1); + + sim_data.pclk_khz = VClk; + sim_data.mclk_khz = MClk; + sim_data.nvclk_khz = NVClk; + sim_data.bpp = bpp; + sim_data.two_heads = nv_two_heads(dev); + if ((dev->pci_device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ || + (dev->pci_device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) { + uint32_t type; + + pci_read_config_dword(pci_get_bus_and_slot(0, 1), 0x7c, &type); + + sim_data.memory_type = (type >> 12) & 1; + sim_data.memory_width = 64; + sim_data.mem_latency = 3; + sim_data.mem_page_miss = 10; + } else { + sim_data.memory_type = nvReadFB(dev, NV_PFB_CFG0) & 0x1; + sim_data.memory_width = (nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) & 0x10) ? 
128 : 64; + sim_data.mem_latency = cfg1 & 0xf; + sim_data.mem_page_miss = ((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1); + } + + if (dev_priv->card_type == NV_04) + nv04_calc_arb(&fifo_data, &sim_data); + else + nv10_calc_arb(&fifo_data, &sim_data); + + *burst = ilog2(fifo_data.burst >> 4); + *lwm = fifo_data.lwm >> 3; +} + +static void +nv30_update_arb(int *burst, int *lwm) +{ + unsigned int fifo_size, burst_size, graphics_lwm; + + fifo_size = 2048; + burst_size = 512; + graphics_lwm = fifo_size - burst_size; + + *burst = ilog2(burst_size >> 5); + *lwm = graphics_lwm >> 3; +} + +void +nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (dev_priv->card_type < NV_30) + nv04_update_arb(dev, vclk, bpp, burst, lwm); + else if ((dev->pci_device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ || + (dev->pci_device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) { + *burst = 128; + *lwm = 0x0480; + } else + nv30_update_arb(burst, lwm); +} + +static int +getMNP_single(struct drm_device *dev, struct pll_lims *pll_lim, int clk, + struct nouveau_pll_vals *bestpv) +{ + /* Find M, N and P for a single stage PLL + * + * Note that some bioses (NV3x) have lookup tables of precomputed MNP + * values, but we're too lazy to use those atm + * + * "clk" parameter in kHz + * returns calculated clock + */ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int cv = dev_priv->vbios->chip_version; + int minvco = pll_lim->vco1.minfreq, maxvco = pll_lim->vco1.maxfreq; + int minM = pll_lim->vco1.min_m, maxM = pll_lim->vco1.max_m; + int minN = pll_lim->vco1.min_n, maxN = pll_lim->vco1.max_n; + int minU = pll_lim->vco1.min_inputfreq; + int maxU = pll_lim->vco1.max_inputfreq; + int minP = pll_lim->max_p ? pll_lim->min_p : 0; + int maxP = pll_lim->max_p ? pll_lim->max_p : pll_lim->max_usable_log2p; + int crystal = pll_lim->refclk; + int M, N, thisP, P; + int clkP, calcclk; + int delta, bestdelta = INT_MAX; + int bestclk = 0; + + /* this division verified for nv20, nv18, nv28 (Haiku), and nv34 */ + /* possibly correlated with introduction of 27MHz crystal */ + if (dev_priv->card_type < NV_50) { + if (cv < 0x17 || cv == 0x1a || cv == 0x20) { + if (clk > 250000) + maxM = 6; + if (clk > 340000) + maxM = 2; + } else if (cv < 0x40) { + if (clk > 150000) + maxM = 6; + if (clk > 200000) + maxM = 4; + if (clk > 340000) + maxM = 2; + } + } + + P = pll_lim->max_p ? maxP : (1 << maxP); + if ((clk * P) < minvco) { + minvco = clk * maxP; + maxvco = minvco * 2; + } + + if (clk + clk/200 > maxvco) /* +0.5% */ + maxvco = clk + clk/200; + + /* NV34 goes maxlog2P->0, NV20 goes 0->maxlog2P */ + for (thisP = minP; thisP <= maxP; thisP++) { + P = pll_lim->max_p ? thisP : (1 << thisP); + clkP = clk * P; + + if (clkP < minvco) + continue; + if (clkP > maxvco) + return bestclk; + + for (M = minM; M <= maxM; M++) { + if (crystal/M < minU) + return bestclk; + if (crystal/M > maxU) + continue; + + /* add crystal/2 to round better */ + N = (clkP * M + crystal/2) / crystal; + + if (N < minN) + continue; + if (N > maxN) + break; + + /* more rounding additions */ + calcclk = ((N * crystal + P/2) / P + M/2) / M; + delta = abs(calcclk - clk); + /* we do an exhaustive search rather than terminating + * on an optimality condition... 
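The candidate clocks evaluated by the search above reduce to crystal * N / (M * 2^log2P), with the rounding shown in the calcclk line. The sketch below recomputes one candidate and checks it against VCO limits; the crystal frequency, limits and M/N/P values are example numbers, not ones taken from a real PLL limits table.

#include <stdio.h>

/*
 * Output clock of a single-stage PLL for a candidate (M, N, log2P):
 * roughly crystal * N / (M * 2^log2P), with the same rounding used by
 * the exhaustive search above.
 */
static int pll_single_clk(int crystal_khz, int N, int M, int log2P)
{
	int P = 1 << log2P;

	return ((N * crystal_khz + P / 2) / P + M / 2) / M;
}

int main(void)
{
	/* Example limits and crystal only; the real ones come from the BIOS
	 * PLL limits table parsed into struct pll_lims. */
	int crystal = 27000;             /* 27 MHz reference, in kHz */
	int minvco = 128000, maxvco = 256000;
	int N = 55, M = 6, log2P = 1;
	int vco = crystal * N / M;       /* frequency before the post divider */
	int out = pll_single_clk(crystal, N, M, log2P);

	printf("VCO %d kHz (%s), output %d kHz\n", vco,
	       vco >= minvco && vco <= maxvco ? "in range" : "out of range",
	       out);
	return 0;
}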
+ */ + if (delta < bestdelta) { + bestdelta = delta; + bestclk = calcclk; + bestpv->N1 = N; + bestpv->M1 = M; + bestpv->log2P = thisP; + if (delta == 0) /* except this one */ + return bestclk; + } + } + } + + return bestclk; +} + +static int +getMNP_double(struct drm_device *dev, struct pll_lims *pll_lim, int clk, + struct nouveau_pll_vals *bestpv) +{ + /* Find M, N and P for a two stage PLL + * + * Note that some bioses (NV30+) have lookup tables of precomputed MNP + * values, but we're too lazy to use those atm + * + * "clk" parameter in kHz + * returns calculated clock + */ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int chip_version = dev_priv->vbios->chip_version; + int minvco1 = pll_lim->vco1.minfreq, maxvco1 = pll_lim->vco1.maxfreq; + int minvco2 = pll_lim->vco2.minfreq, maxvco2 = pll_lim->vco2.maxfreq; + int minU1 = pll_lim->vco1.min_inputfreq, minU2 = pll_lim->vco2.min_inputfreq; + int maxU1 = pll_lim->vco1.max_inputfreq, maxU2 = pll_lim->vco2.max_inputfreq; + int minM1 = pll_lim->vco1.min_m, maxM1 = pll_lim->vco1.max_m; + int minN1 = pll_lim->vco1.min_n, maxN1 = pll_lim->vco1.max_n; + int minM2 = pll_lim->vco2.min_m, maxM2 = pll_lim->vco2.max_m; + int minN2 = pll_lim->vco2.min_n, maxN2 = pll_lim->vco2.max_n; + int maxlog2P = pll_lim->max_usable_log2p; + int crystal = pll_lim->refclk; + bool fixedgain2 = (minM2 == maxM2 && minN2 == maxN2); + int M1, N1, M2, N2, log2P; + int clkP, calcclk1, calcclk2, calcclkout; + int delta, bestdelta = INT_MAX; + int bestclk = 0; + + int vco2 = (maxvco2 - maxvco2/200) / 2; + for (log2P = 0; clk && log2P < maxlog2P && clk <= (vco2 >> log2P); log2P++) + ; + clkP = clk << log2P; + + if (maxvco2 < clk + clk/200) /* +0.5% */ + maxvco2 = clk + clk/200; + + for (M1 = minM1; M1 <= maxM1; M1++) { + if (crystal/M1 < minU1) + return bestclk; + if (crystal/M1 > maxU1) + continue; + + for (N1 = minN1; N1 <= maxN1; N1++) { + calcclk1 = crystal * N1 / M1; + if (calcclk1 < minvco1) + continue; + if (calcclk1 > maxvco1) + break; + + for (M2 = minM2; M2 <= maxM2; M2++) { + if (calcclk1/M2 < minU2) + break; + if (calcclk1/M2 > maxU2) + continue; + + /* add calcclk1/2 to round better */ + N2 = (clkP * M2 + calcclk1/2) / calcclk1; + if (N2 < minN2) + continue; + if (N2 > maxN2) + break; + + if (!fixedgain2) { + if (chip_version < 0x60) + if (N2/M2 < 4 || N2/M2 > 10) + continue; + + calcclk2 = calcclk1 * N2 / M2; + if (calcclk2 < minvco2) + break; + if (calcclk2 > maxvco2) + continue; + } else + calcclk2 = calcclk1; + + calcclkout = calcclk2 >> log2P; + delta = abs(calcclkout - clk); + /* we do an exhaustive search rather than terminating + * on an optimality condition... 
+ */ + if (delta < bestdelta) { + bestdelta = delta; + bestclk = calcclkout; + bestpv->N1 = N1; + bestpv->M1 = M1; + bestpv->N2 = N2; + bestpv->M2 = M2; + bestpv->log2P = log2P; + if (delta == 0) /* except this one */ + return bestclk; + } + } + } + } + + return bestclk; +} + +int +nouveau_calc_pll_mnp(struct drm_device *dev, struct pll_lims *pll_lim, int clk, + struct nouveau_pll_vals *pv) +{ + int outclk; + + if (!pll_lim->vco2.maxfreq) + outclk = getMNP_single(dev, pll_lim, clk, pv); + else + outclk = getMNP_double(dev, pll_lim, clk, pv); + + if (!outclk) + NV_ERROR(dev, "Could not find a compatible set of PLL values\n"); + + return outclk; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c new file mode 100644 index 000000000000..9aaa972f8822 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -0,0 +1,468 @@ +/* + * Copyright 2005-2006 Stephane Marchesin + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" +#include "nouveau_drm.h" +#include "nouveau_dma.h" + +static int +nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_bo *pb = chan->pushbuf_bo; + struct nouveau_gpuobj *pushbuf = NULL; + uint32_t start = pb->bo.mem.mm_node->start << PAGE_SHIFT; + int ret; + + if (pb->bo.mem.mem_type == TTM_PL_TT) { + ret = nouveau_gpuobj_gart_dma_new(chan, 0, + dev_priv->gart_info.aper_size, + NV_DMA_ACCESS_RO, &pushbuf, + NULL); + chan->pushbuf_base = start; + } else + if (dev_priv->card_type != NV_04) { + ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, + dev_priv->fb_available_size, + NV_DMA_ACCESS_RO, + NV_DMA_TARGET_VIDMEM, &pushbuf); + chan->pushbuf_base = start; + } else { + /* NV04 cmdbuf hack, from original ddx.. not sure of it's + * exact reason for existing :) PCI access to cmdbuf in + * VRAM. 
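The three branches of nouveau_channel_pushbuf_ctxdma_init() below boil down to a small decision: a pushbuf living in GART gets a DMA object over the GART aperture, a VRAM pushbuf normally gets a linear VRAM object, and NV04 falls back to reaching VRAM through the PCI BAR. The helper below only restates that choice; the enum names are invented for the illustration.

#include <stdio.h>

/* Made-up names for the three cases handled in the function below. */
enum pushbuf_target { PB_GART, PB_VRAM, PB_PCI_BAR1 };

static enum pushbuf_target pick_pushbuf_target(int buf_in_gart, int is_nv04)
{
	if (buf_in_gart)
		return PB_GART;      /* DMA object over the GART aperture   */
	if (!is_nv04)
		return PB_VRAM;      /* linear DMA object over video memory */
	return PB_PCI_BAR1;          /* NV04 quirk: reach VRAM through BAR1 */
}

int main(void)
{
	printf("%d %d %d\n",
	       pick_pushbuf_target(1, 0),
	       pick_pushbuf_target(0, 0),
	       pick_pushbuf_target(0, 1));
	return 0;
}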
+ */ + ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, + drm_get_resource_start(dev, 1), + dev_priv->fb_available_size, + NV_DMA_ACCESS_RO, + NV_DMA_TARGET_PCI, &pushbuf); + chan->pushbuf_base = start; + } + + ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf); + if (ret) { + NV_ERROR(dev, "Error referencing pushbuf ctxdma: %d\n", ret); + if (pushbuf != dev_priv->gart_info.sg_ctxdma) + nouveau_gpuobj_del(dev, &pushbuf); + return ret; + } + + return 0; +} + +static struct nouveau_bo * +nouveau_channel_user_pushbuf_alloc(struct drm_device *dev) +{ + struct nouveau_bo *pushbuf = NULL; + int location, ret; + + if (nouveau_vram_pushbuf) + location = TTM_PL_FLAG_VRAM; + else + location = TTM_PL_FLAG_TT; + + ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, false, + true, &pushbuf); + if (ret) { + NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret); + return NULL; + } + + ret = nouveau_bo_pin(pushbuf, location); + if (ret) { + NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret); + nouveau_bo_ref(NULL, &pushbuf); + return NULL; + } + + return pushbuf; +} + +/* allocates and initializes a fifo for user space consumption */ +int +nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, + struct drm_file *file_priv, + uint32_t vram_handle, uint32_t tt_handle) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; + struct nouveau_channel *chan; + int channel, user; + int ret; + + /* + * Alright, here is the full story + * Nvidia cards have multiple hw fifo contexts (praise them for that, + * no complicated crash-prone context switches) + * We allocate a new context for each app and let it write to it + * directly (woo, full userspace command submission !) + * When there are no more contexts, you lost + */ + for (channel = 0; channel < pfifo->channels; channel++) { + if (dev_priv->fifos[channel] == NULL) + break; + } + + /* no more fifos. you lost. 
*/ + if (channel == pfifo->channels) + return -EINVAL; + + dev_priv->fifos[channel] = kzalloc(sizeof(struct nouveau_channel), + GFP_KERNEL); + if (!dev_priv->fifos[channel]) + return -ENOMEM; + dev_priv->fifo_alloc_count++; + chan = dev_priv->fifos[channel]; + INIT_LIST_HEAD(&chan->nvsw.vbl_wait); + INIT_LIST_HEAD(&chan->fence.pending); + chan->dev = dev; + chan->id = channel; + chan->file_priv = file_priv; + chan->vram_handle = vram_handle; + chan->gart_handle = tt_handle; + + NV_INFO(dev, "Allocating FIFO number %d\n", channel); + + /* Allocate DMA push buffer */ + chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev); + if (!chan->pushbuf_bo) { + ret = -ENOMEM; + NV_ERROR(dev, "pushbuf %d\n", ret); + nouveau_channel_free(chan); + return ret; + } + + /* Locate channel's user control regs */ + if (dev_priv->card_type < NV_40) + user = NV03_USER(channel); + else + if (dev_priv->card_type < NV_50) + user = NV40_USER(channel); + else + user = NV50_USER(channel); + + chan->user = ioremap(pci_resource_start(dev->pdev, 0) + user, + PAGE_SIZE); + if (!chan->user) { + NV_ERROR(dev, "ioremap of regs failed.\n"); + nouveau_channel_free(chan); + return -ENOMEM; + } + chan->user_put = 0x40; + chan->user_get = 0x44; + + /* Allocate space for per-channel fixed notifier memory */ + ret = nouveau_notifier_init_channel(chan); + if (ret) { + NV_ERROR(dev, "ntfy %d\n", ret); + nouveau_channel_free(chan); + return ret; + } + + /* Setup channel's default objects */ + ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle); + if (ret) { + NV_ERROR(dev, "gpuobj %d\n", ret); + nouveau_channel_free(chan); + return ret; + } + + /* Create a dma object for the push buffer */ + ret = nouveau_channel_pushbuf_ctxdma_init(chan); + if (ret) { + NV_ERROR(dev, "pbctxdma %d\n", ret); + nouveau_channel_free(chan); + return ret; + } + + /* disable the fifo caches */ + pfifo->reassign(dev, false); + + /* Create a graphics context for new channel */ + ret = pgraph->create_context(chan); + if (ret) { + nouveau_channel_free(chan); + return ret; + } + + /* Construct inital RAMFC for new channel */ + ret = pfifo->create_context(chan); + if (ret) { + nouveau_channel_free(chan); + return ret; + } + + pfifo->reassign(dev, true); + + ret = nouveau_dma_init(chan); + if (!ret) + ret = nouveau_fence_init(chan); + if (ret) { + nouveau_channel_free(chan); + return ret; + } + + nouveau_debugfs_channel_init(chan); + + NV_INFO(dev, "%s: initialised FIFO %d\n", __func__, channel); + *chan_ret = chan; + return 0; +} + +int +nouveau_channel_idle(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_engine *engine = &dev_priv->engine; + uint32_t caches; + int idle; + + if (!chan) { + NV_ERROR(dev, "no channel...\n"); + return 1; + } + + caches = nv_rd32(dev, NV03_PFIFO_CACHES); + nv_wr32(dev, NV03_PFIFO_CACHES, caches & ~1); + + if (engine->fifo.channel_id(dev) != chan->id) { + struct nouveau_gpuobj *ramfc = + chan->ramfc ? 
chan->ramfc->gpuobj : NULL; + + if (!ramfc) { + NV_ERROR(dev, "No RAMFC for channel %d\n", chan->id); + return 1; + } + + engine->instmem.prepare_access(dev, false); + if (nv_ro32(dev, ramfc, 0) != nv_ro32(dev, ramfc, 1)) + idle = 0; + else + idle = 1; + engine->instmem.finish_access(dev); + } else { + idle = (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET) == + nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT)); + } + + nv_wr32(dev, NV03_PFIFO_CACHES, caches); + return idle; +} + +/* stops a fifo */ +void +nouveau_channel_free(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; + unsigned long flags; + int ret; + + NV_INFO(dev, "%s: freeing fifo %d\n", __func__, chan->id); + + nouveau_debugfs_channel_fini(chan); + + /* Give outstanding push buffers a chance to complete */ + spin_lock_irqsave(&chan->fence.lock, flags); + nouveau_fence_update(chan); + spin_unlock_irqrestore(&chan->fence.lock, flags); + if (chan->fence.sequence != chan->fence.sequence_ack) { + struct nouveau_fence *fence = NULL; + + ret = nouveau_fence_new(chan, &fence, true); + if (ret == 0) { + ret = nouveau_fence_wait(fence, NULL, false, false); + nouveau_fence_unref((void *)&fence); + } + + if (ret) + NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id); + } + + /* Ensure all outstanding fences are signaled. They should be if the + * above attempts at idling were OK, but if we failed this'll tell TTM + * we're done with the buffers. + */ + nouveau_fence_fini(chan); + + /* Ensure the channel is no longer active on the GPU */ + pfifo->reassign(dev, false); + + if (pgraph->channel(dev) == chan) { + pgraph->fifo_access(dev, false); + pgraph->unload_context(dev); + pgraph->fifo_access(dev, true); + } + pgraph->destroy_context(chan); + + if (pfifo->channel_id(dev) == chan->id) { + pfifo->disable(dev); + pfifo->unload_context(dev); + pfifo->enable(dev); + } + pfifo->destroy_context(chan); + + pfifo->reassign(dev, true); + + /* Release the channel's resources */ + nouveau_gpuobj_ref_del(dev, &chan->pushbuf); + if (chan->pushbuf_bo) { + nouveau_bo_unpin(chan->pushbuf_bo); + nouveau_bo_ref(NULL, &chan->pushbuf_bo); + } + nouveau_gpuobj_channel_takedown(chan); + nouveau_notifier_takedown_channel(chan); + if (chan->user) + iounmap(chan->user); + + dev_priv->fifos[chan->id] = NULL; + dev_priv->fifo_alloc_count--; + kfree(chan); +} + +/* cleans up all the fifos from file_priv */ +void +nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_engine *engine = &dev_priv->engine; + int i; + + NV_DEBUG(dev, "clearing FIFO enables from file_priv\n"); + for (i = 0; i < engine->fifo.channels; i++) { + struct nouveau_channel *chan = dev_priv->fifos[i]; + + if (chan && chan->file_priv == file_priv) + nouveau_channel_free(chan); + } +} + +int +nouveau_channel_owner(struct drm_device *dev, struct drm_file *file_priv, + int channel) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_engine *engine = &dev_priv->engine; + + if (channel >= engine->fifo.channels) + return 0; + if (dev_priv->fifos[channel] == NULL) + return 0; + + return (dev_priv->fifos[channel]->file_priv == file_priv); +} + +/*********************************** + * ioctls wrapping the functions + ***********************************/ + +static int 
+nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_nouveau_channel_alloc *init = data; + struct nouveau_channel *chan; + int ret; + + NOUVEAU_CHECK_INITIALISED_WITH_RETURN; + + if (dev_priv->engine.graph.accel_blocked) + return -ENODEV; + + if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0) + return -EINVAL; + + ret = nouveau_channel_alloc(dev, &chan, file_priv, + init->fb_ctxdma_handle, + init->tt_ctxdma_handle); + if (ret) + return ret; + init->channel = chan->id; + + init->subchan[0].handle = NvM2MF; + if (dev_priv->card_type < NV_50) + init->subchan[0].grclass = 0x0039; + else + init->subchan[0].grclass = 0x5039; + init->nr_subchan = 1; + + /* Named memory object area */ + ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem, + &init->notifier_handle); + if (ret) { + nouveau_channel_free(chan); + return ret; + } + + return 0; +} + +static int +nouveau_ioctl_fifo_free(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_nouveau_channel_free *cfree = data; + struct nouveau_channel *chan; + + NOUVEAU_CHECK_INITIALISED_WITH_RETURN; + NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan); + + nouveau_channel_free(chan); + return 0; +} + +/*********************************** + * finally, the ioctl table + ***********************************/ + +struct drm_ioctl_desc nouveau_ioctls[] = { + DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT, nouveau_ioctl_card_init, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL, nouveau_gem_ioctl_pushbuf_call, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PIN, nouveau_gem_ioctl_pin, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_UNPIN, nouveau_gem_ioctl_unpin, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL2, nouveau_gem_ioctl_pushbuf_call2, DRM_AUTH), +}; + +int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls); diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c new file mode 100644 index 000000000000..032cf098fa1c --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -0,0 +1,824 @@ +/* + * Copyright (C) 2008 Maarten Maathuis. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "drmP.h" +#include "drm_edid.h" +#include "drm_crtc_helper.h" +#include "nouveau_reg.h" +#include "nouveau_drv.h" +#include "nouveau_encoder.h" +#include "nouveau_crtc.h" +#include "nouveau_connector.h" +#include "nouveau_hw.h" + +static inline struct drm_encoder_slave_funcs * +get_slave_funcs(struct nouveau_encoder *enc) +{ + return to_encoder_slave(to_drm_encoder(enc))->slave_funcs; +} + +static struct nouveau_encoder * +find_encoder_by_type(struct drm_connector *connector, int type) +{ + struct drm_device *dev = connector->dev; + struct nouveau_encoder *nv_encoder; + struct drm_mode_object *obj; + int i, id; + + for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { + id = connector->encoder_ids[i]; + if (!id) + break; + + obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER); + if (!obj) + continue; + nv_encoder = nouveau_encoder(obj_to_encoder(obj)); + + if (type == OUTPUT_ANY || nv_encoder->dcb->type == type) + return nv_encoder; + } + + return NULL; +} + +struct nouveau_connector * +nouveau_encoder_connector_get(struct nouveau_encoder *encoder) +{ + struct drm_device *dev = to_drm_encoder(encoder)->dev; + struct drm_connector *drm_connector; + + list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head) { + if (drm_connector->encoder == to_drm_encoder(encoder)) + return nouveau_connector(drm_connector); + } + + return NULL; +} + + +static void +nouveau_connector_destroy(struct drm_connector *drm_connector) +{ + struct nouveau_connector *connector = nouveau_connector(drm_connector); + struct drm_device *dev = connector->base.dev; + + NV_DEBUG(dev, "\n"); + + if (!connector) + return; + + drm_sysfs_connector_remove(drm_connector); + drm_connector_cleanup(drm_connector); + kfree(drm_connector); +} + +static void +nouveau_connector_ddc_prepare(struct drm_connector *connector, int *flags) +{ + struct drm_nouveau_private *dev_priv = connector->dev->dev_private; + + if (dev_priv->card_type >= NV_50) + return; + + *flags = 0; + if (NVLockVgaCrtcs(dev_priv->dev, false)) + *flags |= 1; + if (nv_heads_tied(dev_priv->dev)) + *flags |= 2; + + if (*flags & 2) + NVSetOwner(dev_priv->dev, 0); /* necessary? 
*/ +} + +static void +nouveau_connector_ddc_finish(struct drm_connector *connector, int flags) +{ + struct drm_nouveau_private *dev_priv = connector->dev->dev_private; + + if (dev_priv->card_type >= NV_50) + return; + + if (flags & 2) + NVSetOwner(dev_priv->dev, 4); + if (flags & 1) + NVLockVgaCrtcs(dev_priv->dev, true); +} + +static struct nouveau_i2c_chan * +nouveau_connector_ddc_detect(struct drm_connector *connector, + struct nouveau_encoder **pnv_encoder) +{ + struct drm_device *dev = connector->dev; + uint8_t out_buf[] = { 0x0, 0x0}, buf[2]; + int ret, flags, i; + + struct i2c_msg msgs[] = { + { + .addr = 0x50, + .flags = 0, + .len = 1, + .buf = out_buf, + }, + { + .addr = 0x50, + .flags = I2C_M_RD, + .len = 1, + .buf = buf, + } + }; + + for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { + struct nouveau_i2c_chan *i2c = NULL; + struct nouveau_encoder *nv_encoder; + struct drm_mode_object *obj; + int id; + + id = connector->encoder_ids[i]; + if (!id) + break; + + obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER); + if (!obj) + continue; + nv_encoder = nouveau_encoder(obj_to_encoder(obj)); + + if (nv_encoder->dcb->i2c_index < 0xf) + i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); + if (!i2c) + continue; + + nouveau_connector_ddc_prepare(connector, &flags); + ret = i2c_transfer(&i2c->adapter, msgs, 2); + nouveau_connector_ddc_finish(connector, flags); + + if (ret == 2) { + *pnv_encoder = nv_encoder; + return i2c; + } + } + + return NULL; +} + +static void +nouveau_connector_set_encoder(struct drm_connector *connector, + struct nouveau_encoder *nv_encoder) +{ + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct drm_nouveau_private *dev_priv = connector->dev->dev_private; + struct drm_device *dev = connector->dev; + + if (nv_connector->detected_encoder == nv_encoder) + return; + nv_connector->detected_encoder = nv_encoder; + + if (nv_encoder->dcb->type == OUTPUT_LVDS || + nv_encoder->dcb->type == OUTPUT_TMDS) { + connector->doublescan_allowed = false; + connector->interlace_allowed = false; + } else { + connector->doublescan_allowed = true; + if (dev_priv->card_type == NV_20 || + (dev_priv->card_type == NV_10 && + (dev->pci_device & 0x0ff0) != 0x0100 && + (dev->pci_device & 0x0ff0) != 0x0150)) + /* HW is broken */ + connector->interlace_allowed = false; + else + connector->interlace_allowed = true; + } + + if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) { + drm_connector_property_set_value(connector, + dev->mode_config.dvi_i_subconnector_property, + nv_encoder->dcb->type == OUTPUT_TMDS ? 
+ DRM_MODE_SUBCONNECTOR_DVID : + DRM_MODE_SUBCONNECTOR_DVIA); + } +} + +static enum drm_connector_status +nouveau_connector_detect(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct nouveau_encoder *nv_encoder = NULL; + struct nouveau_i2c_chan *i2c; + int type, flags; + + if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) + nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS); + if (nv_encoder && nv_connector->native_mode) { + nouveau_connector_set_encoder(connector, nv_encoder); + return connector_status_connected; + } + + i2c = nouveau_connector_ddc_detect(connector, &nv_encoder); + if (i2c) { + nouveau_connector_ddc_prepare(connector, &flags); + nv_connector->edid = drm_get_edid(connector, &i2c->adapter); + nouveau_connector_ddc_finish(connector, flags); + drm_mode_connector_update_edid_property(connector, + nv_connector->edid); + if (!nv_connector->edid) { + NV_ERROR(dev, "DDC responded, but no EDID for %s\n", + drm_get_connector_name(connector)); + return connector_status_disconnected; + } + + if (nv_encoder->dcb->type == OUTPUT_DP && + !nouveau_dp_detect(to_drm_encoder(nv_encoder))) { + NV_ERROR(dev, "Detected %s, but failed init\n", + drm_get_connector_name(connector)); + return connector_status_disconnected; + } + + /* Override encoder type for DVI-I based on whether EDID + * says the display is digital or analog, both use the + * same i2c channel so the value returned from ddc_detect + * isn't necessarily correct. + */ + if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) { + if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL) + type = OUTPUT_TMDS; + else + type = OUTPUT_ANALOG; + + nv_encoder = find_encoder_by_type(connector, type); + if (!nv_encoder) { + NV_ERROR(dev, "Detected %d encoder on %s, " + "but no object!\n", type, + drm_get_connector_name(connector)); + return connector_status_disconnected; + } + } + + nouveau_connector_set_encoder(connector, nv_encoder); + return connector_status_connected; + } + + nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG); + if (!nv_encoder) + nv_encoder = find_encoder_by_type(connector, OUTPUT_TV); + if (nv_encoder) { + struct drm_encoder *encoder = to_drm_encoder(nv_encoder); + struct drm_encoder_helper_funcs *helper = + encoder->helper_private; + + if (helper->detect(encoder, connector) == + connector_status_connected) { + nouveau_connector_set_encoder(connector, nv_encoder); + return connector_status_connected; + } + + } + + return connector_status_disconnected; +} + +static void +nouveau_connector_force(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct nouveau_encoder *nv_encoder; + int type; + + if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) { + if (connector->force == DRM_FORCE_ON_DIGITAL) + type = OUTPUT_TMDS; + else + type = OUTPUT_ANALOG; + } else + type = OUTPUT_ANY; + + nv_encoder = find_encoder_by_type(connector, type); + if (!nv_encoder) { + NV_ERROR(dev, "can't find encoder to force %s on!\n", + drm_get_connector_name(connector)); + connector->status = connector_status_disconnected; + return; + } + + nouveau_connector_set_encoder(connector, nv_encoder); +} + +static int +nouveau_connector_set_property(struct drm_connector *connector, + struct drm_property *property, uint64_t value) +{ + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder; + struct 
drm_device *dev = connector->dev; + int ret; + + /* Scaling mode */ + if (property == dev->mode_config.scaling_mode_property) { + struct nouveau_crtc *nv_crtc = NULL; + bool modeset = false; + + switch (value) { + case DRM_MODE_SCALE_NONE: + case DRM_MODE_SCALE_FULLSCREEN: + case DRM_MODE_SCALE_CENTER: + case DRM_MODE_SCALE_ASPECT: + break; + default: + return -EINVAL; + } + + /* LVDS always needs gpu scaling */ + if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS && + value == DRM_MODE_SCALE_NONE) + return -EINVAL; + + /* Changing between GPU and panel scaling requires a full + * modeset + */ + if ((nv_connector->scaling_mode == DRM_MODE_SCALE_NONE) || + (value == DRM_MODE_SCALE_NONE)) + modeset = true; + nv_connector->scaling_mode = value; + + if (connector->encoder && connector->encoder->crtc) + nv_crtc = nouveau_crtc(connector->encoder->crtc); + if (!nv_crtc) + return 0; + + if (modeset || !nv_crtc->set_scale) { + ret = drm_crtc_helper_set_mode(&nv_crtc->base, + &nv_crtc->base.mode, + nv_crtc->base.x, + nv_crtc->base.y, NULL); + if (!ret) + return -EINVAL; + } else { + ret = nv_crtc->set_scale(nv_crtc, value, true); + if (ret) + return ret; + } + + return 0; + } + + /* Dithering */ + if (property == dev->mode_config.dithering_mode_property) { + struct nouveau_crtc *nv_crtc = NULL; + + if (value == DRM_MODE_DITHERING_ON) + nv_connector->use_dithering = true; + else + nv_connector->use_dithering = false; + + if (connector->encoder && connector->encoder->crtc) + nv_crtc = nouveau_crtc(connector->encoder->crtc); + + if (!nv_crtc || !nv_crtc->set_dither) + return 0; + + return nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering, + true); + } + + if (nv_encoder && nv_encoder->dcb->type == OUTPUT_TV) + return get_slave_funcs(nv_encoder)-> + set_property(to_drm_encoder(nv_encoder), connector, property, value); + + return -EINVAL; +} + +static struct drm_display_mode * +nouveau_connector_native_mode(struct nouveau_connector *connector) +{ + struct drm_device *dev = connector->base.dev; + struct drm_display_mode *mode, *largest = NULL; + int high_w = 0, high_h = 0, high_v = 0; + + /* Use preferred mode if there is one.. */ + list_for_each_entry(mode, &connector->base.probed_modes, head) { + if (mode->type & DRM_MODE_TYPE_PREFERRED) { + NV_DEBUG(dev, "native mode from preferred\n"); + return drm_mode_duplicate(dev, mode); + } + } + + /* Otherwise, take the resolution with the largest width, then height, + * then vertical refresh + */ + list_for_each_entry(mode, &connector->base.probed_modes, head) { + if (mode->hdisplay < high_w) + continue; + + if (mode->hdisplay == high_w && mode->vdisplay < high_h) + continue; + + if (mode->hdisplay == high_w && mode->vdisplay == high_h && + mode->vrefresh < high_v) + continue; + + high_w = mode->hdisplay; + high_h = mode->vdisplay; + high_v = mode->vrefresh; + largest = mode; + } + + NV_DEBUG(dev, "native mode from largest: %dx%d@%d\n", + high_w, high_h, high_v); + return largest ? 
drm_mode_duplicate(dev, largest) : NULL; +} + +struct moderec { + int hdisplay; + int vdisplay; +}; + +static struct moderec scaler_modes[] = { + { 1920, 1200 }, + { 1920, 1080 }, + { 1680, 1050 }, + { 1600, 1200 }, + { 1400, 1050 }, + { 1280, 1024 }, + { 1280, 960 }, + { 1152, 864 }, + { 1024, 768 }, + { 800, 600 }, + { 720, 400 }, + { 640, 480 }, + { 640, 400 }, + { 640, 350 }, + {} +}; + +static int +nouveau_connector_scaler_modes_add(struct drm_connector *connector) +{ + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct drm_display_mode *native = nv_connector->native_mode, *m; + struct drm_device *dev = connector->dev; + struct moderec *mode = &scaler_modes[0]; + int modes = 0; + + if (!native) + return 0; + + while (mode->hdisplay) { + if (mode->hdisplay <= native->hdisplay && + mode->vdisplay <= native->vdisplay) { + m = drm_cvt_mode(dev, mode->hdisplay, mode->vdisplay, + drm_mode_vrefresh(native), false, + false, false); + if (!m) + continue; + + m->type |= DRM_MODE_TYPE_DRIVER; + + drm_mode_probed_add(connector, m); + modes++; + } + + mode++; + } + + return modes; +} + +static int +nouveau_connector_get_modes(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder; + int ret = 0; + + /* If we're not LVDS, destroy the previous native mode, the attached + * monitor could have changed. + */ + if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && + nv_connector->native_mode) { + drm_mode_destroy(dev, nv_connector->native_mode); + nv_connector->native_mode = NULL; + } + + if (nv_connector->edid) + ret = drm_add_edid_modes(connector, nv_connector->edid); + + /* Find the native mode if this is a digital panel, if we didn't + * find any modes through DDC previously add the native mode to + * the list of modes. 
+ */ + if (!nv_connector->native_mode) + nv_connector->native_mode = + nouveau_connector_native_mode(nv_connector); + if (ret == 0 && nv_connector->native_mode) { + struct drm_display_mode *mode; + + mode = drm_mode_duplicate(dev, nv_connector->native_mode); + drm_mode_probed_add(connector, mode); + ret = 1; + } + + if (nv_encoder->dcb->type == OUTPUT_TV) + ret = get_slave_funcs(nv_encoder)-> + get_modes(to_drm_encoder(nv_encoder), connector); + + if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) + ret += nouveau_connector_scaler_modes_add(connector); + + return ret; +} + +static int +nouveau_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct drm_nouveau_private *dev_priv = connector->dev->dev_private; + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder; + unsigned min_clock = 25000, max_clock = min_clock; + unsigned clock = mode->clock; + + switch (nv_encoder->dcb->type) { + case OUTPUT_LVDS: + BUG_ON(!nv_connector->native_mode); + if (mode->hdisplay > nv_connector->native_mode->hdisplay || + mode->vdisplay > nv_connector->native_mode->vdisplay) + return MODE_PANEL; + + min_clock = 0; + max_clock = 400000; + break; + case OUTPUT_TMDS: + if ((dev_priv->card_type >= NV_50 && !nouveau_duallink) || + (dev_priv->card_type < NV_50 && + !nv_encoder->dcb->duallink_possible)) + max_clock = 165000; + else + max_clock = 330000; + break; + case OUTPUT_ANALOG: + max_clock = nv_encoder->dcb->crtconf.maxfreq; + if (!max_clock) + max_clock = 350000; + break; + case OUTPUT_TV: + return get_slave_funcs(nv_encoder)-> + mode_valid(to_drm_encoder(nv_encoder), mode); + case OUTPUT_DP: + if (nv_encoder->dp.link_bw == DP_LINK_BW_2_7) + max_clock = nv_encoder->dp.link_nr * 270000; + else + max_clock = nv_encoder->dp.link_nr * 162000; + + clock *= 3; + break; + } + + if (clock < min_clock) + return MODE_CLOCK_LOW; + + if (clock > max_clock) + return MODE_CLOCK_HIGH; + + return MODE_OK; +} + +static struct drm_encoder * +nouveau_connector_best_encoder(struct drm_connector *connector) +{ + struct nouveau_connector *nv_connector = nouveau_connector(connector); + + if (nv_connector->detected_encoder) + return to_drm_encoder(nv_connector->detected_encoder); + + return NULL; +} + +static const struct drm_connector_helper_funcs +nouveau_connector_helper_funcs = { + .get_modes = nouveau_connector_get_modes, + .mode_valid = nouveau_connector_mode_valid, + .best_encoder = nouveau_connector_best_encoder, +}; + +static const struct drm_connector_funcs +nouveau_connector_funcs = { + .dpms = drm_helper_connector_dpms, + .save = NULL, + .restore = NULL, + .detect = nouveau_connector_detect, + .destroy = nouveau_connector_destroy, + .fill_modes = drm_helper_probe_single_connector_modes, + .set_property = nouveau_connector_set_property, + .force = nouveau_connector_force +}; + +static int +nouveau_connector_create_lvds(struct drm_device *dev, + struct drm_connector *connector) +{ + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_i2c_chan *i2c = NULL; + struct nouveau_encoder *nv_encoder; + struct drm_display_mode native, *mode, *temp; + bool dummy, if_is_24bit = false; + int ret, flags; + + nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS); + if (!nv_encoder) + return -ENODEV; + + ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &if_is_24bit); + if (ret) { + NV_ERROR(dev, "Error 
parsing LVDS table, disabling LVDS\n"); + return ret; + } + nv_connector->use_dithering = !if_is_24bit; + + /* Firstly try getting EDID over DDC, if allowed and I2C channel + * is available. + */ + if (!dev_priv->VBIOS.pub.fp_no_ddc && nv_encoder->dcb->i2c_index < 0xf) + i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); + + if (i2c) { + nouveau_connector_ddc_prepare(connector, &flags); + nv_connector->edid = drm_get_edid(connector, &i2c->adapter); + nouveau_connector_ddc_finish(connector, flags); + } + + /* If no EDID found above, and the VBIOS indicates a hardcoded + * modeline is available for the panel, set it as the panel's + * native mode and exit. + */ + if (!nv_connector->edid && nouveau_bios_fp_mode(dev, &native) && + (nv_encoder->dcb->lvdsconf.use_straps_for_mode || + dev_priv->VBIOS.pub.fp_no_ddc)) { + nv_connector->native_mode = drm_mode_duplicate(dev, &native); + goto out; + } + + /* Still nothing; some VBIOS images have a hardcoded EDID block + * for the panel stored in them. + */ + if (!nv_connector->edid && !nv_connector->native_mode && + !dev_priv->VBIOS.pub.fp_no_ddc) { + nv_connector->edid = + (struct edid *)nouveau_bios_embedded_edid(dev); + } + + if (!nv_connector->edid) + goto out; + + /* We didn't find/use a panel mode from the VBIOS, so parse the EDID + * block and look for the preferred mode there. + */ + ret = drm_add_edid_modes(connector, nv_connector->edid); + if (ret == 0) + goto out; + nv_connector->detected_encoder = nv_encoder; + nv_connector->native_mode = nouveau_connector_native_mode(nv_connector); + list_for_each_entry_safe(mode, temp, &connector->probed_modes, head) + drm_mode_remove(connector, mode); + +out: + if (!nv_connector->native_mode) { + NV_ERROR(dev, "LVDS present in DCB table, but couldn't " + "determine its native mode. 
Disabling.\n"); + return -ENODEV; + } + + drm_mode_connector_update_edid_property(connector, nv_connector->edid); + return 0; +} + +int +nouveau_connector_create(struct drm_device *dev, int index, int type) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_connector *nv_connector = NULL; + struct drm_connector *connector; + struct drm_encoder *encoder; + int ret; + + NV_DEBUG(dev, "\n"); + + nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL); + if (!nv_connector) + return -ENOMEM; + nv_connector->dcb = nouveau_bios_connector_entry(dev, index); + connector = &nv_connector->base; + + switch (type) { + case DRM_MODE_CONNECTOR_VGA: + NV_INFO(dev, "Detected a VGA connector\n"); + break; + case DRM_MODE_CONNECTOR_DVID: + NV_INFO(dev, "Detected a DVI-D connector\n"); + break; + case DRM_MODE_CONNECTOR_DVII: + NV_INFO(dev, "Detected a DVI-I connector\n"); + break; + case DRM_MODE_CONNECTOR_LVDS: + NV_INFO(dev, "Detected a LVDS connector\n"); + break; + case DRM_MODE_CONNECTOR_TV: + NV_INFO(dev, "Detected a TV connector\n"); + break; + case DRM_MODE_CONNECTOR_DisplayPort: + NV_INFO(dev, "Detected a DisplayPort connector\n"); + break; + default: + NV_ERROR(dev, "Unknown connector, this is not good.\n"); + break; + } + + /* defaults, will get overridden in detect() */ + connector->interlace_allowed = false; + connector->doublescan_allowed = false; + + drm_connector_init(dev, connector, &nouveau_connector_funcs, type); + drm_connector_helper_add(connector, &nouveau_connector_helper_funcs); + + /* Init DVI-I specific properties */ + if (type == DRM_MODE_CONNECTOR_DVII) { + drm_mode_create_dvi_i_properties(dev); + drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0); + drm_connector_attach_property(connector, dev->mode_config.dvi_i_select_subconnector_property, 0); + } + + if (type != DRM_MODE_CONNECTOR_LVDS) + nv_connector->use_dithering = false; + + if (type == DRM_MODE_CONNECTOR_DVID || + type == DRM_MODE_CONNECTOR_DVII || + type == DRM_MODE_CONNECTOR_LVDS || + type == DRM_MODE_CONNECTOR_DisplayPort) { + nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN; + + drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, + nv_connector->scaling_mode); + drm_connector_attach_property(connector, dev->mode_config.dithering_mode_property, + nv_connector->use_dithering ? 
DRM_MODE_DITHERING_ON + : DRM_MODE_DITHERING_OFF); + + } else { + nv_connector->scaling_mode = DRM_MODE_SCALE_NONE; + + if (type == DRM_MODE_CONNECTOR_VGA && + dev_priv->card_type >= NV_50) { + drm_connector_attach_property(connector, + dev->mode_config.scaling_mode_property, + nv_connector->scaling_mode); + } + } + + /* attach encoders */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + + if (nv_encoder->dcb->connector != index) + continue; + + if (get_slave_funcs(nv_encoder)) + get_slave_funcs(nv_encoder)->create_resources(encoder, connector); + + drm_mode_connector_attach_encoder(connector, encoder); + } + + drm_sysfs_connector_add(connector); + + if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { + ret = nouveau_connector_create_lvds(dev, connector); + if (ret) { + connector->funcs->destroy(connector); + return ret; + } + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h new file mode 100644 index 000000000000..728b8090e5ff --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_connector.h @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2008 Maarten Maathuis. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __NOUVEAU_CONNECTOR_H__ +#define __NOUVEAU_CONNECTOR_H__ + +#include "drm_edid.h" +#include "nouveau_i2c.h" + +struct nouveau_connector { + struct drm_connector base; + + struct dcb_connector_table_entry *dcb; + + int scaling_mode; + bool use_dithering; + + struct nouveau_encoder *detected_encoder; + struct edid *edid; + struct drm_display_mode *native_mode; +}; + +static inline struct nouveau_connector *nouveau_connector( + struct drm_connector *con) +{ + return container_of(con, struct nouveau_connector, base); +} + +int nouveau_connector_create(struct drm_device *dev, int i2c_index, int type); + +#endif /* __NOUVEAU_CONNECTOR_H__ */ diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h new file mode 100644 index 000000000000..49fa7b2d257e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h @@ -0,0 +1,95 @@ +/* + * Copyright (C) 2008 Maarten Maathuis. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __NOUVEAU_CRTC_H__ +#define __NOUVEAU_CRTC_H__ + +struct nouveau_crtc { + struct drm_crtc base; + + int index; + + struct drm_display_mode *mode; + + uint32_t dpms_saved_fp_control; + uint32_t fp_users; + int saturation; + int sharpness; + int last_dpms; + + struct { + int cpp; + bool blanked; + uint32_t offset; + uint32_t tile_flags; + } fb; + + struct { + struct nouveau_bo *nvbo; + bool visible; + uint32_t offset; + void (*set_offset)(struct nouveau_crtc *, uint32_t offset); + void (*set_pos)(struct nouveau_crtc *, int x, int y); + void (*hide)(struct nouveau_crtc *, bool update); + void (*show)(struct nouveau_crtc *, bool update); + } cursor; + + struct { + struct nouveau_bo *nvbo; + uint16_t r[256]; + uint16_t g[256]; + uint16_t b[256]; + int depth; + } lut; + + int (*set_dither)(struct nouveau_crtc *crtc, bool on, bool update); + int (*set_scale)(struct nouveau_crtc *crtc, int mode, bool update); +}; + +static inline struct nouveau_crtc *nouveau_crtc(struct drm_crtc *crtc) +{ + return container_of(crtc, struct nouveau_crtc, base); +} + +static inline struct drm_crtc *to_drm_crtc(struct nouveau_crtc *crtc) +{ + return &crtc->base; +} + +int nv50_crtc_create(struct drm_device *dev, int index); +int nv50_cursor_init(struct nouveau_crtc *); +void nv50_cursor_fini(struct nouveau_crtc *); +int nv50_crtc_cursor_set(struct drm_crtc *drm_crtc, struct drm_file *file_priv, + uint32_t buffer_handle, uint32_t width, + uint32_t height); +int nv50_crtc_cursor_move(struct drm_crtc *drm_crtc, int x, int y); + +int nv04_cursor_init(struct nouveau_crtc *); + +struct nouveau_connector * +nouveau_crtc_connector_get(struct nouveau_crtc *crtc); + +#endif /* __NOUVEAU_CRTC_H__ */ diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c new file mode 100644 index 000000000000..d79db3698f16 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c @@ -0,0 +1,155 @@ +/* + * Copyright (C) 2009 Red Hat + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject 
to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +/* + * Authors: + * Ben Skeggs + */ + +#include + +#include "drmP.h" +#include "nouveau_drv.h" + +static int +nouveau_debugfs_channel_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct nouveau_channel *chan = node->info_ent->data; + + seq_printf(m, "channel id : %d\n", chan->id); + + seq_printf(m, "cpu fifo state:\n"); + seq_printf(m, " base: 0x%08x\n", chan->pushbuf_base); + seq_printf(m, " max: 0x%08x\n", chan->dma.max << 2); + seq_printf(m, " cur: 0x%08x\n", chan->dma.cur << 2); + seq_printf(m, " put: 0x%08x\n", chan->dma.put << 2); + seq_printf(m, " free: 0x%08x\n", chan->dma.free << 2); + + seq_printf(m, "gpu fifo state:\n"); + seq_printf(m, " get: 0x%08x\n", + nvchan_rd32(chan, chan->user_get)); + seq_printf(m, " put: 0x%08x\n", + nvchan_rd32(chan, chan->user_put)); + + seq_printf(m, "last fence : %d\n", chan->fence.sequence); + seq_printf(m, "last signalled: %d\n", chan->fence.sequence_ack); + return 0; +} + +int +nouveau_debugfs_channel_init(struct nouveau_channel *chan) +{ + struct drm_nouveau_private *dev_priv = chan->dev->dev_private; + struct drm_minor *minor = chan->dev->primary; + int ret; + + if (!dev_priv->debugfs.channel_root) { + dev_priv->debugfs.channel_root = + debugfs_create_dir("channel", minor->debugfs_root); + if (!dev_priv->debugfs.channel_root) + return -ENOENT; + } + + snprintf(chan->debugfs.name, 32, "%d", chan->id); + chan->debugfs.info.name = chan->debugfs.name; + chan->debugfs.info.show = nouveau_debugfs_channel_info; + chan->debugfs.info.driver_features = 0; + chan->debugfs.info.data = chan; + + ret = drm_debugfs_create_files(&chan->debugfs.info, 1, + dev_priv->debugfs.channel_root, + chan->dev->primary); + if (ret == 0) + chan->debugfs.active = true; + return ret; +} + +void +nouveau_debugfs_channel_fini(struct nouveau_channel *chan) +{ + struct drm_nouveau_private *dev_priv = chan->dev->dev_private; + + if (!chan->debugfs.active) + return; + + drm_debugfs_remove_files(&chan->debugfs.info, 1, chan->dev->primary); + chan->debugfs.active = false; + + if (chan == dev_priv->channel) { + debugfs_remove(dev_priv->debugfs.channel_root); + dev_priv->debugfs.channel_root = NULL; + } +} + +static int +nouveau_debugfs_chipset_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_minor *minor = node->minor; + struct drm_device *dev = minor->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t ppci_0; + + ppci_0 = nv_rd32(dev, dev_priv->chipset >= 0x40 ? 
0x88000 : 0x1800); + + seq_printf(m, "PMC_BOOT_0: 0x%08x\n", nv_rd32(dev, NV03_PMC_BOOT_0)); + seq_printf(m, "PCI ID : 0x%04x:0x%04x\n", + ppci_0 & 0xffff, ppci_0 >> 16); + return 0; +} + +static int +nouveau_debugfs_memory_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_minor *minor = node->minor; + struct drm_device *dev = minor->dev; + + seq_printf(m, "VRAM total: %dKiB\n", + (int)(nouveau_mem_fb_amount(dev) >> 10)); + return 0; +} + +static struct drm_info_list nouveau_debugfs_list[] = { + { "chipset", nouveau_debugfs_chipset_info, 0, NULL }, + { "memory", nouveau_debugfs_memory_info, 0, NULL }, +}; +#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list) + +int +nouveau_debugfs_init(struct drm_minor *minor) +{ + drm_debugfs_create_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES, + minor->debugfs_root, minor); + return 0; +} + +void +nouveau_debugfs_takedown(struct drm_minor *minor) +{ + drm_debugfs_remove_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES, + minor); +} diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c new file mode 100644 index 000000000000..dfc94391d71e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -0,0 +1,115 @@ +/* + * Copyright (C) 2008 Maarten Maathuis. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "drmP.h" +#include "drm_crtc_helper.h" +#include "nouveau_drv.h" +#include "nouveau_fb.h" +#include "nouveau_fbcon.h" + +static void +nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb) +{ + struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); + struct drm_device *dev = drm_fb->dev; + + if (drm_fb->fbdev) + nouveau_fbcon_remove(dev, drm_fb); + + if (fb->nvbo) { + mutex_lock(&dev->struct_mutex); + drm_gem_object_unreference(fb->nvbo->gem); + mutex_unlock(&dev->struct_mutex); + } + + drm_framebuffer_cleanup(drm_fb); + kfree(fb); +} + +static int +nouveau_user_framebuffer_create_handle(struct drm_framebuffer *drm_fb, + struct drm_file *file_priv, + unsigned int *handle) +{ + struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); + + return drm_gem_handle_create(file_priv, fb->nvbo->gem, handle); +} + +static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = { + .destroy = nouveau_user_framebuffer_destroy, + .create_handle = nouveau_user_framebuffer_create_handle, +}; + +struct drm_framebuffer * +nouveau_framebuffer_create(struct drm_device *dev, struct nouveau_bo *nvbo, + struct drm_mode_fb_cmd *mode_cmd) +{ + struct nouveau_framebuffer *fb; + int ret; + + fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL); + if (!fb) + return NULL; + + ret = drm_framebuffer_init(dev, &fb->base, &nouveau_framebuffer_funcs); + if (ret) { + kfree(fb); + return NULL; + } + + drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd); + + fb->nvbo = nvbo; + return &fb->base; +} + +static struct drm_framebuffer * +nouveau_user_framebuffer_create(struct drm_device *dev, + struct drm_file *file_priv, + struct drm_mode_fb_cmd *mode_cmd) +{ + struct drm_framebuffer *fb; + struct drm_gem_object *gem; + + gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle); + if (!gem) + return NULL; + + fb = nouveau_framebuffer_create(dev, nouveau_gem_object(gem), mode_cmd); + if (!fb) { + drm_gem_object_unreference(gem); + return NULL; + } + + return fb; +} + +const struct drm_mode_config_funcs nouveau_mode_config_funcs = { + .fb_create = nouveau_user_framebuffer_create, + .fb_changed = nouveau_fbcon_probe, +}; + diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c new file mode 100644 index 000000000000..703553687b20 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_dma.c @@ -0,0 +1,206 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" +#include "nouveau_dma.h" + +int +nouveau_dma_init(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *m2mf = NULL; + int ret, i; + + /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */ + ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ? + 0x0039 : 0x5039, &m2mf); + if (ret) + return ret; + + ret = nouveau_gpuobj_ref_add(dev, chan, NvM2MF, m2mf, NULL); + if (ret) + return ret; + + /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */ + ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy); + if (ret) + return ret; + + /* Map push buffer */ + ret = nouveau_bo_map(chan->pushbuf_bo); + if (ret) + return ret; + + /* Map M2MF notifier object - fbcon. */ + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + ret = nouveau_bo_map(chan->notifier_bo); + if (ret) + return ret; + } + + /* Initialise DMA vars */ + chan->dma.max = (chan->pushbuf_bo->bo.mem.size >> 2) - 2; + chan->dma.put = 0; + chan->dma.cur = chan->dma.put; + chan->dma.free = chan->dma.max - chan->dma.cur; + + /* Insert NOPS for NOUVEAU_DMA_SKIPS */ + ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS); + if (ret) + return ret; + + for (i = 0; i < NOUVEAU_DMA_SKIPS; i++) + OUT_RING(chan, 0); + + /* Initialise NV_MEMORY_TO_MEMORY_FORMAT */ + ret = RING_SPACE(chan, 4); + if (ret) + return ret; + BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1); + OUT_RING(chan, NvM2MF); + BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1); + OUT_RING(chan, NvNotify0); + + /* Sit back and pray the channel works.. */ + FIRE_RING(chan); + + return 0; +} + +void +OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords) +{ + bool is_iomem; + u32 *mem = ttm_kmap_obj_virtual(&chan->pushbuf_bo->kmap, &is_iomem); + mem = &mem[chan->dma.cur]; + if (is_iomem) + memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4); + else + memcpy(mem, data, nr_dwords * 4); + chan->dma.cur += nr_dwords; +} + +static inline bool +READ_GET(struct nouveau_channel *chan, uint32_t *get) +{ + uint32_t val; + + val = nvchan_rd32(chan, chan->user_get); + if (val < chan->pushbuf_base || + val >= chan->pushbuf_base + chan->pushbuf_bo->bo.mem.size) { + /* meaningless to dma_wait() except to know whether the + * GPU has stalled or not + */ + *get = val; + return false; + } + + *get = (val - chan->pushbuf_base) >> 2; + return true; +} + +int +nouveau_dma_wait(struct nouveau_channel *chan, int size) +{ + uint32_t get, prev_get = 0, cnt = 0; + bool get_valid; + + while (chan->dma.free < size) { + /* reset counter as long as GET is still advancing, this is + * to avoid misdetecting a GPU lockup if the GPU happens to + * just be processing an operation that takes a long time + */ + get_valid = READ_GET(chan, &get); + if (get != prev_get) { + prev_get = get; + cnt = 0; + } + + if ((++cnt & 0xff) == 0) { + DRM_UDELAY(1); + if (cnt > 100000) + return -EBUSY; + } + + /* loop until we have a usable GET pointer. 
the value + * we read from the GPU may be outside the main ring if + * PFIFO is processing a buffer called from the main ring, + * discard these values until something sensible is seen. + * + * the other case we discard GET is while the GPU is fetching + * from the SKIPS area, so the code below doesn't have to deal + * with some fun corner cases. + */ + if (!get_valid || get < NOUVEAU_DMA_SKIPS) + continue; + + if (get <= chan->dma.cur) { + /* engine is fetching behind us, or is completely + * idle (GET == PUT) so we have free space up until + * the end of the push buffer + * + * we can only hit that path once per call due to + * looping back to the beginning of the push buffer, + * we'll hit the fetching-ahead-of-us path from that + * point on. + * + * the *one* exception to that rule is if we read + * GET==PUT, in which case the below conditional will + * always succeed and break us out of the wait loop. + */ + chan->dma.free = chan->dma.max - chan->dma.cur; + if (chan->dma.free >= size) + break; + + /* not enough space left at the end of the push buffer, + * instruct the GPU to jump back to the start right + * after processing the currently pending commands. + */ + OUT_RING(chan, chan->pushbuf_base | 0x20000000); + WRITE_PUT(NOUVEAU_DMA_SKIPS); + + /* we're now submitting commands at the start of + * the push buffer. + */ + chan->dma.cur = + chan->dma.put = NOUVEAU_DMA_SKIPS; + } + + /* engine fetching ahead of us, we have space up until the + * current GET pointer. the "- 1" is to ensure there's + * space left to emit a jump back to the beginning of the + * push buffer if we require it. we can never get GET == PUT + * here, so this is safe. + */ + chan->dma.free = get - chan->dma.cur - 1; + } + + return 0; +} + diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h new file mode 100644 index 000000000000..04e85d8f757e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_dma.h @@ -0,0 +1,157 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __NOUVEAU_DMA_H__ +#define __NOUVEAU_DMA_H__ + +#ifndef NOUVEAU_DMA_DEBUG +#define NOUVEAU_DMA_DEBUG 0 +#endif + +/* + * There's a hw race condition where you can't jump to your PUT offset, + * to avoid this we jump to offset + SKIPS and fill the difference with + * NOPs. 
+ * + * xf86-video-nv configures the DMA fetch size to 32 bytes, and uses + * a SKIPS value of 8. Lets assume that the race condition is to do + * with writing into the fetch area, we configure a fetch size of 128 + * bytes so we need a larger SKIPS value. + */ +#define NOUVEAU_DMA_SKIPS (128 / 4) + +/* Hardcoded object assignments to subchannels (subchannel id). */ +enum { + NvSubM2MF = 0, + NvSub2D = 1, + NvSubCtxSurf2D = 1, + NvSubGdiRect = 2, + NvSubImageBlit = 3 +}; + +/* Object handles. */ +enum { + NvM2MF = 0x80000001, + NvDmaFB = 0x80000002, + NvDmaTT = 0x80000003, + NvDmaVRAM = 0x80000004, + NvDmaGART = 0x80000005, + NvNotify0 = 0x80000006, + Nv2D = 0x80000007, + NvCtxSurf2D = 0x80000008, + NvRop = 0x80000009, + NvImagePatt = 0x8000000a, + NvClipRect = 0x8000000b, + NvGdiRect = 0x8000000c, + NvImageBlit = 0x8000000d, + + /* G80+ display objects */ + NvEvoVRAM = 0x01000000, + NvEvoFB16 = 0x01000001, + NvEvoFB32 = 0x01000002 +}; + +#define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039 +#define NV_MEMORY_TO_MEMORY_FORMAT_NAME 0x00000000 +#define NV_MEMORY_TO_MEMORY_FORMAT_SET_REF 0x00000050 +#define NV_MEMORY_TO_MEMORY_FORMAT_NOP 0x00000100 +#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY 0x00000104 +#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE 0x00000000 +#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE_LE_AWAKEN 0x00000001 +#define NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY 0x00000180 +#define NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE 0x00000184 +#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN 0x0000030c + +#define NV50_MEMORY_TO_MEMORY_FORMAT 0x00005039 +#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK200 0x00000200 +#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK21C 0x0000021c +#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN_HIGH 0x00000238 +#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_OUT_HIGH 0x0000023c + +static __must_check inline int +RING_SPACE(struct nouveau_channel *chan, int size) +{ + if (chan->dma.free < size) { + int ret; + + ret = nouveau_dma_wait(chan, size); + if (ret) + return ret; + } + + chan->dma.free -= size; + return 0; +} + +static inline void +OUT_RING(struct nouveau_channel *chan, int data) +{ + if (NOUVEAU_DMA_DEBUG) { + NV_INFO(chan->dev, "Ch%d/0x%08x: 0x%08x\n", + chan->id, chan->dma.cur << 2, data); + } + + nouveau_bo_wr32(chan->pushbuf_bo, chan->dma.cur++, data); +} + +extern void +OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords); + +static inline void +BEGIN_RING(struct nouveau_channel *chan, int subc, int mthd, int size) +{ + OUT_RING(chan, (subc << 13) | (size << 18) | mthd); +} + +#define WRITE_PUT(val) do { \ + DRM_MEMORYBARRIER(); \ + nouveau_bo_rd32(chan->pushbuf_bo, 0); \ + nvchan_wr32(chan, chan->user_put, ((val) << 2) + chan->pushbuf_base); \ +} while (0) + +static inline void +FIRE_RING(struct nouveau_channel *chan) +{ + if (NOUVEAU_DMA_DEBUG) { + NV_INFO(chan->dev, "Ch%d/0x%08x: PUSH!\n", + chan->id, chan->dma.cur << 2); + } + + if (chan->dma.cur == chan->dma.put) + return; + chan->accel_done = true; + + WRITE_PUT(chan->dma.cur); + chan->dma.put = chan->dma.cur; +} + +static inline void +WIND_RING(struct nouveau_channel *chan) +{ + chan->dma.cur = chan->dma.put; +} + +#endif diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c new file mode 100644 index 000000000000..de61f4640e12 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_dp.c @@ -0,0 +1,569 @@ +/* + * Copyright 2009 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "drmP.h" +#include "nouveau_drv.h" +#include "nouveau_i2c.h" +#include "nouveau_encoder.h" + +static int +auxch_rd(struct drm_encoder *encoder, int address, uint8_t *buf, int size) +{ + struct drm_device *dev = encoder->dev; + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_i2c_chan *auxch; + int ret; + + auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); + if (!auxch) + return -ENODEV; + + ret = nouveau_dp_auxch(auxch, 9, address, buf, size); + if (ret) + return ret; + + return 0; +} + +static int +auxch_wr(struct drm_encoder *encoder, int address, uint8_t *buf, int size) +{ + struct drm_device *dev = encoder->dev; + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_i2c_chan *auxch; + int ret; + + auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); + if (!auxch) + return -ENODEV; + + ret = nouveau_dp_auxch(auxch, 8, address, buf, size); + return ret; +} + +static int +nouveau_dp_lane_count_set(struct drm_encoder *encoder, uint8_t cmd) +{ + struct drm_device *dev = encoder->dev; + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + uint32_t tmp; + int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1); + + tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)); + tmp &= ~(NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED | + NV50_SOR_DP_CTRL_LANE_MASK); + tmp |= ((1 << (cmd & DP_LANE_COUNT_MASK)) - 1) << 16; + if (cmd & DP_LANE_COUNT_ENHANCED_FRAME_EN) + tmp |= NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED; + nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp); + + return auxch_wr(encoder, DP_LANE_COUNT_SET, &cmd, 1); +} + +static int +nouveau_dp_link_bw_set(struct drm_encoder *encoder, uint8_t cmd) +{ + struct drm_device *dev = encoder->dev; + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + uint32_t tmp; + int reg = 0x614300 + (nv_encoder->or * 0x800); + + tmp = nv_rd32(dev, reg); + tmp &= 0xfff3ffff; + if (cmd == DP_LINK_BW_2_7) + tmp |= 0x00040000; + nv_wr32(dev, reg, tmp); + + return auxch_wr(encoder, DP_LINK_BW_SET, &cmd, 1); +} + +static int +nouveau_dp_link_train_set(struct drm_encoder *encoder, int pattern) +{ + struct drm_device *dev = encoder->dev; + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + uint32_t tmp; + uint8_t cmd; + int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1); + int ret; + + tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, 
link)); + tmp &= ~NV50_SOR_DP_CTRL_TRAINING_PATTERN; + tmp |= (pattern << 24); + nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp); + + ret = auxch_rd(encoder, DP_TRAINING_PATTERN_SET, &cmd, 1); + if (ret) + return ret; + cmd &= ~DP_TRAINING_PATTERN_MASK; + cmd |= (pattern & DP_TRAINING_PATTERN_MASK); + return auxch_wr(encoder, DP_TRAINING_PATTERN_SET, &cmd, 1); +} + +static int +nouveau_dp_max_voltage_swing(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + struct bit_displayport_encoder_table_entry *dpse; + struct bit_displayport_encoder_table *dpe; + int i, dpe_headerlen, max_vs = 0; + + dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen); + if (!dpe) + return false; + dpse = (void *)((char *)dpe + dpe_headerlen); + + for (i = 0; i < dpe_headerlen; i++, dpse++) { + if (dpse->vs_level > max_vs) + max_vs = dpse->vs_level; + } + + return max_vs; +} + +static int +nouveau_dp_max_pre_emphasis(struct drm_encoder *encoder, int vs) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + struct bit_displayport_encoder_table_entry *dpse; + struct bit_displayport_encoder_table *dpe; + int i, dpe_headerlen, max_pre = 0; + + dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen); + if (!dpe) + return false; + dpse = (void *)((char *)dpe + dpe_headerlen); + + for (i = 0; i < dpe_headerlen; i++, dpse++) { + if (dpse->vs_level != vs) + continue; + + if (dpse->pre_level > max_pre) + max_pre = dpse->pre_level; + } + + return max_pre; +} + +static bool +nouveau_dp_link_train_adjust(struct drm_encoder *encoder, uint8_t *config) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + struct bit_displayport_encoder_table_entry *dpse; + struct bit_displayport_encoder_table *dpe; + int ret, i, dpe_headerlen, vs = 0, pre = 0; + uint8_t request[2]; + + dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen); + if (!dpe) + return false; + dpse = (void *)((char *)dpe + dpe_headerlen); + + ret = auxch_rd(encoder, DP_ADJUST_REQUEST_LANE0_1, request, 2); + if (ret) + return false; + + NV_DEBUG(dev, "\t\tadjust 0x%02x 0x%02x\n", request[0], request[1]); + + /* Keep all lanes at the same level.. */ + for (i = 0; i < nv_encoder->dp.link_nr; i++) { + int lane_req = (request[i >> 1] >> ((i & 1) << 2)) & 0xf; + int lane_vs = lane_req & 3; + int lane_pre = (lane_req >> 2) & 3; + + if (lane_vs > vs) + vs = lane_vs; + if (lane_pre > pre) + pre = lane_pre; + } + + if (vs >= nouveau_dp_max_voltage_swing(encoder)) { + vs = nouveau_dp_max_voltage_swing(encoder); + vs |= 4; + } + + if (pre >= nouveau_dp_max_pre_emphasis(encoder, vs & 3)) { + pre = nouveau_dp_max_pre_emphasis(encoder, vs & 3); + pre |= 4; + } + + /* Update the configuration for all lanes.. 
*/ + for (i = 0; i < nv_encoder->dp.link_nr; i++) + config[i] = (pre << 3) | vs; + + return true; +} + +static bool +nouveau_dp_link_train_commit(struct drm_encoder *encoder, uint8_t *config) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + struct bit_displayport_encoder_table_entry *dpse; + struct bit_displayport_encoder_table *dpe; + int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1); + int dpe_headerlen, ret, i; + + NV_DEBUG(dev, "\t\tconfig 0x%02x 0x%02x 0x%02x 0x%02x\n", + config[0], config[1], config[2], config[3]); + + dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen); + if (!dpe) + return false; + dpse = (void *)((char *)dpe + dpe_headerlen); + + for (i = 0; i < dpe->record_nr; i++, dpse++) { + if (dpse->vs_level == (config[0] & 3) && + dpse->pre_level == ((config[0] >> 3) & 3)) + break; + } + BUG_ON(i == dpe->record_nr); + + for (i = 0; i < nv_encoder->dp.link_nr; i++) { + const int shift[4] = { 16, 8, 0, 24 }; + uint32_t mask = 0xff << shift[i]; + uint32_t reg0, reg1, reg2; + + reg0 = nv_rd32(dev, NV50_SOR_DP_UNK118(or, link)) & ~mask; + reg0 |= (dpse->reg0 << shift[i]); + reg1 = nv_rd32(dev, NV50_SOR_DP_UNK120(or, link)) & ~mask; + reg1 |= (dpse->reg1 << shift[i]); + reg2 = nv_rd32(dev, NV50_SOR_DP_UNK130(or, link)) & 0xffff00ff; + reg2 |= (dpse->reg2 << 8); + nv_wr32(dev, NV50_SOR_DP_UNK118(or, link), reg0); + nv_wr32(dev, NV50_SOR_DP_UNK120(or, link), reg1); + nv_wr32(dev, NV50_SOR_DP_UNK130(or, link), reg2); + } + + ret = auxch_wr(encoder, DP_TRAINING_LANE0_SET, config, 4); + if (ret) + return false; + + return true; +} + +bool +nouveau_dp_link_train(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + uint8_t config[4]; + uint8_t status[3]; + bool cr_done, cr_max_vs, eq_done; + int ret = 0, i, tries, voltage; + + NV_DEBUG(dev, "link training!!\n"); +train: + cr_done = eq_done = false; + + /* set link configuration */ + NV_DEBUG(dev, "\tbegin train: bw %d, lanes %d\n", + nv_encoder->dp.link_bw, nv_encoder->dp.link_nr); + + ret = nouveau_dp_link_bw_set(encoder, nv_encoder->dp.link_bw); + if (ret) + return false; + + config[0] = nv_encoder->dp.link_nr; + if (nv_encoder->dp.dpcd_version >= 0x11) + config[0] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + + ret = nouveau_dp_lane_count_set(encoder, config[0]); + if (ret) + return false; + + /* clock recovery */ + NV_DEBUG(dev, "\tbegin cr\n"); + ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_1); + if (ret) + goto stop; + + tries = 0; + voltage = -1; + memset(config, 0x00, sizeof(config)); + for (;;) { + if (!nouveau_dp_link_train_commit(encoder, config)) + break; + + udelay(100); + + ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 2); + if (ret) + break; + NV_DEBUG(dev, "\t\tstatus: 0x%02x 0x%02x\n", + status[0], status[1]); + + cr_done = true; + cr_max_vs = false; + for (i = 0; i < nv_encoder->dp.link_nr; i++) { + int lane = (status[i >> 1] >> ((i & 1) * 4)) & 0xf; + + if (!(lane & DP_LANE_CR_DONE)) { + cr_done = false; + if (config[i] & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED) + cr_max_vs = true; + break; + } + } + + if ((config[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) { + voltage = config[0] & DP_TRAIN_VOLTAGE_SWING_MASK; + tries = 0; + } + + if (cr_done || cr_max_vs || (++tries == 5)) + break; + + if (!nouveau_dp_link_train_adjust(encoder, config)) + break; + } + + if (!cr_done) + goto stop; + + /* channel equalisation */ + NV_DEBUG(dev, 
"\tbegin eq\n"); + ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_2); + if (ret) + goto stop; + + for (tries = 0; tries <= 5; tries++) { + udelay(400); + + ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 3); + if (ret) + break; + NV_DEBUG(dev, "\t\tstatus: 0x%02x 0x%02x\n", + status[0], status[1]); + + eq_done = true; + if (!(status[2] & DP_INTERLANE_ALIGN_DONE)) + eq_done = false; + + for (i = 0; eq_done && i < nv_encoder->dp.link_nr; i++) { + int lane = (status[i >> 1] >> ((i & 1) * 4)) & 0xf; + + if (!(lane & DP_LANE_CR_DONE)) { + cr_done = false; + break; + } + + if (!(lane & DP_LANE_CHANNEL_EQ_DONE) || + !(lane & DP_LANE_SYMBOL_LOCKED)) { + eq_done = false; + break; + } + } + + if (eq_done || !cr_done) + break; + + if (!nouveau_dp_link_train_adjust(encoder, config) || + !nouveau_dp_link_train_commit(encoder, config)) + break; + } + +stop: + /* end link training */ + ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_DISABLE); + if (ret) + return false; + + /* retry at a lower setting, if possible */ + if (!ret && !(eq_done && cr_done)) { + NV_DEBUG(dev, "\twe failed\n"); + if (nv_encoder->dp.link_bw != DP_LINK_BW_1_62) { + NV_DEBUG(dev, "retry link training at low rate\n"); + nv_encoder->dp.link_bw = DP_LINK_BW_1_62; + goto train; + } + } + + return eq_done; +} + +bool +nouveau_dp_detect(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + uint8_t dpcd[4]; + int ret; + + ret = auxch_rd(encoder, 0x0000, dpcd, 4); + if (ret) + return false; + + NV_DEBUG(dev, "encoder: link_bw %d, link_nr %d\n" + "display: link_bw %d, link_nr %d version 0x%02x\n", + nv_encoder->dcb->dpconf.link_bw, + nv_encoder->dcb->dpconf.link_nr, + dpcd[1], dpcd[2] & 0x0f, dpcd[0]); + + nv_encoder->dp.dpcd_version = dpcd[0]; + + nv_encoder->dp.link_bw = dpcd[1]; + if (nv_encoder->dp.link_bw != DP_LINK_BW_1_62 && + !nv_encoder->dcb->dpconf.link_bw) + nv_encoder->dp.link_bw = DP_LINK_BW_1_62; + + nv_encoder->dp.link_nr = dpcd[2] & 0xf; + if (nv_encoder->dp.link_nr > nv_encoder->dcb->dpconf.link_nr) + nv_encoder->dp.link_nr = nv_encoder->dcb->dpconf.link_nr; + + return true; +} + +int +nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr, + uint8_t *data, int data_nr) +{ + struct drm_device *dev = auxch->dev; + uint32_t tmp, ctrl, stat = 0, data32[4] = {}; + int ret = 0, i, index = auxch->rd; + + NV_DEBUG(dev, "ch %d cmd %d addr 0x%x len %d\n", index, cmd, addr, data_nr); + + tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd)); + nv_wr32(dev, NV50_AUXCH_CTRL(auxch->rd), tmp | 0x00100000); + tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd)); + if (!(tmp & 0x01000000)) { + NV_ERROR(dev, "expected bit 24 == 1, got 0x%08x\n", tmp); + ret = -EIO; + goto out; + } + + for (i = 0; i < 3; i++) { + tmp = nv_rd32(dev, NV50_AUXCH_STAT(auxch->rd)); + if (tmp & NV50_AUXCH_STAT_STATE_READY) + break; + udelay(100); + } + + if (i == 3) { + ret = -EBUSY; + goto out; + } + + if (!(cmd & 1)) { + memcpy(data32, data, data_nr); + for (i = 0; i < 4; i++) { + NV_DEBUG(dev, "wr %d: 0x%08x\n", i, data32[i]); + nv_wr32(dev, NV50_AUXCH_DATA_OUT(index, i), data32[i]); + } + } + + nv_wr32(dev, NV50_AUXCH_ADDR(index), addr); + ctrl = nv_rd32(dev, NV50_AUXCH_CTRL(index)); + ctrl &= ~(NV50_AUXCH_CTRL_CMD | NV50_AUXCH_CTRL_LEN); + ctrl |= (cmd << NV50_AUXCH_CTRL_CMD_SHIFT); + ctrl |= ((data_nr - 1) << NV50_AUXCH_CTRL_LEN_SHIFT); + + for (;;) { + nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x80000000); + nv_wr32(dev, 
NV50_AUXCH_CTRL(index), ctrl); + nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x00010000); + if (!nv_wait(NV50_AUXCH_CTRL(index), 0x00010000, 0x00000000)) { + NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n", + nv_rd32(dev, NV50_AUXCH_CTRL(index))); + return -EBUSY; + } + + udelay(400); + + stat = nv_rd32(dev, NV50_AUXCH_STAT(index)); + if ((stat & NV50_AUXCH_STAT_REPLY_AUX) != + NV50_AUXCH_STAT_REPLY_AUX_DEFER) + break; + } + + if (cmd & 1) { + for (i = 0; i < 4; i++) { + data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i)); + NV_DEBUG(dev, "rd %d: 0x%08x\n", i, data32[i]); + } + memcpy(data, data32, data_nr); + } + +out: + tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd)); + nv_wr32(dev, NV50_AUXCH_CTRL(auxch->rd), tmp & ~0x00100000); + tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd)); + if (tmp & 0x01000000) { + NV_ERROR(dev, "expected bit 24 == 0, got 0x%08x\n", tmp); + ret = -EIO; + } + + udelay(400); + + return ret ? ret : (stat & NV50_AUXCH_STAT_REPLY); +} + +int +nouveau_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, + uint8_t write_byte, uint8_t *read_byte) +{ + struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; + struct nouveau_i2c_chan *auxch = (struct nouveau_i2c_chan *)adapter; + struct drm_device *dev = auxch->dev; + int ret = 0, cmd, addr = algo_data->address; + uint8_t *buf; + + if (mode == MODE_I2C_READ) { + cmd = AUX_I2C_READ; + buf = read_byte; + } else { + cmd = (mode & MODE_I2C_READ) ? AUX_I2C_READ : AUX_I2C_WRITE; + buf = &write_byte; + } + + if (!(mode & MODE_I2C_STOP)) + cmd |= AUX_I2C_MOT; + + if (mode & MODE_I2C_START) + return 1; + + for (;;) { + ret = nouveau_dp_auxch(auxch, cmd, addr, buf, 1); + if (ret < 0) + return ret; + + switch (ret & NV50_AUXCH_STAT_REPLY_I2C) { + case NV50_AUXCH_STAT_REPLY_I2C_ACK: + return 1; + case NV50_AUXCH_STAT_REPLY_I2C_NACK: + return -EREMOTEIO; + case NV50_AUXCH_STAT_REPLY_I2C_DEFER: + udelay(100); + break; + default: + NV_ERROR(dev, "invalid auxch status: 0x%08x\n", ret); + return -EREMOTEIO; + } + } +} + diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c new file mode 100644 index 000000000000..35249c35118f --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -0,0 +1,405 @@ +/* + * Copyright 2005 Stephane Marchesin. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#include + +#include "drmP.h" +#include "drm.h" +#include "drm_crtc_helper.h" +#include "nouveau_drv.h" +#include "nouveau_hw.h" +#include "nouveau_fb.h" +#include "nouveau_fbcon.h" +#include "nv50_display.h" + +#include "drm_pciids.h" + +MODULE_PARM_DESC(noagp, "Disable AGP"); +int nouveau_noagp; +module_param_named(noagp, nouveau_noagp, int, 0400); + +MODULE_PARM_DESC(modeset, "Enable kernel modesetting"); +static int nouveau_modeset = -1; /* kms */ +module_param_named(modeset, nouveau_modeset, int, 0400); + +MODULE_PARM_DESC(vbios, "Override default VBIOS location"); +char *nouveau_vbios; +module_param_named(vbios, nouveau_vbios, charp, 0400); + +MODULE_PARM_DESC(vram_pushbuf, "Force DMA push buffers to be in VRAM"); +int nouveau_vram_pushbuf; +module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400); + +MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM"); +int nouveau_vram_notify; +module_param_named(vram_notify, nouveau_vram_notify, int, 0400); + +MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)"); +int nouveau_duallink = 1; +module_param_named(duallink, nouveau_duallink, int, 0400); + +MODULE_PARM_DESC(uscript_lvds, "LVDS output script table ID (>=GeForce 8)"); +int nouveau_uscript_lvds = -1; +module_param_named(uscript_lvds, nouveau_uscript_lvds, int, 0400); + +MODULE_PARM_DESC(uscript_tmds, "TMDS output script table ID (>=GeForce 8)"); +int nouveau_uscript_tmds = -1; +module_param_named(uscript_tmds, nouveau_uscript_tmds, int, 0400); + +MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" + "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" + "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n" + "\t\tDefault: PAL\n" + "\t\t*NOTE* Ignored for cards with external TV encoders."); +char *nouveau_tv_norm; +module_param_named(tv_norm, nouveau_tv_norm, charp, 0400); + +MODULE_PARM_DESC(reg_debug, "Register access debug bitmask:\n" + "\t\t0x1 mc, 0x2 video, 0x4 fb, 0x8 extdev,\n" + "\t\t0x10 crtc, 0x20 ramdac, 0x40 vgacrtc, 0x80 rmvio,\n" + "\t\t0x100 vgaattr, 0x200 EVO (G80+). 
"); +int nouveau_reg_debug; +module_param_named(reg_debug, nouveau_reg_debug, int, 0600); + +int nouveau_fbpercrtc; +#if 0 +module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400); +#endif + +static struct pci_device_id pciidlist[] = { + { + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID), + .class = PCI_BASE_CLASS_DISPLAY << 16, + .class_mask = 0xff << 16, + }, + { + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA_SGS, PCI_ANY_ID), + .class = PCI_BASE_CLASS_DISPLAY << 16, + .class_mask = 0xff << 16, + }, + {} +}; + +MODULE_DEVICE_TABLE(pci, pciidlist); + +static struct drm_driver driver; + +static int __devinit +nouveau_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + return drm_get_dev(pdev, ent, &driver); +} + +static void +nouveau_pci_remove(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + + drm_put_dev(dev); +} + +static int +nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; + struct nouveau_channel *chan; + struct drm_crtc *crtc; + uint32_t fbdev_flags; + int ret, i; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + + if (pm_state.event == PM_EVENT_PRETHAW) + return 0; + + fbdev_flags = dev_priv->fbdev_info->flags; + dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct nouveau_framebuffer *nouveau_fb; + + nouveau_fb = nouveau_framebuffer(crtc->fb); + if (!nouveau_fb || !nouveau_fb->nvbo) + continue; + + nouveau_bo_unpin(nouveau_fb->nvbo); + } + + NV_INFO(dev, "Evicting buffers...\n"); + ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); + + NV_INFO(dev, "Idling channels...\n"); + for (i = 0; i < pfifo->channels; i++) { + struct nouveau_fence *fence = NULL; + + chan = dev_priv->fifos[i]; + if (!chan || (dev_priv->card_type >= NV_50 && + chan == dev_priv->fifos[0])) + continue; + + ret = nouveau_fence_new(chan, &fence, true); + if (ret == 0) { + ret = nouveau_fence_wait(fence, NULL, false, false); + nouveau_fence_unref((void *)&fence); + } + + if (ret) { + NV_ERROR(dev, "Failed to idle channel %d for suspend\n", + chan->id); + } + } + + pgraph->fifo_access(dev, false); + nouveau_wait_for_idle(dev); + pfifo->reassign(dev, false); + pfifo->disable(dev); + pfifo->unload_context(dev); + pgraph->unload_context(dev); + + NV_INFO(dev, "Suspending GPU objects...\n"); + ret = nouveau_gpuobj_suspend(dev); + if (ret) { + NV_ERROR(dev, "... failed: %d\n", ret); + goto out_abort; + } + + ret = pinstmem->suspend(dev); + if (ret) { + NV_ERROR(dev, "... 
failed: %d\n", ret); + nouveau_gpuobj_suspend_cleanup(dev); + goto out_abort; + } + + NV_INFO(dev, "And we're gone!\n"); + pci_save_state(pdev); + if (pm_state.event == PM_EVENT_SUSPEND) { + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3hot); + } + + acquire_console_sem(); + fb_set_suspend(dev_priv->fbdev_info, 1); + release_console_sem(); + dev_priv->fbdev_info->flags = fbdev_flags; + return 0; + +out_abort: + NV_INFO(dev, "Re-enabling acceleration..\n"); + pfifo->enable(dev); + pfifo->reassign(dev, true); + pgraph->fifo_access(dev, true); + return ret; +} + +static int +nouveau_pci_resume(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_engine *engine = &dev_priv->engine; + struct drm_crtc *crtc; + uint32_t fbdev_flags; + int ret, i; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + + fbdev_flags = dev_priv->fbdev_info->flags; + dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED; + + NV_INFO(dev, "We're back, enabling device...\n"); + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + if (pci_enable_device(pdev)) + return -1; + pci_set_master(dev->pdev); + + NV_INFO(dev, "POSTing device...\n"); + ret = nouveau_run_vbios_init(dev); + if (ret) + return ret; + + if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) { + ret = nouveau_mem_init_agp(dev); + if (ret) { + NV_ERROR(dev, "error reinitialising AGP: %d\n", ret); + return ret; + } + } + + NV_INFO(dev, "Reinitialising engines...\n"); + engine->instmem.resume(dev); + engine->mc.init(dev); + engine->timer.init(dev); + engine->fb.init(dev); + engine->graph.init(dev); + engine->fifo.init(dev); + + NV_INFO(dev, "Restoring GPU objects...\n"); + nouveau_gpuobj_resume(dev); + + nouveau_irq_postinstall(dev); + + /* Re-write SKIPS, they'll have been lost over the suspend */ + if (nouveau_vram_pushbuf) { + struct nouveau_channel *chan; + int j; + + for (i = 0; i < dev_priv->engine.fifo.channels; i++) { + chan = dev_priv->fifos[i]; + if (!chan) + continue; + + for (j = 0; j < NOUVEAU_DMA_SKIPS; j++) + nouveau_bo_wr32(chan->pushbuf_bo, i, 0); + } + } + + NV_INFO(dev, "Restoring mode...\n"); + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct nouveau_framebuffer *nouveau_fb; + + nouveau_fb = nouveau_framebuffer(crtc->fb); + if (!nouveau_fb || !nouveau_fb->nvbo) + continue; + + nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM); + } + + if (dev_priv->card_type < NV_50) { + nv04_display_restore(dev); + NVLockVgaCrtcs(dev, false); + } else + nv50_display_init(dev); + + /* Force CLUT to get re-loaded during modeset */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + + nv_crtc->lut.depth = 0; + } + + acquire_console_sem(); + fb_set_suspend(dev_priv->fbdev_info, 0); + release_console_sem(); + + nouveau_fbcon_zfill(dev); + + drm_helper_resume_force_mode(dev); + dev_priv->fbdev_info->flags = fbdev_flags; + return 0; +} + +static struct drm_driver driver = { + .driver_features = + DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG | + DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM, + .load = nouveau_load, + .firstopen = nouveau_firstopen, + .lastclose = nouveau_lastclose, + .unload = nouveau_unload, + .preclose = nouveau_preclose, +#if defined(CONFIG_DRM_NOUVEAU_DEBUG) + .debugfs_init = nouveau_debugfs_init, + .debugfs_cleanup = nouveau_debugfs_takedown, +#endif + .irq_preinstall = nouveau_irq_preinstall, + 
.irq_postinstall = nouveau_irq_postinstall, + .irq_uninstall = nouveau_irq_uninstall, + .irq_handler = nouveau_irq_handler, + .reclaim_buffers = drm_core_reclaim_buffers, + .get_map_ofs = drm_core_get_map_ofs, + .get_reg_ofs = drm_core_get_reg_ofs, + .ioctls = nouveau_ioctls, + .fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .ioctl = drm_ioctl, + .mmap = nouveau_ttm_mmap, + .poll = drm_poll, + .fasync = drm_fasync, +#if defined(CONFIG_COMPAT) + .compat_ioctl = nouveau_compat_ioctl, +#endif + }, + .pci_driver = { + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = nouveau_pci_probe, + .remove = nouveau_pci_remove, + .suspend = nouveau_pci_suspend, + .resume = nouveau_pci_resume + }, + + .gem_init_object = nouveau_gem_object_new, + .gem_free_object = nouveau_gem_object_del, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, +#ifdef GIT_REVISION + .date = GIT_REVISION, +#else + .date = DRIVER_DATE, +#endif + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, +}; + +static int __init nouveau_init(void) +{ + driver.num_ioctls = nouveau_max_ioctl; + + if (nouveau_modeset == -1) { +#ifdef CONFIG_VGA_CONSOLE + if (vgacon_text_force()) + nouveau_modeset = 0; + else +#endif + nouveau_modeset = 1; + } + + if (nouveau_modeset == 1) + driver.driver_features |= DRIVER_MODESET; + + return drm_init(&driver); +} + +static void __exit nouveau_exit(void) +{ + drm_exit(&driver); +} + +module_init(nouveau_init); +module_exit(nouveau_exit); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h new file mode 100644 index 000000000000..88b4c7b77e7f --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -0,0 +1,1286 @@ +/* + * Copyright 2005 Stephane Marchesin. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NOUVEAU_DRV_H__ +#define __NOUVEAU_DRV_H__ + +#define DRIVER_AUTHOR "Stephane Marchesin" +#define DRIVER_EMAIL "dri-devel@lists.sourceforge.net" + +#define DRIVER_NAME "nouveau" +#define DRIVER_DESC "nVidia Riva/TNT/GeForce" +#define DRIVER_DATE "20090420" + +#define DRIVER_MAJOR 0 +#define DRIVER_MINOR 0 +#define DRIVER_PATCHLEVEL 15 + +#define NOUVEAU_FAMILY 0x0000FFFF +#define NOUVEAU_FLAGS 0xFFFF0000 + +#include "ttm/ttm_bo_api.h" +#include "ttm/ttm_bo_driver.h" +#include "ttm/ttm_placement.h" +#include "ttm/ttm_memory.h" +#include "ttm/ttm_module.h" + +struct nouveau_fpriv { + struct ttm_object_file *tfile; +}; + +#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) + +#include "nouveau_drm.h" +#include "nouveau_reg.h" +#include "nouveau_bios.h" + +#define MAX_NUM_DCB_ENTRIES 16 + +#define NOUVEAU_MAX_CHANNEL_NR 128 + +#define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL) +#define NV50_VM_BLOCK (512*1024*1024ULL) +#define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK) + +struct nouveau_bo { + struct ttm_buffer_object bo; + struct ttm_placement placement; + u32 placements[3]; + struct ttm_bo_kmap_obj kmap; + struct list_head head; + + /* protected by ttm_bo_reserve() */ + struct drm_file *reserved_by; + struct list_head entry; + int pbbo_index; + + struct nouveau_channel *channel; + + bool mappable; + bool no_vm; + + uint32_t tile_mode; + uint32_t tile_flags; + + struct drm_gem_object *gem; + struct drm_file *cpu_filp; + int pin_refcnt; +}; + +static inline struct nouveau_bo * +nouveau_bo(struct ttm_buffer_object *bo) +{ + return container_of(bo, struct nouveau_bo, bo); +} + +static inline struct nouveau_bo * +nouveau_gem_object(struct drm_gem_object *gem) +{ + return gem ? gem->driver_private : NULL; +} + +/* TODO: submit equivalent to TTM generic API upstream? 
*/ +static inline void __iomem * +nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo) +{ + bool is_iomem; + void __iomem *ioptr = (void __force __iomem *)ttm_kmap_obj_virtual( + &nvbo->kmap, &is_iomem); + WARN_ON_ONCE(ioptr && !is_iomem); + return ioptr; +} + +struct mem_block { + struct mem_block *next; + struct mem_block *prev; + uint64_t start; + uint64_t size; + struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */ +}; + +enum nouveau_flags { + NV_NFORCE = 0x10000000, + NV_NFORCE2 = 0x20000000 +}; + +#define NVOBJ_ENGINE_SW 0 +#define NVOBJ_ENGINE_GR 1 +#define NVOBJ_ENGINE_DISPLAY 2 +#define NVOBJ_ENGINE_INT 0xdeadbeef + +#define NVOBJ_FLAG_ALLOW_NO_REFS (1 << 0) +#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1) +#define NVOBJ_FLAG_ZERO_FREE (1 << 2) +#define NVOBJ_FLAG_FAKE (1 << 3) +struct nouveau_gpuobj { + struct list_head list; + + struct nouveau_channel *im_channel; + struct mem_block *im_pramin; + struct nouveau_bo *im_backing; + uint32_t im_backing_start; + uint32_t *im_backing_suspend; + int im_bound; + + uint32_t flags; + int refcount; + + uint32_t engine; + uint32_t class; + + void (*dtor)(struct drm_device *, struct nouveau_gpuobj *); + void *priv; +}; + +struct nouveau_gpuobj_ref { + struct list_head list; + + struct nouveau_gpuobj *gpuobj; + uint32_t instance; + + struct nouveau_channel *channel; + int handle; +}; + +struct nouveau_channel { + struct drm_device *dev; + int id; + + /* owner of this fifo */ + struct drm_file *file_priv; + /* mapping of the fifo itself */ + struct drm_local_map *map; + + /* mapping of the regs controling the fifo */ + void __iomem *user; + uint32_t user_get; + uint32_t user_put; + + /* Fencing */ + struct { + /* lock protects the pending list only */ + spinlock_t lock; + struct list_head pending; + uint32_t sequence; + uint32_t sequence_ack; + uint32_t last_sequence_irq; + } fence; + + /* DMA push buffer */ + struct nouveau_gpuobj_ref *pushbuf; + struct nouveau_bo *pushbuf_bo; + uint32_t pushbuf_base; + + /* Notifier memory */ + struct nouveau_bo *notifier_bo; + struct mem_block *notifier_heap; + + /* PFIFO context */ + struct nouveau_gpuobj_ref *ramfc; + struct nouveau_gpuobj_ref *cache; + + /* PGRAPH context */ + /* XXX may be merge 2 pointers as private data ??? 
*/ + struct nouveau_gpuobj_ref *ramin_grctx; + void *pgraph_ctx; + + /* NV50 VM */ + struct nouveau_gpuobj *vm_pd; + struct nouveau_gpuobj_ref *vm_gart_pt; + struct nouveau_gpuobj_ref *vm_vram_pt[NV50_VM_VRAM_NR]; + + /* Objects */ + struct nouveau_gpuobj_ref *ramin; /* Private instmem */ + struct mem_block *ramin_heap; /* Private PRAMIN heap */ + struct nouveau_gpuobj_ref *ramht; /* Hash table */ + struct list_head ramht_refs; /* Objects referenced by RAMHT */ + + /* GPU object info for stuff used in-kernel (mm_enabled) */ + uint32_t m2mf_ntfy; + uint32_t vram_handle; + uint32_t gart_handle; + bool accel_done; + + /* Push buffer state (only for drm's channel on !mm_enabled) */ + struct { + int max; + int free; + int cur; + int put; + /* access via pushbuf_bo */ + } dma; + + uint32_t sw_subchannel[8]; + + struct { + struct nouveau_gpuobj *vblsem; + uint32_t vblsem_offset; + uint32_t vblsem_rval; + struct list_head vbl_wait; + } nvsw; + + struct { + bool active; + char name[32]; + struct drm_info_list info; + } debugfs; +}; + +struct nouveau_instmem_engine { + void *priv; + + int (*init)(struct drm_device *dev); + void (*takedown)(struct drm_device *dev); + int (*suspend)(struct drm_device *dev); + void (*resume)(struct drm_device *dev); + + int (*populate)(struct drm_device *, struct nouveau_gpuobj *, + uint32_t *size); + void (*clear)(struct drm_device *, struct nouveau_gpuobj *); + int (*bind)(struct drm_device *, struct nouveau_gpuobj *); + int (*unbind)(struct drm_device *, struct nouveau_gpuobj *); + void (*prepare_access)(struct drm_device *, bool write); + void (*finish_access)(struct drm_device *); +}; + +struct nouveau_mc_engine { + int (*init)(struct drm_device *dev); + void (*takedown)(struct drm_device *dev); +}; + +struct nouveau_timer_engine { + int (*init)(struct drm_device *dev); + void (*takedown)(struct drm_device *dev); + uint64_t (*read)(struct drm_device *dev); +}; + +struct nouveau_fb_engine { + int (*init)(struct drm_device *dev); + void (*takedown)(struct drm_device *dev); +}; + +struct nouveau_fifo_engine { + void *priv; + + int channels; + + int (*init)(struct drm_device *); + void (*takedown)(struct drm_device *); + + void (*disable)(struct drm_device *); + void (*enable)(struct drm_device *); + bool (*reassign)(struct drm_device *, bool enable); + + int (*channel_id)(struct drm_device *); + + int (*create_context)(struct nouveau_channel *); + void (*destroy_context)(struct nouveau_channel *); + int (*load_context)(struct nouveau_channel *); + int (*unload_context)(struct drm_device *); +}; + +struct nouveau_pgraph_object_method { + int id; + int (*exec)(struct nouveau_channel *chan, int grclass, int mthd, + uint32_t data); +}; + +struct nouveau_pgraph_object_class { + int id; + bool software; + struct nouveau_pgraph_object_method *methods; +}; + +struct nouveau_pgraph_engine { + struct nouveau_pgraph_object_class *grclass; + bool accel_blocked; + void *ctxprog; + void *ctxvals; + + int (*init)(struct drm_device *); + void (*takedown)(struct drm_device *); + + void (*fifo_access)(struct drm_device *, bool); + + struct nouveau_channel *(*channel)(struct drm_device *); + int (*create_context)(struct nouveau_channel *); + void (*destroy_context)(struct nouveau_channel *); + int (*load_context)(struct nouveau_channel *); + int (*unload_context)(struct drm_device *); +}; + +struct nouveau_engine { + struct nouveau_instmem_engine instmem; + struct nouveau_mc_engine mc; + struct nouveau_timer_engine timer; + struct nouveau_fb_engine fb; + struct nouveau_pgraph_engine 
graph; + struct nouveau_fifo_engine fifo; +}; + +struct nouveau_pll_vals { + union { + struct { +#ifdef __BIG_ENDIAN + uint8_t N1, M1, N2, M2; +#else + uint8_t M1, N1, M2, N2; +#endif + }; + struct { + uint16_t NM1, NM2; + } __attribute__((packed)); + }; + int log2P; + + int refclk; +}; + +enum nv04_fp_display_regs { + FP_DISPLAY_END, + FP_TOTAL, + FP_CRTC, + FP_SYNC_START, + FP_SYNC_END, + FP_VALID_START, + FP_VALID_END +}; + +struct nv04_crtc_reg { + unsigned char MiscOutReg; /* */ + uint8_t CRTC[0x9f]; + uint8_t CR58[0x10]; + uint8_t Sequencer[5]; + uint8_t Graphics[9]; + uint8_t Attribute[21]; + unsigned char DAC[768]; /* Internal Colorlookuptable */ + + /* PCRTC regs */ + uint32_t fb_start; + uint32_t crtc_cfg; + uint32_t cursor_cfg; + uint32_t gpio_ext; + uint32_t crtc_830; + uint32_t crtc_834; + uint32_t crtc_850; + uint32_t crtc_eng_ctrl; + + /* PRAMDAC regs */ + uint32_t nv10_cursync; + struct nouveau_pll_vals pllvals; + uint32_t ramdac_gen_ctrl; + uint32_t ramdac_630; + uint32_t ramdac_634; + uint32_t tv_setup; + uint32_t tv_vtotal; + uint32_t tv_vskew; + uint32_t tv_vsync_delay; + uint32_t tv_htotal; + uint32_t tv_hskew; + uint32_t tv_hsync_delay; + uint32_t tv_hsync_delay2; + uint32_t fp_horiz_regs[7]; + uint32_t fp_vert_regs[7]; + uint32_t dither; + uint32_t fp_control; + uint32_t dither_regs[6]; + uint32_t fp_debug_0; + uint32_t fp_debug_1; + uint32_t fp_debug_2; + uint32_t fp_margin_color; + uint32_t ramdac_8c0; + uint32_t ramdac_a20; + uint32_t ramdac_a24; + uint32_t ramdac_a34; + uint32_t ctv_regs[38]; +}; + +struct nv04_output_reg { + uint32_t output; + int head; +}; + +struct nv04_mode_state { + uint32_t bpp; + uint32_t width; + uint32_t height; + uint32_t interlace; + uint32_t repaint0; + uint32_t repaint1; + uint32_t screen; + uint32_t scale; + uint32_t dither; + uint32_t extra; + uint32_t fifo; + uint32_t pixel; + uint32_t horiz; + int arbitration0; + int arbitration1; + uint32_t pll; + uint32_t pllB; + uint32_t vpll; + uint32_t vpll2; + uint32_t vpllB; + uint32_t vpll2B; + uint32_t pllsel; + uint32_t sel_clk; + uint32_t general; + uint32_t crtcOwner; + uint32_t head; + uint32_t head2; + uint32_t cursorConfig; + uint32_t cursor0; + uint32_t cursor1; + uint32_t cursor2; + uint32_t timingH; + uint32_t timingV; + uint32_t displayV; + uint32_t crtcSync; + + struct nv04_crtc_reg crtc_reg[2]; +}; + +enum nouveau_card_type { + NV_04 = 0x00, + NV_10 = 0x10, + NV_20 = 0x20, + NV_30 = 0x30, + NV_40 = 0x40, + NV_50 = 0x50, +}; + +struct drm_nouveau_private { + struct drm_device *dev; + enum { + NOUVEAU_CARD_INIT_DOWN, + NOUVEAU_CARD_INIT_DONE, + NOUVEAU_CARD_INIT_FAILED + } init_state; + + /* the card type, takes NV_* as values */ + enum nouveau_card_type card_type; + /* exact chipset, derived from NV_PMC_BOOT_0 */ + int chipset; + int flags; + + void __iomem *mmio; + void __iomem *ramin; + uint32_t ramin_size; + + struct workqueue_struct *wq; + struct work_struct irq_work; + + struct list_head vbl_waiting; + + struct { + struct ttm_global_reference mem_global_ref; + struct ttm_bo_global_ref bo_global_ref; + struct ttm_bo_device bdev; + spinlock_t bo_list_lock; + struct list_head bo_list; + atomic_t validate_sequence; + } ttm; + + struct fb_info *fbdev_info; + + int fifo_alloc_count; + struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR]; + + struct nouveau_engine engine; + struct nouveau_channel *channel; + + /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */ + struct nouveau_gpuobj *ramht; + uint32_t ramin_rsvd_vram; + uint32_t ramht_offset; + uint32_t ramht_size; + 
uint32_t ramht_bits; + uint32_t ramfc_offset; + uint32_t ramfc_size; + uint32_t ramro_offset; + uint32_t ramro_size; + + /* base physical adresses */ + uint64_t fb_phys; + uint64_t fb_available_size; + uint64_t fb_mappable_pages; + uint64_t fb_aper_free; + + struct { + enum { + NOUVEAU_GART_NONE = 0, + NOUVEAU_GART_AGP, + NOUVEAU_GART_SGDMA + } type; + uint64_t aper_base; + uint64_t aper_size; + uint64_t aper_free; + + struct nouveau_gpuobj *sg_ctxdma; + struct page *sg_dummy_page; + dma_addr_t sg_dummy_bus; + + /* nottm hack */ + struct drm_ttm_backend *sg_be; + unsigned long sg_handle; + } gart_info; + + /* G8x/G9x virtual address space */ + uint64_t vm_gart_base; + uint64_t vm_gart_size; + uint64_t vm_vram_base; + uint64_t vm_vram_size; + uint64_t vm_end; + struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; + int vm_vram_pt_nr; + + /* the mtrr covering the FB */ + int fb_mtrr; + + struct mem_block *ramin_heap; + + /* context table pointed to be NV_PGRAPH_CHANNEL_CTX_TABLE (0x400780) */ + uint32_t ctx_table_size; + struct nouveau_gpuobj_ref *ctx_table; + + struct list_head gpuobj_list; + + struct nvbios VBIOS; + struct nouveau_bios_info *vbios; + + struct nv04_mode_state mode_reg; + struct nv04_mode_state saved_reg; + uint32_t saved_vga_font[4][16384]; + uint32_t crtc_owner; + uint32_t dac_users[4]; + + struct nouveau_suspend_resume { + uint32_t fifo_mode; + uint32_t graph_ctx_control; + uint32_t graph_state; + uint32_t *ramin_copy; + uint64_t ramin_size; + } susres; + + struct backlight_device *backlight; + bool acpi_dsm; + + struct nouveau_channel *evo; + + struct { + struct dentry *channel_root; + } debugfs; +}; + +static inline struct drm_nouveau_private * +nouveau_bdev(struct ttm_bo_device *bd) +{ + return container_of(bd, struct drm_nouveau_private, ttm.bdev); +} + +static inline int +nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo) +{ + struct nouveau_bo *prev; + + if (!pnvbo) + return -EINVAL; + prev = *pnvbo; + + *pnvbo = ref ? 
nouveau_bo(ttm_bo_reference(&ref->bo)) : NULL; + if (prev) { + struct ttm_buffer_object *bo = &prev->bo; + + ttm_bo_unref(&bo); + } + + return 0; +} + +#define NOUVEAU_CHECK_INITIALISED_WITH_RETURN do { \ + struct drm_nouveau_private *nv = dev->dev_private; \ + if (nv->init_state != NOUVEAU_CARD_INIT_DONE) { \ + NV_ERROR(dev, "called without init\n"); \ + return -EINVAL; \ + } \ +} while (0) + +#define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id, cl, ch) do { \ + struct drm_nouveau_private *nv = dev->dev_private; \ + if (!nouveau_channel_owner(dev, (cl), (id))) { \ + NV_ERROR(dev, "pid %d doesn't own channel %d\n", \ + DRM_CURRENTPID, (id)); \ + return -EPERM; \ + } \ + (ch) = nv->fifos[(id)]; \ +} while (0) + +/* nouveau_drv.c */ +extern int nouveau_noagp; +extern int nouveau_duallink; +extern int nouveau_uscript_lvds; +extern int nouveau_uscript_tmds; +extern int nouveau_vram_pushbuf; +extern int nouveau_vram_notify; +extern int nouveau_fbpercrtc; +extern char *nouveau_tv_norm; +extern int nouveau_reg_debug; +extern char *nouveau_vbios; + +/* nouveau_state.c */ +extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); +extern int nouveau_load(struct drm_device *, unsigned long flags); +extern int nouveau_firstopen(struct drm_device *); +extern void nouveau_lastclose(struct drm_device *); +extern int nouveau_unload(struct drm_device *); +extern int nouveau_ioctl_getparam(struct drm_device *, void *data, + struct drm_file *); +extern int nouveau_ioctl_setparam(struct drm_device *, void *data, + struct drm_file *); +extern bool nouveau_wait_until(struct drm_device *, uint64_t timeout, + uint32_t reg, uint32_t mask, uint32_t val); +extern bool nouveau_wait_for_idle(struct drm_device *); +extern int nouveau_card_init(struct drm_device *); +extern int nouveau_ioctl_card_init(struct drm_device *, void *data, + struct drm_file *); +extern int nouveau_ioctl_suspend(struct drm_device *, void *data, + struct drm_file *); +extern int nouveau_ioctl_resume(struct drm_device *, void *data, + struct drm_file *); + +/* nouveau_mem.c */ +extern int nouveau_mem_init_heap(struct mem_block **, uint64_t start, + uint64_t size); +extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *, + uint64_t size, int align2, + struct drm_file *, int tail); +extern void nouveau_mem_takedown(struct mem_block **heap); +extern void nouveau_mem_free_block(struct mem_block *); +extern uint64_t nouveau_mem_fb_amount(struct drm_device *); +extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap); +extern int nouveau_mem_init(struct drm_device *); +extern int nouveau_mem_init_agp(struct drm_device *); +extern void nouveau_mem_close(struct drm_device *); +extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt, + uint32_t size, uint32_t flags, + uint64_t phys); +extern void nv50_mem_vm_unbind(struct drm_device *, uint64_t virt, + uint32_t size); + +/* nouveau_notifier.c */ +extern int nouveau_notifier_init_channel(struct nouveau_channel *); +extern void nouveau_notifier_takedown_channel(struct nouveau_channel *); +extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle, + int cout, uint32_t *offset); +extern int nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *); +extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data, + struct drm_file *); +extern int nouveau_ioctl_notifier_free(struct drm_device *, void *data, + struct drm_file *); + +/* nouveau_channel.c */ +extern struct drm_ioctl_desc nouveau_ioctls[]; +extern int 
nouveau_max_ioctl; +extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *); +extern int nouveau_channel_owner(struct drm_device *, struct drm_file *, + int channel); +extern int nouveau_channel_alloc(struct drm_device *dev, + struct nouveau_channel **chan, + struct drm_file *file_priv, + uint32_t fb_ctxdma, uint32_t tt_ctxdma); +extern void nouveau_channel_free(struct nouveau_channel *); +extern int nouveau_channel_idle(struct nouveau_channel *chan); + +/* nouveau_object.c */ +extern int nouveau_gpuobj_early_init(struct drm_device *); +extern int nouveau_gpuobj_init(struct drm_device *); +extern void nouveau_gpuobj_takedown(struct drm_device *); +extern void nouveau_gpuobj_late_takedown(struct drm_device *); +extern int nouveau_gpuobj_suspend(struct drm_device *dev); +extern void nouveau_gpuobj_suspend_cleanup(struct drm_device *dev); +extern void nouveau_gpuobj_resume(struct drm_device *dev); +extern int nouveau_gpuobj_channel_init(struct nouveau_channel *, + uint32_t vram_h, uint32_t tt_h); +extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *); +extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *, + uint32_t size, int align, uint32_t flags, + struct nouveau_gpuobj **); +extern int nouveau_gpuobj_del(struct drm_device *, struct nouveau_gpuobj **); +extern int nouveau_gpuobj_ref_add(struct drm_device *, struct nouveau_channel *, + uint32_t handle, struct nouveau_gpuobj *, + struct nouveau_gpuobj_ref **); +extern int nouveau_gpuobj_ref_del(struct drm_device *, + struct nouveau_gpuobj_ref **); +extern int nouveau_gpuobj_ref_find(struct nouveau_channel *, uint32_t handle, + struct nouveau_gpuobj_ref **ref_ret); +extern int nouveau_gpuobj_new_ref(struct drm_device *, + struct nouveau_channel *alloc_chan, + struct nouveau_channel *ref_chan, + uint32_t handle, uint32_t size, int align, + uint32_t flags, struct nouveau_gpuobj_ref **); +extern int nouveau_gpuobj_new_fake(struct drm_device *, + uint32_t p_offset, uint32_t b_offset, + uint32_t size, uint32_t flags, + struct nouveau_gpuobj **, + struct nouveau_gpuobj_ref**); +extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class, + uint64_t offset, uint64_t size, int access, + int target, struct nouveau_gpuobj **); +extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *, + uint64_t offset, uint64_t size, + int access, struct nouveau_gpuobj **, + uint32_t *o_ret); +extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class, + struct nouveau_gpuobj **); +extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data, + struct drm_file *); +extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data, + struct drm_file *); + +/* nouveau_irq.c */ +extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS); +extern void nouveau_irq_preinstall(struct drm_device *); +extern int nouveau_irq_postinstall(struct drm_device *); +extern void nouveau_irq_uninstall(struct drm_device *); + +/* nouveau_sgdma.c */ +extern int nouveau_sgdma_init(struct drm_device *); +extern void nouveau_sgdma_takedown(struct drm_device *); +extern int nouveau_sgdma_get_page(struct drm_device *, uint32_t offset, + uint32_t *page); +extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *); + +/* nouveau_debugfs.c */ +#if defined(CONFIG_DRM_NOUVEAU_DEBUG) +extern int nouveau_debugfs_init(struct drm_minor *); +extern void nouveau_debugfs_takedown(struct drm_minor *); +extern int nouveau_debugfs_channel_init(struct nouveau_channel *); +extern void 
nouveau_debugfs_channel_fini(struct nouveau_channel *); +#else +static inline int +nouveau_debugfs_init(struct drm_minor *minor) +{ + return 0; +} + +static inline void nouveau_debugfs_takedown(struct drm_minor *minor) +{ +} + +static inline int +nouveau_debugfs_channel_init(struct nouveau_channel *chan) +{ + return 0; +} + +static inline void +nouveau_debugfs_channel_fini(struct nouveau_channel *chan) +{ +} +#endif + +/* nouveau_dma.c */ +extern int nouveau_dma_init(struct nouveau_channel *); +extern int nouveau_dma_wait(struct nouveau_channel *, int size); + +/* nouveau_acpi.c */ +#ifdef CONFIG_ACPI +extern int nouveau_hybrid_setup(struct drm_device *dev); +extern bool nouveau_dsm_probe(struct drm_device *dev); +#else +static inline int nouveau_hybrid_setup(struct drm_device *dev) +{ + return 0; +} +static inline bool nouveau_dsm_probe(struct drm_device *dev) +{ + return false; +} +#endif + +/* nouveau_backlight.c */ +#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT +extern int nouveau_backlight_init(struct drm_device *); +extern void nouveau_backlight_exit(struct drm_device *); +#else +static inline int nouveau_backlight_init(struct drm_device *dev) +{ + return 0; +} + +static inline void nouveau_backlight_exit(struct drm_device *dev) { } +#endif + +/* nouveau_bios.c */ +extern int nouveau_bios_init(struct drm_device *); +extern void nouveau_bios_takedown(struct drm_device *dev); +extern int nouveau_run_vbios_init(struct drm_device *); +extern void nouveau_bios_run_init_table(struct drm_device *, uint16_t table, + struct dcb_entry *); +extern struct dcb_gpio_entry *nouveau_bios_gpio_entry(struct drm_device *, + enum dcb_gpio_tag); +extern struct dcb_connector_table_entry * +nouveau_bios_connector_entry(struct drm_device *, int index); +extern int get_pll_limits(struct drm_device *, uint32_t limit_match, + struct pll_lims *); +extern int nouveau_bios_run_display_table(struct drm_device *, + struct dcb_entry *, + uint32_t script, int pxclk); +extern void *nouveau_bios_dp_table(struct drm_device *, struct dcb_entry *, + int *length); +extern bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *); +extern uint8_t *nouveau_bios_embedded_edid(struct drm_device *); +extern int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk, + bool *dl, bool *if_is_24bit); +extern int run_tmds_table(struct drm_device *, struct dcb_entry *, + int head, int pxclk); +extern int call_lvds_script(struct drm_device *, struct dcb_entry *, int head, + enum LVDS_script, int pxclk); + +/* nouveau_ttm.c */ +int nouveau_ttm_global_init(struct drm_nouveau_private *); +void nouveau_ttm_global_release(struct drm_nouveau_private *); +int nouveau_ttm_mmap(struct file *, struct vm_area_struct *); + +/* nouveau_dp.c */ +int nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr, + uint8_t *data, int data_nr); +bool nouveau_dp_detect(struct drm_encoder *); +bool nouveau_dp_link_train(struct drm_encoder *); + +/* nv04_fb.c */ +extern int nv04_fb_init(struct drm_device *); +extern void nv04_fb_takedown(struct drm_device *); + +/* nv10_fb.c */ +extern int nv10_fb_init(struct drm_device *); +extern void nv10_fb_takedown(struct drm_device *); + +/* nv40_fb.c */ +extern int nv40_fb_init(struct drm_device *); +extern void nv40_fb_takedown(struct drm_device *); + +/* nv04_fifo.c */ +extern int nv04_fifo_init(struct drm_device *); +extern void nv04_fifo_disable(struct drm_device *); +extern void nv04_fifo_enable(struct drm_device *); +extern bool nv04_fifo_reassign(struct drm_device *, bool); +extern int 
nv04_fifo_channel_id(struct drm_device *); +extern int nv04_fifo_create_context(struct nouveau_channel *); +extern void nv04_fifo_destroy_context(struct nouveau_channel *); +extern int nv04_fifo_load_context(struct nouveau_channel *); +extern int nv04_fifo_unload_context(struct drm_device *); + +/* nv10_fifo.c */ +extern int nv10_fifo_init(struct drm_device *); +extern int nv10_fifo_channel_id(struct drm_device *); +extern int nv10_fifo_create_context(struct nouveau_channel *); +extern void nv10_fifo_destroy_context(struct nouveau_channel *); +extern int nv10_fifo_load_context(struct nouveau_channel *); +extern int nv10_fifo_unload_context(struct drm_device *); + +/* nv40_fifo.c */ +extern int nv40_fifo_init(struct drm_device *); +extern int nv40_fifo_create_context(struct nouveau_channel *); +extern void nv40_fifo_destroy_context(struct nouveau_channel *); +extern int nv40_fifo_load_context(struct nouveau_channel *); +extern int nv40_fifo_unload_context(struct drm_device *); + +/* nv50_fifo.c */ +extern int nv50_fifo_init(struct drm_device *); +extern void nv50_fifo_takedown(struct drm_device *); +extern int nv50_fifo_channel_id(struct drm_device *); +extern int nv50_fifo_create_context(struct nouveau_channel *); +extern void nv50_fifo_destroy_context(struct nouveau_channel *); +extern int nv50_fifo_load_context(struct nouveau_channel *); +extern int nv50_fifo_unload_context(struct drm_device *); + +/* nv04_graph.c */ +extern struct nouveau_pgraph_object_class nv04_graph_grclass[]; +extern int nv04_graph_init(struct drm_device *); +extern void nv04_graph_takedown(struct drm_device *); +extern void nv04_graph_fifo_access(struct drm_device *, bool); +extern struct nouveau_channel *nv04_graph_channel(struct drm_device *); +extern int nv04_graph_create_context(struct nouveau_channel *); +extern void nv04_graph_destroy_context(struct nouveau_channel *); +extern int nv04_graph_load_context(struct nouveau_channel *); +extern int nv04_graph_unload_context(struct drm_device *); +extern void nv04_graph_context_switch(struct drm_device *); + +/* nv10_graph.c */ +extern struct nouveau_pgraph_object_class nv10_graph_grclass[]; +extern int nv10_graph_init(struct drm_device *); +extern void nv10_graph_takedown(struct drm_device *); +extern struct nouveau_channel *nv10_graph_channel(struct drm_device *); +extern int nv10_graph_create_context(struct nouveau_channel *); +extern void nv10_graph_destroy_context(struct nouveau_channel *); +extern int nv10_graph_load_context(struct nouveau_channel *); +extern int nv10_graph_unload_context(struct drm_device *); +extern void nv10_graph_context_switch(struct drm_device *); + +/* nv20_graph.c */ +extern struct nouveau_pgraph_object_class nv20_graph_grclass[]; +extern struct nouveau_pgraph_object_class nv30_graph_grclass[]; +extern int nv20_graph_create_context(struct nouveau_channel *); +extern void nv20_graph_destroy_context(struct nouveau_channel *); +extern int nv20_graph_load_context(struct nouveau_channel *); +extern int nv20_graph_unload_context(struct drm_device *); +extern int nv20_graph_init(struct drm_device *); +extern void nv20_graph_takedown(struct drm_device *); +extern int nv30_graph_init(struct drm_device *); + +/* nv40_graph.c */ +extern struct nouveau_pgraph_object_class nv40_graph_grclass[]; +extern int nv40_graph_init(struct drm_device *); +extern void nv40_graph_takedown(struct drm_device *); +extern struct nouveau_channel *nv40_graph_channel(struct drm_device *); +extern int nv40_graph_create_context(struct nouveau_channel *); +extern void 
nv40_graph_destroy_context(struct nouveau_channel *); +extern int nv40_graph_load_context(struct nouveau_channel *); +extern int nv40_graph_unload_context(struct drm_device *); +extern int nv40_grctx_init(struct drm_device *); +extern void nv40_grctx_fini(struct drm_device *); +extern void nv40_grctx_vals_load(struct drm_device *, struct nouveau_gpuobj *); + +/* nv50_graph.c */ +extern struct nouveau_pgraph_object_class nv50_graph_grclass[]; +extern int nv50_graph_init(struct drm_device *); +extern void nv50_graph_takedown(struct drm_device *); +extern void nv50_graph_fifo_access(struct drm_device *, bool); +extern struct nouveau_channel *nv50_graph_channel(struct drm_device *); +extern int nv50_graph_create_context(struct nouveau_channel *); +extern void nv50_graph_destroy_context(struct nouveau_channel *); +extern int nv50_graph_load_context(struct nouveau_channel *); +extern int nv50_graph_unload_context(struct drm_device *); +extern void nv50_graph_context_switch(struct drm_device *); + +/* nv04_instmem.c */ +extern int nv04_instmem_init(struct drm_device *); +extern void nv04_instmem_takedown(struct drm_device *); +extern int nv04_instmem_suspend(struct drm_device *); +extern void nv04_instmem_resume(struct drm_device *); +extern int nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, + uint32_t *size); +extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); +extern int nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); +extern int nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *); +extern void nv04_instmem_prepare_access(struct drm_device *, bool write); +extern void nv04_instmem_finish_access(struct drm_device *); + +/* nv50_instmem.c */ +extern int nv50_instmem_init(struct drm_device *); +extern void nv50_instmem_takedown(struct drm_device *); +extern int nv50_instmem_suspend(struct drm_device *); +extern void nv50_instmem_resume(struct drm_device *); +extern int nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, + uint32_t *size); +extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); +extern int nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); +extern int nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *); +extern void nv50_instmem_prepare_access(struct drm_device *, bool write); +extern void nv50_instmem_finish_access(struct drm_device *); + +/* nv04_mc.c */ +extern int nv04_mc_init(struct drm_device *); +extern void nv04_mc_takedown(struct drm_device *); + +/* nv40_mc.c */ +extern int nv40_mc_init(struct drm_device *); +extern void nv40_mc_takedown(struct drm_device *); + +/* nv50_mc.c */ +extern int nv50_mc_init(struct drm_device *); +extern void nv50_mc_takedown(struct drm_device *); + +/* nv04_timer.c */ +extern int nv04_timer_init(struct drm_device *); +extern uint64_t nv04_timer_read(struct drm_device *); +extern void nv04_timer_takedown(struct drm_device *); + +extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg); + +/* nv04_dac.c */ +extern int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry); +extern enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder, + struct drm_connector *connector); +extern int nv04_dac_output_offset(struct drm_encoder *encoder); +extern void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable); + +/* nv04_dfp.c */ +extern int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry); +extern int 
nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_entry *dcbent); +extern void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent, + int head, bool dl); +extern void nv04_dfp_disable(struct drm_device *dev, int head); +extern void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode); + +/* nv04_tv.c */ +extern int nv04_tv_identify(struct drm_device *dev, int i2c_index); +extern int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry); + +/* nv17_tv.c */ +extern int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry); +extern enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder, + struct drm_connector *connector, + uint32_t pin_mask); + +/* nv04_display.c */ +extern int nv04_display_create(struct drm_device *); +extern void nv04_display_destroy(struct drm_device *); +extern void nv04_display_restore(struct drm_device *); + +/* nv04_crtc.c */ +extern int nv04_crtc_create(struct drm_device *, int index); + +/* nouveau_bo.c */ +extern struct ttm_bo_driver nouveau_bo_driver; +extern int nouveau_bo_new(struct drm_device *, struct nouveau_channel *, + int size, int align, uint32_t flags, + uint32_t tile_mode, uint32_t tile_flags, + bool no_vm, bool mappable, struct nouveau_bo **); +extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags); +extern int nouveau_bo_unpin(struct nouveau_bo *); +extern int nouveau_bo_map(struct nouveau_bo *); +extern void nouveau_bo_unmap(struct nouveau_bo *); +extern void nouveau_bo_placement_set(struct nouveau_bo *, uint32_t memtype); +extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index); +extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val); +extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index); +extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val); + +/* nouveau_fence.c */ +struct nouveau_fence; +extern int nouveau_fence_init(struct nouveau_channel *); +extern void nouveau_fence_fini(struct nouveau_channel *); +extern void nouveau_fence_update(struct nouveau_channel *); +extern int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **, + bool emit); +extern int nouveau_fence_emit(struct nouveau_fence *); +struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *); +extern bool nouveau_fence_signalled(void *obj, void *arg); +extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr); +extern int nouveau_fence_flush(void *obj, void *arg); +extern void nouveau_fence_unref(void **obj); +extern void *nouveau_fence_ref(void *obj); +extern void nouveau_fence_handler(struct drm_device *dev, int channel); + +/* nouveau_gem.c */ +extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *, + int size, int align, uint32_t flags, + uint32_t tile_mode, uint32_t tile_flags, + bool no_vm, bool mappable, struct nouveau_bo **); +extern int nouveau_gem_object_new(struct drm_gem_object *); +extern void nouveau_gem_object_del(struct drm_gem_object *); +extern int nouveau_gem_ioctl_new(struct drm_device *, void *, + struct drm_file *); +extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *, + struct drm_file *); +extern int nouveau_gem_ioctl_pushbuf_call(struct drm_device *, void *, + struct drm_file *); +extern int nouveau_gem_ioctl_pushbuf_call2(struct drm_device *, void *, + struct drm_file *); +extern int nouveau_gem_ioctl_pin(struct drm_device *, void *, + struct drm_file *); +extern int nouveau_gem_ioctl_unpin(struct drm_device *, void *, + struct drm_file 
*); +extern int nouveau_gem_ioctl_tile(struct drm_device *, void *, + struct drm_file *); +extern int nouveau_gem_ioctl_cpu_prep(struct drm_device *, void *, + struct drm_file *); +extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *, + struct drm_file *); +extern int nouveau_gem_ioctl_info(struct drm_device *, void *, + struct drm_file *); + +/* nv17_gpio.c */ +int nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); +int nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); + +#ifndef ioread32_native +#ifdef __BIG_ENDIAN +#define ioread16_native ioread16be +#define iowrite16_native iowrite16be +#define ioread32_native ioread32be +#define iowrite32_native iowrite32be +#else /* def __BIG_ENDIAN */ +#define ioread16_native ioread16 +#define iowrite16_native iowrite16 +#define ioread32_native ioread32 +#define iowrite32_native iowrite32 +#endif /* def __BIG_ENDIAN else */ +#endif /* !ioread32_native */ + +/* channel control reg access */ +static inline u32 nvchan_rd32(struct nouveau_channel *chan, unsigned reg) +{ + return ioread32_native(chan->user + reg); +} + +static inline void nvchan_wr32(struct nouveau_channel *chan, + unsigned reg, u32 val) +{ + iowrite32_native(val, chan->user + reg); +} + +/* register access */ +static inline u32 nv_rd32(struct drm_device *dev, unsigned reg) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + return ioread32_native(dev_priv->mmio + reg); +} + +static inline void nv_wr32(struct drm_device *dev, unsigned reg, u32 val) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + iowrite32_native(val, dev_priv->mmio + reg); +} + +static inline u8 nv_rd08(struct drm_device *dev, unsigned reg) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + return ioread8(dev_priv->mmio + reg); +} + +static inline void nv_wr08(struct drm_device *dev, unsigned reg, u8 val) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + iowrite8(val, dev_priv->mmio + reg); +} + +#define nv_wait(reg, mask, val) \ + nouveau_wait_until(dev, 2000000000ULL, (reg), (mask), (val)) + +/* PRAMIN access */ +static inline u32 nv_ri32(struct drm_device *dev, unsigned offset) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + return ioread32_native(dev_priv->ramin + offset); +} + +static inline void nv_wi32(struct drm_device *dev, unsigned offset, u32 val) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + iowrite32_native(val, dev_priv->ramin + offset); +} + +/* object access */ +static inline u32 nv_ro32(struct drm_device *dev, struct nouveau_gpuobj *obj, + unsigned index) +{ + return nv_ri32(dev, obj->im_pramin->start + index * 4); +} + +static inline void nv_wo32(struct drm_device *dev, struct nouveau_gpuobj *obj, + unsigned index, u32 val) +{ + nv_wi32(dev, obj->im_pramin->start + index * 4, val); +} + +/* + * Logging + * Argument d is (struct drm_device *). + */ +#define NV_PRINTK(level, d, fmt, arg...) \ + printk(level "[" DRM_NAME "] " DRIVER_NAME " %s: " fmt, \ + pci_name(d->pdev), ##arg) +#ifndef NV_DEBUG_NOTRACE +#define NV_DEBUG(d, fmt, arg...) do { \ + if (drm_debug) { \ + NV_PRINTK(KERN_DEBUG, d, "%s:%d - " fmt, __func__, \ + __LINE__, ##arg); \ + } \ +} while (0) +#else +#define NV_DEBUG(d, fmt, arg...) do { \ + if (drm_debug) \ + NV_PRINTK(KERN_DEBUG, d, fmt, ##arg); \ +} while (0) +#endif +#define NV_ERROR(d, fmt, arg...) NV_PRINTK(KERN_ERR, d, fmt, ##arg) +#define NV_INFO(d, fmt, arg...) NV_PRINTK(KERN_INFO, d, fmt, ##arg) +#define NV_TRACEWARN(d, fmt, arg...) 
NV_PRINTK(KERN_NOTICE, d, fmt, ##arg) +#define NV_TRACE(d, fmt, arg...) NV_PRINTK(KERN_INFO, d, fmt, ##arg) +#define NV_WARN(d, fmt, arg...) NV_PRINTK(KERN_WARNING, d, fmt, ##arg) + +/* nouveau_reg_debug bitmask */ +enum { + NOUVEAU_REG_DEBUG_MC = 0x1, + NOUVEAU_REG_DEBUG_VIDEO = 0x2, + NOUVEAU_REG_DEBUG_FB = 0x4, + NOUVEAU_REG_DEBUG_EXTDEV = 0x8, + NOUVEAU_REG_DEBUG_CRTC = 0x10, + NOUVEAU_REG_DEBUG_RAMDAC = 0x20, + NOUVEAU_REG_DEBUG_VGACRTC = 0x40, + NOUVEAU_REG_DEBUG_RMVIO = 0x80, + NOUVEAU_REG_DEBUG_VGAATTR = 0x100, + NOUVEAU_REG_DEBUG_EVO = 0x200, +}; + +#define NV_REG_DEBUG(type, dev, fmt, arg...) do { \ + if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_##type) \ + NV_PRINTK(KERN_DEBUG, dev, "%s: " fmt, __func__, ##arg); \ +} while (0) + +static inline bool +nv_two_heads(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + const int impl = dev->pci_device & 0x0ff0; + + if (dev_priv->card_type >= NV_10 && impl != 0x0100 && + impl != 0x0150 && impl != 0x01a0 && impl != 0x0200) + return true; + + return false; +} + +static inline bool +nv_gf4_disp_arch(struct drm_device *dev) +{ + return nv_two_heads(dev) && (dev->pci_device & 0x0ff0) != 0x0110; +} + +static inline bool +nv_two_reg_pll(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + const int impl = dev->pci_device & 0x0ff0; + + if (impl == 0x0310 || impl == 0x0340 || dev_priv->card_type >= NV_40) + return true; + return false; +} + +#define NV50_NVSW 0x0000506e +#define NV50_NVSW_DMA_SEMAPHORE 0x00000060 +#define NV50_NVSW_SEMAPHORE_OFFSET 0x00000064 +#define NV50_NVSW_SEMAPHORE_ACQUIRE 0x00000068 +#define NV50_NVSW_SEMAPHORE_RELEASE 0x0000006c +#define NV50_NVSW_DMA_VBLSEM 0x0000018c +#define NV50_NVSW_VBLSEM_OFFSET 0x00000400 +#define NV50_NVSW_VBLSEM_RELEASE_VALUE 0x00000404 +#define NV50_NVSW_VBLSEM_RELEASE 0x00000408 + +#endif /* __NOUVEAU_DRV_H__ */ diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h new file mode 100644 index 000000000000..bc4a24029ed1 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h @@ -0,0 +1,91 @@ +/* + * Copyright (C) 2008 Maarten Maathuis. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __NOUVEAU_ENCODER_H__ +#define __NOUVEAU_ENCODER_H__ + +#include "drm_encoder_slave.h" +#include "nouveau_drv.h" + +#define NV_DPMS_CLEARED 0x80 + +struct nouveau_encoder { + struct drm_encoder_slave base; + + struct dcb_entry *dcb; + int or; + + struct drm_display_mode mode; + int last_dpms; + + struct nv04_output_reg restore; + + void (*disconnect)(struct nouveau_encoder *encoder); + + union { + struct { + int dpcd_version; + int link_nr; + int link_bw; + } dp; + }; +}; + +static inline struct nouveau_encoder *nouveau_encoder(struct drm_encoder *enc) +{ + struct drm_encoder_slave *slave = to_encoder_slave(enc); + + return container_of(slave, struct nouveau_encoder, base); +} + +static inline struct drm_encoder *to_drm_encoder(struct nouveau_encoder *enc) +{ + return &enc->base.base; +} + +struct nouveau_connector * +nouveau_encoder_connector_get(struct nouveau_encoder *encoder); +int nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry); +int nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry); + +struct bit_displayport_encoder_table { + uint32_t match; + uint8_t record_nr; + uint8_t unknown; + uint16_t script0; + uint16_t script1; + uint16_t unknown_table; +} __attribute__ ((packed)); + +struct bit_displayport_encoder_table_entry { + uint8_t vs_level; + uint8_t pre_level; + uint8_t reg0; + uint8_t reg1; + uint8_t reg2; +} __attribute__ ((packed)); + +#endif /* __NOUVEAU_ENCODER_H__ */ diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h new file mode 100644 index 000000000000..4a3f31aa1949 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_fb.h @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2008 Maarten Maathuis. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
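nouveau_encoder() above recovers the driver-private wrapper from a generic struct drm_encoder by stepping through drm_encoder_slave and container_of(); to_drm_encoder() goes the other way by taking the address of the embedded member. The pattern is plain offset arithmetic. A self-contained sketch with hypothetical types and a hand-rolled container_of that compiles in user space:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_encoder { int id; };

struct my_encoder {			/* illustrative stand-in for nouveau_encoder */
	struct base_encoder base;	/* embedded generic object */
	int or;
};

static struct my_encoder *to_my_encoder(struct base_encoder *enc)
{
	return container_of(enc, struct my_encoder, base);
}

int main(void)
{
	struct my_encoder enc = { .base = { .id = 1 }, .or = 2 };
	struct base_encoder *generic = &enc.base;

	printf("or = %d\n", to_my_encoder(generic)->or);
	return 0;
}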
+ * + */ + +#ifndef __NOUVEAU_FB_H__ +#define __NOUVEAU_FB_H__ + +struct nouveau_framebuffer { + struct drm_framebuffer base; + struct nouveau_bo *nvbo; +}; + +static inline struct nouveau_framebuffer * +nouveau_framebuffer(struct drm_framebuffer *fb) +{ + return container_of(fb, struct nouveau_framebuffer, base); +} + +extern const struct drm_mode_config_funcs nouveau_mode_config_funcs; + +struct drm_framebuffer * +nouveau_framebuffer_create(struct drm_device *, struct nouveau_bo *, + struct drm_mode_fb_cmd *); + +#endif /* __NOUVEAU_FB_H__ */ diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c new file mode 100644 index 000000000000..36e8c5e4503a --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -0,0 +1,380 @@ +/* + * Copyright © 2007 David Airlie + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * Authors: + * David Airlie + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "drmP.h" +#include "drm.h" +#include "drm_crtc.h" +#include "drm_crtc_helper.h" +#include "drm_fb_helper.h" +#include "nouveau_drv.h" +#include "nouveau_drm.h" +#include "nouveau_crtc.h" +#include "nouveau_fb.h" +#include "nouveau_fbcon.h" +#include "nouveau_dma.h" + +static int +nouveau_fbcon_sync(struct fb_info *info) +{ + struct nouveau_fbcon_par *par = info->par; + struct drm_device *dev = par->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = dev_priv->channel; + int ret, i; + + if (!chan->accel_done || + info->state != FBINFO_STATE_RUNNING || + info->flags & FBINFO_HWACCEL_DISABLED) + return 0; + + if (RING_SPACE(chan, 4)) { + NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); + info->flags |= FBINFO_HWACCEL_DISABLED; + return 0; + } + + BEGIN_RING(chan, 0, 0x0104, 1); + OUT_RING(chan, 0); + BEGIN_RING(chan, 0, 0x0100, 1); + OUT_RING(chan, 0); + nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff); + FIRE_RING(chan); + + ret = -EBUSY; + for (i = 0; i < 100000; i++) { + if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy + 3)) { + ret = 0; + break; + } + DRM_UDELAY(1); + } + + if (ret) { + NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); + info->flags |= FBINFO_HWACCEL_DISABLED; + return 0; + } + + chan->accel_done = false; + return 0; +} + +static struct fb_ops nouveau_fbcon_ops = { + .owner = THIS_MODULE, + .fb_check_var = drm_fb_helper_check_var, + .fb_set_par = drm_fb_helper_set_par, + .fb_setcolreg = drm_fb_helper_setcolreg, + .fb_fillrect = cfb_fillrect, + .fb_copyarea = cfb_copyarea, + .fb_imageblit = cfb_imageblit, + .fb_sync = nouveau_fbcon_sync, + .fb_pan_display = drm_fb_helper_pan_display, + .fb_blank = drm_fb_helper_blank, + .fb_setcmap = drm_fb_helper_setcmap, +}; + +static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, + u16 blue, int regno) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + + nv_crtc->lut.r[regno] = red; + nv_crtc->lut.g[regno] = green; + nv_crtc->lut.b[regno] = blue; +} + +static void nouveau_fbcon_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, int regno) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + + *red = nv_crtc->lut.r[regno]; + *green = nv_crtc->lut.g[regno]; + *blue = nv_crtc->lut.b[regno]; +} + +static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = { + .gamma_set = nouveau_fbcon_gamma_set, + .gamma_get = nouveau_fbcon_gamma_get +}; + +#if defined(__i386__) || defined(__x86_64__) +static bool +nouveau_fbcon_has_vesafb_or_efifb(struct drm_device *dev) +{ + struct pci_dev *pdev = dev->pdev; + int ramin; + + if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB && + screen_info.orig_video_isVGA != VIDEO_TYPE_EFI) + return false; + + if (screen_info.lfb_base < pci_resource_start(pdev, 1)) + goto not_fb; + + if (screen_info.lfb_base + screen_info.lfb_size >= + pci_resource_start(pdev, 1) + pci_resource_len(pdev, 1)) + goto not_fb; + + return true; +not_fb: + ramin = 2; + if (pci_resource_len(pdev, ramin) == 0) { + ramin = 3; + if (pci_resource_len(pdev, ramin) == 0) + return false; + } + + if (screen_info.lfb_base < pci_resource_start(pdev, ramin)) + return false; + + if (screen_info.lfb_base + screen_info.lfb_size >= + pci_resource_start(pdev, ramin) + pci_resource_len(pdev, ramin)) + return false; + + return true; +} 
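nouveau_fbcon_has_vesafb_or_efifb() above decides whether the firmware framebuffer reported in screen_info falls inside one of the device's BARs (the framebuffer BAR first, then a PRAMIN BAR), so the driver knows whether a vesafb/efifb takeover is needed. The containment test is simple interval arithmetic; a stand-alone sketch with made-up addresses, not the real BAR layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Does [base, base + size) sit entirely inside [start, start + len)?
 * Same shape as the lfb_base/BAR checks above; numbers are hypothetical. */
static bool range_within(uint64_t base, uint64_t size,
			 uint64_t start, uint64_t len)
{
	return base >= start && base + size <= start + len;
}

int main(void)
{
	uint64_t bar_start = 0xd0000000, bar_len = 256ull << 20;	/* hypothetical BAR */
	uint64_t lfb_base = 0xd1000000, lfb_size = 8ull << 20;		/* hypothetical lfb */

	printf("firmware fb inside BAR: %s\n",
	       range_within(lfb_base, lfb_size, bar_start, bar_len) ? "yes" : "no");
	return 0;
}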
+#endif + +void +nouveau_fbcon_zfill(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct fb_info *info = dev_priv->fbdev_info; + struct fb_fillrect rect; + + /* Clear the entire fbcon. The drm will program every connector + * with it's preferred mode. If the sizes differ, one display will + * quite likely have garbage around the console. + */ + rect.dx = rect.dy = 0; + rect.width = info->var.xres_virtual; + rect.height = info->var.yres_virtual; + rect.color = 0; + rect.rop = ROP_COPY; + info->fbops->fb_fillrect(info, &rect); +} + +static int +nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width, + uint32_t fb_height, uint32_t surface_width, + uint32_t surface_height, uint32_t surface_depth, + uint32_t surface_bpp, struct drm_framebuffer **pfb) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct fb_info *info; + struct nouveau_fbcon_par *par; + struct drm_framebuffer *fb; + struct nouveau_framebuffer *nouveau_fb; + struct nouveau_bo *nvbo; + struct drm_mode_fb_cmd mode_cmd; + struct device *device = &dev->pdev->dev; + int size, ret; + + mode_cmd.width = surface_width; + mode_cmd.height = surface_height; + + mode_cmd.bpp = surface_bpp; + mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3); + mode_cmd.pitch = ALIGN(mode_cmd.pitch, 256); + mode_cmd.depth = surface_depth; + + size = mode_cmd.pitch * mode_cmd.height; + size = ALIGN(size, PAGE_SIZE); + + ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM, + 0, 0x0000, false, true, &nvbo); + if (ret) { + NV_ERROR(dev, "failed to allocate framebuffer\n"); + goto out; + } + + ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM); + if (ret) { + NV_ERROR(dev, "failed to pin fb: %d\n", ret); + nouveau_bo_ref(NULL, &nvbo); + goto out; + } + + ret = nouveau_bo_map(nvbo); + if (ret) { + NV_ERROR(dev, "failed to map fb: %d\n", ret); + nouveau_bo_unpin(nvbo); + nouveau_bo_ref(NULL, &nvbo); + goto out; + } + + mutex_lock(&dev->struct_mutex); + + fb = nouveau_framebuffer_create(dev, nvbo, &mode_cmd); + if (!fb) { + ret = -ENOMEM; + NV_ERROR(dev, "failed to allocate fb.\n"); + goto out_unref; + } + + list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list); + + nouveau_fb = nouveau_framebuffer(fb); + *pfb = fb; + + info = framebuffer_alloc(sizeof(struct nouveau_fbcon_par), device); + if (!info) { + ret = -ENOMEM; + goto out_unref; + } + + par = info->par; + par->helper.funcs = &nouveau_fbcon_helper_funcs; + par->helper.dev = dev; + ret = drm_fb_helper_init_crtc_count(&par->helper, 2, 4); + if (ret) + goto out_unref; + dev_priv->fbdev_info = info; + + strcpy(info->fix.id, "nouveaufb"); + info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | + FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT; + info->fbops = &nouveau_fbcon_ops; + info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset - + dev_priv->vm_vram_base; + info->fix.smem_len = size; + + info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo); + info->screen_size = size; + + drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); + drm_fb_helper_fill_var(info, fb, fb_width, fb_height); + + /* FIXME: we really shouldn't expose mmio space at all */ + info->fix.mmio_start = pci_resource_start(dev->pdev, 1); + info->fix.mmio_len = pci_resource_len(dev->pdev, 1); + + /* Set aperture base/size for vesafb takeover */ +#if defined(__i386__) || defined(__x86_64__) + if (nouveau_fbcon_has_vesafb_or_efifb(dev)) { + /* Some NVIDIA VBIOS' are stupid and decide to put the + * framebuffer in the middle of the PRAMIN 
BAR for + * whatever reason. We need to know the exact lfb_base + * to get vesafb kicked off, and the only reliable way + * we have left is to find out lfb_base the same way + * vesafb did. + */ + info->aperture_base = screen_info.lfb_base; + info->aperture_size = screen_info.lfb_size; + if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB) + info->aperture_size *= 65536; + } else +#endif + { + info->aperture_base = info->fix.mmio_start; + info->aperture_size = info->fix.mmio_len; + } + + info->pixmap.size = 64*1024; + info->pixmap.buf_align = 8; + info->pixmap.access_align = 32; + info->pixmap.flags = FB_PIXMAP_SYSTEM; + info->pixmap.scan_align = 1; + + fb->fbdev = info; + + par->nouveau_fb = nouveau_fb; + par->dev = dev; + + switch (dev_priv->card_type) { + case NV_50: + nv50_fbcon_accel_init(info); + break; + default: + nv04_fbcon_accel_init(info); + break; + }; + + nouveau_fbcon_zfill(dev); + + /* To allow resizeing without swapping buffers */ + NV_INFO(dev, "allocated %dx%d fb: 0x%lx, bo %p\n", + nouveau_fb->base.width, + nouveau_fb->base.height, + nvbo->bo.offset, nvbo); + + mutex_unlock(&dev->struct_mutex); + return 0; + +out_unref: + mutex_unlock(&dev->struct_mutex); +out: + return ret; +} + +int +nouveau_fbcon_probe(struct drm_device *dev) +{ + NV_DEBUG(dev, "\n"); + + return drm_fb_helper_single_fb_probe(dev, 32, nouveau_fbcon_create); +} + +int +nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb) +{ + struct nouveau_framebuffer *nouveau_fb = nouveau_framebuffer(fb); + struct fb_info *info; + + if (!fb) + return -EINVAL; + + info = fb->fbdev; + if (info) { + struct nouveau_fbcon_par *par = info->par; + + unregister_framebuffer(info); + nouveau_bo_unmap(nouveau_fb->nvbo); + mutex_lock(&dev->struct_mutex); + drm_gem_object_unreference(nouveau_fb->nvbo->gem); + nouveau_fb->nvbo = NULL; + mutex_unlock(&dev->struct_mutex); + if (par) + drm_fb_helper_free(&par->helper); + framebuffer_release(info); + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h new file mode 100644 index 000000000000..8531140fedbc --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2008 Maarten Maathuis. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
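nouveau_fbcon_create() above sizes the scanout buffer by rounding the pitch up to 256 bytes and the total allocation up to a page before asking nouveau_gem_new() for VRAM. The same arithmetic in isolation, with ALIGN redefined locally and a 4 KiB page assumed purely for the example:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define PAGE_SZ		4096u		/* assumption for this example */

int main(void)
{
	uint32_t width = 1366, height = 768, bpp = 32;

	uint32_t pitch = ALIGN_UP(width * (bpp >> 3), 256u);	/* bytes per scanline */
	uint32_t size  = ALIGN_UP(pitch * height, PAGE_SZ);	/* total buffer size */

	printf("pitch = %u bytes, size = %u bytes\n", pitch, size);
	return 0;
}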
+ * + */ + +#ifndef __NOUVEAU_FBCON_H__ +#define __NOUVEAU_FBCON_H__ + +#include "drm_fb_helper.h" + +struct nouveau_fbcon_par { + struct drm_fb_helper helper; + struct drm_device *dev; + struct nouveau_framebuffer *nouveau_fb; +}; + +int nouveau_fbcon_probe(struct drm_device *dev); +int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb); +void nouveau_fbcon_restore(void); +void nouveau_fbcon_zfill(struct drm_device *dev); + +int nv04_fbcon_accel_init(struct fb_info *info); +int nv50_fbcon_accel_init(struct fb_info *info); + +#endif /* __NV50_FBCON_H__ */ + diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c new file mode 100644 index 000000000000..0cff7eb3690a --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -0,0 +1,262 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "drmP.h" +#include "drm.h" + +#include "nouveau_drv.h" +#include "nouveau_dma.h" + +#define USE_REFCNT (dev_priv->card_type >= NV_10) + +struct nouveau_fence { + struct nouveau_channel *channel; + struct kref refcount; + struct list_head entry; + + uint32_t sequence; + bool signalled; +}; + +static inline struct nouveau_fence * +nouveau_fence(void *sync_obj) +{ + return (struct nouveau_fence *)sync_obj; +} + +static void +nouveau_fence_del(struct kref *ref) +{ + struct nouveau_fence *fence = + container_of(ref, struct nouveau_fence, refcount); + + kfree(fence); +} + +void +nouveau_fence_update(struct nouveau_channel *chan) +{ + struct drm_nouveau_private *dev_priv = chan->dev->dev_private; + struct list_head *entry, *tmp; + struct nouveau_fence *fence; + uint32_t sequence; + + if (USE_REFCNT) + sequence = nvchan_rd32(chan, 0x48); + else + sequence = chan->fence.last_sequence_irq; + + if (chan->fence.sequence_ack == sequence) + return; + chan->fence.sequence_ack = sequence; + + list_for_each_safe(entry, tmp, &chan->fence.pending) { + fence = list_entry(entry, struct nouveau_fence, entry); + + sequence = fence->sequence; + fence->signalled = true; + list_del(&fence->entry); + kref_put(&fence->refcount, nouveau_fence_del); + + if (sequence == chan->fence.sequence_ack) + break; + } +} + +int +nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence, + bool emit) +{ + struct nouveau_fence *fence; + int ret = 0; + + fence = kzalloc(sizeof(*fence), GFP_KERNEL); + if (!fence) + return -ENOMEM; + kref_init(&fence->refcount); + fence->channel = chan; + + if (emit) + ret = nouveau_fence_emit(fence); + + if (ret) + nouveau_fence_unref((void *)&fence); + *pfence = fence; + return ret; +} + +struct nouveau_channel * +nouveau_fence_channel(struct nouveau_fence *fence) +{ + return fence ? fence->channel : NULL; +} + +int +nouveau_fence_emit(struct nouveau_fence *fence) +{ + struct drm_nouveau_private *dev_priv = fence->channel->dev->dev_private; + struct nouveau_channel *chan = fence->channel; + unsigned long flags; + int ret; + + ret = RING_SPACE(chan, 2); + if (ret) + return ret; + + if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) { + spin_lock_irqsave(&chan->fence.lock, flags); + nouveau_fence_update(chan); + spin_unlock_irqrestore(&chan->fence.lock, flags); + + BUG_ON(chan->fence.sequence == + chan->fence.sequence_ack - 1); + } + + fence->sequence = ++chan->fence.sequence; + + kref_get(&fence->refcount); + spin_lock_irqsave(&chan->fence.lock, flags); + list_add_tail(&fence->entry, &chan->fence.pending); + spin_unlock_irqrestore(&chan->fence.lock, flags); + + BEGIN_RING(chan, NvSubM2MF, USE_REFCNT ? 
0x0050 : 0x0150, 1); + OUT_RING(chan, fence->sequence); + FIRE_RING(chan); + + return 0; +} + +void +nouveau_fence_unref(void **sync_obj) +{ + struct nouveau_fence *fence = nouveau_fence(*sync_obj); + + if (fence) + kref_put(&fence->refcount, nouveau_fence_del); + *sync_obj = NULL; +} + +void * +nouveau_fence_ref(void *sync_obj) +{ + struct nouveau_fence *fence = nouveau_fence(sync_obj); + + kref_get(&fence->refcount); + return sync_obj; +} + +bool +nouveau_fence_signalled(void *sync_obj, void *sync_arg) +{ + struct nouveau_fence *fence = nouveau_fence(sync_obj); + struct nouveau_channel *chan = fence->channel; + unsigned long flags; + + if (fence->signalled) + return true; + + spin_lock_irqsave(&chan->fence.lock, flags); + nouveau_fence_update(chan); + spin_unlock_irqrestore(&chan->fence.lock, flags); + return fence->signalled; +} + +int +nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr) +{ + unsigned long timeout = jiffies + (3 * DRM_HZ); + int ret = 0; + + __set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); + + while (1) { + if (nouveau_fence_signalled(sync_obj, sync_arg)) + break; + + if (time_after_eq(jiffies, timeout)) { + ret = -EBUSY; + break; + } + + if (lazy) + schedule_timeout(1); + + if (intr && signal_pending(current)) { + ret = -ERESTART; + break; + } + } + + __set_current_state(TASK_RUNNING); + + return ret; +} + +int +nouveau_fence_flush(void *sync_obj, void *sync_arg) +{ + return 0; +} + +void +nouveau_fence_handler(struct drm_device *dev, int channel) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = NULL; + + if (channel >= 0 && channel < dev_priv->engine.fifo.channels) + chan = dev_priv->fifos[channel]; + + if (chan) { + spin_lock_irq(&chan->fence.lock); + nouveau_fence_update(chan); + spin_unlock_irq(&chan->fence.lock); + } +} + +int +nouveau_fence_init(struct nouveau_channel *chan) +{ + INIT_LIST_HEAD(&chan->fence.pending); + spin_lock_init(&chan->fence.lock); + return 0; +} + +void +nouveau_fence_fini(struct nouveau_channel *chan) +{ + struct list_head *entry, *tmp; + struct nouveau_fence *fence; + + list_for_each_safe(entry, tmp, &chan->fence.pending) { + fence = list_entry(entry, struct nouveau_fence, entry); + + fence->signalled = true; + list_del(&fence->entry); + kref_put(&fence->refcount, nouveau_fence_del); + } +} + diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c new file mode 100644 index 000000000000..11f831f0ddc5 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -0,0 +1,992 @@ +/* + * Copyright (C) 2008 Ben Skeggs. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
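nouveau_fence_update() above retires fences by reading the channel's acknowledged sequence number and signalling every pending fence up to and including that value, dropping the reference the pending list held on each one. A compact user-space model of that retire loop, using a plain singly-linked list and a manual refcount; this is an illustration of the scheme, not the driver code:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct fence {
	uint32_t sequence;
	int refcount;
	int signalled;
	struct fence *next;	/* pending list, oldest first */
};

static void fence_put(struct fence *f)
{
	if (--f->refcount == 0)
		free(f);
}

/* Signal and unlink every pending fence whose sequence has been reached. */
static void fence_update(struct fence **pending, uint32_t ack)
{
	while (*pending) {
		struct fence *f = *pending;
		uint32_t seq = f->sequence;	/* read before dropping the ref */

		f->signalled = 1;
		*pending = f->next;
		fence_put(f);			/* drop the pending-list reference */
		if (seq == ack)
			break;
	}
}

int main(void)
{
	struct fence *pending = NULL, **tail = &pending;

	for (uint32_t i = 1; i <= 4; i++) {
		struct fence *f = calloc(1, sizeof(*f));

		if (!f)
			return 1;
		f->sequence = i;
		f->refcount = 1;	/* reference held by the pending list */
		*tail = f;
		tail = &f->next;
	}

	fence_update(&pending, 3);	/* pretend the hardware reported sequence 3 done */
	printf("oldest still pending: %u\n", pending ? pending->sequence : 0);
	return 0;
}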
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "drmP.h" +#include "drm.h" + +#include "nouveau_drv.h" +#include "nouveau_drm.h" +#include "nouveau_dma.h" + +#define nouveau_gem_pushbuf_sync(chan) 0 + +int +nouveau_gem_object_new(struct drm_gem_object *gem) +{ + return 0; +} + +void +nouveau_gem_object_del(struct drm_gem_object *gem) +{ + struct nouveau_bo *nvbo = gem->driver_private; + struct ttm_buffer_object *bo = &nvbo->bo; + + if (!nvbo) + return; + nvbo->gem = NULL; + + if (unlikely(nvbo->cpu_filp)) + ttm_bo_synccpu_write_release(bo); + + if (unlikely(nvbo->pin_refcnt)) { + nvbo->pin_refcnt = 1; + nouveau_bo_unpin(nvbo); + } + + ttm_bo_unref(&bo); +} + +int +nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan, + int size, int align, uint32_t flags, uint32_t tile_mode, + uint32_t tile_flags, bool no_vm, bool mappable, + struct nouveau_bo **pnvbo) +{ + struct nouveau_bo *nvbo; + int ret; + + ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode, + tile_flags, no_vm, mappable, pnvbo); + if (ret) + return ret; + nvbo = *pnvbo; + + nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size); + if (!nvbo->gem) { + nouveau_bo_ref(NULL, pnvbo); + return -ENOMEM; + } + + nvbo->bo.persistant_swap_storage = nvbo->gem->filp; + nvbo->gem->driver_private = nvbo; + return 0; +} + +static int +nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep) +{ + struct nouveau_bo *nvbo = nouveau_gem_object(gem); + + if (nvbo->bo.mem.mem_type == TTM_PL_TT) + rep->domain = NOUVEAU_GEM_DOMAIN_GART; + else + rep->domain = NOUVEAU_GEM_DOMAIN_VRAM; + + rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT; + rep->offset = nvbo->bo.offset; + rep->map_handle = nvbo->mappable ? 
nvbo->bo.addr_space_offset : 0; + rep->tile_mode = nvbo->tile_mode; + rep->tile_flags = nvbo->tile_flags; + return 0; +} + +static bool +nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags) { + switch (tile_flags) { + case 0x0000: + case 0x1800: + case 0x2800: + case 0x4800: + case 0x7000: + case 0x7400: + case 0x7a00: + case 0xe000: + break; + default: + NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags); + return false; + } + + return true; +} + +int +nouveau_gem_ioctl_new(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_nouveau_gem_new *req = data; + struct nouveau_bo *nvbo = NULL; + struct nouveau_channel *chan = NULL; + uint32_t flags = 0; + int ret = 0; + + NOUVEAU_CHECK_INITIALISED_WITH_RETURN; + + if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL)) + dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping; + + if (req->channel_hint) { + NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint, + file_priv, chan); + } + + if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM) + flags |= TTM_PL_FLAG_VRAM; + if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART) + flags |= TTM_PL_FLAG_TT; + if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU) + flags |= TTM_PL_FLAG_SYSTEM; + + if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags)) + return -EINVAL; + + ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags, + req->info.tile_mode, req->info.tile_flags, false, + (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE), + &nvbo); + if (ret) + return ret; + + ret = nouveau_gem_info(nvbo->gem, &req->info); + if (ret) + goto out; + + ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); +out: + mutex_lock(&dev->struct_mutex); + drm_gem_object_handle_unreference(nvbo->gem); + mutex_unlock(&dev->struct_mutex); + + if (ret) + drm_gem_object_unreference(nvbo->gem); + return ret; +} + +static int +nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains, + uint32_t write_domains, uint32_t valid_domains) +{ + struct nouveau_bo *nvbo = gem->driver_private; + struct ttm_buffer_object *bo = &nvbo->bo; + uint64_t flags; + + if (!valid_domains || (!read_domains && !write_domains)) + return -EINVAL; + + if (write_domains) { + if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) && + (write_domains & NOUVEAU_GEM_DOMAIN_VRAM)) + flags = TTM_PL_FLAG_VRAM; + else + if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) && + (write_domains & NOUVEAU_GEM_DOMAIN_GART)) + flags = TTM_PL_FLAG_TT; + else + return -EINVAL; + } else { + if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) && + (read_domains & NOUVEAU_GEM_DOMAIN_VRAM) && + bo->mem.mem_type == TTM_PL_VRAM) + flags = TTM_PL_FLAG_VRAM; + else + if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) && + (read_domains & NOUVEAU_GEM_DOMAIN_GART) && + bo->mem.mem_type == TTM_PL_TT) + flags = TTM_PL_FLAG_TT; + else + if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) && + (read_domains & NOUVEAU_GEM_DOMAIN_VRAM)) + flags = TTM_PL_FLAG_VRAM; + else + flags = TTM_PL_FLAG_TT; + } + + nouveau_bo_placement_set(nvbo, flags); + return 0; +} + +struct validate_op { + struct nouveau_fence *fence; + struct list_head vram_list; + struct list_head gart_list; + struct list_head both_list; +}; + +static void +validate_fini_list(struct list_head *list, struct nouveau_fence *fence) +{ + struct list_head *entry, *tmp; + struct nouveau_bo *nvbo; + + list_for_each_safe(entry, tmp, list) { + nvbo = list_entry(entry, struct nouveau_bo, entry); + if 
(likely(fence)) { + struct nouveau_fence *prev_fence; + + spin_lock(&nvbo->bo.lock); + prev_fence = nvbo->bo.sync_obj; + nvbo->bo.sync_obj = nouveau_fence_ref(fence); + spin_unlock(&nvbo->bo.lock); + nouveau_fence_unref((void *)&prev_fence); + } + + list_del(&nvbo->entry); + nvbo->reserved_by = NULL; + ttm_bo_unreserve(&nvbo->bo); + drm_gem_object_unreference(nvbo->gem); + } +} + +static void +validate_fini(struct validate_op *op, bool success) +{ + struct nouveau_fence *fence = op->fence; + + if (unlikely(!success)) + op->fence = NULL; + + validate_fini_list(&op->vram_list, op->fence); + validate_fini_list(&op->gart_list, op->fence); + validate_fini_list(&op->both_list, op->fence); + nouveau_fence_unref((void *)&fence); +} + +static int +validate_init(struct nouveau_channel *chan, struct drm_file *file_priv, + struct drm_nouveau_gem_pushbuf_bo *pbbo, + int nr_buffers, struct validate_op *op) +{ + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t sequence; + int trycnt = 0; + int ret, i; + + sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence); +retry: + if (++trycnt > 100000) { + NV_ERROR(dev, "%s failed and gave up.\n", __func__); + return -EINVAL; + } + + for (i = 0; i < nr_buffers; i++) { + struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i]; + struct drm_gem_object *gem; + struct nouveau_bo *nvbo; + + gem = drm_gem_object_lookup(dev, file_priv, b->handle); + if (!gem) { + NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle); + validate_fini(op, NULL); + return -EINVAL; + } + nvbo = gem->driver_private; + + if (nvbo->reserved_by && nvbo->reserved_by == file_priv) { + NV_ERROR(dev, "multiple instances of buffer %d on " + "validation list\n", b->handle); + validate_fini(op, NULL); + return -EINVAL; + } + + ret = ttm_bo_reserve(&nvbo->bo, false, false, true, sequence); + if (ret) { + validate_fini(op, NULL); + if (ret == -EAGAIN) + ret = ttm_bo_wait_unreserved(&nvbo->bo, false); + drm_gem_object_unreference(gem); + if (ret) + return ret; + goto retry; + } + + nvbo->reserved_by = file_priv; + nvbo->pbbo_index = i; + if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) && + (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)) + list_add_tail(&nvbo->entry, &op->both_list); + else + if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) + list_add_tail(&nvbo->entry, &op->vram_list); + else + if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART) + list_add_tail(&nvbo->entry, &op->gart_list); + else { + NV_ERROR(dev, "invalid valid domains: 0x%08x\n", + b->valid_domains); + validate_fini(op, NULL); + return -EINVAL; + } + + if (unlikely(atomic_read(&nvbo->bo.cpu_writers) > 0)) { + validate_fini(op, NULL); + + if (nvbo->cpu_filp == file_priv) { + NV_ERROR(dev, "bo %p mapped by process trying " + "to validate it!\n", nvbo); + return -EINVAL; + } + + ret = ttm_bo_wait_cpu(&nvbo->bo, false); + if (ret == -ERESTART) + ret = -EAGAIN; + if (ret) + return ret; + goto retry; + } + } + + return 0; +} + +static int +validate_list(struct nouveau_channel *chan, struct list_head *list, + struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr) +{ + struct drm_nouveau_gem_pushbuf_bo __user *upbbo = + (void __force __user *)(uintptr_t)user_pbbo_ptr; + struct nouveau_bo *nvbo; + int ret, relocs = 0; + + list_for_each_entry(nvbo, list, entry) { + struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index]; + struct nouveau_fence *prev_fence = nvbo->bo.sync_obj; + + if (prev_fence && nouveau_fence_channel(prev_fence) != chan) { + spin_lock(&nvbo->bo.lock); + ret 
= ttm_bo_wait(&nvbo->bo, false, false, false); + spin_unlock(&nvbo->bo.lock); + if (unlikely(ret)) + return ret; + } + + ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains, + b->write_domains, + b->valid_domains); + if (unlikely(ret)) + return ret; + + nvbo->channel = chan; + ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, + false, false); + nvbo->channel = NULL; + if (unlikely(ret)) + return ret; + + if (nvbo->bo.offset == b->presumed_offset && + ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && + b->presumed_domain & NOUVEAU_GEM_DOMAIN_VRAM) || + (nvbo->bo.mem.mem_type == TTM_PL_TT && + b->presumed_domain & NOUVEAU_GEM_DOMAIN_GART))) + continue; + + if (nvbo->bo.mem.mem_type == TTM_PL_TT) + b->presumed_domain = NOUVEAU_GEM_DOMAIN_GART; + else + b->presumed_domain = NOUVEAU_GEM_DOMAIN_VRAM; + b->presumed_offset = nvbo->bo.offset; + b->presumed_ok = 0; + relocs++; + + if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index], b, sizeof(*b))) + return -EFAULT; + } + + return relocs; +} + +static int +nouveau_gem_pushbuf_validate(struct nouveau_channel *chan, + struct drm_file *file_priv, + struct drm_nouveau_gem_pushbuf_bo *pbbo, + uint64_t user_buffers, int nr_buffers, + struct validate_op *op, int *apply_relocs) +{ + int ret, relocs = 0; + + INIT_LIST_HEAD(&op->vram_list); + INIT_LIST_HEAD(&op->gart_list); + INIT_LIST_HEAD(&op->both_list); + + ret = nouveau_fence_new(chan, &op->fence, false); + if (ret) + return ret; + + if (nr_buffers == 0) + return 0; + + ret = validate_init(chan, file_priv, pbbo, nr_buffers, op); + if (unlikely(ret)) + return ret; + + ret = validate_list(chan, &op->vram_list, pbbo, user_buffers); + if (unlikely(ret < 0)) { + validate_fini(op, NULL); + return ret; + } + relocs += ret; + + ret = validate_list(chan, &op->gart_list, pbbo, user_buffers); + if (unlikely(ret < 0)) { + validate_fini(op, NULL); + return ret; + } + relocs += ret; + + ret = validate_list(chan, &op->both_list, pbbo, user_buffers); + if (unlikely(ret < 0)) { + validate_fini(op, NULL); + return ret; + } + relocs += ret; + + *apply_relocs = relocs; + return 0; +} + +static inline void * +u_memcpya(uint64_t user, unsigned nmemb, unsigned size) +{ + void *mem; + void __user *userptr = (void __force __user *)(uintptr_t)user; + + mem = kmalloc(nmemb * size, GFP_KERNEL); + if (!mem) + return ERR_PTR(-ENOMEM); + + if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) { + kfree(mem); + return ERR_PTR(-EFAULT); + } + + return mem; +} + +static int +nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo, + struct drm_nouveau_gem_pushbuf_bo *bo, + int nr_relocs, uint64_t ptr_relocs, + int nr_dwords, int first_dword, + uint32_t *pushbuf, bool is_iomem) +{ + struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL; + struct drm_device *dev = chan->dev; + int ret = 0, i; + + reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc)); + if (IS_ERR(reloc)) + return PTR_ERR(reloc); + + for (i = 0; i < nr_relocs; i++) { + struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i]; + struct drm_nouveau_gem_pushbuf_bo *b; + uint32_t data; + + if (r->bo_index >= nr_bo || r->reloc_index < first_dword || + r->reloc_index >= first_dword + nr_dwords) { + NV_ERROR(dev, "Bad relocation %d\n", i); + NV_ERROR(dev, " bo: %d max %d\n", r->bo_index, nr_bo); + NV_ERROR(dev, " id: %d max %d\n", r->reloc_index, nr_dwords); + ret = -EINVAL; + break; + } + + b = &bo[r->bo_index]; + if (b->presumed_ok) + continue; + + if (r->flags & NOUVEAU_GEM_RELOC_LOW) + data = b->presumed_offset + r->data; + else + if (r->flags & NOUVEAU_GEM_RELOC_HIGH) + data 
= (b->presumed_offset + r->data) >> 32; + else + data = r->data; + + if (r->flags & NOUVEAU_GEM_RELOC_OR) { + if (b->presumed_domain == NOUVEAU_GEM_DOMAIN_GART) + data |= r->tor; + else + data |= r->vor; + } + + if (is_iomem) + iowrite32_native(data, (void __force __iomem *) + &pushbuf[r->reloc_index]); + else + pushbuf[r->reloc_index] = data; + } + + kfree(reloc); + return ret; +} + +int +nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_nouveau_gem_pushbuf *req = data; + struct drm_nouveau_gem_pushbuf_bo *bo = NULL; + struct nouveau_channel *chan; + struct validate_op op; + uint32_t *pushbuf = NULL; + int ret = 0, do_reloc = 0, i; + + NOUVEAU_CHECK_INITIALISED_WITH_RETURN; + NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan); + + if (req->nr_dwords >= chan->dma.max || + req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS || + req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) { + NV_ERROR(dev, "Pushbuf config exceeds limits:\n"); + NV_ERROR(dev, " dwords : %d max %d\n", req->nr_dwords, + chan->dma.max - 1); + NV_ERROR(dev, " buffers: %d max %d\n", req->nr_buffers, + NOUVEAU_GEM_MAX_BUFFERS); + NV_ERROR(dev, " relocs : %d max %d\n", req->nr_relocs, + NOUVEAU_GEM_MAX_RELOCS); + return -EINVAL; + } + + pushbuf = u_memcpya(req->dwords, req->nr_dwords, sizeof(uint32_t)); + if (IS_ERR(pushbuf)) + return PTR_ERR(pushbuf); + + bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo)); + if (IS_ERR(bo)) { + kfree(pushbuf); + return PTR_ERR(bo); + } + + mutex_lock(&dev->struct_mutex); + + /* Validate buffer list */ + ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers, + req->nr_buffers, &op, &do_reloc); + if (ret) + goto out; + + /* Apply any relocations that are required */ + if (do_reloc) { + ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers, + bo, req->nr_relocs, + req->relocs, + req->nr_dwords, 0, + pushbuf, false); + if (ret) + goto out; + } + + /* Emit push buffer to the hw + */ + ret = RING_SPACE(chan, req->nr_dwords); + if (ret) + goto out; + + OUT_RINGp(chan, pushbuf, req->nr_dwords); + + ret = nouveau_fence_emit(op.fence); + if (ret) { + NV_ERROR(dev, "error fencing pushbuf: %d\n", ret); + WIND_RING(chan); + goto out; + } + + if (nouveau_gem_pushbuf_sync(chan)) { + ret = nouveau_fence_wait(op.fence, NULL, false, false); + if (ret) { + for (i = 0; i < req->nr_dwords; i++) + NV_ERROR(dev, "0x%08x\n", pushbuf[i]); + NV_ERROR(dev, "^^ above push buffer is fail :(\n"); + } + } + +out: + validate_fini(&op, ret == 0); + mutex_unlock(&dev->struct_mutex); + kfree(pushbuf); + kfree(bo); + return ret; +} + +#define PUSHBUF_CAL (dev_priv->card_type >= NV_20) + +int +nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_nouveau_gem_pushbuf_call *req = data; + struct drm_nouveau_gem_pushbuf_bo *bo = NULL; + struct nouveau_channel *chan; + struct drm_gem_object *gem; + struct nouveau_bo *pbbo; + struct validate_op op; + int i, ret = 0, do_reloc = 0; + + NOUVEAU_CHECK_INITIALISED_WITH_RETURN; + NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan); + + if (unlikely(req->handle == 0)) + goto out_next; + + if (req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS || + req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) { + NV_ERROR(dev, "Pushbuf config exceeds limits:\n"); + NV_ERROR(dev, " buffers: %d max %d\n", req->nr_buffers, + NOUVEAU_GEM_MAX_BUFFERS); + NV_ERROR(dev, " relocs : %d max %d\n", req->nr_relocs, + 
NOUVEAU_GEM_MAX_RELOCS); + return -EINVAL; + } + + bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo)); + if (IS_ERR(bo)) + return PTR_ERR(bo); + + mutex_lock(&dev->struct_mutex); + + /* Validate buffer list */ + ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers, + req->nr_buffers, &op, &do_reloc); + if (ret) { + NV_ERROR(dev, "validate: %d\n", ret); + goto out; + } + + /* Validate DMA push buffer */ + gem = drm_gem_object_lookup(dev, file_priv, req->handle); + if (!gem) { + NV_ERROR(dev, "Unknown pb handle 0x%08x\n", req->handle); + ret = -EINVAL; + goto out; + } + pbbo = nouveau_gem_object(gem); + + ret = ttm_bo_reserve(&pbbo->bo, false, false, true, + chan->fence.sequence); + if (ret) { + NV_ERROR(dev, "resv pb: %d\n", ret); + drm_gem_object_unreference(gem); + goto out; + } + + nouveau_bo_placement_set(pbbo, 1 << chan->pushbuf_bo->bo.mem.mem_type); + ret = ttm_bo_validate(&pbbo->bo, &pbbo->placement, false, false); + if (ret) { + NV_ERROR(dev, "validate pb: %d\n", ret); + ttm_bo_unreserve(&pbbo->bo); + drm_gem_object_unreference(gem); + goto out; + } + + list_add_tail(&pbbo->entry, &op.both_list); + + /* If presumed return address doesn't match, we need to map the + * push buffer and fix it.. + */ + if (!PUSHBUF_CAL) { + uint32_t retaddy; + + if (chan->dma.free < 4 + NOUVEAU_DMA_SKIPS) { + ret = nouveau_dma_wait(chan, 4 + NOUVEAU_DMA_SKIPS); + if (ret) { + NV_ERROR(dev, "jmp_space: %d\n", ret); + goto out; + } + } + + retaddy = chan->pushbuf_base + ((chan->dma.cur + 2) << 2); + retaddy |= 0x20000000; + if (retaddy != req->suffix0) { + req->suffix0 = retaddy; + do_reloc = 1; + } + } + + /* Apply any relocations that are required */ + if (do_reloc) { + void *pbvirt; + bool is_iomem; + ret = ttm_bo_kmap(&pbbo->bo, 0, pbbo->bo.mem.num_pages, + &pbbo->kmap); + if (ret) { + NV_ERROR(dev, "kmap pb: %d\n", ret); + goto out; + } + + pbvirt = ttm_kmap_obj_virtual(&pbbo->kmap, &is_iomem); + ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers, bo, + req->nr_relocs, + req->relocs, + req->nr_dwords, + req->offset / 4, + pbvirt, is_iomem); + + if (!PUSHBUF_CAL) { + nouveau_bo_wr32(pbbo, + req->offset / 4 + req->nr_dwords - 2, + req->suffix0); + } + + ttm_bo_kunmap(&pbbo->kmap); + if (ret) { + NV_ERROR(dev, "reloc apply: %d\n", ret); + goto out; + } + } + + if (PUSHBUF_CAL) { + ret = RING_SPACE(chan, 2); + if (ret) { + NV_ERROR(dev, "cal_space: %d\n", ret); + goto out; + } + OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) + + req->offset) | 2); + OUT_RING(chan, 0); + } else { + ret = RING_SPACE(chan, 2 + NOUVEAU_DMA_SKIPS); + if (ret) { + NV_ERROR(dev, "jmp_space: %d\n", ret); + goto out; + } + OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) + + req->offset) | 0x20000000); + OUT_RING(chan, 0); + + /* Space the jumps apart with NOPs. 
*/ + for (i = 0; i < NOUVEAU_DMA_SKIPS; i++) + OUT_RING(chan, 0); + } + + ret = nouveau_fence_emit(op.fence); + if (ret) { + NV_ERROR(dev, "error fencing pushbuf: %d\n", ret); + WIND_RING(chan); + goto out; + } + +out: + validate_fini(&op, ret == 0); + mutex_unlock(&dev->struct_mutex); + kfree(bo); + +out_next: + if (PUSHBUF_CAL) { + req->suffix0 = 0x00020000; + req->suffix1 = 0x00000000; + } else { + req->suffix0 = 0x20000000 | + (chan->pushbuf_base + ((chan->dma.cur + 2) << 2)); + req->suffix1 = 0x00000000; + } + + return ret; +} + +int +nouveau_gem_ioctl_pushbuf_call2(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_nouveau_gem_pushbuf_call *req = data; + + req->vram_available = dev_priv->fb_aper_free; + req->gart_available = dev_priv->gart_info.aper_free; + + return nouveau_gem_ioctl_pushbuf_call(dev, data, file_priv); +} + +static inline uint32_t +domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain) +{ + uint32_t flags = 0; + + if (domain & NOUVEAU_GEM_DOMAIN_VRAM) + flags |= TTM_PL_FLAG_VRAM; + if (domain & NOUVEAU_GEM_DOMAIN_GART) + flags |= TTM_PL_FLAG_TT; + + return flags; +} + +int +nouveau_gem_ioctl_pin(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_nouveau_gem_pin *req = data; + struct drm_gem_object *gem; + struct nouveau_bo *nvbo; + int ret = 0; + + NOUVEAU_CHECK_INITIALISED_WITH_RETURN; + + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + NV_ERROR(dev, "pin only allowed without kernel modesetting\n"); + return -EINVAL; + } + + if (!DRM_SUSER(DRM_CURPROC)) + return -EPERM; + + gem = drm_gem_object_lookup(dev, file_priv, req->handle); + if (!gem) + return -EINVAL; + nvbo = nouveau_gem_object(gem); + + ret = nouveau_bo_pin(nvbo, domain_to_ttm(nvbo, req->domain)); + if (ret) + goto out; + + req->offset = nvbo->bo.offset; + if (nvbo->bo.mem.mem_type == TTM_PL_TT) + req->domain = NOUVEAU_GEM_DOMAIN_GART; + else + req->domain = NOUVEAU_GEM_DOMAIN_VRAM; + +out: + mutex_lock(&dev->struct_mutex); + drm_gem_object_unreference(gem); + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +int +nouveau_gem_ioctl_unpin(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_nouveau_gem_pin *req = data; + struct drm_gem_object *gem; + int ret; + + NOUVEAU_CHECK_INITIALISED_WITH_RETURN; + + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + + gem = drm_gem_object_lookup(dev, file_priv, req->handle); + if (!gem) + return -EINVAL; + + ret = nouveau_bo_unpin(nouveau_gem_object(gem)); + + mutex_lock(&dev->struct_mutex); + drm_gem_object_unreference(gem); + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +int +nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_nouveau_gem_cpu_prep *req = data; + struct drm_gem_object *gem; + struct nouveau_bo *nvbo; + bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT); + int ret = -EINVAL; + + NOUVEAU_CHECK_INITIALISED_WITH_RETURN; + + gem = drm_gem_object_lookup(dev, file_priv, req->handle); + if (!gem) + return ret; + nvbo = nouveau_gem_object(gem); + + if (nvbo->cpu_filp) { + if (nvbo->cpu_filp == file_priv) + goto out; + + ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait); + if (ret == -ERESTART) + ret = -EAGAIN; + if (ret) + goto out; + } + + if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) { + ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait); + } else { + ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait); + if 
(ret == -ERESTART) + ret = -EAGAIN; + else + if (ret == 0) + nvbo->cpu_filp = file_priv; + } + +out: + mutex_lock(&dev->struct_mutex); + drm_gem_object_unreference(gem); + mutex_unlock(&dev->struct_mutex); + return ret; +} + +int +nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_nouveau_gem_cpu_prep *req = data; + struct drm_gem_object *gem; + struct nouveau_bo *nvbo; + int ret = -EINVAL; + + NOUVEAU_CHECK_INITIALISED_WITH_RETURN; + + gem = drm_gem_object_lookup(dev, file_priv, req->handle); + if (!gem) + return ret; + nvbo = nouveau_gem_object(gem); + + if (nvbo->cpu_filp != file_priv) + goto out; + nvbo->cpu_filp = NULL; + + ttm_bo_synccpu_write_release(&nvbo->bo); + ret = 0; + +out: + mutex_lock(&dev->struct_mutex); + drm_gem_object_unreference(gem); + mutex_unlock(&dev->struct_mutex); + return ret; +} + +int +nouveau_gem_ioctl_info(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_nouveau_gem_info *req = data; + struct drm_gem_object *gem; + int ret; + + NOUVEAU_CHECK_INITIALISED_WITH_RETURN; + + gem = drm_gem_object_lookup(dev, file_priv, req->handle); + if (!gem) + return -EINVAL; + + ret = nouveau_gem_info(gem, req); + mutex_lock(&dev->struct_mutex); + drm_gem_object_unreference(gem); + mutex_unlock(&dev->struct_mutex); + return ret; +} + diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c new file mode 100644 index 000000000000..dc46792a5c96 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_hw.c @@ -0,0 +1,1080 @@ +/* + * Copyright 2006 Dave Airlie + * Copyright 2007 Maarten Maathuis + * Copyright 2007-2009 Stuart Bennett + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF + * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
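Before the nouveau_hw.c hunks, one note on the relocation path in nouveau_gem.c: nouveau_gem_pushbuf_reloc_apply() patches push-buffer dwords once the real buffer offsets are known. Depending on the reloc flags it takes either the low or the high 32 bits of presumed_offset plus the reloc data, and may OR in a per-domain constant (vor or tor). The arithmetic reduced to a stand-alone helper, with local flag values that are illustrative rather than the UAPI constants:

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the reloc flags; values are illustrative only. */
#define RELOC_LOW	0x1	/* take low 32 bits of offset + data */
#define RELOC_HIGH	0x2	/* take high 32 bits of offset + data */
#define RELOC_OR	0x4	/* OR in a per-domain constant */

static uint32_t apply_reloc(uint32_t flags, uint64_t presumed_offset,
			    uint32_t data, uint32_t domain_or)
{
	uint32_t val;

	if (flags & RELOC_LOW)
		val = (uint32_t)(presumed_offset + data);
	else if (flags & RELOC_HIGH)
		val = (uint32_t)((presumed_offset + data) >> 32);
	else
		val = data;

	if (flags & RELOC_OR)
		val |= domain_or;

	return val;
}

int main(void)
{
	uint64_t offset = 0x1234500000ull;	/* hypothetical buffer offset */

	printf("low  = 0x%08x\n", (unsigned)apply_reloc(RELOC_LOW, offset, 0x80, 0));
	printf("high = 0x%08x\n", (unsigned)apply_reloc(RELOC_HIGH, offset, 0x80, 0));
	return 0;
}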
+ */ + +#include "drmP.h" +#include "nouveau_drv.h" +#include "nouveau_hw.h" + +#define CHIPSET_NFORCE 0x01a0 +#define CHIPSET_NFORCE2 0x01f0 + +/* + * misc hw access wrappers/control functions + */ + +void +NVWriteVgaSeq(struct drm_device *dev, int head, uint8_t index, uint8_t value) +{ + NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index); + NVWritePRMVIO(dev, head, NV_PRMVIO_SR, value); +} + +uint8_t +NVReadVgaSeq(struct drm_device *dev, int head, uint8_t index) +{ + NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index); + return NVReadPRMVIO(dev, head, NV_PRMVIO_SR); +} + +void +NVWriteVgaGr(struct drm_device *dev, int head, uint8_t index, uint8_t value) +{ + NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index); + NVWritePRMVIO(dev, head, NV_PRMVIO_GX, value); +} + +uint8_t +NVReadVgaGr(struct drm_device *dev, int head, uint8_t index) +{ + NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index); + return NVReadPRMVIO(dev, head, NV_PRMVIO_GX); +} + +/* CR44 takes values 0 (head A), 3 (head B) and 4 (heads tied) + * it affects only the 8 bit vga io regs, which we access using mmio at + * 0xc{0,2}3c*, 0x60{1,3}3*, and 0x68{1,3}3d* + * in general, the set value of cr44 does not matter: reg access works as + * expected and values can be set for the appropriate head by using a 0x2000 + * offset as required + * however: + * a) pre nv40, the head B range of PRMVIO regs at 0xc23c* was not exposed and + * cr44 must be set to 0 or 3 for accessing values on the correct head + * through the common 0xc03c* addresses + * b) in tied mode (4) head B is programmed to the values set on head A, and + * access using the head B addresses can have strange results, ergo we leave + * tied mode in init once we know to what cr44 should be restored on exit + * + * the owner parameter is slightly abused: + * 0 and 1 are treated as head values and so the set value is (owner * 3) + * other values are treated as literal values to set + */ +void +NVSetOwner(struct drm_device *dev, int owner) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (owner == 1) + owner *= 3; + + if (dev_priv->chipset == 0x11) { + /* This might seem stupid, but the blob does it and + * omitting it often locks the system up. 
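The NVWriteVgaSeq()/NVReadVgaSeq() wrappers above use the classic indexed VGA access pattern: write the register index to the index port (SRX), then move the data through the data port (SR). A small model of that pattern using a plain array in place of real I/O, with the register index and bit chosen only as an example:

#include <stdint.h>
#include <stdio.h>

static uint8_t seq_regs[256];	/* stand-in for the sequencer register file */
static uint8_t srx;		/* stand-in for the SRX index port */

static void vga_seq_write(uint8_t index, uint8_t value)
{
	srx = index;		/* select the register */
	seq_regs[srx] = value;	/* write through the data port */
}

static uint8_t vga_seq_read(uint8_t index)
{
	srx = index;
	return seq_regs[srx];
}

int main(void)
{
	vga_seq_write(0x01, 0x20);	/* e.g. setting a blanking bit in SR01 */
	printf("SR01 = 0x%02x\n", vga_seq_read(0x01));
	return 0;
}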
+ */ + NVReadVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX); + NVReadVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX); + } + + /* CR44 is always changed on CRTC0 */ + NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner); + + if (dev_priv->chipset == 0x11) { /* set me harder */ + NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner); + NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner); + } +} + +void +NVBlankScreen(struct drm_device *dev, int head, bool blank) +{ + unsigned char seq1; + + if (nv_two_heads(dev)) + NVSetOwner(dev, head); + + seq1 = NVReadVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX); + + NVVgaSeqReset(dev, head, true); + if (blank) + NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 | 0x20); + else + NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 & ~0x20); + NVVgaSeqReset(dev, head, false); +} + +/* + * PLL setting + */ + +static int +powerctrl_1_shift(int chip_version, int reg) +{ + int shift = -4; + + if (chip_version < 0x17 || chip_version == 0x1a || chip_version == 0x20) + return shift; + + switch (reg) { + case NV_RAMDAC_VPLL2: + shift += 4; + case NV_PRAMDAC_VPLL_COEFF: + shift += 4; + case NV_PRAMDAC_MPLL_COEFF: + shift += 4; + case NV_PRAMDAC_NVPLL_COEFF: + shift += 4; + } + + /* + * the shift for vpll regs is only used for nv3x chips with a single + * stage pll + */ + if (shift > 4 && (chip_version < 0x32 || chip_version == 0x35 || + chip_version == 0x36 || chip_version >= 0x40)) + shift = -4; + + return shift; +} + +static void +setPLL_single(struct drm_device *dev, uint32_t reg, struct nouveau_pll_vals *pv) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int chip_version = dev_priv->vbios->chip_version; + uint32_t oldpll = NVReadRAMDAC(dev, 0, reg); + int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff; + uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1; + uint32_t saved_powerctrl_1 = 0; + int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg); + + if (oldpll == pll) + return; /* already set */ + + if (shift_powerctrl_1 >= 0) { + saved_powerctrl_1 = nvReadMC(dev, NV_PBUS_POWERCTRL_1); + nvWriteMC(dev, NV_PBUS_POWERCTRL_1, + (saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) | + 1 << shift_powerctrl_1); + } + + if (oldM && pv->M1 && (oldN / oldM < pv->N1 / pv->M1)) + /* upclock -- write new post divider first */ + NVWriteRAMDAC(dev, 0, reg, pv->log2P << 16 | (oldpll & 0xffff)); + else + /* downclock -- write new NM first */ + NVWriteRAMDAC(dev, 0, reg, (oldpll & 0xffff0000) | pv->NM1); + + if (chip_version < 0x17 && chip_version != 0x11) + /* wait a bit on older chips */ + msleep(64); + NVReadRAMDAC(dev, 0, reg); + + /* then write the other half as well */ + NVWriteRAMDAC(dev, 0, reg, pll); + + if (shift_powerctrl_1 >= 0) + nvWriteMC(dev, NV_PBUS_POWERCTRL_1, saved_powerctrl_1); +} + +static uint32_t +new_ramdac580(uint32_t reg1, bool ss, uint32_t ramdac580) +{ + bool head_a = (reg1 == NV_PRAMDAC_VPLL_COEFF); + + if (ss) /* single stage pll mode */ + ramdac580 |= head_a ? NV_RAMDAC_580_VPLL1_ACTIVE : + NV_RAMDAC_580_VPLL2_ACTIVE; + else + ramdac580 &= head_a ? ~NV_RAMDAC_580_VPLL1_ACTIVE : + ~NV_RAMDAC_580_VPLL2_ACTIVE; + + return ramdac580; +} + +static void +setPLL_double_highregs(struct drm_device *dev, uint32_t reg1, + struct nouveau_pll_vals *pv) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int chip_version = dev_priv->vbios->chip_version; + bool nv3035 = chip_version == 0x30 || chip_version == 0x35; + uint32_t reg2 = reg1 + ((reg1 == NV_RAMDAC_VPLL2) ? 
0x5c : 0x70); + uint32_t oldpll1 = NVReadRAMDAC(dev, 0, reg1); + uint32_t oldpll2 = !nv3035 ? NVReadRAMDAC(dev, 0, reg2) : 0; + uint32_t pll1 = (oldpll1 & 0xfff80000) | pv->log2P << 16 | pv->NM1; + uint32_t pll2 = (oldpll2 & 0x7fff0000) | 1 << 31 | pv->NM2; + uint32_t oldramdac580 = 0, ramdac580 = 0; + bool single_stage = !pv->NM2 || pv->N2 == pv->M2; /* nv41+ only */ + uint32_t saved_powerctrl_1 = 0, savedc040 = 0; + int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg1); + + /* model specific additions to generic pll1 and pll2 set up above */ + if (nv3035) { + pll1 = (pll1 & 0xfcc7ffff) | (pv->N2 & 0x18) << 21 | + (pv->N2 & 0x7) << 19 | 8 << 4 | (pv->M2 & 7) << 4; + pll2 = 0; + } + if (chip_version > 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) { /* !nv40 */ + oldramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580); + ramdac580 = new_ramdac580(reg1, single_stage, oldramdac580); + if (oldramdac580 != ramdac580) + oldpll1 = ~0; /* force mismatch */ + if (single_stage) + /* magic value used by nvidia in single stage mode */ + pll2 |= 0x011f; + } + if (chip_version > 0x70) + /* magic bits set by the blob (but not the bios) on g71-73 */ + pll1 = (pll1 & 0x7fffffff) | (single_stage ? 0x4 : 0xc) << 28; + + if (oldpll1 == pll1 && oldpll2 == pll2) + return; /* already set */ + + if (shift_powerctrl_1 >= 0) { + saved_powerctrl_1 = nvReadMC(dev, NV_PBUS_POWERCTRL_1); + nvWriteMC(dev, NV_PBUS_POWERCTRL_1, + (saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) | + 1 << shift_powerctrl_1); + } + + if (chip_version >= 0x40) { + int shift_c040 = 14; + + switch (reg1) { + case NV_PRAMDAC_MPLL_COEFF: + shift_c040 += 2; + case NV_PRAMDAC_NVPLL_COEFF: + shift_c040 += 2; + case NV_RAMDAC_VPLL2: + shift_c040 += 2; + case NV_PRAMDAC_VPLL_COEFF: + shift_c040 += 2; + } + + savedc040 = nvReadMC(dev, 0xc040); + if (shift_c040 != 14) + nvWriteMC(dev, 0xc040, savedc040 & ~(3 << shift_c040)); + } + + if (oldramdac580 != ramdac580) + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_580, ramdac580); + + if (!nv3035) + NVWriteRAMDAC(dev, 0, reg2, pll2); + NVWriteRAMDAC(dev, 0, reg1, pll1); + + if (shift_powerctrl_1 >= 0) + nvWriteMC(dev, NV_PBUS_POWERCTRL_1, saved_powerctrl_1); + if (chip_version >= 0x40) + nvWriteMC(dev, 0xc040, savedc040); +} + +static void +setPLL_double_lowregs(struct drm_device *dev, uint32_t NMNMreg, + struct nouveau_pll_vals *pv) +{ + /* When setting PLLs, there is a merry game of disabling and enabling + * various bits of hardware during the process. This function is a + * synthesis of six nv4x traces, nearly each card doing a subtly + * different thing. With luck all the necessary bits for each card are + * combined herein. Without luck it deviates from each card's formula + * so as to not work on any :) + */ + + uint32_t Preg = NMNMreg - 4; + bool mpll = Preg == 0x4020; + uint32_t oldPval = nvReadMC(dev, Preg); + uint32_t NMNM = pv->NM2 << 16 | pv->NM1; + uint32_t Pval = (oldPval & (mpll ? 
~(0x11 << 16) : ~(1 << 16))) | + 0xc << 28 | pv->log2P << 16; + uint32_t saved4600 = 0; + /* some cards have different maskc040s */ + uint32_t maskc040 = ~(3 << 14), savedc040; + bool single_stage = !pv->NM2 || pv->N2 == pv->M2; + + if (nvReadMC(dev, NMNMreg) == NMNM && (oldPval & 0xc0070000) == Pval) + return; + + if (Preg == 0x4000) + maskc040 = ~0x333; + if (Preg == 0x4058) + maskc040 = ~(0xc << 24); + + if (mpll) { + struct pll_lims pll_lim; + uint8_t Pval2; + + if (get_pll_limits(dev, Preg, &pll_lim)) + return; + + Pval2 = pv->log2P + pll_lim.log2p_bias; + if (Pval2 > pll_lim.max_log2p) + Pval2 = pll_lim.max_log2p; + Pval |= 1 << 28 | Pval2 << 20; + + saved4600 = nvReadMC(dev, 0x4600); + nvWriteMC(dev, 0x4600, saved4600 | 8 << 28); + } + if (single_stage) + Pval |= mpll ? 1 << 12 : 1 << 8; + + nvWriteMC(dev, Preg, oldPval | 1 << 28); + nvWriteMC(dev, Preg, Pval & ~(4 << 28)); + if (mpll) { + Pval |= 8 << 20; + nvWriteMC(dev, 0x4020, Pval & ~(0xc << 28)); + nvWriteMC(dev, 0x4038, Pval & ~(0xc << 28)); + } + + savedc040 = nvReadMC(dev, 0xc040); + nvWriteMC(dev, 0xc040, savedc040 & maskc040); + + nvWriteMC(dev, NMNMreg, NMNM); + if (NMNMreg == 0x4024) + nvWriteMC(dev, 0x403c, NMNM); + + nvWriteMC(dev, Preg, Pval); + if (mpll) { + Pval &= ~(8 << 20); + nvWriteMC(dev, 0x4020, Pval); + nvWriteMC(dev, 0x4038, Pval); + nvWriteMC(dev, 0x4600, saved4600); + } + + nvWriteMC(dev, 0xc040, savedc040); + + if (mpll) { + nvWriteMC(dev, 0x4020, Pval & ~(1 << 28)); + nvWriteMC(dev, 0x4038, Pval & ~(1 << 28)); + } +} + +void +nouveau_hw_setpll(struct drm_device *dev, uint32_t reg1, + struct nouveau_pll_vals *pv) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int cv = dev_priv->vbios->chip_version; + + if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 || + cv >= 0x40) { + if (reg1 > 0x405c) + setPLL_double_highregs(dev, reg1, pv); + else + setPLL_double_lowregs(dev, reg1, pv); + } else + setPLL_single(dev, reg1, pv); +} + +/* + * PLL getting + */ + +static void +nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1, + uint32_t pll2, struct nouveau_pll_vals *pllvals) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + /* to force parsing as single stage (i.e. 
nv40 vplls) pass pll2 as 0 */ + + /* log2P is & 0x7 as never more than 7, and nv30/35 only uses 3 bits */ + pllvals->log2P = (pll1 >> 16) & 0x7; + pllvals->N2 = pllvals->M2 = 1; + + if (reg1 <= 0x405c) { + pllvals->NM1 = pll2 & 0xffff; + /* single stage NVPLL and VPLLs use 1 << 8, MPLL uses 1 << 12 */ + if (!(pll1 & 0x1100)) + pllvals->NM2 = pll2 >> 16; + } else { + pllvals->NM1 = pll1 & 0xffff; + if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2) + pllvals->NM2 = pll2 & 0xffff; + else if (dev_priv->chipset == 0x30 || dev_priv->chipset == 0x35) { + pllvals->M1 &= 0xf; /* only 4 bits */ + if (pll1 & NV30_RAMDAC_ENABLE_VCO2) { + pllvals->M2 = (pll1 >> 4) & 0x7; + pllvals->N2 = ((pll1 >> 21) & 0x18) | + ((pll1 >> 19) & 0x7); + } + } + } +} + +int +nouveau_hw_get_pllvals(struct drm_device *dev, enum pll_types plltype, + struct nouveau_pll_vals *pllvals) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + const uint32_t nv04_regs[MAX_PLL_TYPES] = { NV_PRAMDAC_NVPLL_COEFF, + NV_PRAMDAC_MPLL_COEFF, + NV_PRAMDAC_VPLL_COEFF, + NV_RAMDAC_VPLL2 }; + const uint32_t nv40_regs[MAX_PLL_TYPES] = { 0x4000, + 0x4020, + NV_PRAMDAC_VPLL_COEFF, + NV_RAMDAC_VPLL2 }; + uint32_t reg1, pll1, pll2 = 0; + struct pll_lims pll_lim; + int ret; + + if (dev_priv->card_type < NV_40) + reg1 = nv04_regs[plltype]; + else + reg1 = nv40_regs[plltype]; + + pll1 = nvReadMC(dev, reg1); + + if (reg1 <= 0x405c) + pll2 = nvReadMC(dev, reg1 + 4); + else if (nv_two_reg_pll(dev)) { + uint32_t reg2 = reg1 + (reg1 == NV_RAMDAC_VPLL2 ? 0x5c : 0x70); + + pll2 = nvReadMC(dev, reg2); + } + + if (dev_priv->card_type == 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) { + uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580); + + /* check whether vpll has been forced into single stage mode */ + if (reg1 == NV_PRAMDAC_VPLL_COEFF) { + if (ramdac580 & NV_RAMDAC_580_VPLL1_ACTIVE) + pll2 = 0; + } else + if (ramdac580 & NV_RAMDAC_580_VPLL2_ACTIVE) + pll2 = 0; + } + + nouveau_hw_decode_pll(dev, reg1, pll1, pll2, pllvals); + + ret = get_pll_limits(dev, plltype, &pll_lim); + if (ret) + return ret; + + pllvals->refclk = pll_lim.refclk; + + return 0; +} + +int +nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pv) +{ + /* Avoid divide by zero if called at an inappropriate time */ + if (!pv->M1 || !pv->M2) + return 0; + + return pv->N1 * pv->N2 * pv->refclk / (pv->M1 * pv->M2) >> pv->log2P; +} + +int +nouveau_hw_get_clock(struct drm_device *dev, enum pll_types plltype) +{ + struct nouveau_pll_vals pllvals; + + if (plltype == MPLL && (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) { + uint32_t mpllP; + + pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP); + if (!mpllP) + mpllP = 4; + + return 400000 / mpllP; + } else + if (plltype == MPLL && (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) { + uint32_t clock; + + pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock); + return clock; + } + + nouveau_hw_get_pllvals(dev, plltype, &pllvals); + + return nouveau_hw_pllvals_to_clk(&pllvals); +} + +static void +nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head) +{ + /* the vpll on an unused head can come up with a random value, way + * beyond the pll limits. for some reason this causes the chip to + * lock up when reading the dac palette regs, so set a valid pll here + * when such a condition detected. only seen on nv11 to date + */ + + struct pll_lims pll_lim; + struct nouveau_pll_vals pv; + uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF; + + if (get_pll_limits(dev, head ? 
VPLL2 : VPLL1, &pll_lim)) + return; + nouveau_hw_get_pllvals(dev, head ? VPLL2 : VPLL1, &pv); + + if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m && + pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n && + pv.log2P <= pll_lim.max_log2p) + return; + + NV_WARN(dev, "VPLL %d outwith limits, attempting to fix\n", head + 1); + + /* set lowest clock within static limits */ + pv.M1 = pll_lim.vco1.max_m; + pv.N1 = pll_lim.vco1.min_n; + pv.log2P = pll_lim.max_usable_log2p; + nouveau_hw_setpll(dev, pllreg, &pv); +} + +/* + * vga font save/restore + */ + +static void nouveau_vga_font_io(struct drm_device *dev, + void __iomem *iovram, + bool save, unsigned plane) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + unsigned i; + + NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, 1 << plane); + NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, plane); + for (i = 0; i < 16384; i++) { + if (save) { + dev_priv->saved_vga_font[plane][i] = + ioread32_native(iovram + i * 4); + } else { + iowrite32_native(dev_priv->saved_vga_font[plane][i], + iovram + i * 4); + } + } +} + +void +nouveau_hw_save_vga_fonts(struct drm_device *dev, bool save) +{ + uint8_t misc, gr4, gr5, gr6, seq2, seq4; + bool graphicsmode; + unsigned plane; + void __iomem *iovram; + + if (nv_two_heads(dev)) + NVSetOwner(dev, 0); + + NVSetEnablePalette(dev, 0, true); + graphicsmode = NVReadVgaAttr(dev, 0, NV_CIO_AR_MODE_INDEX) & 1; + NVSetEnablePalette(dev, 0, false); + + if (graphicsmode) /* graphics mode => framebuffer => no need to save */ + return; + + NV_INFO(dev, "%sing VGA fonts\n", save ? "Sav" : "Restor"); + + /* map first 64KiB of VRAM, holds VGA fonts etc */ + iovram = ioremap(pci_resource_start(dev->pdev, 1), 65536); + if (!iovram) { + NV_ERROR(dev, "Failed to map VRAM, " + "cannot save/restore VGA fonts.\n"); + return; + } + + if (nv_two_heads(dev)) + NVBlankScreen(dev, 1, true); + NVBlankScreen(dev, 0, true); + + /* save control regs */ + misc = NVReadPRMVIO(dev, 0, NV_PRMVIO_MISC__READ); + seq2 = NVReadVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX); + seq4 = NVReadVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX); + gr4 = NVReadVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX); + gr5 = NVReadVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX); + gr6 = NVReadVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX); + + NVWritePRMVIO(dev, 0, NV_PRMVIO_MISC__WRITE, 0x67); + NVWriteVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX, 0x6); + NVWriteVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX, 0x0); + NVWriteVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX, 0x5); + + /* store font in planes 0..3 */ + for (plane = 0; plane < 4; plane++) + nouveau_vga_font_io(dev, iovram, save, plane); + + /* restore control regs */ + NVWritePRMVIO(dev, 0, NV_PRMVIO_MISC__WRITE, misc); + NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, gr4); + NVWriteVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX, gr5); + NVWriteVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX, gr6); + NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, seq2); + NVWriteVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX, seq4); + + if (nv_two_heads(dev)) + NVBlankScreen(dev, 1, false); + NVBlankScreen(dev, 0, false); + + iounmap(iovram); +} + +/* + * mode state save/load + */ + +static void +rd_cio_state(struct drm_device *dev, int head, + struct nv04_crtc_reg *crtcstate, int index) +{ + crtcstate->CRTC[index] = NVReadVgaCrtc(dev, head, index); +} + +static void +wr_cio_state(struct drm_device *dev, int head, + struct nv04_crtc_reg *crtcstate, int index) +{ + NVWriteVgaCrtc(dev, head, index, crtcstate->CRTC[index]); +} + +static void +nv_save_state_ramdac(struct drm_device *dev, int head, 
+ struct nv04_mode_state *state) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nv04_crtc_reg *regp = &state->crtc_reg[head]; + int i; + + if (dev_priv->card_type >= NV_10) + regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC); + + nouveau_hw_get_pllvals(dev, head ? VPLL2 : VPLL1, ®p->pllvals); + state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT); + if (nv_two_heads(dev)) + state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK); + if (dev_priv->chipset == 0x11) + regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11); + + regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL); + + if (nv_gf4_disp_arch(dev)) + regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630); + if (dev_priv->chipset >= 0x30) + regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634); + + regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP); + regp->tv_vtotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL); + regp->tv_vskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW); + regp->tv_vsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY); + regp->tv_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL); + regp->tv_hskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW); + regp->tv_hsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY); + regp->tv_hsync_delay2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2); + + for (i = 0; i < 7; i++) { + uint32_t ramdac_reg = NV_PRAMDAC_FP_VDISPLAY_END + (i * 4); + regp->fp_vert_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg); + regp->fp_horiz_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg + 0x20); + } + + if (nv_gf4_disp_arch(dev)) { + regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_FP_DITHER); + for (i = 0; i < 3; i++) { + regp->dither_regs[i] = NVReadRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4); + regp->dither_regs[i + 3] = NVReadRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4); + } + } + + regp->fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL); + regp->fp_debug_0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0); + if (!nv_gf4_disp_arch(dev) && head == 0) { + /* early chips don't allow access to PRAMDAC_TMDS_* without + * the head A FPCLK on (nv11 even locks up) */ + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_FP_DEBUG_0, regp->fp_debug_0 & + ~NV_PRAMDAC_FP_DEBUG_0_PWRDOWN_FPCLK); + } + regp->fp_debug_1 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1); + regp->fp_debug_2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2); + + regp->fp_margin_color = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR); + + if (nv_gf4_disp_arch(dev)) + regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0); + + if (dev_priv->card_type == NV_40) { + regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20); + regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24); + regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34); + + for (i = 0; i < 38; i++) + regp->ctv_regs[i] = NVReadRAMDAC(dev, head, + NV_PRAMDAC_CTV + 4*i); + } +} + +static void +nv_load_state_ramdac(struct drm_device *dev, int head, + struct nv04_mode_state *state) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nv04_crtc_reg *regp = &state->crtc_reg[head]; + uint32_t pllreg = head ? 
NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF; + int i; + + if (dev_priv->card_type >= NV_10) + NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync); + + nouveau_hw_setpll(dev, pllreg, ®p->pllvals); + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel); + if (nv_two_heads(dev)) + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk); + if (dev_priv->chipset == 0x11) + NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither); + + NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl); + + if (nv_gf4_disp_arch(dev)) + NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630); + if (dev_priv->chipset >= 0x30) + NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634); + + NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL, regp->tv_vtotal); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW, regp->tv_vskew); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY, regp->tv_vsync_delay); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL, regp->tv_htotal); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW, regp->tv_hskew); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY, regp->tv_hsync_delay); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2, regp->tv_hsync_delay2); + + for (i = 0; i < 7; i++) { + uint32_t ramdac_reg = NV_PRAMDAC_FP_VDISPLAY_END + (i * 4); + + NVWriteRAMDAC(dev, head, ramdac_reg, regp->fp_vert_regs[i]); + NVWriteRAMDAC(dev, head, ramdac_reg + 0x20, regp->fp_horiz_regs[i]); + } + + if (nv_gf4_disp_arch(dev)) { + NVWriteRAMDAC(dev, head, NV_RAMDAC_FP_DITHER, regp->dither); + for (i = 0; i < 3; i++) { + NVWriteRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4, regp->dither_regs[i]); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4, regp->dither_regs[i + 3]); + } + } + + NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, regp->fp_control); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0, regp->fp_debug_0); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regp->fp_debug_1); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2, regp->fp_debug_2); + + NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR, regp->fp_margin_color); + + if (nv_gf4_disp_arch(dev)) + NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0); + + if (dev_priv->card_type == NV_40) { + NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34); + + for (i = 0; i < 38; i++) + NVWriteRAMDAC(dev, head, + NV_PRAMDAC_CTV + 4*i, regp->ctv_regs[i]); + } +} + +static void +nv_save_state_vga(struct drm_device *dev, int head, + struct nv04_mode_state *state) +{ + struct nv04_crtc_reg *regp = &state->crtc_reg[head]; + int i; + + regp->MiscOutReg = NVReadPRMVIO(dev, head, NV_PRMVIO_MISC__READ); + + for (i = 0; i < 25; i++) + rd_cio_state(dev, head, regp, i); + + NVSetEnablePalette(dev, head, true); + for (i = 0; i < 21; i++) + regp->Attribute[i] = NVReadVgaAttr(dev, head, i); + NVSetEnablePalette(dev, head, false); + + for (i = 0; i < 9; i++) + regp->Graphics[i] = NVReadVgaGr(dev, head, i); + + for (i = 0; i < 5; i++) + regp->Sequencer[i] = NVReadVgaSeq(dev, head, i); +} + +static void +nv_load_state_vga(struct drm_device *dev, int head, + struct nv04_mode_state *state) +{ + struct nv04_crtc_reg *regp = &state->crtc_reg[head]; + int i; + + NVWritePRMVIO(dev, head, NV_PRMVIO_MISC__WRITE, regp->MiscOutReg); + + for (i = 0; i < 5; i++) + NVWriteVgaSeq(dev, head, i, regp->Sequencer[i]); + + 
nv_lock_vga_crtc_base(dev, head, false); + for (i = 0; i < 25; i++) + wr_cio_state(dev, head, regp, i); + nv_lock_vga_crtc_base(dev, head, true); + + for (i = 0; i < 9; i++) + NVWriteVgaGr(dev, head, i, regp->Graphics[i]); + + NVSetEnablePalette(dev, head, true); + for (i = 0; i < 21; i++) + NVWriteVgaAttr(dev, head, i, regp->Attribute[i]); + NVSetEnablePalette(dev, head, false); +} + +static void +nv_save_state_ext(struct drm_device *dev, int head, + struct nv04_mode_state *state) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nv04_crtc_reg *regp = &state->crtc_reg[head]; + int i; + + rd_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX); + rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX); + rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX); + rd_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX); + rd_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX); + rd_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX); + rd_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX); + + rd_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX); + rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); + rd_cio_state(dev, head, regp, NV_CIO_CRE_21); + if (dev_priv->card_type >= NV_30) + rd_cio_state(dev, head, regp, NV_CIO_CRE_47); + rd_cio_state(dev, head, regp, NV_CIO_CRE_49); + rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); + rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX); + rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); + rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX); + + if (dev_priv->card_type >= NV_10) { + regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830); + regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834); + + if (dev_priv->card_type >= NV_30) + regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT); + + if (dev_priv->card_type == NV_40) + regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850); + + if (nv_two_heads(dev)) + regp->crtc_eng_ctrl = NVReadCRTC(dev, head, NV_PCRTC_ENGINE_CTRL); + regp->cursor_cfg = NVReadCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG); + } + + regp->crtc_cfg = NVReadCRTC(dev, head, NV_PCRTC_CONFIG); + + rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX); + rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX); + if (dev_priv->card_type >= NV_10) { + rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX); + rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB); + rd_cio_state(dev, head, regp, NV_CIO_CRE_4B); + rd_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY); + } + /* NV11 and NV20 don't have this, they stop at 0x52. 
*/ + if (nv_gf4_disp_arch(dev)) { + rd_cio_state(dev, head, regp, NV_CIO_CRE_53); + rd_cio_state(dev, head, regp, NV_CIO_CRE_54); + + for (i = 0; i < 0x10; i++) + regp->CR58[i] = NVReadVgaCrtc5758(dev, head, i); + rd_cio_state(dev, head, regp, NV_CIO_CRE_59); + rd_cio_state(dev, head, regp, NV_CIO_CRE_5B); + + rd_cio_state(dev, head, regp, NV_CIO_CRE_85); + rd_cio_state(dev, head, regp, NV_CIO_CRE_86); + } + + regp->fb_start = NVReadCRTC(dev, head, NV_PCRTC_START); +} + +static void +nv_load_state_ext(struct drm_device *dev, int head, + struct nv04_mode_state *state) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nv04_crtc_reg *regp = &state->crtc_reg[head]; + uint32_t reg900; + int i; + + if (dev_priv->card_type >= NV_10) { + if (nv_two_heads(dev)) + /* setting ENGINE_CTRL (EC) *must* come before + * CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in + * EC that should not be overwritten by writing stale EC + */ + NVWriteCRTC(dev, head, NV_PCRTC_ENGINE_CTRL, regp->crtc_eng_ctrl); + + nvWriteVIDEO(dev, NV_PVIDEO_STOP, 1); + nvWriteVIDEO(dev, NV_PVIDEO_INTR_EN, 0); + nvWriteVIDEO(dev, NV_PVIDEO_OFFSET_BUFF(0), 0); + nvWriteVIDEO(dev, NV_PVIDEO_OFFSET_BUFF(1), 0); + nvWriteVIDEO(dev, NV_PVIDEO_LIMIT(0), dev_priv->fb_available_size - 1); + nvWriteVIDEO(dev, NV_PVIDEO_LIMIT(1), dev_priv->fb_available_size - 1); + nvWriteVIDEO(dev, NV_PVIDEO_UVPLANE_LIMIT(0), dev_priv->fb_available_size - 1); + nvWriteVIDEO(dev, NV_PVIDEO_UVPLANE_LIMIT(1), dev_priv->fb_available_size - 1); + nvWriteMC(dev, NV_PBUS_POWERCTRL_2, 0); + + NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg); + NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830); + NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834); + + if (dev_priv->card_type >= NV_30) + NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext); + + if (dev_priv->card_type == NV_40) { + NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850); + + reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900); + if (regp->crtc_cfg == NV_PCRTC_CONFIG_START_ADDRESS_HSYNC) + NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000); + else + NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000); + } + } + + NVWriteCRTC(dev, head, NV_PCRTC_CONFIG, regp->crtc_cfg); + + wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX); + wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX); + wr_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX); + wr_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX); + wr_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX); + wr_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX); + wr_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX); + wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX); + wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); + if (dev_priv->card_type >= NV_30) + wr_cio_state(dev, head, regp, NV_CIO_CRE_47); + + wr_cio_state(dev, head, regp, NV_CIO_CRE_49); + wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); + wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX); + wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); + if (dev_priv->card_type == NV_40) + nv_fix_nv40_hw_cursor(dev, head); + wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX); + + wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX); + wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX); + if (dev_priv->card_type >= NV_10) { + wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX); + wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB); + wr_cio_state(dev, head, regp, NV_CIO_CRE_4B); + wr_cio_state(dev, head, regp, 
NV_CIO_CRE_TVOUT_LATENCY); + } + /* NV11 and NV20 stop at 0x52. */ + if (nv_gf4_disp_arch(dev)) { + if (dev_priv->card_type == NV_10) { + /* Not waiting for vertical retrace before modifying + CRE_53/CRE_54 causes lockups. */ + nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8); + nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0); + } + + wr_cio_state(dev, head, regp, NV_CIO_CRE_53); + wr_cio_state(dev, head, regp, NV_CIO_CRE_54); + + for (i = 0; i < 0x10; i++) + NVWriteVgaCrtc5758(dev, head, i, regp->CR58[i]); + wr_cio_state(dev, head, regp, NV_CIO_CRE_59); + wr_cio_state(dev, head, regp, NV_CIO_CRE_5B); + + wr_cio_state(dev, head, regp, NV_CIO_CRE_85); + wr_cio_state(dev, head, regp, NV_CIO_CRE_86); + } + + NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start); + + /* Setting 1 on this value gives you interrupts for every vblank period. */ + NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0, 0); + NVWriteCRTC(dev, head, NV_PCRTC_INTR_0, NV_PCRTC_INTR_0_VBLANK); +} + +static void +nv_save_state_palette(struct drm_device *dev, int head, + struct nv04_mode_state *state) +{ + int head_offset = head * NV_PRMDIO_SIZE, i; + + nv_wr08(dev, NV_PRMDIO_PIXEL_MASK + head_offset, + NV_PRMDIO_PIXEL_MASK_MASK); + nv_wr08(dev, NV_PRMDIO_READ_MODE_ADDRESS + head_offset, 0x0); + + for (i = 0; i < 768; i++) { + state->crtc_reg[head].DAC[i] = nv_rd08(dev, + NV_PRMDIO_PALETTE_DATA + head_offset); + } + + NVSetEnablePalette(dev, head, false); +} + +void +nouveau_hw_load_state_palette(struct drm_device *dev, int head, + struct nv04_mode_state *state) +{ + int head_offset = head * NV_PRMDIO_SIZE, i; + + nv_wr08(dev, NV_PRMDIO_PIXEL_MASK + head_offset, + NV_PRMDIO_PIXEL_MASK_MASK); + nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS + head_offset, 0x0); + + for (i = 0; i < 768; i++) { + nv_wr08(dev, NV_PRMDIO_PALETTE_DATA + head_offset, + state->crtc_reg[head].DAC[i]); + } + + NVSetEnablePalette(dev, head, false); +} + +void nouveau_hw_save_state(struct drm_device *dev, int head, + struct nv04_mode_state *state) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (dev_priv->chipset == 0x11) + /* NB: no attempt is made to restore the bad pll later on */ + nouveau_hw_fix_bad_vpll(dev, head); + nv_save_state_ramdac(dev, head, state); + nv_save_state_vga(dev, head, state); + nv_save_state_palette(dev, head, state); + nv_save_state_ext(dev, head, state); +} + +void nouveau_hw_load_state(struct drm_device *dev, int head, + struct nv04_mode_state *state) +{ + NVVgaProtect(dev, head, true); + nv_load_state_ramdac(dev, head, state); + nv_load_state_ext(dev, head, state); + nouveau_hw_load_state_palette(dev, head, state); + nv_load_state_vga(dev, head, state); + NVVgaProtect(dev, head, false); +} diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.h b/drivers/gpu/drm/nouveau/nouveau_hw.h new file mode 100644 index 000000000000..869130f83602 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_hw.h @@ -0,0 +1,455 @@ +/* + * Copyright 2008 Stuart Bennett + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * 
all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF + * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __NOUVEAU_HW_H__ +#define __NOUVEAU_HW_H__ + +#include "drmP.h" +#include "nouveau_drv.h" + +#define MASK(field) ( \ + (0xffffffff >> (31 - ((1 ? field) - (0 ? field)))) << (0 ? field)) + +#define XLATE(src, srclowbit, outfield) ( \ + (((src) >> (srclowbit)) << (0 ? outfield)) & MASK(outfield)) + +void NVWriteVgaSeq(struct drm_device *, int head, uint8_t index, uint8_t value); +uint8_t NVReadVgaSeq(struct drm_device *, int head, uint8_t index); +void NVWriteVgaGr(struct drm_device *, int head, uint8_t index, uint8_t value); +uint8_t NVReadVgaGr(struct drm_device *, int head, uint8_t index); +void NVSetOwner(struct drm_device *, int owner); +void NVBlankScreen(struct drm_device *, int head, bool blank); +void nouveau_hw_setpll(struct drm_device *, uint32_t reg1, + struct nouveau_pll_vals *pv); +int nouveau_hw_get_pllvals(struct drm_device *, enum pll_types plltype, + struct nouveau_pll_vals *pllvals); +int nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pllvals); +int nouveau_hw_get_clock(struct drm_device *, enum pll_types plltype); +void nouveau_hw_save_vga_fonts(struct drm_device *, bool save); +void nouveau_hw_save_state(struct drm_device *, int head, + struct nv04_mode_state *state); +void nouveau_hw_load_state(struct drm_device *, int head, + struct nv04_mode_state *state); +void nouveau_hw_load_state_palette(struct drm_device *, int head, + struct nv04_mode_state *state); + +/* nouveau_calc.c */ +extern void nouveau_calc_arb(struct drm_device *, int vclk, int bpp, + int *burst, int *lwm); +extern int nouveau_calc_pll_mnp(struct drm_device *, struct pll_lims *pll_lim, + int clk, struct nouveau_pll_vals *pv); + +static inline uint32_t +nvReadMC(struct drm_device *dev, uint32_t reg) +{ + uint32_t val = nv_rd32(dev, reg); + NV_REG_DEBUG(MC, dev, "reg %08x val %08x\n", reg, val); + return val; +} + +static inline void +nvWriteMC(struct drm_device *dev, uint32_t reg, uint32_t val) +{ + NV_REG_DEBUG(MC, dev, "reg %08x val %08x\n", reg, val); + nv_wr32(dev, reg, val); +} + +static inline uint32_t +nvReadVIDEO(struct drm_device *dev, uint32_t reg) +{ + uint32_t val = nv_rd32(dev, reg); + NV_REG_DEBUG(VIDEO, dev, "reg %08x val %08x\n", reg, val); + return val; +} + +static inline void +nvWriteVIDEO(struct drm_device *dev, uint32_t reg, uint32_t val) +{ + NV_REG_DEBUG(VIDEO, dev, "reg %08x val %08x\n", reg, val); + nv_wr32(dev, reg, val); +} + +static inline uint32_t +nvReadFB(struct drm_device *dev, uint32_t reg) +{ + uint32_t val = nv_rd32(dev, reg); + NV_REG_DEBUG(FB, dev, "reg %08x val %08x\n", reg, val); + return val; +} + +static inline void +nvWriteFB(struct drm_device *dev, uint32_t reg, uint32_t val) +{ + NV_REG_DEBUG(FB, dev, "reg %08x val %08x\n", reg, val); + nv_wr32(dev, reg, val); +} + +static inline uint32_t +nvReadEXTDEV(struct drm_device *dev, uint32_t reg) +{ + uint32_t val = nv_rd32(dev, reg); + NV_REG_DEBUG(EXTDEV, dev, "reg %08x val %08x\n", reg, val); + return val; +} + +static inline void +nvWriteEXTDEV(struct drm_device *dev, uint32_t 
reg, uint32_t val) +{ + NV_REG_DEBUG(EXTDEV, dev, "reg %08x val %08x\n", reg, val); + nv_wr32(dev, reg, val); +} + +static inline uint32_t NVReadCRTC(struct drm_device *dev, + int head, uint32_t reg) +{ + uint32_t val; + if (head) + reg += NV_PCRTC0_SIZE; + val = nv_rd32(dev, reg); + NV_REG_DEBUG(CRTC, dev, "head %d reg %08x val %08x\n", head, reg, val); + return val; +} + +static inline void NVWriteCRTC(struct drm_device *dev, + int head, uint32_t reg, uint32_t val) +{ + if (head) + reg += NV_PCRTC0_SIZE; + NV_REG_DEBUG(CRTC, dev, "head %d reg %08x val %08x\n", head, reg, val); + nv_wr32(dev, reg, val); +} + +static inline uint32_t NVReadRAMDAC(struct drm_device *dev, + int head, uint32_t reg) +{ + uint32_t val; + if (head) + reg += NV_PRAMDAC0_SIZE; + val = nv_rd32(dev, reg); + NV_REG_DEBUG(RAMDAC, dev, "head %d reg %08x val %08x\n", + head, reg, val); + return val; +} + +static inline void NVWriteRAMDAC(struct drm_device *dev, + int head, uint32_t reg, uint32_t val) +{ + if (head) + reg += NV_PRAMDAC0_SIZE; + NV_REG_DEBUG(RAMDAC, dev, "head %d reg %08x val %08x\n", + head, reg, val); + nv_wr32(dev, reg, val); +} + +static inline uint8_t nv_read_tmds(struct drm_device *dev, + int or, int dl, uint8_t address) +{ + int ramdac = (or & OUTPUT_C) >> 2; + + NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL + dl * 8, + NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE | address); + return NVReadRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA + dl * 8); +} + +static inline void nv_write_tmds(struct drm_device *dev, + int or, int dl, uint8_t address, + uint8_t data) +{ + int ramdac = (or & OUTPUT_C) >> 2; + + NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA + dl * 8, data); + NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL + dl * 8, address); +} + +static inline void NVWriteVgaCrtc(struct drm_device *dev, + int head, uint8_t index, uint8_t value) +{ + NV_REG_DEBUG(VGACRTC, dev, "head %d index 0x%02x data 0x%02x\n", + head, index, value); + nv_wr08(dev, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); + nv_wr08(dev, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value); +} + +static inline uint8_t NVReadVgaCrtc(struct drm_device *dev, + int head, uint8_t index) +{ + uint8_t val; + nv_wr08(dev, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); + val = nv_rd08(dev, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE); + NV_REG_DEBUG(VGACRTC, dev, "head %d index 0x%02x data 0x%02x\n", + head, index, val); + return val; +} + +/* CR57 and CR58 are a fun pair of regs. 
CR57 provides an index (0-0xf) for CR58 + * I suspect they in fact do nothing, but are merely a way to carry useful + * per-head variables around + * + * Known uses: + * CR57 CR58 + * 0x00 index to the appropriate dcb entry (or 7f for inactive) + * 0x02 dcb entry's "or" value (or 00 for inactive) + * 0x03 bit0 set for dual link (LVDS, possibly elsewhere too) + * 0x08 or 0x09 pxclk in MHz + * 0x0f laptop panel info - low nibble for PEXTDEV_BOOT_0 strap + * high nibble for xlat strap value + */ + +static inline void +NVWriteVgaCrtc5758(struct drm_device *dev, int head, uint8_t index, uint8_t value) +{ + NVWriteVgaCrtc(dev, head, NV_CIO_CRE_57, index); + NVWriteVgaCrtc(dev, head, NV_CIO_CRE_58, value); +} + +static inline uint8_t NVReadVgaCrtc5758(struct drm_device *dev, int head, uint8_t index) +{ + NVWriteVgaCrtc(dev, head, NV_CIO_CRE_57, index); + return NVReadVgaCrtc(dev, head, NV_CIO_CRE_58); +} + +static inline uint8_t NVReadPRMVIO(struct drm_device *dev, + int head, uint32_t reg) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint8_t val; + + /* Only NV4x have two pvio ranges; other twoHeads cards MUST call + * NVSetOwner for the relevant head to be programmed */ + if (head && dev_priv->card_type == NV_40) + reg += NV_PRMVIO_SIZE; + + val = nv_rd08(dev, reg); + NV_REG_DEBUG(RMVIO, dev, "head %d reg %08x val %02x\n", head, reg, val); + return val; +} + +static inline void NVWritePRMVIO(struct drm_device *dev, + int head, uint32_t reg, uint8_t value) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + /* Only NV4x have two pvio ranges; other twoHeads cards MUST call + * NVSetOwner for the relevant head to be programmed */ + if (head && dev_priv->card_type == NV_40) + reg += NV_PRMVIO_SIZE; + + NV_REG_DEBUG(RMVIO, dev, "head %d reg %08x val %02x\n", + head, reg, value); + nv_wr08(dev, reg, value); +} + +static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable) +{ + nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); + nv_wr08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20); +} + +static inline bool NVGetEnablePalette(struct drm_device *dev, int head) +{ + nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); + return !(nv_rd08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20); +} + +static inline void NVWriteVgaAttr(struct drm_device *dev, + int head, uint8_t index, uint8_t value) +{ + if (NVGetEnablePalette(dev, head)) + index &= ~0x20; + else + index |= 0x20; + + nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); + NV_REG_DEBUG(VGAATTR, dev, "head %d index 0x%02x data 0x%02x\n", + head, index, value); + nv_wr08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index); + nv_wr08(dev, NV_PRMCIO_AR__WRITE + head * NV_PRMCIO_SIZE, value); +} + +static inline uint8_t NVReadVgaAttr(struct drm_device *dev, + int head, uint8_t index) +{ + uint8_t val; + if (NVGetEnablePalette(dev, head)) + index &= ~0x20; + else + index |= 0x20; + + nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); + nv_wr08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index); + val = nv_rd08(dev, NV_PRMCIO_AR__READ + head * NV_PRMCIO_SIZE); + NV_REG_DEBUG(VGAATTR, dev, "head %d index 0x%02x data 0x%02x\n", + head, index, val); + return val; +} + +static inline void NVVgaSeqReset(struct drm_device *dev, int head, bool start) +{ + NVWriteVgaSeq(dev, head, NV_VIO_SR_RESET_INDEX, start ? 
0x1 : 0x3); +} + +static inline void NVVgaProtect(struct drm_device *dev, int head, bool protect) +{ + uint8_t seq1 = NVReadVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX); + + if (protect) { + NVVgaSeqReset(dev, head, true); + NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 | 0x20); + } else { + /* Reenable sequencer, then turn on screen */ + NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 & ~0x20); /* reenable display */ + NVVgaSeqReset(dev, head, false); + } + NVSetEnablePalette(dev, head, protect); +} + +static inline bool +nv_heads_tied(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (dev_priv->chipset == 0x11) + return !!(nvReadMC(dev, NV_PBUS_DEBUG_1) & (1 << 28)); + + return NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44) & 0x4; +} + +/* makes cr0-7 on the specified head read-only */ +static inline bool +nv_lock_vga_crtc_base(struct drm_device *dev, int head, bool lock) +{ + uint8_t cr11 = NVReadVgaCrtc(dev, head, NV_CIO_CR_VRE_INDEX); + bool waslocked = cr11 & 0x80; + + if (lock) + cr11 |= 0x80; + else + cr11 &= ~0x80; + NVWriteVgaCrtc(dev, head, NV_CIO_CR_VRE_INDEX, cr11); + + return waslocked; +} + +static inline void +nv_lock_vga_crtc_shadow(struct drm_device *dev, int head, int lock) +{ + /* shadow lock: connects 0x60?3d? regs to "real" 0x3d? regs + * bit7: unlocks HDT, HBS, HBE, HRS, HRE, HEB + * bit6: seems to have some effect on CR09 (double scan, VBS_9) + * bit5: unlocks HDE + * bit4: unlocks VDE + * bit3: unlocks VDT, OVL, VRS, ?VRE?, VBS, VBE, LSR, EBR + * bit2: same as bit 1 of 0x60?804 + * bit0: same as bit 0 of 0x60?804 + */ + + uint8_t cr21 = lock; + + if (lock < 0) + /* 0xfa is generic "unlock all" mask */ + cr21 = NVReadVgaCrtc(dev, head, NV_CIO_CRE_21) | 0xfa; + + NVWriteVgaCrtc(dev, head, NV_CIO_CRE_21, cr21); +} + +/* renders the extended crtc regs (cr19+) on all crtcs impervious: + * immutable and unreadable + */ +static inline bool +NVLockVgaCrtcs(struct drm_device *dev, bool lock) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + bool waslocked = !NVReadVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX); + + NVWriteVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX, + lock ? NV_CIO_SR_LOCK_VALUE : NV_CIO_SR_UNLOCK_RW_VALUE); + /* NV11 has independently lockable extended crtcs, except when tied */ + if (dev_priv->chipset == 0x11 && !nv_heads_tied(dev)) + NVWriteVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX, + lock ? NV_CIO_SR_LOCK_VALUE : + NV_CIO_SR_UNLOCK_RW_VALUE); + + return waslocked; +} + +/* nv04 cursor max dimensions of 32x32 (A1R5G5B5) */ +#define NV04_CURSOR_SIZE 32 +/* limit nv10 cursors to 64x64 (ARGB8) (we could go to 64x255) */ +#define NV10_CURSOR_SIZE 64 + +static inline int nv_cursor_width(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + return dev_priv->card_type >= NV_10 ? 
NV10_CURSOR_SIZE : NV04_CURSOR_SIZE; +} + +static inline void +nv_fix_nv40_hw_cursor(struct drm_device *dev, int head) +{ + /* on some nv40 (such as the "true" (in the NV_PFB_BOOT_0 sense) nv40, + * the gf6800gt) a hardware bug requires a write to PRAMDAC_CURSOR_POS + * for changes to the CRTC CURCTL regs to take effect, whether changing + * the pixmap location, or just showing/hiding the cursor + */ + uint32_t curpos = NVReadRAMDAC(dev, head, NV_PRAMDAC_CU_START_POS); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_CU_START_POS, curpos); +} + +static inline void +nv_show_cursor(struct drm_device *dev, int head, bool show) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint8_t *curctl1 = + &dev_priv->mode_reg.crtc_reg[head].CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX]; + + if (show) + *curctl1 |= MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE); + else + *curctl1 &= ~MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE); + NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HCUR_ADDR1_INDEX, *curctl1); + + if (dev_priv->card_type == NV_40) + nv_fix_nv40_hw_cursor(dev, head); +} + +static inline uint32_t +nv_pitch_align(struct drm_device *dev, uint32_t width, int bpp) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int mask; + + if (bpp == 15) + bpp = 16; + if (bpp == 24) + bpp = 8; + + /* Alignment requirements taken from the Haiku driver */ + if (dev_priv->card_type == NV_04) + mask = 128 / bpp - 1; + else + mask = 512 / bpp - 1; + + return (width + mask) & ~mask; +} + +#endif /* __NOUVEAU_HW_H__ */ diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c new file mode 100644 index 000000000000..70e994d28122 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c @@ -0,0 +1,269 @@ +/* + * Copyright 2009 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "drmP.h" +#include "nouveau_drv.h" +#include "nouveau_i2c.h" +#include "nouveau_hw.h" + +static void +nv04_i2c_setscl(void *data, int state) +{ + struct nouveau_i2c_chan *i2c = data; + struct drm_device *dev = i2c->dev; + uint8_t val; + + val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xd0) | (state ? 0x20 : 0); + NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01); +} + +static void +nv04_i2c_setsda(void *data, int state) +{ + struct nouveau_i2c_chan *i2c = data; + struct drm_device *dev = i2c->dev; + uint8_t val; + + val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xe0) | (state ? 
0x10 : 0); + NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01); +} + +static int +nv04_i2c_getscl(void *data) +{ + struct nouveau_i2c_chan *i2c = data; + struct drm_device *dev = i2c->dev; + + return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 4); +} + +static int +nv04_i2c_getsda(void *data) +{ + struct nouveau_i2c_chan *i2c = data; + struct drm_device *dev = i2c->dev; + + return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 8); +} + +static void +nv4e_i2c_setscl(void *data, int state) +{ + struct nouveau_i2c_chan *i2c = data; + struct drm_device *dev = i2c->dev; + uint8_t val; + + val = (nv_rd32(dev, i2c->wr) & 0xd0) | (state ? 0x20 : 0); + nv_wr32(dev, i2c->wr, val | 0x01); +} + +static void +nv4e_i2c_setsda(void *data, int state) +{ + struct nouveau_i2c_chan *i2c = data; + struct drm_device *dev = i2c->dev; + uint8_t val; + + val = (nv_rd32(dev, i2c->wr) & 0xe0) | (state ? 0x10 : 0); + nv_wr32(dev, i2c->wr, val | 0x01); +} + +static int +nv4e_i2c_getscl(void *data) +{ + struct nouveau_i2c_chan *i2c = data; + struct drm_device *dev = i2c->dev; + + return !!((nv_rd32(dev, i2c->rd) >> 16) & 4); +} + +static int +nv4e_i2c_getsda(void *data) +{ + struct nouveau_i2c_chan *i2c = data; + struct drm_device *dev = i2c->dev; + + return !!((nv_rd32(dev, i2c->rd) >> 16) & 8); +} + +static int +nv50_i2c_getscl(void *data) +{ + struct nouveau_i2c_chan *i2c = data; + struct drm_device *dev = i2c->dev; + + return !!(nv_rd32(dev, i2c->rd) & 1); +} + + +static int +nv50_i2c_getsda(void *data) +{ + struct nouveau_i2c_chan *i2c = data; + struct drm_device *dev = i2c->dev; + + return !!(nv_rd32(dev, i2c->rd) & 2); +} + +static void +nv50_i2c_setscl(void *data, int state) +{ + struct nouveau_i2c_chan *i2c = data; + struct drm_device *dev = i2c->dev; + + nv_wr32(dev, i2c->wr, 4 | (i2c->data ? 2 : 0) | (state ? 1 : 0)); +} + +static void +nv50_i2c_setsda(void *data, int state) +{ + struct nouveau_i2c_chan *i2c = data; + struct drm_device *dev = i2c->dev; + + nv_wr32(dev, i2c->wr, + (nv_rd32(dev, i2c->rd) & 1) | 4 | (state ? 
2 : 0)); + i2c->data = state; +} + +static const uint32_t nv50_i2c_port[] = { + 0x00e138, 0x00e150, 0x00e168, 0x00e180, + 0x00e254, 0x00e274, 0x00e764, 0x00e780, + 0x00e79c, 0x00e7b8 +}; +#define NV50_I2C_PORTS ARRAY_SIZE(nv50_i2c_port) + +int +nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_i2c_chan *i2c; + int ret; + + if (entry->chan) + return -EEXIST; + + if (dev_priv->card_type == NV_50 && entry->read >= NV50_I2C_PORTS) { + NV_ERROR(dev, "unknown i2c port %d\n", entry->read); + return -EINVAL; + } + + i2c = kzalloc(sizeof(*i2c), GFP_KERNEL); + if (i2c == NULL) + return -ENOMEM; + + switch (entry->port_type) { + case 0: + i2c->algo.bit.setsda = nv04_i2c_setsda; + i2c->algo.bit.setscl = nv04_i2c_setscl; + i2c->algo.bit.getsda = nv04_i2c_getsda; + i2c->algo.bit.getscl = nv04_i2c_getscl; + i2c->rd = entry->read; + i2c->wr = entry->write; + break; + case 4: + i2c->algo.bit.setsda = nv4e_i2c_setsda; + i2c->algo.bit.setscl = nv4e_i2c_setscl; + i2c->algo.bit.getsda = nv4e_i2c_getsda; + i2c->algo.bit.getscl = nv4e_i2c_getscl; + i2c->rd = 0x600800 + entry->read; + i2c->wr = 0x600800 + entry->write; + break; + case 5: + i2c->algo.bit.setsda = nv50_i2c_setsda; + i2c->algo.bit.setscl = nv50_i2c_setscl; + i2c->algo.bit.getsda = nv50_i2c_getsda; + i2c->algo.bit.getscl = nv50_i2c_getscl; + i2c->rd = nv50_i2c_port[entry->read]; + i2c->wr = i2c->rd; + break; + case 6: + i2c->rd = entry->read; + i2c->wr = entry->write; + break; + default: + NV_ERROR(dev, "DCB I2C port type %d unknown\n", + entry->port_type); + kfree(i2c); + return -EINVAL; + } + + snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), + "nouveau-%s-%d", pci_name(dev->pdev), index); + i2c->adapter.owner = THIS_MODULE; + i2c->adapter.dev.parent = &dev->pdev->dev; + i2c->dev = dev; + i2c_set_adapdata(&i2c->adapter, i2c); + + if (entry->port_type < 6) { + i2c->adapter.algo_data = &i2c->algo.bit; + i2c->algo.bit.udelay = 40; + i2c->algo.bit.timeout = usecs_to_jiffies(5000); + i2c->algo.bit.data = i2c; + ret = i2c_bit_add_bus(&i2c->adapter); + } else { + i2c->adapter.algo_data = &i2c->algo.dp; + i2c->algo.dp.running = false; + i2c->algo.dp.address = 0; + i2c->algo.dp.aux_ch = nouveau_dp_i2c_aux_ch; + ret = i2c_dp_aux_add_bus(&i2c->adapter); + } + + if (ret) { + NV_ERROR(dev, "Failed to register i2c %d\n", index); + kfree(i2c); + return ret; + } + + entry->chan = i2c; + return 0; +} + +void +nouveau_i2c_fini(struct drm_device *dev, struct dcb_i2c_entry *entry) +{ + if (!entry->chan) + return; + + i2c_del_adapter(&entry->chan->adapter); + kfree(entry->chan); + entry->chan = NULL; +} + +struct nouveau_i2c_chan * +nouveau_i2c_find(struct drm_device *dev, int index) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvbios *bios = &dev_priv->VBIOS; + + if (index > DCB_MAX_NUM_I2C_ENTRIES) + return NULL; + + if (!bios->bdcb.dcb.i2c[index].chan) { + if (nouveau_i2c_init(dev, &bios->bdcb.dcb.i2c[index], index)) + return NULL; + } + + return bios->bdcb.dcb.i2c[index].chan; +} + diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.h b/drivers/gpu/drm/nouveau/nouveau_i2c.h new file mode 100644 index 000000000000..c8eaf7a9fcbb --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_i2c.h @@ -0,0 +1,52 @@ +/* + * Copyright 2009 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NOUVEAU_I2C_H__ +#define __NOUVEAU_I2C_H__ + +#include +#include +#include +#include "drm_dp_helper.h" + +struct dcb_i2c_entry; + +struct nouveau_i2c_chan { + struct i2c_adapter adapter; + struct drm_device *dev; + union { + struct i2c_algo_bit_data bit; + struct i2c_algo_dp_aux_data dp; + } algo; + unsigned rd; + unsigned wr; + unsigned data; +}; + +int nouveau_i2c_init(struct drm_device *, struct dcb_i2c_entry *, int index); +void nouveau_i2c_fini(struct drm_device *, struct dcb_i2c_entry *); +struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, int index); + +int nouveau_dp_i2c_aux_ch(struct i2c_adapter *, int mode, uint8_t write_byte, + uint8_t *read_byte); + +#endif /* __NOUVEAU_I2C_H__ */ diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c new file mode 100644 index 000000000000..a2c30f4611ba --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c @@ -0,0 +1,72 @@ +/** + * \file mga_ioc32.c + * + * 32-bit ioctl compatibility routines for the MGA DRM. + * + * \author Dave Airlie with code from patches by Egbert Eich + * + * + * Copyright (C) Paul Mackerras 2005 + * Copyright (C) Egbert Eich 2003,2004 + * Copyright (C) Dave Airlie 2005 + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#include + +#include "drmP.h" +#include "drm.h" + +#include "nouveau_drv.h" + +/** + * Called whenever a 32-bit process running under a 64-bit kernel + * performs an ioctl on /dev/dri/card. + * + * \param filp file pointer. + * \param cmd command. + * \param arg user argument. + * \return zero on success or negative number on failure. + */ +long nouveau_compat_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + unsigned int nr = DRM_IOCTL_NR(cmd); + drm_ioctl_compat_t *fn = NULL; + int ret; + + if (nr < DRM_COMMAND_BASE) + return drm_compat_ioctl(filp, cmd, arg); + +#if 0 + if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) + fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE]; +#endif + lock_kernel(); /* XXX for now */ + if (fn != NULL) + ret = (*fn)(filp, cmd, arg); + else + ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); + unlock_kernel(); + + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c new file mode 100644 index 000000000000..370c72c968d1 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c @@ -0,0 +1,702 @@ +/* + * Copyright (C) 2006 Ben Skeggs. + * + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +/* + * Authors: + * Ben Skeggs + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drm.h" +#include "nouveau_drv.h" +#include "nouveau_reg.h" +#include + +/* needed for hotplug irq */ +#include "nouveau_connector.h" +#include "nv50_display.h" + +void +nouveau_irq_preinstall(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + /* Master disable */ + nv_wr32(dev, NV03_PMC_INTR_EN_0, 0); + + if (dev_priv->card_type == NV_50) { + INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh); + INIT_LIST_HEAD(&dev_priv->vbl_waiting); + } +} + +int +nouveau_irq_postinstall(struct drm_device *dev) +{ + /* Master enable */ + nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE); + return 0; +} + +void +nouveau_irq_uninstall(struct drm_device *dev) +{ + /* Master disable */ + nv_wr32(dev, NV03_PMC_INTR_EN_0, 0); +} + +static int +nouveau_call_method(struct nouveau_channel *chan, int class, int mthd, int data) +{ + struct drm_nouveau_private *dev_priv = chan->dev->dev_private; + struct nouveau_pgraph_object_method *grm; + struct nouveau_pgraph_object_class *grc; + + grc = dev_priv->engine.graph.grclass; + while (grc->id) { + if (grc->id == class) + break; + grc++; + } + + if (grc->id != class || !grc->methods) + return -ENOENT; + + grm = grc->methods; + while (grm->id) { + if (grm->id == mthd) + return grm->exec(chan, class, mthd, data); + grm++; + } + + return -ENOENT; +} + +static bool +nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data) +{ + struct drm_device *dev = chan->dev; + const int subc = (addr >> 13) & 0x7; + const int mthd = addr & 0x1ffc; + + if (mthd == 0x0000) { + struct nouveau_gpuobj_ref *ref = NULL; + + if (nouveau_gpuobj_ref_find(chan, data, &ref)) + return false; + + if (ref->gpuobj->engine != NVOBJ_ENGINE_SW) + return false; + + chan->sw_subchannel[subc] = ref->gpuobj->class; + nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev, + NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4)); + return true; + } + + /* hw object */ + if (nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE) & (1 << (subc*4))) + return false; + + if (nouveau_call_method(chan, chan->sw_subchannel[subc], mthd, data)) + return false; + + return true; +} + +static void +nouveau_fifo_irq_handler(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_engine *engine = &dev_priv->engine; + uint32_t status, reassign; + int cnt = 0; + + reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1; + while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) { + struct nouveau_channel *chan = NULL; + uint32_t chid, get; + + nv_wr32(dev, NV03_PFIFO_CACHES, 0); + + chid = engine->fifo.channel_id(dev); + if (chid >= 0 && chid < engine->fifo.channels) + chan = dev_priv->fifos[chid]; + get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET); + + if (status & NV_PFIFO_INTR_CACHE_ERROR) { + uint32_t mthd, data; + int ptr; + + /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before + * wrapping on my G80 chips, but CACHE1 isn't big + * enough for this much data.. Tests show that it + * wraps around to the start at GET=0x800.. No clue + * as to why.. 
+ */ + ptr = (get & 0x7ff) >> 2; + + if (dev_priv->card_type < NV_40) { + mthd = nv_rd32(dev, + NV04_PFIFO_CACHE1_METHOD(ptr)); + data = nv_rd32(dev, + NV04_PFIFO_CACHE1_DATA(ptr)); + } else { + mthd = nv_rd32(dev, + NV40_PFIFO_CACHE1_METHOD(ptr)); + data = nv_rd32(dev, + NV40_PFIFO_CACHE1_DATA(ptr)); + } + + if (!chan || !nouveau_fifo_swmthd(chan, mthd, data)) { + NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d " + "Mthd 0x%04x Data 0x%08x\n", + chid, (mthd >> 13) & 7, mthd & 0x1ffc, + data); + } + + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0); + nv_wr32(dev, NV03_PFIFO_INTR_0, + NV_PFIFO_INTR_CACHE_ERROR); + + nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, + nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1); + nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4); + nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, + nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1); + nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0); + + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, + nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1); + nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); + + status &= ~NV_PFIFO_INTR_CACHE_ERROR; + } + + if (status & NV_PFIFO_INTR_DMA_PUSHER) { + NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d\n", chid); + + status &= ~NV_PFIFO_INTR_DMA_PUSHER; + nv_wr32(dev, NV03_PFIFO_INTR_0, + NV_PFIFO_INTR_DMA_PUSHER); + + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000); + if (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT) != get) + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, + get + 4); + } + + if (status) { + NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n", + status, chid); + nv_wr32(dev, NV03_PFIFO_INTR_0, status); + status = 0; + } + + nv_wr32(dev, NV03_PFIFO_CACHES, reassign); + } + + if (status) { + NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt); + nv_wr32(dev, 0x2140, 0); + nv_wr32(dev, 0x140, 0); + } + + nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING); +} + +struct nouveau_bitfield_names { + uint32_t mask; + const char *name; +}; + +static struct nouveau_bitfield_names nstatus_names[] = +{ + { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, + { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, + { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, + { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" } +}; + +static struct nouveau_bitfield_names nstatus_names_nv10[] = +{ + { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, + { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, + { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, + { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" } +}; + +static struct nouveau_bitfield_names nsource_names[] = +{ + { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" }, + { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" }, + { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" }, + { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" }, + { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" }, + { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" }, + { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" }, + { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" }, + { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" }, + { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" }, + { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" }, + { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" }, + { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" }, + { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" }, + { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" }, + { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" }, + { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, 
"DMA_VTX_PROTECTION" }, + { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" }, + { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" }, +}; + +static void +nouveau_print_bitfield_names_(uint32_t value, + const struct nouveau_bitfield_names *namelist, + const int namelist_len) +{ + /* + * Caller must have already printed the KERN_* log level for us. + * Also the caller is responsible for adding the newline. + */ + int i; + for (i = 0; i < namelist_len; ++i) { + uint32_t mask = namelist[i].mask; + if (value & mask) { + printk(" %s", namelist[i].name); + value &= ~mask; + } + } + if (value) + printk(" (unknown bits 0x%08x)", value); +} +#define nouveau_print_bitfield_names(val, namelist) \ + nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist)) + + +static int +nouveau_graph_chid_from_grctx(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t inst; + int i; + + if (dev_priv->card_type < NV_40) + return dev_priv->engine.fifo.channels; + else + if (dev_priv->card_type < NV_50) { + inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4; + + for (i = 0; i < dev_priv->engine.fifo.channels; i++) { + struct nouveau_channel *chan = dev_priv->fifos[i]; + + if (!chan || !chan->ramin_grctx) + continue; + + if (inst == chan->ramin_grctx->instance) + break; + } + } else { + inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12; + + for (i = 0; i < dev_priv->engine.fifo.channels; i++) { + struct nouveau_channel *chan = dev_priv->fifos[i]; + + if (!chan || !chan->ramin) + continue; + + if (inst == chan->ramin->instance) + break; + } + } + + + return i; +} + +static int +nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_engine *engine = &dev_priv->engine; + int channel; + + if (dev_priv->card_type < NV_10) + channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf; + else + if (dev_priv->card_type < NV_40) + channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f; + else + channel = nouveau_graph_chid_from_grctx(dev); + + if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) { + NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel); + return -EINVAL; + } + + *channel_ret = channel; + return 0; +} + +struct nouveau_pgraph_trap { + int channel; + int class; + int subc, mthd, size; + uint32_t data, data2; + uint32_t nsource, nstatus; +}; + +static void +nouveau_graph_trap_info(struct drm_device *dev, + struct nouveau_pgraph_trap *trap) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t address; + + trap->nsource = trap->nstatus = 0; + if (dev_priv->card_type < NV_50) { + trap->nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); + trap->nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS); + } + + if (nouveau_graph_trapped_channel(dev, &trap->channel)) + trap->channel = -1; + address = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR); + + trap->mthd = address & 0x1FFC; + trap->data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA); + if (dev_priv->card_type < NV_10) { + trap->subc = (address >> 13) & 0x7; + } else { + trap->subc = (address >> 16) & 0x7; + trap->data2 = nv_rd32(dev, NV10_PGRAPH_TRAPPED_DATA_HIGH); + } + + if (dev_priv->card_type < NV_10) + trap->class = nv_rd32(dev, 0x400180 + trap->subc*4) & 0xFF; + else if (dev_priv->card_type < NV_40) + trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFF; + else if (dev_priv->card_type < NV_50) + trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFFF; + else + trap->class = 
nv_rd32(dev, 0x400814); +} + +static void +nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id, + struct nouveau_pgraph_trap *trap) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t nsource = trap->nsource, nstatus = trap->nstatus; + + NV_INFO(dev, "%s - nSource:", id); + nouveau_print_bitfield_names(nsource, nsource_names); + printk(", nStatus:"); + if (dev_priv->card_type < NV_10) + nouveau_print_bitfield_names(nstatus, nstatus_names); + else + nouveau_print_bitfield_names(nstatus, nstatus_names_nv10); + printk("\n"); + + NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x " + "Data 0x%08x:0x%08x\n", + id, trap->channel, trap->subc, + trap->class, trap->mthd, + trap->data2, trap->data); +} + +static int +nouveau_pgraph_intr_swmthd(struct drm_device *dev, + struct nouveau_pgraph_trap *trap) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (trap->channel < 0 || + trap->channel >= dev_priv->engine.fifo.channels || + !dev_priv->fifos[trap->channel]) + return -ENODEV; + + return nouveau_call_method(dev_priv->fifos[trap->channel], + trap->class, trap->mthd, trap->data); +} + +static inline void +nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource) +{ + struct nouveau_pgraph_trap trap; + int unhandled = 0; + + nouveau_graph_trap_info(dev, &trap); + + if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { + if (nouveau_pgraph_intr_swmthd(dev, &trap)) + unhandled = 1; + } else { + unhandled = 1; + } + + if (unhandled) + nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap); +} + +static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20); + +static int nouveau_ratelimit(void) +{ + return __ratelimit(&nouveau_ratelimit_state); +} + + +static inline void +nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource) +{ + struct nouveau_pgraph_trap trap; + int unhandled = 0; + + nouveau_graph_trap_info(dev, &trap); + trap.nsource = nsource; + + if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { + if (nouveau_pgraph_intr_swmthd(dev, &trap)) + unhandled = 1; + } else { + unhandled = 1; + } + + if (unhandled && nouveau_ratelimit()) + nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap); +} + +static inline void +nouveau_pgraph_intr_context_switch(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_engine *engine = &dev_priv->engine; + uint32_t chid; + + chid = engine->fifo.channel_id(dev); + NV_DEBUG(dev, "PGRAPH context switch interrupt channel %x\n", chid); + + switch (dev_priv->card_type) { + case NV_04: + nv04_graph_context_switch(dev); + break; + case NV_10: + nv10_graph_context_switch(dev); + break; + default: + NV_ERROR(dev, "Context switch not implemented\n"); + break; + } +} + +static void +nouveau_pgraph_irq_handler(struct drm_device *dev) +{ + uint32_t status; + + while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) { + uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); + + if (status & NV_PGRAPH_INTR_NOTIFY) { + nouveau_pgraph_intr_notify(dev, nsource); + + status &= ~NV_PGRAPH_INTR_NOTIFY; + nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY); + } + + if (status & NV_PGRAPH_INTR_ERROR) { + nouveau_pgraph_intr_error(dev, nsource); + + status &= ~NV_PGRAPH_INTR_ERROR; + nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR); + } + + if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) { + nouveau_pgraph_intr_context_switch(dev); + + status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; + nv_wr32(dev, NV03_PGRAPH_INTR, + NV_PGRAPH_INTR_CONTEXT_SWITCH); + } + + if (status) 
{ + NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status); + nv_wr32(dev, NV03_PGRAPH_INTR, status); + } + + if ((nv_rd32(dev, NV04_PGRAPH_FIFO) & (1 << 0)) == 0) + nv_wr32(dev, NV04_PGRAPH_FIFO, 1); + } + + nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); +} + +static void +nv50_pgraph_irq_handler(struct drm_device *dev) +{ + uint32_t status, nsource; + + status = nv_rd32(dev, NV03_PGRAPH_INTR); + nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); + + if (status & 0x00000001) { + nouveau_pgraph_intr_notify(dev, nsource); + status &= ~0x00000001; + nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001); + } + + if (status & 0x00000010) { + nouveau_pgraph_intr_error(dev, nsource | + NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD); + + status &= ~0x00000010; + nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010); + } + + if (status & 0x00001000) { + nv_wr32(dev, 0x400500, 0x00000000); + nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH); + nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev, + NV40_PGRAPH_INTR_EN) & ~NV_PGRAPH_INTR_CONTEXT_SWITCH); + nv_wr32(dev, 0x400500, 0x00010001); + + nv50_graph_context_switch(dev); + + status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; + } + + if (status & 0x00100000) { + nouveau_pgraph_intr_error(dev, nsource | + NV03_PGRAPH_NSOURCE_DATA_ERROR); + + status &= ~0x00100000; + nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000); + } + + if (status & 0x00200000) { + int r; + + nouveau_pgraph_intr_error(dev, nsource | + NV03_PGRAPH_NSOURCE_PROTECTION_ERROR); + + NV_ERROR(dev, "magic set 1:\n"); + for (r = 0x408900; r <= 0x408910; r += 4) + NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); + nv_wr32(dev, 0x408900, nv_rd32(dev, 0x408904) | 0xc0000000); + for (r = 0x408e08; r <= 0x408e24; r += 4) + NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); + nv_wr32(dev, 0x408e08, nv_rd32(dev, 0x408e08) | 0xc0000000); + + NV_ERROR(dev, "magic set 2:\n"); + for (r = 0x409900; r <= 0x409910; r += 4) + NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); + nv_wr32(dev, 0x409900, nv_rd32(dev, 0x409904) | 0xc0000000); + for (r = 0x409e08; r <= 0x409e24; r += 4) + NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); + nv_wr32(dev, 0x409e08, nv_rd32(dev, 0x409e08) | 0xc0000000); + + status &= ~0x00200000; + nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource); + nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000); + } + + if (status) { + NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status); + nv_wr32(dev, NV03_PGRAPH_INTR, status); + } + + { + const int isb = (1 << 16) | (1 << 0); + + if ((nv_rd32(dev, 0x400500) & isb) != isb) + nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | isb); + } + + nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); +} + +static void +nouveau_crtc_irq_handler(struct drm_device *dev, int crtc) +{ + if (crtc & 1) + nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK); + + if (crtc & 2) + nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK); +} + +irqreturn_t +nouveau_irq_handler(DRM_IRQ_ARGS) +{ + struct drm_device *dev = (struct drm_device *)arg; + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t status, fbdev_flags = 0; + + status = nv_rd32(dev, NV03_PMC_INTR_0); + if (!status) + return IRQ_NONE; + + if (dev_priv->fbdev_info) { + fbdev_flags = dev_priv->fbdev_info->flags; + dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED; + } + + if (status & NV_PMC_INTR_0_PFIFO_PENDING) { + nouveau_fifo_irq_handler(dev); + status &= ~NV_PMC_INTR_0_PFIFO_PENDING; + } + + if (status & NV_PMC_INTR_0_PGRAPH_PENDING) { + if (dev_priv->card_type >= NV_50) + 
nv50_pgraph_irq_handler(dev); + else + nouveau_pgraph_irq_handler(dev); + + status &= ~NV_PMC_INTR_0_PGRAPH_PENDING; + } + + if (status & NV_PMC_INTR_0_CRTCn_PENDING) { + nouveau_crtc_irq_handler(dev, (status>>24)&3); + status &= ~NV_PMC_INTR_0_CRTCn_PENDING; + } + + if (status & (NV_PMC_INTR_0_NV50_DISPLAY_PENDING | + NV_PMC_INTR_0_NV50_I2C_PENDING)) { + nv50_display_irq_handler(dev); + status &= ~(NV_PMC_INTR_0_NV50_DISPLAY_PENDING | + NV_PMC_INTR_0_NV50_I2C_PENDING); + } + + if (status) + NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status); + + if (dev_priv->fbdev_info) + dev_priv->fbdev_info->flags = fbdev_flags; + + return IRQ_HANDLED; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c new file mode 100644 index 000000000000..02755712ed3d --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -0,0 +1,568 @@ +/* + * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. + * Copyright 2005 Stephane Marchesin + * + * The Weather Channel (TM) funded Tungsten Graphics to develop the + * initial release of the Radeon 8500 driver under the XFree86 license. + * This notice must be preserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * Authors: + * Keith Whitwell + */ + + +#include "drmP.h" +#include "drm.h" +#include "drm_sarea.h" +#include "nouveau_drv.h" + +static struct mem_block * +split_block(struct mem_block *p, uint64_t start, uint64_t size, + struct drm_file *file_priv) +{ + /* Maybe cut off the start of an existing block */ + if (start > p->start) { + struct mem_block *newblock = + kmalloc(sizeof(*newblock), GFP_KERNEL); + if (!newblock) + goto out; + newblock->start = start; + newblock->size = p->size - (start - p->start); + newblock->file_priv = NULL; + newblock->next = p->next; + newblock->prev = p; + p->next->prev = newblock; + p->next = newblock; + p->size -= newblock->size; + p = newblock; + } + + /* Maybe cut off the end of an existing block */ + if (size < p->size) { + struct mem_block *newblock = + kmalloc(sizeof(*newblock), GFP_KERNEL); + if (!newblock) + goto out; + newblock->start = start + size; + newblock->size = p->size - size; + newblock->file_priv = NULL; + newblock->next = p->next; + newblock->prev = p; + p->next->prev = newblock; + p->next = newblock; + p->size = size; + } + +out: + /* Our block is in the middle */ + p->file_priv = file_priv; + return p; +} + +struct mem_block * +nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size, + int align2, struct drm_file *file_priv, int tail) +{ + struct mem_block *p; + uint64_t mask = (1 << align2) - 1; + + if (!heap) + return NULL; + + if (tail) { + list_for_each_prev(p, heap) { + uint64_t start = ((p->start + p->size) - size) & ~mask; + + if (p->file_priv == NULL && start >= p->start && + start + size <= p->start + p->size) + return split_block(p, start, size, file_priv); + } + } else { + list_for_each(p, heap) { + uint64_t start = (p->start + mask) & ~mask; + + if (p->file_priv == NULL && + start + size <= p->start + p->size) + return split_block(p, start, size, file_priv); + } + } + + return NULL; +} + +void nouveau_mem_free_block(struct mem_block *p) +{ + p->file_priv = NULL; + + /* Assumes a single contiguous range. Needs a special file_priv in + * 'heap' to stop it being subsumed. + */ + if (p->next->file_priv == NULL) { + struct mem_block *q = p->next; + p->size += q->size; + p->next = q->next; + p->next->prev = p; + kfree(q); + } + + if (p->prev->file_priv == NULL) { + struct mem_block *q = p->prev; + q->size += p->size; + q->next = p->next; + q->next->prev = q; + kfree(p); + } +} + +/* Initialize. How to check for an uninitialized heap? + */ +int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start, + uint64_t size) +{ + struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL); + + if (!blocks) + return -ENOMEM; + + *heap = kmalloc(sizeof(**heap), GFP_KERNEL); + if (!*heap) { + kfree(blocks); + return -ENOMEM; + } + + blocks->start = start; + blocks->size = size; + blocks->file_priv = NULL; + blocks->next = blocks->prev = *heap; + + memset(*heap, 0, sizeof(**heap)); + (*heap)->file_priv = (struct drm_file *) -1; + (*heap)->next = (*heap)->prev = blocks; + return 0; +} + +/* + * Free all blocks associated with the releasing file_priv + */ +void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap) +{ + struct mem_block *p; + + if (!heap || !heap->next) + return; + + list_for_each(p, heap) { + if (p->file_priv == file_priv) + p->file_priv = NULL; + } + + /* Assumes a single contiguous range. Needs a special file_priv in + * 'heap' to stop it being subsumed. 
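+ * The loop below then merges runs of adjacent free blocks back into one.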
+ */ + list_for_each(p, heap) { + while ((p->file_priv == NULL) && + (p->next->file_priv == NULL) && + (p->next != heap)) { + struct mem_block *q = p->next; + p->size += q->size; + p->next = q->next; + p->next->prev = p; + kfree(q); + } + } +} + +/* + * NV50 VM helpers + */ +int +nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size, + uint32_t flags, uint64_t phys) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj **pgt; + unsigned psz, pfl, pages; + + if (virt >= dev_priv->vm_gart_base && + (virt + size) < (dev_priv->vm_gart_base + dev_priv->vm_gart_size)) { + psz = 12; + pgt = &dev_priv->gart_info.sg_ctxdma; + pfl = 0x21; + virt -= dev_priv->vm_gart_base; + } else + if (virt >= dev_priv->vm_vram_base && + (virt + size) < (dev_priv->vm_vram_base + dev_priv->vm_vram_size)) { + psz = 16; + pgt = dev_priv->vm_vram_pt; + pfl = 0x01; + virt -= dev_priv->vm_vram_base; + } else { + NV_ERROR(dev, "Invalid address: 0x%16llx-0x%16llx\n", + virt, virt + size - 1); + return -EINVAL; + } + + pages = size >> psz; + + dev_priv->engine.instmem.prepare_access(dev, true); + if (flags & 0x80000000) { + while (pages--) { + struct nouveau_gpuobj *pt = pgt[virt >> 29]; + unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1; + + nv_wo32(dev, pt, pte++, 0x00000000); + nv_wo32(dev, pt, pte++, 0x00000000); + + virt += (1 << psz); + } + } else { + while (pages--) { + struct nouveau_gpuobj *pt = pgt[virt >> 29]; + unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1; + unsigned offset_h = upper_32_bits(phys) & 0xff; + unsigned offset_l = lower_32_bits(phys); + + nv_wo32(dev, pt, pte++, offset_l | pfl); + nv_wo32(dev, pt, pte++, offset_h | flags); + + phys += (1 << psz); + virt += (1 << psz); + } + } + dev_priv->engine.instmem.finish_access(dev); + + nv_wr32(dev, 0x100c80, 0x00050001); + if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { + NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); + NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); + return -EBUSY; + } + + nv_wr32(dev, 0x100c80, 0x00000001); + if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { + NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); + NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); + return -EBUSY; + } + + return 0; +} + +void +nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) +{ + nv50_mem_vm_bind_linear(dev, virt, size, 0x80000000, 0); +} + +/* + * Cleanup everything + */ +void nouveau_mem_takedown(struct mem_block **heap) +{ + struct mem_block *p; + + if (!*heap) + return; + + for (p = (*heap)->next; p != *heap;) { + struct mem_block *q = p; + p = p->next; + kfree(q); + } + + kfree(*heap); + *heap = NULL; +} + +void nouveau_mem_close(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (dev_priv->ttm.bdev.man[TTM_PL_PRIV0].has_type) + ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_PRIV0); + ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); + + ttm_bo_device_release(&dev_priv->ttm.bdev); + + nouveau_ttm_global_release(dev_priv); + + if (drm_core_has_AGP(dev) && dev->agp && + drm_core_check_feature(dev, DRIVER_MODESET)) { + struct drm_agp_mem *entry, *tempe; + + /* Remove AGP resources, but leave dev->agp + intact until drv_cleanup is called. 
*/ + list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) { + if (entry->bound) + drm_unbind_agp(entry->memory); + drm_free_agp(entry->memory, entry->pages); + kfree(entry); + } + INIT_LIST_HEAD(&dev->agp->memory); + + if (dev->agp->acquired) + drm_agp_release(dev); + + dev->agp->acquired = 0; + dev->agp->enabled = 0; + } + + if (dev_priv->fb_mtrr) { + drm_mtrr_del(dev_priv->fb_mtrr, drm_get_resource_start(dev, 1), + drm_get_resource_len(dev, 1), DRM_MTRR_WC); + dev_priv->fb_mtrr = 0; + } +} + +/*XXX won't work on BSD because of pci_read_config_dword */ +static uint32_t +nouveau_mem_fb_amount_igp(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct pci_dev *bridge; + uint32_t mem; + + bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1)); + if (!bridge) { + NV_ERROR(dev, "no bridge device\n"); + return 0; + } + + if (dev_priv->flags&NV_NFORCE) { + pci_read_config_dword(bridge, 0x7C, &mem); + return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024; + } else + if (dev_priv->flags&NV_NFORCE2) { + pci_read_config_dword(bridge, 0x84, &mem); + return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024; + } + + NV_ERROR(dev, "impossible!\n"); + return 0; +} + +/* returns the amount of FB ram in bytes */ +uint64_t nouveau_mem_fb_amount(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t boot0; + + switch (dev_priv->card_type) { + case NV_04: + boot0 = nv_rd32(dev, NV03_BOOT_0); + if (boot0 & 0x00000100) + return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024; + + switch (boot0 & NV03_BOOT_0_RAM_AMOUNT) { + case NV04_BOOT_0_RAM_AMOUNT_32MB: + return 32 * 1024 * 1024; + case NV04_BOOT_0_RAM_AMOUNT_16MB: + return 16 * 1024 * 1024; + case NV04_BOOT_0_RAM_AMOUNT_8MB: + return 8 * 1024 * 1024; + case NV04_BOOT_0_RAM_AMOUNT_4MB: + return 4 * 1024 * 1024; + } + break; + case NV_10: + case NV_20: + case NV_30: + case NV_40: + case NV_50: + default: + if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) { + return nouveau_mem_fb_amount_igp(dev); + } else { + uint64_t mem; + mem = (nv_rd32(dev, NV04_FIFO_DATA) & + NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >> + NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT; + return mem * 1024 * 1024; + } + break; + } + + NV_ERROR(dev, + "Unable to detect video ram size. 
Please report your setup to " + DRIVER_EMAIL "\n"); + return 0; +} + +static void nouveau_mem_reset_agp(struct drm_device *dev) +{ + uint32_t saved_pci_nv_1, saved_pci_nv_19, pmc_enable; + + saved_pci_nv_1 = nv_rd32(dev, NV04_PBUS_PCI_NV_1); + saved_pci_nv_19 = nv_rd32(dev, NV04_PBUS_PCI_NV_19); + + /* clear busmaster bit */ + nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4); + /* clear SBA and AGP bits */ + nv_wr32(dev, NV04_PBUS_PCI_NV_19, saved_pci_nv_19 & 0xfffff0ff); + + /* power cycle pgraph, if enabled */ + pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE); + if (pmc_enable & NV_PMC_ENABLE_PGRAPH) { + nv_wr32(dev, NV03_PMC_ENABLE, + pmc_enable & ~NV_PMC_ENABLE_PGRAPH); + nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | + NV_PMC_ENABLE_PGRAPH); + } + + /* and restore (gives effect of resetting AGP) */ + nv_wr32(dev, NV04_PBUS_PCI_NV_19, saved_pci_nv_19); + nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1); +} + +int +nouveau_mem_init_agp(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_agp_info info; + struct drm_agp_mode mode; + int ret; + + if (nouveau_noagp) + return 0; + + nouveau_mem_reset_agp(dev); + + if (!dev->agp->acquired) { + ret = drm_agp_acquire(dev); + if (ret) { + NV_ERROR(dev, "Unable to acquire AGP: %d\n", ret); + return ret; + } + } + + ret = drm_agp_info(dev, &info); + if (ret) { + NV_ERROR(dev, "Unable to get AGP info: %d\n", ret); + return ret; + } + + /* see agp.h for the AGPSTAT_* modes available */ + mode.mode = info.mode; + ret = drm_agp_enable(dev, mode); + if (ret) { + NV_ERROR(dev, "Unable to enable AGP: %d\n", ret); + return ret; + } + + dev_priv->gart_info.type = NOUVEAU_GART_AGP; + dev_priv->gart_info.aper_base = info.aperture_base; + dev_priv->gart_info.aper_size = info.aperture_size; + return 0; +} + +int +nouveau_mem_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; + int ret, dma_bits = 32; + + dev_priv->fb_phys = drm_get_resource_start(dev, 1); + dev_priv->gart_info.type = NOUVEAU_GART_NONE; + + if (dev_priv->card_type >= NV_50 && + pci_dma_supported(dev->pdev, DMA_BIT_MASK(40))) + dma_bits = 40; + + ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits)); + if (ret) { + NV_ERROR(dev, "Error setting DMA mask: %d\n", ret); + return ret; + } + + ret = nouveau_ttm_global_init(dev_priv); + if (ret) + return ret; + + ret = ttm_bo_device_init(&dev_priv->ttm.bdev, + dev_priv->ttm.bo_global_ref.ref.object, + &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET, + dma_bits <= 32 ? 
true : false); + if (ret) { + NV_ERROR(dev, "Error initialising bo driver: %d\n", ret); + return ret; + } + + INIT_LIST_HEAD(&dev_priv->ttm.bo_list); + spin_lock_init(&dev_priv->ttm.bo_list_lock); + + dev_priv->fb_available_size = nouveau_mem_fb_amount(dev); + + dev_priv->fb_mappable_pages = dev_priv->fb_available_size; + if (dev_priv->fb_mappable_pages > drm_get_resource_len(dev, 1)) + dev_priv->fb_mappable_pages = drm_get_resource_len(dev, 1); + dev_priv->fb_mappable_pages >>= PAGE_SHIFT; + + NV_INFO(dev, "%d MiB VRAM\n", (int)(dev_priv->fb_available_size >> 20)); + + /* remove reserved space at end of vram from available amount */ + dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram; + dev_priv->fb_aper_free = dev_priv->fb_available_size; + + /* mappable vram */ + ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, + dev_priv->fb_available_size >> PAGE_SHIFT); + if (ret) { + NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret); + return ret; + } + + /* GART */ +#if !defined(__powerpc__) && !defined(__ia64__) + if (drm_device_is_agp(dev) && dev->agp) { + ret = nouveau_mem_init_agp(dev); + if (ret) + NV_ERROR(dev, "Error initialising AGP: %d\n", ret); + } +#endif + + if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) { + ret = nouveau_sgdma_init(dev); + if (ret) { + NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret); + return ret; + } + } + + NV_INFO(dev, "%d MiB GART (aperture)\n", + (int)(dev_priv->gart_info.aper_size >> 20)); + dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size; + + ret = ttm_bo_init_mm(bdev, TTM_PL_TT, + dev_priv->gart_info.aper_size >> PAGE_SHIFT); + if (ret) { + NV_ERROR(dev, "Failed TT mm init: %d\n", ret); + return ret; + } + + dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1), + drm_get_resource_len(dev, 1), + DRM_MTRR_WC); + return 0; +} + + diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c new file mode 100644 index 000000000000..6c66a34b6345 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c @@ -0,0 +1,196 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" + +int +nouveau_notifier_init_channel(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + struct nouveau_bo *ntfy = NULL; + int ret; + + ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, nouveau_vram_notify ? + TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT, + 0, 0x0000, false, true, &ntfy); + if (ret) + return ret; + + ret = nouveau_bo_pin(ntfy, TTM_PL_FLAG_VRAM); + if (ret) + goto out_err; + + ret = nouveau_bo_map(ntfy); + if (ret) + goto out_err; + + ret = nouveau_mem_init_heap(&chan->notifier_heap, 0, ntfy->bo.mem.size); + if (ret) + goto out_err; + + chan->notifier_bo = ntfy; +out_err: + if (ret) { + mutex_lock(&dev->struct_mutex); + drm_gem_object_unreference(ntfy->gem); + mutex_unlock(&dev->struct_mutex); + } + + return ret; +} + +void +nouveau_notifier_takedown_channel(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + + if (!chan->notifier_bo) + return; + + nouveau_bo_unmap(chan->notifier_bo); + mutex_lock(&dev->struct_mutex); + nouveau_bo_unpin(chan->notifier_bo); + drm_gem_object_unreference(chan->notifier_bo->gem); + mutex_unlock(&dev->struct_mutex); + nouveau_mem_takedown(&chan->notifier_heap); +} + +static void +nouveau_notifier_gpuobj_dtor(struct drm_device *dev, + struct nouveau_gpuobj *gpuobj) +{ + NV_DEBUG(dev, "\n"); + + if (gpuobj->priv) + nouveau_mem_free_block(gpuobj->priv); +} + +int +nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, + int size, uint32_t *b_offset) +{ + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *nobj = NULL; + struct mem_block *mem; + uint32_t offset; + int target, ret; + + if (!chan->notifier_heap) { + NV_ERROR(dev, "Channel %d doesn't have a notifier heap!\n", + chan->id); + return -EINVAL; + } + + mem = nouveau_mem_alloc_block(chan->notifier_heap, size, 0, + (struct drm_file *)-2, 0); + if (!mem) { + NV_ERROR(dev, "Channel %d notifier block full\n", chan->id); + return -ENOMEM; + } + + offset = chan->notifier_bo->bo.mem.mm_node->start << PAGE_SHIFT; + if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) { + target = NV_DMA_TARGET_VIDMEM; + } else + if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_TT) { + if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA && + dev_priv->card_type < NV_50) { + ret = nouveau_sgdma_get_page(dev, offset, &offset); + if (ret) + return ret; + target = NV_DMA_TARGET_PCI; + } else { + target = NV_DMA_TARGET_AGP; + } + } else { + NV_ERROR(dev, "Bad DMA target, mem_type %d!\n", + chan->notifier_bo->bo.mem.mem_type); + return -EINVAL; + } + offset += mem->start; + + ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset, + mem->size, NV_DMA_ACCESS_RW, target, + &nobj); + if (ret) { + nouveau_mem_free_block(mem); + NV_ERROR(dev, "Error creating notifier ctxdma: %d\n", ret); + return ret; + } + nobj->dtor = nouveau_notifier_gpuobj_dtor; + nobj->priv = mem; + + ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL); + if (ret) { + nouveau_gpuobj_del(dev, &nobj); + nouveau_mem_free_block(mem); + NV_ERROR(dev, "Error referencing notifier ctxdma: %d\n", ret); + return ret; + } + + *b_offset = mem->start; + return 0; +} + +int +nouveau_notifier_offset(struct nouveau_gpuobj *nobj, uint32_t *poffset) +{ + if (!nobj || nobj->dtor != nouveau_notifier_gpuobj_dtor) + return -EINVAL; + + if (poffset) { + struct mem_block *mem = nobj->priv; + + if (*poffset >= mem->size) + return false; + + *poffset += mem->start; + } + + return 
0; +} + +int +nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_nouveau_notifierobj_alloc *na = data; + struct nouveau_channel *chan; + int ret; + + NOUVEAU_CHECK_INITIALISED_WITH_RETURN; + NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan); + + ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset); + if (ret) + return ret; + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c new file mode 100644 index 000000000000..93379bb81bea --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -0,0 +1,1294 @@ +/* + * Copyright (C) 2006 Ben Skeggs. + * + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +/* + * Authors: + * Ben Skeggs + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" +#include "nouveau_drm.h" + +/* NVidia uses context objects to drive drawing operations. + + Context objects can be selected into 8 subchannels in the FIFO, + and then used via DMA command buffers. + + A context object is referenced by a user defined handle (CARD32). The HW + looks up graphics objects in a hash table in the instance RAM. + + An entry in the hash table consists of 2 CARD32. The first CARD32 contains + the handle, the second one a bitfield, that contains the address of the + object in instance RAM. + + The format of the second CARD32 seems to be: + + NV4 to NV30: + + 15: 0 instance_addr >> 4 + 17:16 engine (here uses 1 = graphics) + 28:24 channel id (here uses 0) + 31 valid (use 1) + + NV40: + + 15: 0 instance_addr >> 4 (maybe 19-0) + 21:20 engine (here uses 1 = graphics) + I'm unsure about the other bits, but using 0 seems to work. 
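+ + For example (illustrative, not part of the original notes): on NV40 a graphics object at instance address 0x12340 would encode instance_addr >> 4 = 0x1234 in bits 15:0 and engine = 1 in bits 21:20, giving a second CARD32 of 0x00101234.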
+ + The key into the hash table depends on the object handle and channel id and + is given as: +*/ +static uint32_t +nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t hash = 0; + int i; + + NV_DEBUG(dev, "ch%d handle=0x%08x\n", channel, handle); + + for (i = 32; i > 0; i -= dev_priv->ramht_bits) { + hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1)); + handle >>= dev_priv->ramht_bits; + } + + if (dev_priv->card_type < NV_50) + hash ^= channel << (dev_priv->ramht_bits - 4); + hash <<= 3; + + NV_DEBUG(dev, "hash=0x%08x\n", hash); + return hash; +} + +static int +nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht, + uint32_t offset) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t ctx = nv_ro32(dev, ramht, (offset + 4)/4); + + if (dev_priv->card_type < NV_40) + return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0); + return (ctx != 0); +} + +static int +nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; + struct nouveau_channel *chan = ref->channel; + struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL; + uint32_t ctx, co, ho; + + if (!ramht) { + NV_ERROR(dev, "No hash table!\n"); + return -EINVAL; + } + + if (dev_priv->card_type < NV_40) { + ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) | + (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | + (ref->gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT); + } else + if (dev_priv->card_type < NV_50) { + ctx = (ref->instance >> 4) | + (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | + (ref->gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); + } else { + if (ref->gpuobj->engine == NVOBJ_ENGINE_DISPLAY) { + ctx = (ref->instance << 10) | 2; + } else { + ctx = (ref->instance >> 4) | + ((ref->gpuobj->engine << + NV40_RAMHT_CONTEXT_ENGINE_SHIFT)); + } + } + + instmem->prepare_access(dev, true); + co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle); + do { + if (!nouveau_ramht_entry_valid(dev, ramht, co)) { + NV_DEBUG(dev, + "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n", + chan->id, co, ref->handle, ctx); + nv_wo32(dev, ramht, (co + 0)/4, ref->handle); + nv_wo32(dev, ramht, (co + 4)/4, ctx); + + list_add_tail(&ref->list, &chan->ramht_refs); + instmem->finish_access(dev); + return 0; + } + NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n", + chan->id, co, nv_ro32(dev, ramht, co/4)); + + co += 8; + if (co >= dev_priv->ramht_size) + co = 0; + } while (co != ho); + instmem->finish_access(dev); + + NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id); + return -ENOMEM; +} + +static void +nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; + struct nouveau_channel *chan = ref->channel; + struct nouveau_gpuobj *ramht = chan->ramht ? 
chan->ramht->gpuobj : NULL; + uint32_t co, ho; + + if (!ramht) { + NV_ERROR(dev, "No hash table!\n"); + return; + } + + instmem->prepare_access(dev, true); + co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle); + do { + if (nouveau_ramht_entry_valid(dev, ramht, co) && + (ref->handle == nv_ro32(dev, ramht, (co/4)))) { + NV_DEBUG(dev, + "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n", + chan->id, co, ref->handle, + nv_ro32(dev, ramht, (co + 4))); + nv_wo32(dev, ramht, (co + 0)/4, 0x00000000); + nv_wo32(dev, ramht, (co + 4)/4, 0x00000000); + + list_del(&ref->list); + instmem->finish_access(dev); + return; + } + + co += 8; + if (co >= dev_priv->ramht_size) + co = 0; + } while (co != ho); + list_del(&ref->list); + instmem->finish_access(dev); + + NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n", + chan->id, ref->handle); +} + +int +nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, + uint32_t size, int align, uint32_t flags, + struct nouveau_gpuobj **gpuobj_ret) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_engine *engine = &dev_priv->engine; + struct nouveau_gpuobj *gpuobj; + struct mem_block *pramin = NULL; + int ret; + + NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n", + chan ? chan->id : -1, size, align, flags); + + if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL) + return -EINVAL; + + gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); + if (!gpuobj) + return -ENOMEM; + NV_DEBUG(dev, "gpuobj %p\n", gpuobj); + gpuobj->flags = flags; + gpuobj->im_channel = chan; + + list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); + + /* Choose between global instmem heap, and per-channel private + * instmem heap. On <NV50 allow requests for private instmem + * to be satisfied from global heap if no per-channel area + * available. + */ + if (chan) { + if (chan->ramin_heap) { + NV_DEBUG(dev, "private heap\n"); + pramin = chan->ramin_heap; + } else + if (dev_priv->card_type < NV_50) { + NV_DEBUG(dev, "global heap fallback\n"); + pramin = dev_priv->ramin_heap; + } + } else { + NV_DEBUG(dev, "global heap\n"); + pramin = dev_priv->ramin_heap; + } + + if (!pramin) { + NV_ERROR(dev, "No PRAMIN heap!\n"); + return -EINVAL; + } + + if (!chan) { + ret = engine->instmem.populate(dev, gpuobj, &size); + if (ret) { + nouveau_gpuobj_del(dev, &gpuobj); + return ret; + } + } + + /* Allocate a chunk of the PRAMIN aperture */ + gpuobj->im_pramin = nouveau_mem_alloc_block(pramin, size, + drm_order(align), + (struct drm_file *)-2, 0); + if (!gpuobj->im_pramin) { + nouveau_gpuobj_del(dev, &gpuobj); + return -ENOMEM; + } + + if (!chan) { + ret = engine->instmem.bind(dev, gpuobj); + if (ret) { + nouveau_gpuobj_del(dev, &gpuobj); + return ret; + } + } + + if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { + int i; + + engine->instmem.prepare_access(dev, true); + for (i = 0; i < gpuobj->im_pramin->size; i += 4) + nv_wo32(dev, gpuobj, i/4, 0); + engine->instmem.finish_access(dev); + } + + *gpuobj_ret = gpuobj; + return 0; +} + +int +nouveau_gpuobj_early_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + NV_DEBUG(dev, "\n"); + + INIT_LIST_HEAD(&dev_priv->gpuobj_list); + + return 0; +} + +int +nouveau_gpuobj_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int ret; + + NV_DEBUG(dev, "\n"); + + if (dev_priv->card_type < NV_50) { + ret = nouveau_gpuobj_new_fake(dev, + dev_priv->ramht_offset, ~0, dev_priv->ramht_size, + NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ALLOW_NO_REFS, + &dev_priv->ramht, NULL); + if (ret) + return ret; + } + + return 0; +} + +void +nouveau_gpuobj_takedown(struct drm_device *dev) +{ + struct
drm_nouveau_private *dev_priv = dev->dev_private; + + NV_DEBUG(dev, "\n"); + + nouveau_gpuobj_del(dev, &dev_priv->ramht); +} + +void +nouveau_gpuobj_late_takedown(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *gpuobj = NULL; + struct list_head *entry, *tmp; + + NV_DEBUG(dev, "\n"); + + list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) { + gpuobj = list_entry(entry, struct nouveau_gpuobj, list); + + NV_ERROR(dev, "gpuobj %p still exists at takedown, refs=%d\n", + gpuobj, gpuobj->refcount); + gpuobj->refcount = 0; + nouveau_gpuobj_del(dev, &gpuobj); + } +} + +int +nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_engine *engine = &dev_priv->engine; + struct nouveau_gpuobj *gpuobj; + int i; + + NV_DEBUG(dev, "gpuobj %p\n", pgpuobj ? *pgpuobj : NULL); + + if (!dev_priv || !pgpuobj || !(*pgpuobj)) + return -EINVAL; + gpuobj = *pgpuobj; + + if (gpuobj->refcount != 0) { + NV_ERROR(dev, "gpuobj refcount is %d\n", gpuobj->refcount); + return -EINVAL; + } + + if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) { + engine->instmem.prepare_access(dev, true); + for (i = 0; i < gpuobj->im_pramin->size; i += 4) + nv_wo32(dev, gpuobj, i/4, 0); + engine->instmem.finish_access(dev); + } + + if (gpuobj->dtor) + gpuobj->dtor(dev, gpuobj); + + if (gpuobj->im_backing && !(gpuobj->flags & NVOBJ_FLAG_FAKE)) + engine->instmem.clear(dev, gpuobj); + + if (gpuobj->im_pramin) { + if (gpuobj->flags & NVOBJ_FLAG_FAKE) + kfree(gpuobj->im_pramin); + else + nouveau_mem_free_block(gpuobj->im_pramin); + } + + list_del(&gpuobj->list); + + *pgpuobj = NULL; + kfree(gpuobj); + return 0; +} + +static int +nouveau_gpuobj_instance_get(struct drm_device *dev, + struct nouveau_channel *chan, + struct nouveau_gpuobj *gpuobj, uint32_t *inst) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *cpramin; + + /* <NV50 use PRAMIN address everywhere */ + if (dev_priv->card_type < NV_50) { + *inst = gpuobj->im_pramin->start; + return 0; + } + + if (chan && gpuobj->im_channel != chan) { + NV_ERROR(dev, "Channel mismatch: obj %d, ref %d\n", + gpuobj->im_channel->id, chan->id); + return -EINVAL; + } + + /* NV50 channel-local instance */ + if (chan) { + cpramin = chan->ramin->gpuobj; + *inst = gpuobj->im_pramin->start - cpramin->im_pramin->start; + return 0; + } + + /* NV50 global (VRAM) instance */ + if (!gpuobj->im_channel) { + /* ...from global heap */ + if (!gpuobj->im_backing) { + NV_ERROR(dev, "AII, no VRAM backing gpuobj\n"); + return -EINVAL; + } + *inst = gpuobj->im_backing_start; + return 0; + } else { + /* ...from local heap */ + cpramin = gpuobj->im_channel->ramin->gpuobj; + *inst = cpramin->im_backing_start + + (gpuobj->im_pramin->start - cpramin->im_pramin->start); + return 0; + } + + return -EINVAL; +} + +int +nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan, + uint32_t handle, struct nouveau_gpuobj *gpuobj, + struct nouveau_gpuobj_ref **ref_ret) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj_ref *ref; + uint32_t instance; + int ret; + + NV_DEBUG(dev, "ch%d h=0x%08x gpuobj=%p\n", + chan ?
chan->id : -1, handle, gpuobj); + + if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL)) + return -EINVAL; + + if (!chan && !ref_ret) + return -EINVAL; + + if (gpuobj->engine == NVOBJ_ENGINE_SW && !gpuobj->im_pramin) { + /* sw object */ + instance = 0x40; + } else { + ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance); + if (ret) + return ret; + } + + ref = kzalloc(sizeof(*ref), GFP_KERNEL); + if (!ref) + return -ENOMEM; + INIT_LIST_HEAD(&ref->list); + ref->gpuobj = gpuobj; + ref->channel = chan; + ref->instance = instance; + + if (!ref_ret) { + ref->handle = handle; + + ret = nouveau_ramht_insert(dev, ref); + if (ret) { + kfree(ref); + return ret; + } + } else { + ref->handle = ~0; + *ref_ret = ref; + } + + ref->gpuobj->refcount++; + return 0; +} + +int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **pref) +{ + struct nouveau_gpuobj_ref *ref; + + NV_DEBUG(dev, "ref %p\n", pref ? *pref : NULL); + + if (!dev || !pref || *pref == NULL) + return -EINVAL; + ref = *pref; + + if (ref->handle != ~0) + nouveau_ramht_remove(dev, ref); + + if (ref->gpuobj) { + ref->gpuobj->refcount--; + + if (ref->gpuobj->refcount == 0) { + if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS)) + nouveau_gpuobj_del(dev, &ref->gpuobj); + } + } + + *pref = NULL; + kfree(ref); + return 0; +} + +int +nouveau_gpuobj_new_ref(struct drm_device *dev, + struct nouveau_channel *oc, struct nouveau_channel *rc, + uint32_t handle, uint32_t size, int align, + uint32_t flags, struct nouveau_gpuobj_ref **ref) +{ + struct nouveau_gpuobj *gpuobj = NULL; + int ret; + + ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj); + if (ret) + return ret; + + ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref); + if (ret) { + nouveau_gpuobj_del(dev, &gpuobj); + return ret; + } + + return 0; +} + +int +nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle, + struct nouveau_gpuobj_ref **ref_ret) +{ + struct nouveau_gpuobj_ref *ref; + struct list_head *entry, *tmp; + + list_for_each_safe(entry, tmp, &chan->ramht_refs) { + ref = list_entry(entry, struct nouveau_gpuobj_ref, list); + + if (ref->handle == handle) { + if (ref_ret) + *ref_ret = ref; + return 0; + } + } + + return -EINVAL; +} + +int +nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset, + uint32_t b_offset, uint32_t size, + uint32_t flags, struct nouveau_gpuobj **pgpuobj, + struct nouveau_gpuobj_ref **pref) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *gpuobj = NULL; + int i; + + NV_DEBUG(dev, + "p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n", + p_offset, b_offset, size, flags); + + gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); + if (!gpuobj) + return -ENOMEM; + NV_DEBUG(dev, "gpuobj %p\n", gpuobj); + gpuobj->im_channel = NULL; + gpuobj->flags = flags | NVOBJ_FLAG_FAKE; + + list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); + + if (p_offset != ~0) { + gpuobj->im_pramin = kzalloc(sizeof(struct mem_block), + GFP_KERNEL); + if (!gpuobj->im_pramin) { + nouveau_gpuobj_del(dev, &gpuobj); + return -ENOMEM; + } + gpuobj->im_pramin->start = p_offset; + gpuobj->im_pramin->size = size; + } + + if (b_offset != ~0) { + gpuobj->im_backing = (struct nouveau_bo *)-1; + gpuobj->im_backing_start = b_offset; + } + + if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { + dev_priv->engine.instmem.prepare_access(dev, true); + for (i = 0; i < gpuobj->im_pramin->size; i += 4) + nv_wo32(dev, gpuobj, i/4, 0); + dev_priv->engine.instmem.finish_access(dev); + } + + if (pref) 
{ + i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref); + if (i) { + nouveau_gpuobj_del(dev, &gpuobj); + return i; + } + } + + if (pgpuobj) + *pgpuobj = gpuobj; + return 0; +} + + +static uint32_t +nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + /*XXX: dodgy hack for now */ + if (dev_priv->card_type >= NV_50) + return 24; + if (dev_priv->card_type >= NV_40) + return 32; + return 16; +} + +/* + DMA objects are used to reference a piece of memory in the + framebuffer, PCI or AGP address space. Each object is 16 bytes big + and looks as follows: + + entry[0] + 11:0 class (seems like I can always use 0 here) + 12 page table present? + 13 page entry linear? + 15:14 access: 0 rw, 1 ro, 2 wo + 17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP + 31:20 dma adjust (bits 0-11 of the address) + entry[1] + dma limit (size of transfer) + entry[X] + 1 0 readonly, 1 readwrite + 31:12 dma frame address of the page (bits 12-31 of the address) + entry[N] + page table terminator, same value as the first pte, as does nvidia + rivatv uses 0xffffffff + + Non linear page tables need a list of frame addresses afterwards, + the rivatv project has some info on this. + + The method below creates a DMA object in instance RAM and returns a handle + to it that can be used to set up context objects. +*/ +int +nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, + uint64_t offset, uint64_t size, int access, + int target, struct nouveau_gpuobj **gpuobj) +{ + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; + int ret; + + NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n", + chan->id, class, offset, size); + NV_DEBUG(dev, "access=%d target=%d\n", access, target); + + switch (target) { + case NV_DMA_TARGET_AGP: + offset += dev_priv->gart_info.aper_base; + break; + default: + break; + } + + ret = nouveau_gpuobj_new(dev, chan, + nouveau_gpuobj_class_instmem_size(dev, class), + 16, NVOBJ_FLAG_ZERO_ALLOC | + NVOBJ_FLAG_ZERO_FREE, gpuobj); + if (ret) { + NV_ERROR(dev, "Error creating gpuobj: %d\n", ret); + return ret; + } + + instmem->prepare_access(dev, true); + + if (dev_priv->card_type < NV_50) { + uint32_t frame, adjust, pte_flags = 0; + + if (access != NV_DMA_ACCESS_RO) + pte_flags |= (1<<1); + adjust = offset & 0x00000fff; + frame = offset & ~0x00000fff; + + nv_wo32(dev, *gpuobj, 0, ((1<<12) | (1<<13) | + (adjust << 20) | + (access << 14) | + (target << 16) | + class)); + nv_wo32(dev, *gpuobj, 1, size - 1); + nv_wo32(dev, *gpuobj, 2, frame | pte_flags); + nv_wo32(dev, *gpuobj, 3, frame | pte_flags); + } else { + uint64_t limit = offset + size - 1; + uint32_t flags0, flags5; + + if (target == NV_DMA_TARGET_VIDMEM) { + flags0 = 0x00190000; + flags5 = 0x00010000; + } else { + flags0 = 0x7fc00000; + flags5 = 0x00080000; + } + + nv_wo32(dev, *gpuobj, 0, flags0 | class); + nv_wo32(dev, *gpuobj, 1, lower_32_bits(limit)); + nv_wo32(dev, *gpuobj, 2, lower_32_bits(offset)); + nv_wo32(dev, *gpuobj, 3, ((upper_32_bits(limit) & 0xff) << 24) | + (upper_32_bits(offset) & 0xff)); + nv_wo32(dev, *gpuobj, 5, flags5); + } + + instmem->finish_access(dev); + + (*gpuobj)->engine = NVOBJ_ENGINE_SW; + (*gpuobj)->class = class; + return 0; +} + +int +nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan, + uint64_t offset, uint64_t size, int access, + struct nouveau_gpuobj **gpuobj, + uint32_t *o_ret) +{ + 
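/* For AGP, or for SGDMA on NV50 and later, build a fresh DMA object covering the + * requested range of the GART aperture; for SGDMA on earlier cards, reuse the + * global sg_ctxdma object and hand the offset back through o_ret instead. + */ +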
struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + int ret; + + if (dev_priv->gart_info.type == NOUVEAU_GART_AGP || + (dev_priv->card_type >= NV_50 && + dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) { + ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, + offset + dev_priv->vm_gart_base, + size, access, NV_DMA_TARGET_AGP, + gpuobj); + if (o_ret) + *o_ret = 0; + } else + if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) { + *gpuobj = dev_priv->gart_info.sg_ctxdma; + if (offset & ~0xffffffffULL) { + NV_ERROR(dev, "obj offset exceeds 32-bits\n"); + return -EINVAL; + } + if (o_ret) + *o_ret = (uint32_t)offset; + ret = (*gpuobj != NULL) ? 0 : -EINVAL; + } else { + NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type); + return -EINVAL; + } + + return ret; +} + +/* Context objects in the instance RAM have the following structure. + * On NV40 they are 32 byte long, on NV30 and smaller 16 bytes. + + NV4 - NV30: + + entry[0] + 11:0 class + 12 chroma key enable + 13 user clip enable + 14 swizzle enable + 17:15 patch config: + scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre + 18 synchronize enable + 19 endian: 1 big, 0 little + 21:20 dither mode + 23 single step enable + 24 patch status: 0 invalid, 1 valid + 25 context_surface 0: 1 valid + 26 context surface 1: 1 valid + 27 context pattern: 1 valid + 28 context rop: 1 valid + 29,30 context beta, beta4 + entry[1] + 7:0 mono format + 15:8 color format + 31:16 notify instance address + entry[2] + 15:0 dma 0 instance address + 31:16 dma 1 instance address + entry[3] + dma method traps + + NV40: + No idea what the exact format is. Here's what can be deducted: + + entry[0]: + 11:0 class (maybe uses more bits here?) + 17 user clip enable + 21:19 patch config + 25 patch status valid ? + entry[1]: + 15:0 DMA notifier (maybe 20:0) + entry[2]: + 15:0 DMA 0 instance (maybe 20:0) + 24 big endian + entry[3]: + 15:0 DMA 1 instance (maybe 20:0) + entry[4]: + entry[5]: + set to 0? 
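+ + In the code below only entry[0] (the class) is written for NV40-style objects, plus the big-endian bit in entry[2] when built big-endian; the remaining words stay zero thanks to NVOBJ_FLAG_ZERO_ALLOC.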
+*/ +int +nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class, + struct nouveau_gpuobj **gpuobj) +{ + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + int ret; + + NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class); + + ret = nouveau_gpuobj_new(dev, chan, + nouveau_gpuobj_class_instmem_size(dev, class), + 16, + NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, + gpuobj); + if (ret) { + NV_ERROR(dev, "Error creating gpuobj: %d\n", ret); + return ret; + } + + dev_priv->engine.instmem.prepare_access(dev, true); + if (dev_priv->card_type >= NV_50) { + nv_wo32(dev, *gpuobj, 0, class); + nv_wo32(dev, *gpuobj, 5, 0x00010000); + } else { + switch (class) { + case NV_CLASS_NULL: + nv_wo32(dev, *gpuobj, 0, 0x00001030); + nv_wo32(dev, *gpuobj, 1, 0xFFFFFFFF); + break; + default: + if (dev_priv->card_type >= NV_40) { + nv_wo32(dev, *gpuobj, 0, class); +#ifdef __BIG_ENDIAN + nv_wo32(dev, *gpuobj, 2, 0x01000000); +#endif + } else { +#ifdef __BIG_ENDIAN + nv_wo32(dev, *gpuobj, 0, class | 0x00080000); +#else + nv_wo32(dev, *gpuobj, 0, class); +#endif + } + } + } + dev_priv->engine.instmem.finish_access(dev); + + (*gpuobj)->engine = NVOBJ_ENGINE_GR; + (*gpuobj)->class = class; + return 0; +} + +static int +nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, + struct nouveau_gpuobj **gpuobj_ret) +{ + struct drm_nouveau_private *dev_priv = chan->dev->dev_private; + struct nouveau_gpuobj *gpuobj; + + if (!chan || !gpuobj_ret || *gpuobj_ret != NULL) + return -EINVAL; + + gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); + if (!gpuobj) + return -ENOMEM; + gpuobj->engine = NVOBJ_ENGINE_SW; + gpuobj->class = class; + + list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); + *gpuobj_ret = gpuobj; + return 0; +} + +static int +nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *pramin = NULL; + uint32_t size; + uint32_t base; + int ret; + + NV_DEBUG(dev, "ch%d\n", chan->id); + + /* Base amount for object storage (4KiB enough?) 
*/ + size = 0x1000; + base = 0; + + /* PGRAPH context */ + + if (dev_priv->card_type == NV_50) { + /* Various fixed table thingos */ + size += 0x1400; /* mostly unknown stuff */ + size += 0x4000; /* vm pd */ + base = 0x6000; + /* RAMHT, not sure about setting size yet, 32KiB to be safe */ + size += 0x8000; + /* RAMFC */ + size += 0x1000; + /* PGRAPH context */ + size += 0x70000; + } + + NV_DEBUG(dev, "ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n", + chan->id, size, base); + ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0, + &chan->ramin); + if (ret) { + NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret); + return ret; + } + pramin = chan->ramin->gpuobj; + + ret = nouveau_mem_init_heap(&chan->ramin_heap, + pramin->im_pramin->start + base, size); + if (ret) { + NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret); + nouveau_gpuobj_ref_del(dev, &chan->ramin); + return ret; + } + + return 0; +} + +int +nouveau_gpuobj_channel_init(struct nouveau_channel *chan, + uint32_t vram_h, uint32_t tt_h) +{ + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; + struct nouveau_gpuobj *vram = NULL, *tt = NULL; + int ret, i; + + INIT_LIST_HEAD(&chan->ramht_refs); + + NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); + + /* Reserve a block of PRAMIN for the channel + *XXX: maybe on <NV50 too at some point + */ + if (0 || dev_priv->card_type == NV_50) { + ret = nouveau_gpuobj_channel_init_pramin(chan); + if (ret) { + NV_ERROR(dev, "init pramin\n"); + return ret; + } + } + + /* NV50 VM + * - Allocate per-channel page-directory + * - Map GART and VRAM into the channel's address space at the + * locations determined during init. + */ + if (dev_priv->card_type >= NV_50) { + uint32_t vm_offset, pde; + + instmem->prepare_access(dev, true); + + vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ?
0x1400 : 0x200; + vm_offset += chan->ramin->gpuobj->im_pramin->start; + + ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000, + 0, &chan->vm_pd, NULL); + if (ret) { + instmem->finish_access(dev); + return ret; + } + for (i = 0; i < 0x4000; i += 8) { + nv_wo32(dev, chan->vm_pd, (i+0)/4, 0x00000000); + nv_wo32(dev, chan->vm_pd, (i+4)/4, 0xdeadcafe); + } + + pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 2; + ret = nouveau_gpuobj_ref_add(dev, NULL, 0, + dev_priv->gart_info.sg_ctxdma, + &chan->vm_gart_pt); + if (ret) { + instmem->finish_access(dev); + return ret; + } + nv_wo32(dev, chan->vm_pd, pde++, + chan->vm_gart_pt->instance | 0x03); + nv_wo32(dev, chan->vm_pd, pde++, 0x00000000); + + pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 2; + for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) { + ret = nouveau_gpuobj_ref_add(dev, NULL, 0, + dev_priv->vm_vram_pt[i], + &chan->vm_vram_pt[i]); + if (ret) { + instmem->finish_access(dev); + return ret; + } + + nv_wo32(dev, chan->vm_pd, pde++, + chan->vm_vram_pt[i]->instance | 0x61); + nv_wo32(dev, chan->vm_pd, pde++, 0x00000000); + } + + instmem->finish_access(dev); + } + + /* RAMHT */ + if (dev_priv->card_type < NV_50) { + ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht, + &chan->ramht); + if (ret) + return ret; + } else { + ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, + 0x8000, 16, + NVOBJ_FLAG_ZERO_ALLOC, + &chan->ramht); + if (ret) + return ret; + } + + /* VRAM ctxdma */ + if (dev_priv->card_type >= NV_50) { + ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, + 0, dev_priv->vm_end, + NV_DMA_ACCESS_RW, + NV_DMA_TARGET_AGP, &vram); + if (ret) { + NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); + return ret; + } + } else { + ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, + 0, dev_priv->fb_available_size, + NV_DMA_ACCESS_RW, + NV_DMA_TARGET_VIDMEM, &vram); + if (ret) { + NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); + return ret; + } + } + + ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL); + if (ret) { + NV_ERROR(dev, "Error referencing VRAM ctxdma: %d\n", ret); + return ret; + } + + /* TT memory ctxdma */ + if (dev_priv->card_type >= NV_50) { + tt = vram; + } else + if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) { + ret = nouveau_gpuobj_gart_dma_new(chan, 0, + dev_priv->gart_info.aper_size, + NV_DMA_ACCESS_RW, &tt, NULL); + } else { + NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type); + ret = -EINVAL; + } + + if (ret) { + NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret); + return ret; + } + + ret = nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL); + if (ret) { + NV_ERROR(dev, "Error referencing TT ctxdma: %d\n", ret); + return ret; + } + + return 0; +} + +void +nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) +{ + struct drm_nouveau_private *dev_priv = chan->dev->dev_private; + struct drm_device *dev = chan->dev; + struct list_head *entry, *tmp; + struct nouveau_gpuobj_ref *ref; + int i; + + NV_DEBUG(dev, "ch%d\n", chan->id); + + if (!chan->ramht_refs.next) + return; + + list_for_each_safe(entry, tmp, &chan->ramht_refs) { + ref = list_entry(entry, struct nouveau_gpuobj_ref, list); + + nouveau_gpuobj_ref_del(dev, &ref); + } + + nouveau_gpuobj_ref_del(dev, &chan->ramht); + + nouveau_gpuobj_del(dev, &chan->vm_pd); + nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt); + for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) + nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]); + + if (chan->ramin_heap) + nouveau_mem_takedown(&chan->ramin_heap); + if 
(chan->ramin) + nouveau_gpuobj_ref_del(dev, &chan->ramin); + +} + +int +nouveau_gpuobj_suspend(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *gpuobj; + int i; + + if (dev_priv->card_type < NV_50) { + dev_priv->susres.ramin_copy = vmalloc(dev_priv->ramin_rsvd_vram); + if (!dev_priv->susres.ramin_copy) + return -ENOMEM; + + for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4) + dev_priv->susres.ramin_copy[i/4] = nv_ri32(dev, i); + return 0; + } + + list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { + if (!gpuobj->im_backing || (gpuobj->flags & NVOBJ_FLAG_FAKE)) + continue; + + gpuobj->im_backing_suspend = vmalloc(gpuobj->im_pramin->size); + if (!gpuobj->im_backing_suspend) { + nouveau_gpuobj_resume(dev); + return -ENOMEM; + } + + dev_priv->engine.instmem.prepare_access(dev, false); + for (i = 0; i < gpuobj->im_pramin->size / 4; i++) + gpuobj->im_backing_suspend[i] = nv_ro32(dev, gpuobj, i); + dev_priv->engine.instmem.finish_access(dev); + } + + return 0; +} + +void +nouveau_gpuobj_suspend_cleanup(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *gpuobj; + + if (dev_priv->card_type < NV_50) { + vfree(dev_priv->susres.ramin_copy); + dev_priv->susres.ramin_copy = NULL; + return; + } + + list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { + if (!gpuobj->im_backing_suspend) + continue; + + vfree(gpuobj->im_backing_suspend); + gpuobj->im_backing_suspend = NULL; + } +} + +void +nouveau_gpuobj_resume(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *gpuobj; + int i; + + if (dev_priv->card_type < NV_50) { + for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4) + nv_wi32(dev, i, dev_priv->susres.ramin_copy[i/4]); + nouveau_gpuobj_suspend_cleanup(dev); + return; + } + + list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { + if (!gpuobj->im_backing_suspend) + continue; + + dev_priv->engine.instmem.prepare_access(dev, true); + for (i = 0; i < gpuobj->im_pramin->size / 4; i++) + nv_wo32(dev, gpuobj, i, gpuobj->im_backing_suspend[i]); + dev_priv->engine.instmem.finish_access(dev); + } + + nouveau_gpuobj_suspend_cleanup(dev); +} + +int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_nouveau_grobj_alloc *init = data; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + struct nouveau_pgraph_object_class *grc; + struct nouveau_gpuobj *gr = NULL; + struct nouveau_channel *chan; + int ret; + + NOUVEAU_CHECK_INITIALISED_WITH_RETURN; + NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan); + + if (init->handle == ~0) + return -EINVAL; + + grc = pgraph->grclass; + while (grc->id) { + if (grc->id == init->class) + break; + grc++; + } + + if (!grc->id) { + NV_ERROR(dev, "Illegal object class: 0x%x\n", init->class); + return -EPERM; + } + + if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0) + return -EEXIST; + + if (!grc->software) + ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr); + else + ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr); + + if (ret) { + NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n", + ret, init->channel, init->handle); + return ret; + } + + ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL); + if (ret) { + NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n", + ret, init->channel, init->handle); + 
nouveau_gpuobj_del(dev, &gr); + return ret; + } + + return 0; +} + +int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_nouveau_gpuobj_free *objfree = data; + struct nouveau_gpuobj_ref *ref; + struct nouveau_channel *chan; + int ret; + + NOUVEAU_CHECK_INITIALISED_WITH_RETURN; + NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan); + + ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref); + if (ret) + return ret; + nouveau_gpuobj_ref_del(dev, &ref); + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h new file mode 100644 index 000000000000..fa1b0e7165b9 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_reg.h @@ -0,0 +1,836 @@ + + +#define NV03_BOOT_0 0x00100000 +# define NV03_BOOT_0_RAM_AMOUNT 0x00000003 +# define NV03_BOOT_0_RAM_AMOUNT_8MB 0x00000000 +# define NV03_BOOT_0_RAM_AMOUNT_2MB 0x00000001 +# define NV03_BOOT_0_RAM_AMOUNT_4MB 0x00000002 +# define NV03_BOOT_0_RAM_AMOUNT_8MB_SDRAM 0x00000003 +# define NV04_BOOT_0_RAM_AMOUNT_32MB 0x00000000 +# define NV04_BOOT_0_RAM_AMOUNT_4MB 0x00000001 +# define NV04_BOOT_0_RAM_AMOUNT_8MB 0x00000002 +# define NV04_BOOT_0_RAM_AMOUNT_16MB 0x00000003 + +#define NV04_FIFO_DATA 0x0010020c +# define NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK 0xfff00000 +# define NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT 20 + +#define NV_RAMIN 0x00700000 + +#define NV_RAMHT_HANDLE_OFFSET 0 +#define NV_RAMHT_CONTEXT_OFFSET 4 +# define NV_RAMHT_CONTEXT_VALID (1<<31) +# define NV_RAMHT_CONTEXT_CHANNEL_SHIFT 24 +# define NV_RAMHT_CONTEXT_ENGINE_SHIFT 16 +# define NV_RAMHT_CONTEXT_ENGINE_SOFTWARE 0 +# define NV_RAMHT_CONTEXT_ENGINE_GRAPHICS 1 +# define NV_RAMHT_CONTEXT_INSTANCE_SHIFT 0 +# define NV40_RAMHT_CONTEXT_CHANNEL_SHIFT 23 +# define NV40_RAMHT_CONTEXT_ENGINE_SHIFT 20 +# define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT 0 + +/* DMA object defines */ +#define NV_DMA_ACCESS_RW 0 +#define NV_DMA_ACCESS_RO 1 +#define NV_DMA_ACCESS_WO 2 +#define NV_DMA_TARGET_VIDMEM 0 +#define NV_DMA_TARGET_PCI 2 +#define NV_DMA_TARGET_AGP 3 +/* The following is not a real value used by the card, it's changed by + * nouveau_object_dma_create */ +#define NV_DMA_TARGET_PCI_NONLINEAR 8 + +/* Some object classes we care about in the drm */ +#define NV_CLASS_DMA_FROM_MEMORY 0x00000002 +#define NV_CLASS_DMA_TO_MEMORY 0x00000003 +#define NV_CLASS_NULL 0x00000030 +#define NV_CLASS_DMA_IN_MEMORY 0x0000003D + +#define NV03_USER(i) (0x00800000+(i*NV03_USER_SIZE)) +#define NV03_USER__SIZE 16 +#define NV10_USER__SIZE 32 +#define NV03_USER_SIZE 0x00010000 +#define NV03_USER_DMA_PUT(i) (0x00800040+(i*NV03_USER_SIZE)) +#define NV03_USER_DMA_PUT__SIZE 16 +#define NV10_USER_DMA_PUT__SIZE 32 +#define NV03_USER_DMA_GET(i) (0x00800044+(i*NV03_USER_SIZE)) +#define NV03_USER_DMA_GET__SIZE 16 +#define NV10_USER_DMA_GET__SIZE 32 +#define NV03_USER_REF_CNT(i) (0x00800048+(i*NV03_USER_SIZE)) +#define NV03_USER_REF_CNT__SIZE 16 +#define NV10_USER_REF_CNT__SIZE 32 + +#define NV40_USER(i) (0x00c00000+(i*NV40_USER_SIZE)) +#define NV40_USER_SIZE 0x00001000 +#define NV40_USER_DMA_PUT(i) (0x00c00040+(i*NV40_USER_SIZE)) +#define NV40_USER_DMA_PUT__SIZE 32 +#define NV40_USER_DMA_GET(i) (0x00c00044+(i*NV40_USER_SIZE)) +#define NV40_USER_DMA_GET__SIZE 32 +#define NV40_USER_REF_CNT(i) (0x00c00048+(i*NV40_USER_SIZE)) +#define NV40_USER_REF_CNT__SIZE 32 + +#define NV50_USER(i) (0x00c00000+(i*NV50_USER_SIZE)) +#define NV50_USER_SIZE 0x00002000 +#define NV50_USER_DMA_PUT(i) (0x00c00040+(i*NV50_USER_SIZE)) +#define 
NV50_USER_DMA_PUT__SIZE 128 +#define NV50_USER_DMA_GET(i) (0x00c00044+(i*NV50_USER_SIZE)) +#define NV50_USER_DMA_GET__SIZE 128 +#define NV50_USER_REF_CNT(i) (0x00c00048+(i*NV50_USER_SIZE)) +#define NV50_USER_REF_CNT__SIZE 128 + +#define NV03_FIFO_SIZE 0x8000UL + +#define NV03_PMC_BOOT_0 0x00000000 +#define NV03_PMC_BOOT_1 0x00000004 +#define NV03_PMC_INTR_0 0x00000100 +# define NV_PMC_INTR_0_PFIFO_PENDING (1<<8) +# define NV_PMC_INTR_0_PGRAPH_PENDING (1<<12) +# define NV_PMC_INTR_0_NV50_I2C_PENDING (1<<21) +# define NV_PMC_INTR_0_CRTC0_PENDING (1<<24) +# define NV_PMC_INTR_0_CRTC1_PENDING (1<<25) +# define NV_PMC_INTR_0_NV50_DISPLAY_PENDING (1<<26) +# define NV_PMC_INTR_0_CRTCn_PENDING (3<<24) +#define NV03_PMC_INTR_EN_0 0x00000140 +# define NV_PMC_INTR_EN_0_MASTER_ENABLE (1<<0) +#define NV03_PMC_ENABLE 0x00000200 +# define NV_PMC_ENABLE_PFIFO (1<<8) +# define NV_PMC_ENABLE_PGRAPH (1<<12) +/* Disabling the below bit breaks newer (G7X only?) mobile chipsets, + * the card will hang early on in the X init process. + */ +# define NV_PMC_ENABLE_UNK13 (1<<13) +#define NV40_PMC_BACKLIGHT 0x000015f0 +# define NV40_PMC_BACKLIGHT_MASK 0x001f0000 +#define NV40_PMC_1700 0x00001700 +#define NV40_PMC_1704 0x00001704 +#define NV40_PMC_1708 0x00001708 +#define NV40_PMC_170C 0x0000170C + +/* probably PMC ? */ +#define NV50_PUNK_BAR0_PRAMIN 0x00001700 +#define NV50_PUNK_BAR_CFG_BASE 0x00001704 +#define NV50_PUNK_BAR_CFG_BASE_VALID (1<<30) +#define NV50_PUNK_BAR1_CTXDMA 0x00001708 +#define NV50_PUNK_BAR1_CTXDMA_VALID (1<<31) +#define NV50_PUNK_BAR3_CTXDMA 0x0000170C +#define NV50_PUNK_BAR3_CTXDMA_VALID (1<<31) +#define NV50_PUNK_UNK1710 0x00001710 + +#define NV04_PBUS_PCI_NV_1 0x00001804 +#define NV04_PBUS_PCI_NV_19 0x0000184C +#define NV04_PBUS_PCI_NV_20 0x00001850 +# define NV04_PBUS_PCI_NV_20_ROM_SHADOW_DISABLED (0 << 0) +# define NV04_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED (1 << 0) + +#define NV04_PTIMER_INTR_0 0x00009100 +#define NV04_PTIMER_INTR_EN_0 0x00009140 +#define NV04_PTIMER_NUMERATOR 0x00009200 +#define NV04_PTIMER_DENOMINATOR 0x00009210 +#define NV04_PTIMER_TIME_0 0x00009400 +#define NV04_PTIMER_TIME_1 0x00009410 +#define NV04_PTIMER_ALARM_0 0x00009420 + +#define NV04_PFB_CFG0 0x00100200 +#define NV04_PFB_CFG1 0x00100204 +#define NV40_PFB_020C 0x0010020C +#define NV10_PFB_TILE(i) (0x00100240 + (i*16)) +#define NV10_PFB_TILE__SIZE 8 +#define NV10_PFB_TLIMIT(i) (0x00100244 + (i*16)) +#define NV10_PFB_TSIZE(i) (0x00100248 + (i*16)) +#define NV10_PFB_TSTATUS(i) (0x0010024C + (i*16)) +#define NV10_PFB_CLOSE_PAGE2 0x0010033C +#define NV40_PFB_TILE(i) (0x00100600 + (i*16)) +#define NV40_PFB_TILE__SIZE_0 12 +#define NV40_PFB_TILE__SIZE_1 15 +#define NV40_PFB_TLIMIT(i) (0x00100604 + (i*16)) +#define NV40_PFB_TSIZE(i) (0x00100608 + (i*16)) +#define NV40_PFB_TSTATUS(i) (0x0010060C + (i*16)) +#define NV40_PFB_UNK_800 0x00100800 + +#define NV04_PGRAPH_DEBUG_0 0x00400080 +#define NV04_PGRAPH_DEBUG_1 0x00400084 +#define NV04_PGRAPH_DEBUG_2 0x00400088 +#define NV04_PGRAPH_DEBUG_3 0x0040008c +#define NV10_PGRAPH_DEBUG_4 0x00400090 +#define NV03_PGRAPH_INTR 0x00400100 +#define NV03_PGRAPH_NSTATUS 0x00400104 +# define NV04_PGRAPH_NSTATUS_STATE_IN_USE (1<<11) +# define NV04_PGRAPH_NSTATUS_INVALID_STATE (1<<12) +# define NV04_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<13) +# define NV04_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<14) +# define NV10_PGRAPH_NSTATUS_STATE_IN_USE (1<<23) +# define NV10_PGRAPH_NSTATUS_INVALID_STATE (1<<24) +# define NV10_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<25) +# define NV10_PGRAPH_NSTATUS_PROTECTION_FAULT 
(1<<26) +#define NV03_PGRAPH_NSOURCE 0x00400108 +# define NV03_PGRAPH_NSOURCE_NOTIFICATION (1<<0) +# define NV03_PGRAPH_NSOURCE_DATA_ERROR (1<<1) +# define NV03_PGRAPH_NSOURCE_PROTECTION_ERROR (1<<2) +# define NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION (1<<3) +# define NV03_PGRAPH_NSOURCE_LIMIT_COLOR (1<<4) +# define NV03_PGRAPH_NSOURCE_LIMIT_ZETA (1<<5) +# define NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD (1<<6) +# define NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION (1<<7) +# define NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION (1<<8) +# define NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION (1<<9) +# define NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION (1<<10) +# define NV03_PGRAPH_NSOURCE_STATE_INVALID (1<<11) +# define NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY (1<<12) +# define NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE (1<<13) +# define NV03_PGRAPH_NSOURCE_METHOD_CNT (1<<14) +# define NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION (1<<15) +# define NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION (1<<16) +# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_A (1<<17) +# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_B (1<<18) +#define NV03_PGRAPH_INTR_EN 0x00400140 +#define NV40_PGRAPH_INTR_EN 0x0040013C +# define NV_PGRAPH_INTR_NOTIFY (1<<0) +# define NV_PGRAPH_INTR_MISSING_HW (1<<4) +# define NV_PGRAPH_INTR_CONTEXT_SWITCH (1<<12) +# define NV_PGRAPH_INTR_BUFFER_NOTIFY (1<<16) +# define NV_PGRAPH_INTR_ERROR (1<<20) +#define NV10_PGRAPH_CTX_CONTROL 0x00400144 +#define NV10_PGRAPH_CTX_USER 0x00400148 +#define NV10_PGRAPH_CTX_SWITCH1 0x0040014C +#define NV10_PGRAPH_CTX_SWITCH2 0x00400150 +#define NV10_PGRAPH_CTX_SWITCH3 0x00400154 +#define NV10_PGRAPH_CTX_SWITCH4 0x00400158 +#define NV10_PGRAPH_CTX_SWITCH5 0x0040015C +#define NV04_PGRAPH_CTX_SWITCH1 0x00400160 +#define NV10_PGRAPH_CTX_CACHE1 0x00400160 +#define NV04_PGRAPH_CTX_SWITCH2 0x00400164 +#define NV04_PGRAPH_CTX_SWITCH3 0x00400168 +#define NV04_PGRAPH_CTX_SWITCH4 0x0040016C +#define NV04_PGRAPH_CTX_CONTROL 0x00400170 +#define NV04_PGRAPH_CTX_USER 0x00400174 +#define NV04_PGRAPH_CTX_CACHE1 0x00400180 +#define NV10_PGRAPH_CTX_CACHE2 0x00400180 +#define NV03_PGRAPH_CTX_CONTROL 0x00400190 +#define NV03_PGRAPH_CTX_USER 0x00400194 +#define NV04_PGRAPH_CTX_CACHE2 0x004001A0 +#define NV10_PGRAPH_CTX_CACHE3 0x004001A0 +#define NV04_PGRAPH_CTX_CACHE3 0x004001C0 +#define NV10_PGRAPH_CTX_CACHE4 0x004001C0 +#define NV04_PGRAPH_CTX_CACHE4 0x004001E0 +#define NV10_PGRAPH_CTX_CACHE5 0x004001E0 +#define NV40_PGRAPH_CTXCTL_0304 0x00400304 +#define NV40_PGRAPH_CTXCTL_0304_XFER_CTX 0x00000001 +#define NV40_PGRAPH_CTXCTL_UCODE_STAT 0x00400308 +#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_MASK 0xff000000 +#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT 24 +#define NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK 0x00ffffff +#define NV40_PGRAPH_CTXCTL_0310 0x00400310 +#define NV40_PGRAPH_CTXCTL_0310_XFER_SAVE 0x00000020 +#define NV40_PGRAPH_CTXCTL_0310_XFER_LOAD 0x00000040 +#define NV40_PGRAPH_CTXCTL_030C 0x0040030c +#define NV40_PGRAPH_CTXCTL_UCODE_INDEX 0x00400324 +#define NV40_PGRAPH_CTXCTL_UCODE_DATA 0x00400328 +#define NV40_PGRAPH_CTXCTL_CUR 0x0040032c +#define NV40_PGRAPH_CTXCTL_CUR_LOADED 0x01000000 +#define NV40_PGRAPH_CTXCTL_CUR_INSTANCE 0x000FFFFF +#define NV40_PGRAPH_CTXCTL_NEXT 0x00400330 +#define NV40_PGRAPH_CTXCTL_NEXT_INSTANCE 0x000fffff +#define NV50_PGRAPH_CTXCTL_CUR 0x0040032c +#define NV50_PGRAPH_CTXCTL_CUR_LOADED 0x80000000 +#define NV50_PGRAPH_CTXCTL_CUR_INSTANCE 0x00ffffff +#define NV50_PGRAPH_CTXCTL_NEXT 0x00400330 +#define NV50_PGRAPH_CTXCTL_NEXT_INSTANCE 0x00ffffff +#define NV03_PGRAPH_ABS_X_RAM 0x00400400 +#define NV03_PGRAPH_ABS_Y_RAM 0x00400480 +#define 
NV03_PGRAPH_X_MISC 0x00400500 +#define NV03_PGRAPH_Y_MISC 0x00400504 +#define NV04_PGRAPH_VALID1 0x00400508 +#define NV04_PGRAPH_SOURCE_COLOR 0x0040050C +#define NV04_PGRAPH_MISC24_0 0x00400510 +#define NV03_PGRAPH_XY_LOGIC_MISC0 0x00400514 +#define NV03_PGRAPH_XY_LOGIC_MISC1 0x00400518 +#define NV03_PGRAPH_XY_LOGIC_MISC2 0x0040051C +#define NV03_PGRAPH_XY_LOGIC_MISC3 0x00400520 +#define NV03_PGRAPH_CLIPX_0 0x00400524 +#define NV03_PGRAPH_CLIPX_1 0x00400528 +#define NV03_PGRAPH_CLIPY_0 0x0040052C +#define NV03_PGRAPH_CLIPY_1 0x00400530 +#define NV03_PGRAPH_ABS_ICLIP_XMAX 0x00400534 +#define NV03_PGRAPH_ABS_ICLIP_YMAX 0x00400538 +#define NV03_PGRAPH_ABS_UCLIP_XMIN 0x0040053C +#define NV03_PGRAPH_ABS_UCLIP_YMIN 0x00400540 +#define NV03_PGRAPH_ABS_UCLIP_XMAX 0x00400544 +#define NV03_PGRAPH_ABS_UCLIP_YMAX 0x00400548 +#define NV03_PGRAPH_ABS_UCLIPA_XMIN 0x00400560 +#define NV03_PGRAPH_ABS_UCLIPA_YMIN 0x00400564 +#define NV03_PGRAPH_ABS_UCLIPA_XMAX 0x00400568 +#define NV03_PGRAPH_ABS_UCLIPA_YMAX 0x0040056C +#define NV04_PGRAPH_MISC24_1 0x00400570 +#define NV04_PGRAPH_MISC24_2 0x00400574 +#define NV04_PGRAPH_VALID2 0x00400578 +#define NV04_PGRAPH_PASSTHRU_0 0x0040057C +#define NV04_PGRAPH_PASSTHRU_1 0x00400580 +#define NV04_PGRAPH_PASSTHRU_2 0x00400584 +#define NV10_PGRAPH_DIMX_TEXTURE 0x00400588 +#define NV10_PGRAPH_WDIMX_TEXTURE 0x0040058C +#define NV04_PGRAPH_COMBINE_0_ALPHA 0x00400590 +#define NV04_PGRAPH_COMBINE_0_COLOR 0x00400594 +#define NV04_PGRAPH_COMBINE_1_ALPHA 0x00400598 +#define NV04_PGRAPH_COMBINE_1_COLOR 0x0040059C +#define NV04_PGRAPH_FORMAT_0 0x004005A8 +#define NV04_PGRAPH_FORMAT_1 0x004005AC +#define NV04_PGRAPH_FILTER_0 0x004005B0 +#define NV04_PGRAPH_FILTER_1 0x004005B4 +#define NV03_PGRAPH_MONO_COLOR0 0x00400600 +#define NV04_PGRAPH_ROP3 0x00400604 +#define NV04_PGRAPH_BETA_AND 0x00400608 +#define NV04_PGRAPH_BETA_PREMULT 0x0040060C +#define NV04_PGRAPH_LIMIT_VIOL_PIX 0x00400610 +#define NV04_PGRAPH_FORMATS 0x00400618 +#define NV10_PGRAPH_DEBUG_2 0x00400620 +#define NV04_PGRAPH_BOFFSET0 0x00400640 +#define NV04_PGRAPH_BOFFSET1 0x00400644 +#define NV04_PGRAPH_BOFFSET2 0x00400648 +#define NV04_PGRAPH_BOFFSET3 0x0040064C +#define NV04_PGRAPH_BOFFSET4 0x00400650 +#define NV04_PGRAPH_BOFFSET5 0x00400654 +#define NV04_PGRAPH_BBASE0 0x00400658 +#define NV04_PGRAPH_BBASE1 0x0040065C +#define NV04_PGRAPH_BBASE2 0x00400660 +#define NV04_PGRAPH_BBASE3 0x00400664 +#define NV04_PGRAPH_BBASE4 0x00400668 +#define NV04_PGRAPH_BBASE5 0x0040066C +#define NV04_PGRAPH_BPITCH0 0x00400670 +#define NV04_PGRAPH_BPITCH1 0x00400674 +#define NV04_PGRAPH_BPITCH2 0x00400678 +#define NV04_PGRAPH_BPITCH3 0x0040067C +#define NV04_PGRAPH_BPITCH4 0x00400680 +#define NV04_PGRAPH_BLIMIT0 0x00400684 +#define NV04_PGRAPH_BLIMIT1 0x00400688 +#define NV04_PGRAPH_BLIMIT2 0x0040068C +#define NV04_PGRAPH_BLIMIT3 0x00400690 +#define NV04_PGRAPH_BLIMIT4 0x00400694 +#define NV04_PGRAPH_BLIMIT5 0x00400698 +#define NV04_PGRAPH_BSWIZZLE2 0x0040069C +#define NV04_PGRAPH_BSWIZZLE5 0x004006A0 +#define NV03_PGRAPH_STATUS 0x004006B0 +#define NV04_PGRAPH_STATUS 0x00400700 +#define NV04_PGRAPH_TRAPPED_ADDR 0x00400704 +#define NV04_PGRAPH_TRAPPED_DATA 0x00400708 +#define NV04_PGRAPH_SURFACE 0x0040070C +#define NV10_PGRAPH_TRAPPED_DATA_HIGH 0x0040070C +#define NV04_PGRAPH_STATE 0x00400710 +#define NV10_PGRAPH_SURFACE 0x00400710 +#define NV04_PGRAPH_NOTIFY 0x00400714 +#define NV10_PGRAPH_STATE 0x00400714 +#define NV10_PGRAPH_NOTIFY 0x00400718 + +#define NV04_PGRAPH_FIFO 0x00400720 + +#define NV04_PGRAPH_BPIXEL 0x00400724 +#define 
NV10_PGRAPH_RDI_INDEX 0x00400750 +#define NV04_PGRAPH_FFINTFC_ST2 0x00400754 +#define NV10_PGRAPH_RDI_DATA 0x00400754 +#define NV04_PGRAPH_DMA_PITCH 0x00400760 +#define NV10_PGRAPH_FFINTFC_ST2 0x00400764 +#define NV04_PGRAPH_DVD_COLORFMT 0x00400764 +#define NV04_PGRAPH_SCALED_FORMAT 0x00400768 +#define NV10_PGRAPH_DMA_PITCH 0x00400770 +#define NV10_PGRAPH_DVD_COLORFMT 0x00400774 +#define NV10_PGRAPH_SCALED_FORMAT 0x00400778 +#define NV20_PGRAPH_CHANNEL_CTX_TABLE 0x00400780 +#define NV20_PGRAPH_CHANNEL_CTX_POINTER 0x00400784 +#define NV20_PGRAPH_CHANNEL_CTX_XFER 0x00400788 +#define NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD 0x00000001 +#define NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE 0x00000002 +#define NV04_PGRAPH_PATT_COLOR0 0x00400800 +#define NV04_PGRAPH_PATT_COLOR1 0x00400804 +#define NV04_PGRAPH_PATTERN 0x00400808 +#define NV04_PGRAPH_PATTERN_SHAPE 0x00400810 +#define NV04_PGRAPH_CHROMA 0x00400814 +#define NV04_PGRAPH_CONTROL0 0x00400818 +#define NV04_PGRAPH_CONTROL1 0x0040081C +#define NV04_PGRAPH_CONTROL2 0x00400820 +#define NV04_PGRAPH_BLEND 0x00400824 +#define NV04_PGRAPH_STORED_FMT 0x00400830 +#define NV04_PGRAPH_PATT_COLORRAM 0x00400900 +#define NV40_PGRAPH_TILE0(i) (0x00400900 + (i*16)) +#define NV40_PGRAPH_TLIMIT0(i) (0x00400904 + (i*16)) +#define NV40_PGRAPH_TSIZE0(i) (0x00400908 + (i*16)) +#define NV40_PGRAPH_TSTATUS0(i) (0x0040090C + (i*16)) +#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16)) +#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16)) +#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16)) +#define NV10_PGRAPH_TSTATUS(i) (0x00400B0C + (i*16)) +#define NV04_PGRAPH_U_RAM 0x00400D00 +#define NV47_PGRAPH_TILE0(i) (0x00400D00 + (i*16)) +#define NV47_PGRAPH_TLIMIT0(i) (0x00400D04 + (i*16)) +#define NV47_PGRAPH_TSIZE0(i) (0x00400D08 + (i*16)) +#define NV47_PGRAPH_TSTATUS0(i) (0x00400D0C + (i*16)) +#define NV04_PGRAPH_V_RAM 0x00400D40 +#define NV04_PGRAPH_W_RAM 0x00400D80 +#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40 +#define NV10_PGRAPH_COMBINER1_IN_ALPHA 0x00400E44 +#define NV10_PGRAPH_COMBINER0_IN_RGB 0x00400E48 +#define NV10_PGRAPH_COMBINER1_IN_RGB 0x00400E4C +#define NV10_PGRAPH_COMBINER_COLOR0 0x00400E50 +#define NV10_PGRAPH_COMBINER_COLOR1 0x00400E54 +#define NV10_PGRAPH_COMBINER0_OUT_ALPHA 0x00400E58 +#define NV10_PGRAPH_COMBINER1_OUT_ALPHA 0x00400E5C +#define NV10_PGRAPH_COMBINER0_OUT_RGB 0x00400E60 +#define NV10_PGRAPH_COMBINER1_OUT_RGB 0x00400E64 +#define NV10_PGRAPH_COMBINER_FINAL0 0x00400E68 +#define NV10_PGRAPH_COMBINER_FINAL1 0x00400E6C +#define NV10_PGRAPH_WINDOWCLIP_HORIZONTAL 0x00400F00 +#define NV10_PGRAPH_WINDOWCLIP_VERTICAL 0x00400F20 +#define NV10_PGRAPH_XFMODE0 0x00400F40 +#define NV10_PGRAPH_XFMODE1 0x00400F44 +#define NV10_PGRAPH_GLOBALSTATE0 0x00400F48 +#define NV10_PGRAPH_GLOBALSTATE1 0x00400F4C +#define NV10_PGRAPH_PIPE_ADDRESS 0x00400F50 +#define NV10_PGRAPH_PIPE_DATA 0x00400F54 +#define NV04_PGRAPH_DMA_START_0 0x00401000 +#define NV04_PGRAPH_DMA_START_1 0x00401004 +#define NV04_PGRAPH_DMA_LENGTH 0x00401008 +#define NV04_PGRAPH_DMA_MISC 0x0040100C +#define NV04_PGRAPH_DMA_DATA_0 0x00401020 +#define NV04_PGRAPH_DMA_DATA_1 0x00401024 +#define NV04_PGRAPH_DMA_RM 0x00401030 +#define NV04_PGRAPH_DMA_A_XLATE_INST 0x00401040 +#define NV04_PGRAPH_DMA_A_CONTROL 0x00401044 +#define NV04_PGRAPH_DMA_A_LIMIT 0x00401048 +#define NV04_PGRAPH_DMA_A_TLB_PTE 0x0040104C +#define NV04_PGRAPH_DMA_A_TLB_TAG 0x00401050 +#define NV04_PGRAPH_DMA_A_ADJ_OFFSET 0x00401054 +#define NV04_PGRAPH_DMA_A_OFFSET 0x00401058 +#define NV04_PGRAPH_DMA_A_SIZE 0x0040105C +#define 
NV04_PGRAPH_DMA_A_Y_SIZE 0x00401060 +#define NV04_PGRAPH_DMA_B_XLATE_INST 0x00401080 +#define NV04_PGRAPH_DMA_B_CONTROL 0x00401084 +#define NV04_PGRAPH_DMA_B_LIMIT 0x00401088 +#define NV04_PGRAPH_DMA_B_TLB_PTE 0x0040108C +#define NV04_PGRAPH_DMA_B_TLB_TAG 0x00401090 +#define NV04_PGRAPH_DMA_B_ADJ_OFFSET 0x00401094 +#define NV04_PGRAPH_DMA_B_OFFSET 0x00401098 +#define NV04_PGRAPH_DMA_B_SIZE 0x0040109C +#define NV04_PGRAPH_DMA_B_Y_SIZE 0x004010A0 +#define NV40_PGRAPH_TILE1(i) (0x00406900 + (i*16)) +#define NV40_PGRAPH_TLIMIT1(i) (0x00406904 + (i*16)) +#define NV40_PGRAPH_TSIZE1(i) (0x00406908 + (i*16)) +#define NV40_PGRAPH_TSTATUS1(i) (0x0040690C + (i*16)) + + +/* It's a guess that this works on NV03. Confirmed on NV04, though */ +#define NV04_PFIFO_DELAY_0 0x00002040 +#define NV04_PFIFO_DMA_TIMESLICE 0x00002044 +#define NV04_PFIFO_NEXT_CHANNEL 0x00002050 +#define NV03_PFIFO_INTR_0 0x00002100 +#define NV03_PFIFO_INTR_EN_0 0x00002140 +# define NV_PFIFO_INTR_CACHE_ERROR (1<<0) +# define NV_PFIFO_INTR_RUNOUT (1<<4) +# define NV_PFIFO_INTR_RUNOUT_OVERFLOW (1<<8) +# define NV_PFIFO_INTR_DMA_PUSHER (1<<12) +# define NV_PFIFO_INTR_DMA_PT (1<<16) +# define NV_PFIFO_INTR_SEMAPHORE (1<<20) +# define NV_PFIFO_INTR_ACQUIRE_TIMEOUT (1<<24) +#define NV03_PFIFO_RAMHT 0x00002210 +#define NV03_PFIFO_RAMFC 0x00002214 +#define NV03_PFIFO_RAMRO 0x00002218 +#define NV40_PFIFO_RAMFC 0x00002220 +#define NV03_PFIFO_CACHES 0x00002500 +#define NV04_PFIFO_MODE 0x00002504 +#define NV04_PFIFO_DMA 0x00002508 +#define NV04_PFIFO_SIZE 0x0000250c +#define NV50_PFIFO_CTX_TABLE(c) (0x2600+(c)*4) +#define NV50_PFIFO_CTX_TABLE__SIZE 128 +#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED (1<<31) +#define NV50_PFIFO_CTX_TABLE_UNK30_BAD (1<<30) +#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80 0x0FFFFFFF +#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84 0x00FFFFFF +#define NV03_PFIFO_CACHE0_PUSH0 0x00003000 +#define NV03_PFIFO_CACHE0_PULL0 0x00003040 +#define NV04_PFIFO_CACHE0_PULL0 0x00003050 +#define NV04_PFIFO_CACHE0_PULL1 0x00003054 +#define NV03_PFIFO_CACHE1_PUSH0 0x00003200 +#define NV03_PFIFO_CACHE1_PUSH1 0x00003204 +#define NV03_PFIFO_CACHE1_PUSH1_DMA (1<<8) +#define NV40_PFIFO_CACHE1_PUSH1_DMA (1<<16) +#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000000f +#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000001f +#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000007f +#define NV03_PFIFO_CACHE1_PUT 0x00003210 +#define NV04_PFIFO_CACHE1_DMA_PUSH 0x00003220 +#define NV04_PFIFO_CACHE1_DMA_FETCH 0x00003224 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES 0x00000000 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES 0x00000008 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES 0x00000010 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES 0x00000018 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES 0x00000020 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES 0x00000028 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES 0x00000030 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES 0x00000038 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES 0x00000040 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES 0x00000048 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES 0x00000050 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES 0x00000058 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES 0x00000060 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES 0x00000068 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES 0x00000070 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES 0x00000078 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES 0x00000080 +# define 
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES 0x00000088 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES 0x00000090 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES 0x00000098 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES 0x000000A0 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES 0x000000A8 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES 0x000000B0 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES 0x000000B8 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES 0x000000C0 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES 0x000000C8 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES 0x000000D0 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES 0x000000D8 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES 0x000000E0 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES 0x000000E8 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES 0x000000F0 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES 0x000000F8 +# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE 0x0000E000 +# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES 0x00000000 +# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES 0x00002000 +# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES 0x00004000 +# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES 0x00006000 +# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES 0x00008000 +# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES 0x0000A000 +# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES 0x0000C000 +# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES 0x0000E000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS 0x001F0000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0 0x00000000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1 0x00010000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2 0x00020000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3 0x00030000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 0x00040000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5 0x00050000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6 0x00060000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7 0x00070000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 0x00080000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9 0x00090000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10 0x000A0000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11 0x000B0000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12 0x000C0000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13 0x000D0000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14 0x000E0000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15 0x000F0000 +# define NV_PFIFO_CACHE1_ENDIAN 0x80000000 +# define NV_PFIFO_CACHE1_LITTLE_ENDIAN 0x7FFFFFFF +# define NV_PFIFO_CACHE1_BIG_ENDIAN 0x80000000 +#define NV04_PFIFO_CACHE1_DMA_STATE 0x00003228 +#define NV04_PFIFO_CACHE1_DMA_INSTANCE 0x0000322c +#define NV04_PFIFO_CACHE1_DMA_CTL 0x00003230 +#define NV04_PFIFO_CACHE1_DMA_PUT 0x00003240 +#define NV04_PFIFO_CACHE1_DMA_GET 0x00003244 +#define NV10_PFIFO_CACHE1_REF_CNT 0x00003248 +#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE 0x0000324C +#define NV03_PFIFO_CACHE1_PULL0 0x00003240 +#define NV04_PFIFO_CACHE1_PULL0 0x00003250 +#define NV03_PFIFO_CACHE1_PULL1 0x00003250 +#define NV04_PFIFO_CACHE1_PULL1 0x00003254 +#define NV04_PFIFO_CACHE1_HASH 0x00003258 +#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT 0x00003260 +#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP 0x00003264 +#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE 0x00003268 +#define NV10_PFIFO_CACHE1_SEMAPHORE 0x0000326C +#define NV03_PFIFO_CACHE1_GET 0x00003270 +#define NV04_PFIFO_CACHE1_ENGINE 0x00003280 +#define NV04_PFIFO_CACHE1_DMA_DCOUNT 0x000032A0 +#define NV40_PFIFO_GRCTX_INSTANCE 0x000032E0 +#define 
NV40_PFIFO_UNK32E4 0x000032E4 +#define NV04_PFIFO_CACHE1_METHOD(i) (0x00003800+(i*8)) +#define NV04_PFIFO_CACHE1_DATA(i) (0x00003804+(i*8)) +#define NV40_PFIFO_CACHE1_METHOD(i) (0x00090000+(i*8)) +#define NV40_PFIFO_CACHE1_DATA(i) (0x00090004+(i*8)) + +#define NV_CRTC0_INTSTAT 0x00600100 +#define NV_CRTC0_INTEN 0x00600140 +#define NV_CRTC1_INTSTAT 0x00602100 +#define NV_CRTC1_INTEN 0x00602140 +# define NV_CRTC_INTR_VBLANK (1<<0) + +#define NV04_PRAMIN 0x00700000 + +/* Fifo commands. These are not regs, neither masks */ +#define NV03_FIFO_CMD_JUMP 0x20000000 +#define NV03_FIFO_CMD_JUMP_OFFSET_MASK 0x1ffffffc +#define NV03_FIFO_CMD_REWIND (NV03_FIFO_CMD_JUMP | (0 & NV03_FIFO_CMD_JUMP_OFFSET_MASK)) + +/* This is a partial import from rules-ng, a few things may be duplicated. + * Eventually we should completely import everything from rules-ng. + * For the moment check rules-ng for docs. + */ + +#define NV50_PMC 0x00000000 +#define NV50_PMC__LEN 0x1 +#define NV50_PMC__ESIZE 0x2000 +# define NV50_PMC_BOOT_0 0x00000000 +# define NV50_PMC_BOOT_0_REVISION 0x000000ff +# define NV50_PMC_BOOT_0_REVISION__SHIFT 0 +# define NV50_PMC_BOOT_0_ARCH 0x0ff00000 +# define NV50_PMC_BOOT_0_ARCH__SHIFT 20 +# define NV50_PMC_INTR_0 0x00000100 +# define NV50_PMC_INTR_0_PFIFO (1<<8) +# define NV50_PMC_INTR_0_PGRAPH (1<<12) +# define NV50_PMC_INTR_0_PTIMER (1<<20) +# define NV50_PMC_INTR_0_HOTPLUG (1<<21) +# define NV50_PMC_INTR_0_DISPLAY (1<<26) +# define NV50_PMC_INTR_EN_0 0x00000140 +# define NV50_PMC_INTR_EN_0_MASTER (1<<0) +# define NV50_PMC_INTR_EN_0_MASTER_DISABLED (0<<0) +# define NV50_PMC_INTR_EN_0_MASTER_ENABLED (1<<0) +# define NV50_PMC_ENABLE 0x00000200 +# define NV50_PMC_ENABLE_PFIFO (1<<8) +# define NV50_PMC_ENABLE_PGRAPH (1<<12) + +#define NV50_PCONNECTOR 0x0000e000 +#define NV50_PCONNECTOR__LEN 0x1 +#define NV50_PCONNECTOR__ESIZE 0x1000 +# define NV50_PCONNECTOR_HOTPLUG_INTR 0x0000e050 +# define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C0 (1<<0) +# define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C1 (1<<1) +# define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C2 (1<<2) +# define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C3 (1<<3) +# define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C0 (1<<16) +# define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C1 (1<<17) +# define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C2 (1<<18) +# define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C3 (1<<19) +# define NV50_PCONNECTOR_HOTPLUG_CTRL 0x0000e054 +# define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C0 (1<<0) +# define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C1 (1<<1) +# define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C2 (1<<2) +# define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C3 (1<<3) +# define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C0 (1<<16) +# define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C1 (1<<17) +# define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C2 (1<<18) +# define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C3 (1<<19) +# define NV50_PCONNECTOR_HOTPLUG_STATE 0x0000e104 +# define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C0 (1<<2) +# define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C1 (1<<6) +# define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C2 (1<<10) +# define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C3 (1<<14) +# define NV50_PCONNECTOR_I2C_PORT_0 0x0000e138 +# define NV50_PCONNECTOR_I2C_PORT_1 0x0000e150 +# define NV50_PCONNECTOR_I2C_PORT_2 0x0000e168 +# define NV50_PCONNECTOR_I2C_PORT_3 0x0000e180 +# define NV50_PCONNECTOR_I2C_PORT_4 0x0000e240 +# define NV50_PCONNECTOR_I2C_PORT_5 0x0000e258 + +#define NV50_AUXCH_DATA_OUT(i,n) ((n) * 4 + (i) * 0x50 + 0x0000e4c0) 
+#define NV50_AUXCH_DATA_OUT__SIZE 4 +#define NV50_AUXCH_DATA_IN(i,n) ((n) * 4 + (i) * 0x50 + 0x0000e4d0) +#define NV50_AUXCH_DATA_IN__SIZE 4 +#define NV50_AUXCH_ADDR(i) ((i) * 0x50 + 0x0000e4e0) +#define NV50_AUXCH_CTRL(i) ((i) * 0x50 + 0x0000e4e4) +#define NV50_AUXCH_CTRL_LINKSTAT 0x01000000 +#define NV50_AUXCH_CTRL_LINKSTAT_NOT_READY 0x00000000 +#define NV50_AUXCH_CTRL_LINKSTAT_READY 0x01000000 +#define NV50_AUXCH_CTRL_LINKEN 0x00100000 +#define NV50_AUXCH_CTRL_LINKEN_DISABLED 0x00000000 +#define NV50_AUXCH_CTRL_LINKEN_ENABLED 0x00100000 +#define NV50_AUXCH_CTRL_EXEC 0x00010000 +#define NV50_AUXCH_CTRL_EXEC_COMPLETE 0x00000000 +#define NV50_AUXCH_CTRL_EXEC_IN_PROCESS 0x00010000 +#define NV50_AUXCH_CTRL_CMD 0x0000f000 +#define NV50_AUXCH_CTRL_CMD_SHIFT 12 +#define NV50_AUXCH_CTRL_LEN 0x0000000f +#define NV50_AUXCH_CTRL_LEN_SHIFT 0 +#define NV50_AUXCH_STAT(i) ((i) * 0x50 + 0x0000e4e8) +#define NV50_AUXCH_STAT_STATE 0x10000000 +#define NV50_AUXCH_STAT_STATE_NOT_READY 0x00000000 +#define NV50_AUXCH_STAT_STATE_READY 0x10000000 +#define NV50_AUXCH_STAT_REPLY 0x000f0000 +#define NV50_AUXCH_STAT_REPLY_AUX 0x00030000 +#define NV50_AUXCH_STAT_REPLY_AUX_ACK 0x00000000 +#define NV50_AUXCH_STAT_REPLY_AUX_NACK 0x00010000 +#define NV50_AUXCH_STAT_REPLY_AUX_DEFER 0x00020000 +#define NV50_AUXCH_STAT_REPLY_I2C 0x000c0000 +#define NV50_AUXCH_STAT_REPLY_I2C_ACK 0x00000000 +#define NV50_AUXCH_STAT_REPLY_I2C_NACK 0x00040000 +#define NV50_AUXCH_STAT_REPLY_I2C_DEFER 0x00080000 +#define NV50_AUXCH_STAT_COUNT 0x0000001f + +#define NV50_PBUS 0x00088000 +#define NV50_PBUS__LEN 0x1 +#define NV50_PBUS__ESIZE 0x1000 +# define NV50_PBUS_PCI_ID 0x00088000 +# define NV50_PBUS_PCI_ID_VENDOR_ID 0x0000ffff +# define NV50_PBUS_PCI_ID_VENDOR_ID__SHIFT 0 +# define NV50_PBUS_PCI_ID_DEVICE_ID 0xffff0000 +# define NV50_PBUS_PCI_ID_DEVICE_ID__SHIFT 16 + +#define NV50_PFB 0x00100000 +#define NV50_PFB__LEN 0x1 +#define NV50_PFB__ESIZE 0x1000 + +#define NV50_PEXTDEV 0x00101000 +#define NV50_PEXTDEV__LEN 0x1 +#define NV50_PEXTDEV__ESIZE 0x1000 + +#define NV50_PROM 0x00300000 +#define NV50_PROM__LEN 0x1 +#define NV50_PROM__ESIZE 0x10000 + +#define NV50_PGRAPH 0x00400000 +#define NV50_PGRAPH__LEN 0x1 +#define NV50_PGRAPH__ESIZE 0x10000 + +#define NV50_PDISPLAY 0x00610000 +#define NV50_PDISPLAY_OBJECTS 0x00610010 +#define NV50_PDISPLAY_INTR_0 0x00610020 +#define NV50_PDISPLAY_INTR_1 0x00610024 +#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC 0x0000000c +#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_SHIFT 2 +#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(n) (1 << ((n) + 2)) +#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0 0x00000004 +#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1 0x00000008 +#define NV50_PDISPLAY_INTR_1_CLK_UNK10 0x00000010 +#define NV50_PDISPLAY_INTR_1_CLK_UNK20 0x00000020 +#define NV50_PDISPLAY_INTR_1_CLK_UNK40 0x00000040 +#define NV50_PDISPLAY_INTR_EN 0x0061002c +#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC 0x0000000c +#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(n) (1 << ((n) + 2)) +#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_0 0x00000004 +#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_1 0x00000008 +#define NV50_PDISPLAY_INTR_EN_CLK_UNK10 0x00000010 +#define NV50_PDISPLAY_INTR_EN_CLK_UNK20 0x00000020 +#define NV50_PDISPLAY_INTR_EN_CLK_UNK40 0x00000040 +#define NV50_PDISPLAY_UNK30_CTRL 0x00610030 +#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK0 0x00000200 +#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK1 0x00000400 +#define NV50_PDISPLAY_UNK30_CTRL_PENDING 0x80000000 +#define NV50_PDISPLAY_TRAPPED_ADDR 0x00610080 +#define NV50_PDISPLAY_TRAPPED_DATA 0x00610084 
+#define NV50_PDISPLAY_CHANNEL_STAT(i) ((i) * 0x10 + 0x00610200) +#define NV50_PDISPLAY_CHANNEL_STAT_DMA 0x00000010 +#define NV50_PDISPLAY_CHANNEL_STAT_DMA_DISABLED 0x00000000 +#define NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED 0x00000010 +#define NV50_PDISPLAY_CHANNEL_DMA_CB(i) ((i) * 0x10 + 0x00610204) +#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION 0x00000002 +#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM 0x00000000 +#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_SYSTEM 0x00000002 +#define NV50_PDISPLAY_CHANNEL_DMA_CB_VALID 0x00000001 +#define NV50_PDISPLAY_CHANNEL_UNK2(i) ((i) * 0x10 + 0x00610208) +#define NV50_PDISPLAY_CHANNEL_UNK3(i) ((i) * 0x10 + 0x0061020c) + +#define NV50_PDISPLAY_CURSOR 0x00610270 +#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i) ((i) * 0x10 + 0x00610270) +#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON 0x00000001 +#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS 0x00030000 +#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE 0x00010000 + +#define NV50_PDISPLAY_CTRL_STATE 0x00610300 +#define NV50_PDISPLAY_CTRL_STATE_PENDING 0x80000000 +#define NV50_PDISPLAY_CTRL_STATE_METHOD 0x00001ffc +#define NV50_PDISPLAY_CTRL_STATE_ENABLE 0x00000001 +#define NV50_PDISPLAY_CTRL_VAL 0x00610304 +#define NV50_PDISPLAY_UNK_380 0x00610380 +#define NV50_PDISPLAY_RAM_AMOUNT 0x00610384 +#define NV50_PDISPLAY_UNK_388 0x00610388 +#define NV50_PDISPLAY_UNK_38C 0x0061038c + +#define NV50_PDISPLAY_CRTC_P(i, r) ((i) * 0x540 + NV50_PDISPLAY_CRTC_##r) +#define NV50_PDISPLAY_CRTC_C(i, r) (4 + (i) * 0x540 + NV50_PDISPLAY_CRTC_##r) +#define NV50_PDISPLAY_CRTC_UNK_0A18 /* mthd 0x0900 */ 0x00610a18 +#define NV50_PDISPLAY_CRTC_CLUT_MODE 0x00610a24 +#define NV50_PDISPLAY_CRTC_INTERLACE 0x00610a48 +#define NV50_PDISPLAY_CRTC_SCALE_CTRL 0x00610a50 +#define NV50_PDISPLAY_CRTC_CURSOR_CTRL 0x00610a58 +#define NV50_PDISPLAY_CRTC_UNK0A78 /* mthd 0x0904 */ 0x00610a78 +#define NV50_PDISPLAY_CRTC_UNK0AB8 0x00610ab8 +#define NV50_PDISPLAY_CRTC_DEPTH 0x00610ac8 +#define NV50_PDISPLAY_CRTC_CLOCK 0x00610ad0 +#define NV50_PDISPLAY_CRTC_COLOR_CTRL 0x00610ae0 +#define NV50_PDISPLAY_CRTC_SYNC_START_TO_BLANK_END 0x00610ae8 +#define NV50_PDISPLAY_CRTC_MODE_UNK1 0x00610af0 +#define NV50_PDISPLAY_CRTC_DISPLAY_TOTAL 0x00610af8 +#define NV50_PDISPLAY_CRTC_SYNC_DURATION 0x00610b00 +#define NV50_PDISPLAY_CRTC_MODE_UNK2 0x00610b08 +#define NV50_PDISPLAY_CRTC_UNK_0B10 /* mthd 0x0828 */ 0x00610b10 +#define NV50_PDISPLAY_CRTC_FB_SIZE 0x00610b18 +#define NV50_PDISPLAY_CRTC_FB_PITCH 0x00610b20 +#define NV50_PDISPLAY_CRTC_FB_PITCH_LINEAR 0x00100000 +#define NV50_PDISPLAY_CRTC_FB_POS 0x00610b28 +#define NV50_PDISPLAY_CRTC_SCALE_CENTER_OFFSET 0x00610b38 +#define NV50_PDISPLAY_CRTC_REAL_RES 0x00610b40 +#define NV50_PDISPLAY_CRTC_SCALE_RES1 0x00610b48 +#define NV50_PDISPLAY_CRTC_SCALE_RES2 0x00610b50 + +#define NV50_PDISPLAY_DAC_MODE_CTRL_P(i) (0x00610b58 + (i) * 0x8) +#define NV50_PDISPLAY_DAC_MODE_CTRL_C(i) (0x00610b5c + (i) * 0x8) +#define NV50_PDISPLAY_SOR_MODE_CTRL_P(i) (0x00610b70 + (i) * 0x8) +#define NV50_PDISPLAY_SOR_MODE_CTRL_C(i) (0x00610b74 + (i) * 0x8) +#define NV50_PDISPLAY_DAC_MODE_CTRL2_P(i) (0x00610bdc + (i) * 0x8) +#define NV50_PDISPLAY_DAC_MODE_CTRL2_C(i) (0x00610be0 + (i) * 0x8) + +#define NV90_PDISPLAY_SOR_MODE_CTRL_P(i) (0x00610794 + (i) * 0x8) +#define NV90_PDISPLAY_SOR_MODE_CTRL_C(i) (0x00610798 + (i) * 0x8) +#define NV90_PDISPLAY_DAC_MODE_CTRL_P(i) (0x00610b58 + (i) * 0x8) +#define NV90_PDISPLAY_DAC_MODE_CTRL_C(i) (0x00610b5c + (i) * 0x8) +#define NV90_PDISPLAY_DAC_MODE_CTRL2_P(i) (0x00610b80 + (i) * 0x8) +#define 
NV90_PDISPLAY_DAC_MODE_CTRL2_C(i) (0x00610b84 + (i) * 0x8) + +#define NV50_PDISPLAY_CRTC_CLK 0x00614000 +#define NV50_PDISPLAY_CRTC_CLK_CTRL1(i) ((i) * 0x800 + 0x614100) +#define NV50_PDISPLAY_CRTC_CLK_CTRL1_CONNECTED 0x00000600 +#define NV50_PDISPLAY_CRTC_CLK_VPLL_A(i) ((i) * 0x800 + 0x614104) +#define NV50_PDISPLAY_CRTC_CLK_VPLL_B(i) ((i) * 0x800 + 0x614108) +#define NV50_PDISPLAY_CRTC_CLK_CTRL2(i) ((i) * 0x800 + 0x614200) + +#define NV50_PDISPLAY_DAC_CLK 0x00614000 +#define NV50_PDISPLAY_DAC_CLK_CTRL2(i) ((i) * 0x800 + 0x614280) + +#define NV50_PDISPLAY_SOR_CLK 0x00614000 +#define NV50_PDISPLAY_SOR_CLK_CTRL2(i) ((i) * 0x800 + 0x614300) + +#define NV50_PDISPLAY_VGACRTC(r) ((r) + 0x619400) + +#define NV50_PDISPLAY_DAC 0x0061a000 +#define NV50_PDISPLAY_DAC_DPMS_CTRL(i) (0x0061a004 + (i) * 0x800) +#define NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF 0x00000001 +#define NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF 0x00000004 +#define NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED 0x00000010 +#define NV50_PDISPLAY_DAC_DPMS_CTRL_OFF 0x00000040 +#define NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING 0x80000000 +#define NV50_PDISPLAY_DAC_LOAD_CTRL(i) (0x0061a00c + (i) * 0x800) +#define NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE 0x00100000 +#define NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT 0x38000000 +#define NV50_PDISPLAY_DAC_LOAD_CTRL_DONE 0x80000000 +#define NV50_PDISPLAY_DAC_CLK_CTRL1(i) (0x0061a010 + (i) * 0x800) +#define NV50_PDISPLAY_DAC_CLK_CTRL1_CONNECTED 0x00000600 + +#define NV50_PDISPLAY_SOR 0x0061c000 +#define NV50_PDISPLAY_SOR_DPMS_CTRL(i) (0x0061c004 + (i) * 0x800) +#define NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING 0x80000000 +#define NV50_PDISPLAY_SOR_DPMS_CTRL_ON 0x00000001 +#define NV50_PDISPLAY_SOR_CLK_CTRL1(i) (0x0061c008 + (i) * 0x800) +#define NV50_PDISPLAY_SOR_CLK_CTRL1_CONNECTED 0x00000600 +#define NV50_PDISPLAY_SOR_DPMS_STATE(i) (0x0061c030 + (i) * 0x800) +#define NV50_PDISPLAY_SOR_DPMS_STATE_ACTIVE 0x00030000 +#define NV50_PDISPLAY_SOR_DPMS_STATE_BLANKED 0x00080000 +#define NV50_PDISPLAY_SOR_DPMS_STATE_WAIT 0x10000000 +#define NV50_PDISPLAY_SOR_BACKLIGHT 0x0061c084 +#define NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE 0x80000000 +#define NV50_PDISPLAY_SOR_BACKLIGHT_LEVEL 0x00000fff +#define NV50_SOR_DP_CTRL(i,l) (0x0061c10c + (i) * 0x800 + (l) * 0x80) +#define NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED 0x00004000 +#define NV50_SOR_DP_CTRL_LANE_MASK 0x001f0000 +#define NV50_SOR_DP_CTRL_LANE_0_ENABLED 0x00010000 +#define NV50_SOR_DP_CTRL_LANE_1_ENABLED 0x00020000 +#define NV50_SOR_DP_CTRL_LANE_2_ENABLED 0x00040000 +#define NV50_SOR_DP_CTRL_LANE_3_ENABLED 0x00080000 +#define NV50_SOR_DP_CTRL_TRAINING_PATTERN 0x0f000000 +#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_DISABLED 0x00000000 +#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_1 0x01000000 +#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2 0x02000000 +#define NV50_SOR_DP_UNK118(i,l) (0x0061c118 + (i) * 0x800 + (l) * 0x80) +#define NV50_SOR_DP_UNK120(i,l) (0x0061c120 + (i) * 0x800 + (l) * 0x80) +#define NV50_SOR_DP_UNK130(i,l) (0x0061c130 + (i) * 0x800 + (l) * 0x80) + +#define NV50_PDISPLAY_USER(i) ((i) * 0x1000 + 0x00640000) +#define NV50_PDISPLAY_USER_PUT(i) ((i) * 0x1000 + 0x00640000) +#define NV50_PDISPLAY_USER_GET(i) ((i) * 0x1000 + 0x00640004) + +#define NV50_PDISPLAY_CURSOR_USER 0x00647000 +#define NV50_PDISPLAY_CURSOR_USER_POS_CTRL(i) ((i) * 0x1000 + 0x00647080) +#define NV50_PDISPLAY_CURSOR_USER_POS(i) ((i) * 0x1000 + 0x00647084) diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c new file mode 100644 index 000000000000..4c7f1e403e80 --- /dev/null +++ 
b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -0,0 +1,321 @@ +#include "drmP.h" +#include "nouveau_drv.h" +#include + +#define NV_CTXDMA_PAGE_SHIFT 12 +#define NV_CTXDMA_PAGE_SIZE (1 << NV_CTXDMA_PAGE_SHIFT) +#define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1) + +struct nouveau_sgdma_be { + struct ttm_backend backend; + struct drm_device *dev; + + dma_addr_t *pages; + unsigned nr_pages; + + unsigned pte_start; + bool bound; +}; + +static int +nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages, + struct page **pages, struct page *dummy_read_page) +{ + struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; + struct drm_device *dev = nvbe->dev; + + NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages); + + if (nvbe->pages) + return -EINVAL; + + nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL); + if (!nvbe->pages) + return -ENOMEM; + + nvbe->nr_pages = 0; + while (num_pages--) { + nvbe->pages[nvbe->nr_pages] = + pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0, + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(dev->pdev, + nvbe->pages[nvbe->nr_pages])) { + be->func->clear(be); + return -EFAULT; + } + + nvbe->nr_pages++; + } + + return 0; +} + +static void +nouveau_sgdma_clear(struct ttm_backend *be) +{ + struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; + struct drm_device *dev = nvbe->dev; + + NV_DEBUG(nvbe->dev, "\n"); + + if (nvbe && nvbe->pages) { + if (nvbe->bound) + be->func->unbind(be); + + while (nvbe->nr_pages--) { + pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages], + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + } + kfree(nvbe->pages); + nvbe->pages = NULL; + nvbe->nr_pages = 0; + } +} + +static inline unsigned +nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT); + + if (dev_priv->card_type < NV_50) + return pte + 2; + + return pte << 1; +} + +static int +nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) +{ + struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; + struct drm_device *dev = nvbe->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; + unsigned i, j, pte; + + NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start); + + dev_priv->engine.instmem.prepare_access(nvbe->dev, true); + pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT); + nvbe->pte_start = pte; + for (i = 0; i < nvbe->nr_pages; i++) { + dma_addr_t dma_offset = nvbe->pages[i]; + uint32_t offset_l = lower_32_bits(dma_offset); + uint32_t offset_h = upper_32_bits(dma_offset); + + for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) { + if (dev_priv->card_type < NV_50) + nv_wo32(dev, gpuobj, pte++, offset_l | 3); + else { + nv_wo32(dev, gpuobj, pte++, offset_l | 0x21); + nv_wo32(dev, gpuobj, pte++, offset_h & 0xff); + } + + dma_offset += NV_CTXDMA_PAGE_SIZE; + } + } + dev_priv->engine.instmem.finish_access(nvbe->dev); + + if (dev_priv->card_type == NV_50) { + nv_wr32(dev, 0x100c80, 0x00050001); + if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { + NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); + NV_ERROR(dev, "0x100c80 = 0x%08x\n", + nv_rd32(dev, 0x100c80)); + return -EBUSY; + } + + nv_wr32(dev, 0x100c80, 0x00000001); + if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { + NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); + NV_ERROR(dev, "0x100c80 = 0x%08x\n", + nv_rd32(dev, 0x100c80)); + return -EBUSY; + } + } 
+ + nvbe->bound = true; + return 0; +} + +static int +nouveau_sgdma_unbind(struct ttm_backend *be) +{ + struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; + struct drm_device *dev = nvbe->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; + unsigned i, j, pte; + + NV_DEBUG(dev, "\n"); + + if (!nvbe->bound) + return 0; + + dev_priv->engine.instmem.prepare_access(nvbe->dev, true); + pte = nvbe->pte_start; + for (i = 0; i < nvbe->nr_pages; i++) { + dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus; + + for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) { + if (dev_priv->card_type < NV_50) + nv_wo32(dev, gpuobj, pte++, dma_offset | 3); + else { + nv_wo32(dev, gpuobj, pte++, dma_offset | 0x21); + nv_wo32(dev, gpuobj, pte++, 0x00000000); + } + + dma_offset += NV_CTXDMA_PAGE_SIZE; + } + } + dev_priv->engine.instmem.finish_access(nvbe->dev); + + nvbe->bound = false; + return 0; +} + +static void +nouveau_sgdma_destroy(struct ttm_backend *be) +{ + struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; + + if (be) { + NV_DEBUG(nvbe->dev, "\n"); + + if (nvbe) { + if (nvbe->pages) + be->func->clear(be); + kfree(nvbe); + } + } +} + +static struct ttm_backend_func nouveau_sgdma_backend = { + .populate = nouveau_sgdma_populate, + .clear = nouveau_sgdma_clear, + .bind = nouveau_sgdma_bind, + .unbind = nouveau_sgdma_unbind, + .destroy = nouveau_sgdma_destroy +}; + +struct ttm_backend * +nouveau_sgdma_init_ttm(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_sgdma_be *nvbe; + + if (!dev_priv->gart_info.sg_ctxdma) + return NULL; + + nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL); + if (!nvbe) + return NULL; + + nvbe->dev = dev; + + nvbe->backend.func = &nouveau_sgdma_backend; + + return &nvbe->backend; +} + +int +nouveau_sgdma_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *gpuobj = NULL; + uint32_t aper_size, obj_size; + int i, ret; + + if (dev_priv->card_type < NV_50) { + aper_size = (64 * 1024 * 1024); + obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4; + obj_size += 8; /* ctxdma header */ + } else { + /* 1 entire VM page table */ + aper_size = (512 * 1024 * 1024); + obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8; + } + + ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16, + NVOBJ_FLAG_ALLOW_NO_REFS | + NVOBJ_FLAG_ZERO_ALLOC | + NVOBJ_FLAG_ZERO_FREE, &gpuobj); + if (ret) { + NV_ERROR(dev, "Error creating sgdma object: %d\n", ret); + return ret; + } + + dev_priv->gart_info.sg_dummy_page = + alloc_page(GFP_KERNEL|__GFP_DMA32); + set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags); + dev_priv->gart_info.sg_dummy_bus = + pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0, + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + + dev_priv->engine.instmem.prepare_access(dev, true); + if (dev_priv->card_type < NV_50) { + /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and + * confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE + * on those cards? 
*/ + nv_wo32(dev, gpuobj, 0, NV_CLASS_DMA_IN_MEMORY | + (1 << 12) /* PT present */ | + (0 << 13) /* PT *not* linear */ | + (NV_DMA_ACCESS_RW << 14) | + (NV_DMA_TARGET_PCI << 16)); + nv_wo32(dev, gpuobj, 1, aper_size - 1); + for (i = 2; i < 2 + (aper_size >> 12); i++) { + nv_wo32(dev, gpuobj, i, + dev_priv->gart_info.sg_dummy_bus | 3); + } + } else { + for (i = 0; i < obj_size; i += 8) { + nv_wo32(dev, gpuobj, (i+0)/4, + dev_priv->gart_info.sg_dummy_bus | 0x21); + nv_wo32(dev, gpuobj, (i+4)/4, 0); + } + } + dev_priv->engine.instmem.finish_access(dev); + + dev_priv->gart_info.type = NOUVEAU_GART_SGDMA; + dev_priv->gart_info.aper_base = 0; + dev_priv->gart_info.aper_size = aper_size; + dev_priv->gart_info.sg_ctxdma = gpuobj; + return 0; +} + +void +nouveau_sgdma_takedown(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (dev_priv->gart_info.sg_dummy_page) { + pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus, + NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + unlock_page(dev_priv->gart_info.sg_dummy_page); + __free_page(dev_priv->gart_info.sg_dummy_page); + dev_priv->gart_info.sg_dummy_page = NULL; + dev_priv->gart_info.sg_dummy_bus = 0; + } + + nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma); +} + +int +nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; + struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; + int pte; + + pte = (offset >> NV_CTXDMA_PAGE_SHIFT); + if (dev_priv->card_type < NV_50) { + instmem->prepare_access(dev, false); + *page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK; + instmem->finish_access(dev); + return 0; + } + + NV_ERROR(dev, "Unimplemented on NV50\n"); + return -EINVAL; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c new file mode 100644 index 000000000000..2ed41d339f6a --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -0,0 +1,811 @@ +/* + * Copyright 2005 Stephane Marchesin + * Copyright 2008 Stuart Bennett + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include +#include "drmP.h" +#include "drm.h" +#include "drm_sarea.h" +#include "drm_crtc_helper.h" +#include + +#include "nouveau_drv.h" +#include "nouveau_drm.h" +#include "nv50_display.h" + +static int nouveau_stub_init(struct drm_device *dev) { return 0; } +static void nouveau_stub_takedown(struct drm_device *dev) {} + +static int nouveau_init_engine_ptrs(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_engine *engine = &dev_priv->engine; + + switch (dev_priv->chipset & 0xf0) { + case 0x00: + engine->instmem.init = nv04_instmem_init; + engine->instmem.takedown = nv04_instmem_takedown; + engine->instmem.suspend = nv04_instmem_suspend; + engine->instmem.resume = nv04_instmem_resume; + engine->instmem.populate = nv04_instmem_populate; + engine->instmem.clear = nv04_instmem_clear; + engine->instmem.bind = nv04_instmem_bind; + engine->instmem.unbind = nv04_instmem_unbind; + engine->instmem.prepare_access = nv04_instmem_prepare_access; + engine->instmem.finish_access = nv04_instmem_finish_access; + engine->mc.init = nv04_mc_init; + engine->mc.takedown = nv04_mc_takedown; + engine->timer.init = nv04_timer_init; + engine->timer.read = nv04_timer_read; + engine->timer.takedown = nv04_timer_takedown; + engine->fb.init = nv04_fb_init; + engine->fb.takedown = nv04_fb_takedown; + engine->graph.grclass = nv04_graph_grclass; + engine->graph.init = nv04_graph_init; + engine->graph.takedown = nv04_graph_takedown; + engine->graph.fifo_access = nv04_graph_fifo_access; + engine->graph.channel = nv04_graph_channel; + engine->graph.create_context = nv04_graph_create_context; + engine->graph.destroy_context = nv04_graph_destroy_context; + engine->graph.load_context = nv04_graph_load_context; + engine->graph.unload_context = nv04_graph_unload_context; + engine->fifo.channels = 16; + engine->fifo.init = nv04_fifo_init; + engine->fifo.takedown = nouveau_stub_takedown; + engine->fifo.disable = nv04_fifo_disable; + engine->fifo.enable = nv04_fifo_enable; + engine->fifo.reassign = nv04_fifo_reassign; + engine->fifo.channel_id = nv04_fifo_channel_id; + engine->fifo.create_context = nv04_fifo_create_context; + engine->fifo.destroy_context = nv04_fifo_destroy_context; + engine->fifo.load_context = nv04_fifo_load_context; + engine->fifo.unload_context = nv04_fifo_unload_context; + break; + case 0x10: + engine->instmem.init = nv04_instmem_init; + engine->instmem.takedown = nv04_instmem_takedown; + engine->instmem.suspend = nv04_instmem_suspend; + engine->instmem.resume = nv04_instmem_resume; + engine->instmem.populate = nv04_instmem_populate; + engine->instmem.clear = nv04_instmem_clear; + engine->instmem.bind = nv04_instmem_bind; + engine->instmem.unbind = nv04_instmem_unbind; + engine->instmem.prepare_access = nv04_instmem_prepare_access; + engine->instmem.finish_access = nv04_instmem_finish_access; + engine->mc.init = nv04_mc_init; + engine->mc.takedown = nv04_mc_takedown; + engine->timer.init = nv04_timer_init; + engine->timer.read = nv04_timer_read; + engine->timer.takedown = nv04_timer_takedown; + engine->fb.init = nv10_fb_init; + engine->fb.takedown = nv10_fb_takedown; + engine->graph.grclass = nv10_graph_grclass; + engine->graph.init = nv10_graph_init; + engine->graph.takedown = nv10_graph_takedown; + engine->graph.channel = nv10_graph_channel; + engine->graph.create_context = nv10_graph_create_context; + engine->graph.destroy_context = nv10_graph_destroy_context; + engine->graph.fifo_access = nv04_graph_fifo_access; + engine->graph.load_context = 
nv10_graph_load_context; + engine->graph.unload_context = nv10_graph_unload_context; + engine->fifo.channels = 32; + engine->fifo.init = nv10_fifo_init; + engine->fifo.takedown = nouveau_stub_takedown; + engine->fifo.disable = nv04_fifo_disable; + engine->fifo.enable = nv04_fifo_enable; + engine->fifo.reassign = nv04_fifo_reassign; + engine->fifo.channel_id = nv10_fifo_channel_id; + engine->fifo.create_context = nv10_fifo_create_context; + engine->fifo.destroy_context = nv10_fifo_destroy_context; + engine->fifo.load_context = nv10_fifo_load_context; + engine->fifo.unload_context = nv10_fifo_unload_context; + break; + case 0x20: + engine->instmem.init = nv04_instmem_init; + engine->instmem.takedown = nv04_instmem_takedown; + engine->instmem.suspend = nv04_instmem_suspend; + engine->instmem.resume = nv04_instmem_resume; + engine->instmem.populate = nv04_instmem_populate; + engine->instmem.clear = nv04_instmem_clear; + engine->instmem.bind = nv04_instmem_bind; + engine->instmem.unbind = nv04_instmem_unbind; + engine->instmem.prepare_access = nv04_instmem_prepare_access; + engine->instmem.finish_access = nv04_instmem_finish_access; + engine->mc.init = nv04_mc_init; + engine->mc.takedown = nv04_mc_takedown; + engine->timer.init = nv04_timer_init; + engine->timer.read = nv04_timer_read; + engine->timer.takedown = nv04_timer_takedown; + engine->fb.init = nv10_fb_init; + engine->fb.takedown = nv10_fb_takedown; + engine->graph.grclass = nv20_graph_grclass; + engine->graph.init = nv20_graph_init; + engine->graph.takedown = nv20_graph_takedown; + engine->graph.channel = nv10_graph_channel; + engine->graph.create_context = nv20_graph_create_context; + engine->graph.destroy_context = nv20_graph_destroy_context; + engine->graph.fifo_access = nv04_graph_fifo_access; + engine->graph.load_context = nv20_graph_load_context; + engine->graph.unload_context = nv20_graph_unload_context; + engine->fifo.channels = 32; + engine->fifo.init = nv10_fifo_init; + engine->fifo.takedown = nouveau_stub_takedown; + engine->fifo.disable = nv04_fifo_disable; + engine->fifo.enable = nv04_fifo_enable; + engine->fifo.reassign = nv04_fifo_reassign; + engine->fifo.channel_id = nv10_fifo_channel_id; + engine->fifo.create_context = nv10_fifo_create_context; + engine->fifo.destroy_context = nv10_fifo_destroy_context; + engine->fifo.load_context = nv10_fifo_load_context; + engine->fifo.unload_context = nv10_fifo_unload_context; + break; + case 0x30: + engine->instmem.init = nv04_instmem_init; + engine->instmem.takedown = nv04_instmem_takedown; + engine->instmem.suspend = nv04_instmem_suspend; + engine->instmem.resume = nv04_instmem_resume; + engine->instmem.populate = nv04_instmem_populate; + engine->instmem.clear = nv04_instmem_clear; + engine->instmem.bind = nv04_instmem_bind; + engine->instmem.unbind = nv04_instmem_unbind; + engine->instmem.prepare_access = nv04_instmem_prepare_access; + engine->instmem.finish_access = nv04_instmem_finish_access; + engine->mc.init = nv04_mc_init; + engine->mc.takedown = nv04_mc_takedown; + engine->timer.init = nv04_timer_init; + engine->timer.read = nv04_timer_read; + engine->timer.takedown = nv04_timer_takedown; + engine->fb.init = nv10_fb_init; + engine->fb.takedown = nv10_fb_takedown; + engine->graph.grclass = nv30_graph_grclass; + engine->graph.init = nv30_graph_init; + engine->graph.takedown = nv20_graph_takedown; + engine->graph.fifo_access = nv04_graph_fifo_access; + engine->graph.channel = nv10_graph_channel; + engine->graph.create_context = nv20_graph_create_context; + 
engine->graph.destroy_context = nv20_graph_destroy_context; + engine->graph.load_context = nv20_graph_load_context; + engine->graph.unload_context = nv20_graph_unload_context; + engine->fifo.channels = 32; + engine->fifo.init = nv10_fifo_init; + engine->fifo.takedown = nouveau_stub_takedown; + engine->fifo.disable = nv04_fifo_disable; + engine->fifo.enable = nv04_fifo_enable; + engine->fifo.reassign = nv04_fifo_reassign; + engine->fifo.channel_id = nv10_fifo_channel_id; + engine->fifo.create_context = nv10_fifo_create_context; + engine->fifo.destroy_context = nv10_fifo_destroy_context; + engine->fifo.load_context = nv10_fifo_load_context; + engine->fifo.unload_context = nv10_fifo_unload_context; + break; + case 0x40: + case 0x60: + engine->instmem.init = nv04_instmem_init; + engine->instmem.takedown = nv04_instmem_takedown; + engine->instmem.suspend = nv04_instmem_suspend; + engine->instmem.resume = nv04_instmem_resume; + engine->instmem.populate = nv04_instmem_populate; + engine->instmem.clear = nv04_instmem_clear; + engine->instmem.bind = nv04_instmem_bind; + engine->instmem.unbind = nv04_instmem_unbind; + engine->instmem.prepare_access = nv04_instmem_prepare_access; + engine->instmem.finish_access = nv04_instmem_finish_access; + engine->mc.init = nv40_mc_init; + engine->mc.takedown = nv40_mc_takedown; + engine->timer.init = nv04_timer_init; + engine->timer.read = nv04_timer_read; + engine->timer.takedown = nv04_timer_takedown; + engine->fb.init = nv40_fb_init; + engine->fb.takedown = nv40_fb_takedown; + engine->graph.grclass = nv40_graph_grclass; + engine->graph.init = nv40_graph_init; + engine->graph.takedown = nv40_graph_takedown; + engine->graph.fifo_access = nv04_graph_fifo_access; + engine->graph.channel = nv40_graph_channel; + engine->graph.create_context = nv40_graph_create_context; + engine->graph.destroy_context = nv40_graph_destroy_context; + engine->graph.load_context = nv40_graph_load_context; + engine->graph.unload_context = nv40_graph_unload_context; + engine->fifo.channels = 32; + engine->fifo.init = nv40_fifo_init; + engine->fifo.takedown = nouveau_stub_takedown; + engine->fifo.disable = nv04_fifo_disable; + engine->fifo.enable = nv04_fifo_enable; + engine->fifo.reassign = nv04_fifo_reassign; + engine->fifo.channel_id = nv10_fifo_channel_id; + engine->fifo.create_context = nv40_fifo_create_context; + engine->fifo.destroy_context = nv40_fifo_destroy_context; + engine->fifo.load_context = nv40_fifo_load_context; + engine->fifo.unload_context = nv40_fifo_unload_context; + break; + case 0x50: + case 0x80: /* gotta love NVIDIA's consistency.. 
*/ + case 0x90: + case 0xA0: + engine->instmem.init = nv50_instmem_init; + engine->instmem.takedown = nv50_instmem_takedown; + engine->instmem.suspend = nv50_instmem_suspend; + engine->instmem.resume = nv50_instmem_resume; + engine->instmem.populate = nv50_instmem_populate; + engine->instmem.clear = nv50_instmem_clear; + engine->instmem.bind = nv50_instmem_bind; + engine->instmem.unbind = nv50_instmem_unbind; + engine->instmem.prepare_access = nv50_instmem_prepare_access; + engine->instmem.finish_access = nv50_instmem_finish_access; + engine->mc.init = nv50_mc_init; + engine->mc.takedown = nv50_mc_takedown; + engine->timer.init = nv04_timer_init; + engine->timer.read = nv04_timer_read; + engine->timer.takedown = nv04_timer_takedown; + engine->fb.init = nouveau_stub_init; + engine->fb.takedown = nouveau_stub_takedown; + engine->graph.grclass = nv50_graph_grclass; + engine->graph.init = nv50_graph_init; + engine->graph.takedown = nv50_graph_takedown; + engine->graph.fifo_access = nv50_graph_fifo_access; + engine->graph.channel = nv50_graph_channel; + engine->graph.create_context = nv50_graph_create_context; + engine->graph.destroy_context = nv50_graph_destroy_context; + engine->graph.load_context = nv50_graph_load_context; + engine->graph.unload_context = nv50_graph_unload_context; + engine->fifo.channels = 128; + engine->fifo.init = nv50_fifo_init; + engine->fifo.takedown = nv50_fifo_takedown; + engine->fifo.disable = nv04_fifo_disable; + engine->fifo.enable = nv04_fifo_enable; + engine->fifo.reassign = nv04_fifo_reassign; + engine->fifo.channel_id = nv50_fifo_channel_id; + engine->fifo.create_context = nv50_fifo_create_context; + engine->fifo.destroy_context = nv50_fifo_destroy_context; + engine->fifo.load_context = nv50_fifo_load_context; + engine->fifo.unload_context = nv50_fifo_unload_context; + break; + default: + NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset); + return 1; + } + + return 0; +} + +static unsigned int +nouveau_vga_set_decode(void *priv, bool state) +{ + if (state) + return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | + VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; + else + return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; +} + +int +nouveau_card_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_engine *engine; + struct nouveau_gpuobj *gpuobj; + int ret; + + NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state); + + if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE) + return 0; + + vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode); + + /* Initialise internal driver API hooks */ + ret = nouveau_init_engine_ptrs(dev); + if (ret) + return ret; + engine = &dev_priv->engine; + dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED; + + /* Parse BIOS tables / Run init tables if card not POSTed */ + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + ret = nouveau_bios_init(dev); + if (ret) + return ret; + } + + ret = nouveau_gpuobj_early_init(dev); + if (ret) + return ret; + + /* Initialise instance memory, must happen before mem_init so we + * know exactly how much VRAM we're able to use for "normal" + * purposes. 
+ */ + ret = engine->instmem.init(dev); + if (ret) + return ret; + + /* Setup the memory manager */ + ret = nouveau_mem_init(dev); + if (ret) + return ret; + + ret = nouveau_gpuobj_init(dev); + if (ret) + return ret; + + /* PMC */ + ret = engine->mc.init(dev); + if (ret) + return ret; + + /* PTIMER */ + ret = engine->timer.init(dev); + if (ret) + return ret; + + /* PFB */ + ret = engine->fb.init(dev); + if (ret) + return ret; + + /* PGRAPH */ + ret = engine->graph.init(dev); + if (ret) + return ret; + + /* PFIFO */ + ret = engine->fifo.init(dev); + if (ret) + return ret; + + /* this call irq_preinstall, register irq handler and + * call irq_postinstall + */ + ret = drm_irq_install(dev); + if (ret) + return ret; + + ret = drm_vblank_init(dev, 0); + if (ret) + return ret; + + /* what about PVIDEO/PCRTC/PRAMDAC etc? */ + + ret = nouveau_channel_alloc(dev, &dev_priv->channel, + (struct drm_file *)-2, + NvDmaFB, NvDmaTT); + if (ret) + return ret; + + gpuobj = NULL; + ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY, + 0, nouveau_mem_fb_amount(dev), + NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM, + &gpuobj); + if (ret) + return ret; + + ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaVRAM, + gpuobj, NULL); + if (ret) { + nouveau_gpuobj_del(dev, &gpuobj); + return ret; + } + + gpuobj = NULL; + ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0, + dev_priv->gart_info.aper_size, + NV_DMA_ACCESS_RW, &gpuobj, NULL); + if (ret) + return ret; + + ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaGART, + gpuobj, NULL); + if (ret) { + nouveau_gpuobj_del(dev, &gpuobj); + return ret; + } + + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + if (dev_priv->card_type >= NV_50) { + ret = nv50_display_create(dev); + if (ret) + return ret; + } else { + ret = nv04_display_create(dev); + if (ret) + return ret; + } + } + + ret = nouveau_backlight_init(dev); + if (ret) + NV_ERROR(dev, "Error %d registering backlight\n", ret); + + dev_priv->init_state = NOUVEAU_CARD_INIT_DONE; + + if (drm_core_check_feature(dev, DRIVER_MODESET)) + drm_helper_initial_config(dev); + + return 0; +} + +static void nouveau_card_takedown(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_engine *engine = &dev_priv->engine; + + NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state); + + if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) { + nouveau_backlight_exit(dev); + + if (dev_priv->channel) { + nouveau_channel_free(dev_priv->channel); + dev_priv->channel = NULL; + } + + engine->fifo.takedown(dev); + engine->graph.takedown(dev); + engine->fb.takedown(dev); + engine->timer.takedown(dev); + engine->mc.takedown(dev); + + mutex_lock(&dev->struct_mutex); + ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT); + mutex_unlock(&dev->struct_mutex); + nouveau_sgdma_takedown(dev); + + nouveau_gpuobj_takedown(dev); + nouveau_mem_close(dev); + engine->instmem.takedown(dev); + + if (drm_core_check_feature(dev, DRIVER_MODESET)) + drm_irq_uninstall(dev); + + nouveau_gpuobj_late_takedown(dev); + nouveau_bios_takedown(dev); + + vga_client_register(dev->pdev, NULL, NULL, NULL); + + dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN; + } +} + +/* here a client dies, release the stuff that was allocated for its + * file_priv */ +void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv) +{ + nouveau_channel_cleanup(dev, file_priv); +} + +/* first module load, setup the mmio/fb mapping */ +/* KMS: we need mmio at load time, not when the first drm client 
opens. */ +int nouveau_firstopen(struct drm_device *dev) +{ + return 0; +} + +/* if we have an OF card, copy vbios to RAMIN */ +static void nouveau_OF_copy_vbios_to_ramin(struct drm_device *dev) +{ +#if defined(__powerpc__) + int size, i; + const uint32_t *bios; + struct device_node *dn = pci_device_to_OF_node(dev->pdev); + if (!dn) { + NV_INFO(dev, "Unable to get the OF node\n"); + return; + } + + bios = of_get_property(dn, "NVDA,BMP", &size); + if (bios) { + for (i = 0; i < size; i += 4) + nv_wi32(dev, i, bios[i/4]); + NV_INFO(dev, "OF bios successfully copied (%d bytes)\n", size); + } else { + NV_INFO(dev, "Unable to get the OF bios\n"); + } +#endif +} + +int nouveau_load(struct drm_device *dev, unsigned long flags) +{ + struct drm_nouveau_private *dev_priv; + uint32_t reg0; + resource_size_t mmio_start_offs; + + dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); + if (!dev_priv) + return -ENOMEM; + dev->dev_private = dev_priv; + dev_priv->dev = dev; + + dev_priv->flags = flags & NOUVEAU_FLAGS; + dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN; + + NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n", + dev->pci_vendor, dev->pci_device, dev->pdev->class); + + dev_priv->acpi_dsm = nouveau_dsm_probe(dev); + + if (dev_priv->acpi_dsm) + nouveau_hybrid_setup(dev); + + dev_priv->wq = create_workqueue("nouveau"); + if (!dev_priv->wq) + return -EINVAL; + + /* resource 0 is mmio regs */ + /* resource 1 is linear FB */ + /* resource 2 is RAMIN (mmio regs + 0x1000000) */ + /* resource 6 is bios */ + + /* map the mmio regs */ + mmio_start_offs = pci_resource_start(dev->pdev, 0); + dev_priv->mmio = ioremap(mmio_start_offs, 0x00800000); + if (!dev_priv->mmio) { + NV_ERROR(dev, "Unable to initialize the mmio mapping. " + "Please report your setup to " DRIVER_EMAIL "\n"); + return -EINVAL; + } + NV_DEBUG(dev, "regs mapped ok at 0x%llx\n", + (unsigned long long)mmio_start_offs); + +#ifdef __BIG_ENDIAN + /* Put the card in BE mode if it's not */ + if (nv_rd32(dev, NV03_PMC_BOOT_1)) + nv_wr32(dev, NV03_PMC_BOOT_1, 0x00000001); + + DRM_MEMORYBARRIER(); +#endif + + /* Time to determine the card architecture */ + reg0 = nv_rd32(dev, NV03_PMC_BOOT_0); + + /* We're dealing with >=NV10 */ + if ((reg0 & 0x0f000000) > 0) { + /* Bit 27-20 contain the architecture in hex */ + dev_priv->chipset = (reg0 & 0xff00000) >> 20; + /* NV04 or NV05 */ + } else if ((reg0 & 0xff00fff0) == 0x20004000) { + dev_priv->chipset = 0x04; + } else + dev_priv->chipset = 0xff; + + switch (dev_priv->chipset & 0xf0) { + case 0x00: + case 0x10: + case 0x20: + case 0x30: + dev_priv->card_type = dev_priv->chipset & 0xf0; + break; + case 0x40: + case 0x60: + dev_priv->card_type = NV_40; + break; + case 0x50: + case 0x80: + case 0x90: + case 0xa0: + dev_priv->card_type = NV_50; + break; + default: + NV_INFO(dev, "Unsupported chipset 0x%08x\n", reg0); + return -EINVAL; + } + + NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n", + dev_priv->card_type, reg0); + + /* map larger RAMIN aperture on NV40 cards */ + dev_priv->ramin = NULL; + if (dev_priv->card_type >= NV_40) { + int ramin_bar = 2; + if (pci_resource_len(dev->pdev, ramin_bar) == 0) + ramin_bar = 3; + + dev_priv->ramin_size = pci_resource_len(dev->pdev, ramin_bar); + dev_priv->ramin = ioremap( + pci_resource_start(dev->pdev, ramin_bar), + dev_priv->ramin_size); + if (!dev_priv->ramin) { + NV_ERROR(dev, "Failed to init RAMIN mapping, " + "limited instance memory available\n"); + } + } + + /* On older cards (or if the above failed), create a map covering + * the BAR0 PRAMIN 
aperture */ + if (!dev_priv->ramin) { + dev_priv->ramin_size = 1 * 1024 * 1024; + dev_priv->ramin = ioremap(mmio_start_offs + NV_RAMIN, + dev_priv->ramin_size); + if (!dev_priv->ramin) { + NV_ERROR(dev, "Failed to map BAR0 PRAMIN.\n"); + return -ENOMEM; + } + } + + nouveau_OF_copy_vbios_to_ramin(dev); + + /* Special flags */ + if (dev->pci_device == 0x01a0) + dev_priv->flags |= NV_NFORCE; + else if (dev->pci_device == 0x01f0) + dev_priv->flags |= NV_NFORCE2; + + /* For kernel modesetting, init card now and bring up fbcon */ + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + int ret = nouveau_card_init(dev); + if (ret) + return ret; + } + + return 0; +} + +static void nouveau_close(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + /* In the case of an error dev_priv may not be be allocated yet */ + if (dev_priv && dev_priv->card_type) + nouveau_card_takedown(dev); +} + +/* KMS: we need mmio at load time, not when the first drm client opens. */ +void nouveau_lastclose(struct drm_device *dev) +{ + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return; + + nouveau_close(dev); +} + +int nouveau_unload(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + if (dev_priv->card_type >= NV_50) + nv50_display_destroy(dev); + else + nv04_display_destroy(dev); + nouveau_close(dev); + } + + iounmap(dev_priv->mmio); + iounmap(dev_priv->ramin); + + kfree(dev_priv); + dev->dev_private = NULL; + return 0; +} + +int +nouveau_ioctl_card_init(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + return nouveau_card_init(dev); +} + +int nouveau_ioctl_getparam(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_nouveau_getparam *getparam = data; + + NOUVEAU_CHECK_INITIALISED_WITH_RETURN; + + switch (getparam->param) { + case NOUVEAU_GETPARAM_CHIPSET_ID: + getparam->value = dev_priv->chipset; + break; + case NOUVEAU_GETPARAM_PCI_VENDOR: + getparam->value = dev->pci_vendor; + break; + case NOUVEAU_GETPARAM_PCI_DEVICE: + getparam->value = dev->pci_device; + break; + case NOUVEAU_GETPARAM_BUS_TYPE: + if (drm_device_is_agp(dev)) + getparam->value = NV_AGP; + else if (drm_device_is_pcie(dev)) + getparam->value = NV_PCIE; + else + getparam->value = NV_PCI; + break; + case NOUVEAU_GETPARAM_FB_PHYSICAL: + getparam->value = dev_priv->fb_phys; + break; + case NOUVEAU_GETPARAM_AGP_PHYSICAL: + getparam->value = dev_priv->gart_info.aper_base; + break; + case NOUVEAU_GETPARAM_PCI_PHYSICAL: + if (dev->sg) { + getparam->value = (unsigned long)dev->sg->virtual; + } else { + NV_ERROR(dev, "Requested PCIGART address, " + "while no PCIGART was created\n"); + return -EINVAL; + } + break; + case NOUVEAU_GETPARAM_FB_SIZE: + getparam->value = dev_priv->fb_available_size; + break; + case NOUVEAU_GETPARAM_AGP_SIZE: + getparam->value = dev_priv->gart_info.aper_size; + break; + case NOUVEAU_GETPARAM_VM_VRAM_BASE: + getparam->value = dev_priv->vm_vram_base; + break; + default: + NV_ERROR(dev, "unknown parameter %lld\n", getparam->param); + return -EINVAL; + } + + return 0; +} + +int +nouveau_ioctl_setparam(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_nouveau_setparam *setparam = data; + + NOUVEAU_CHECK_INITIALISED_WITH_RETURN; + + switch (setparam->param) { + default: + NV_ERROR(dev, "unknown parameter %lld\n", setparam->param); + return -EINVAL; + } + + return 0; +} 
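
The getparam path above is how userspace discovers basic device properties (chipset, bus type, VRAM/GART sizes) before it creates channels. As a rough illustration only -- not part of this patch -- the sketch below shows the expected caller side, assuming libdrm's drmCommandWriteRead() helper and the DRM_NOUVEAU_GETPARAM command / struct drm_nouveau_getparam definitions from nouveau_drm.h; the header path and error handling are simplified.

	#include <stdint.h>
	#include <xf86drm.h>
	#include "nouveau_drm.h"	/* assumed to provide DRM_NOUVEAU_GETPARAM etc. */

	/* Ask the kernel (nouveau_ioctl_getparam) for the chipset id of an
	 * already-opened nouveau DRM file descriptor. */
	static int query_chipset(int fd, uint64_t *chipset)
	{
		struct drm_nouveau_getparam gp = {
			.param = NOUVEAU_GETPARAM_CHIPSET_ID,
		};
		int ret;

		ret = drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM,
					  &gp, sizeof(gp));
		if (ret)
			return ret;

		*chipset = gp.value;
		return 0;
	}

The same pattern covers the other parameters handled in the switch above, e.g. NOUVEAU_GETPARAM_FB_SIZE or NOUVEAU_GETPARAM_AGP_SIZE.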
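
For readers cross-checking register dumps, the chipset detection performed in nouveau_load() above boils down to a small decode of NV03_PMC_BOOT_0. The helper below is a hypothetical restatement of that logic (illustrative only, not added by this patch):

	/* Decode NV03_PMC_BOOT_0 the way nouveau_load() does: on NV10 and
	 * later the architecture sits in bits 27:20; NV04/NV05 are matched
	 * by a fixed signature; anything else is treated as unknown. */
	static int decode_pmc_boot_0(uint32_t reg0)
	{
		if ((reg0 & 0x0f000000) > 0)		/* >= NV10 */
			return (reg0 & 0x0ff00000) >> 20;
		if ((reg0 & 0xff00fff0) == 0x20004000)	/* NV04 or NV05 */
			return 0x04;
		return 0xff;				/* unknown chipset */
	}

nouveau_load() then buckets the result into dev_priv->card_type (NV_40, NV_50, ...) with the switch on (chipset & 0xf0) shown above.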
+ +/* Wait until (value(reg) & mask) == val, up until timeout has hit */ +bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout, + uint32_t reg, uint32_t mask, uint32_t val) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; + uint64_t start = ptimer->read(dev); + + do { + if ((nv_rd32(dev, reg) & mask) == val) + return true; + } while (ptimer->read(dev) - start < timeout); + + return false; +} + +/* Waits for PGRAPH to go completely idle */ +bool nouveau_wait_for_idle(struct drm_device *dev) +{ + if (!nv_wait(NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) { + NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n", + nv_rd32(dev, NV04_PGRAPH_STATUS)); + return false; + } + + return true; +} + diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c new file mode 100644 index 000000000000..187eb84e4da5 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA, + * All Rights Reserved. + * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA, + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#include "drmP.h" + +#include "nouveau_drv.h" + +static struct vm_operations_struct nouveau_ttm_vm_ops; +static const struct vm_operations_struct *ttm_vm_ops; + +static int +nouveau_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct ttm_buffer_object *bo = vma->vm_private_data; + int ret; + + if (unlikely(bo == NULL)) + return VM_FAULT_NOPAGE; + + ret = ttm_vm_ops->fault(vma, vmf); + return ret; +} + +int +nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct drm_file *file_priv = filp->private_data; + struct drm_nouveau_private *dev_priv = + file_priv->minor->dev->dev_private; + int ret; + + if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) + return drm_mmap(filp, vma); + + ret = ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev); + if (unlikely(ret != 0)) + return ret; + + if (unlikely(ttm_vm_ops == NULL)) { + ttm_vm_ops = vma->vm_ops; + nouveau_ttm_vm_ops = *ttm_vm_ops; + nouveau_ttm_vm_ops.fault = &nouveau_ttm_fault; + } + + vma->vm_ops = &nouveau_ttm_vm_ops; + return 0; +} + +static int +nouveau_ttm_mem_global_init(struct ttm_global_reference *ref) +{ + return ttm_mem_global_init(ref->object); +} + +static void +nouveau_ttm_mem_global_release(struct ttm_global_reference *ref) +{ + ttm_mem_global_release(ref->object); +} + +int +nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv) +{ + struct ttm_global_reference *global_ref; + int ret; + + global_ref = &dev_priv->ttm.mem_global_ref; + global_ref->global_type = TTM_GLOBAL_TTM_MEM; + global_ref->size = sizeof(struct ttm_mem_global); + global_ref->init = &nouveau_ttm_mem_global_init; + global_ref->release = &nouveau_ttm_mem_global_release; + + ret = ttm_global_item_ref(global_ref); + if (unlikely(ret != 0)) { + DRM_ERROR("Failed setting up TTM memory accounting\n"); + dev_priv->ttm.mem_global_ref.release = NULL; + return ret; + } + + dev_priv->ttm.bo_global_ref.mem_glob = global_ref->object; + global_ref = &dev_priv->ttm.bo_global_ref.ref; + global_ref->global_type = TTM_GLOBAL_TTM_BO; + global_ref->size = sizeof(struct ttm_bo_global); + global_ref->init = &ttm_bo_global_init; + global_ref->release = &ttm_bo_global_release; + + ret = ttm_global_item_ref(global_ref); + if (unlikely(ret != 0)) { + DRM_ERROR("Failed setting up TTM BO subsystem\n"); + ttm_global_item_unref(&dev_priv->ttm.mem_global_ref); + dev_priv->ttm.mem_global_ref.release = NULL; + return ret; + } + + return 0; +} + +void +nouveau_ttm_global_release(struct drm_nouveau_private *dev_priv) +{ + if (dev_priv->ttm.mem_global_ref.release == NULL) + return; + + ttm_global_item_unref(&dev_priv->ttm.bo_global_ref.ref); + ttm_global_item_unref(&dev_priv->ttm.mem_global_ref); + dev_priv->ttm.mem_global_ref.release = NULL; +} + diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c new file mode 100644 index 000000000000..b91363606055 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv04_crtc.c @@ -0,0 +1,1002 @@ +/* + * Copyright 1993-2003 NVIDIA, Corporation + * Copyright 2006 Dave Airlie + * Copyright 2007 Maarten Maathuis + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above 
copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "drmP.h" +#include "drm_crtc_helper.h" + +#include "nouveau_drv.h" +#include "nouveau_encoder.h" +#include "nouveau_connector.h" +#include "nouveau_crtc.h" +#include "nouveau_fb.h" +#include "nouveau_hw.h" +#include "nvreg.h" + +static int +nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb); + +static void +crtc_wr_cio_state(struct drm_crtc *crtc, struct nv04_crtc_reg *crtcstate, int index) +{ + NVWriteVgaCrtc(crtc->dev, nouveau_crtc(crtc)->index, index, + crtcstate->CRTC[index]); +} + +static void nv_crtc_set_digital_vibrance(struct drm_crtc *crtc, int level) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct drm_nouveau_private *dev_priv = crtc->dev->dev_private; + struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index]; + + regp->CRTC[NV_CIO_CRE_CSB] = nv_crtc->saturation = level; + if (nv_crtc->saturation && nv_gf4_disp_arch(crtc->dev)) { + regp->CRTC[NV_CIO_CRE_CSB] = 0x80; + regp->CRTC[NV_CIO_CRE_5B] = nv_crtc->saturation << 2; + crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_5B); + } + crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_CSB); +} + +static void nv_crtc_set_image_sharpening(struct drm_crtc *crtc, int level) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct drm_nouveau_private *dev_priv = crtc->dev->dev_private; + struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index]; + + nv_crtc->sharpness = level; + if (level < 0) /* blur is in hw range 0x3f -> 0x20 */ + level += 0x40; + regp->ramdac_634 = level; + NVWriteRAMDAC(crtc->dev, nv_crtc->index, NV_PRAMDAC_634, regp->ramdac_634); +} + +#define PLLSEL_VPLL1_MASK \ + (NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_VPLL \ + | NV_PRAMDAC_PLL_COEFF_SELECT_VCLK_RATIO_DB2) +#define PLLSEL_VPLL2_MASK \ + (NV_PRAMDAC_PLL_COEFF_SELECT_PLL_SOURCE_VPLL2 \ + | NV_PRAMDAC_PLL_COEFF_SELECT_VCLK2_RATIO_DB2) +#define PLLSEL_TV_MASK \ + (NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1 \ + | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1 \ + | NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK2 \ + | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK2) + +/* NV4x 0x40.. pll notes: + * gpu pll: 0x4000 + 0x4004 + * ?gpu? pll: 0x4008 + 0x400c + * vpll1: 0x4010 + 0x4014 + * vpll2: 0x4018 + 0x401c + * mpll: 0x4020 + 0x4024 + * mpll: 0x4038 + 0x403c + * + * the first register of each pair has some unknown details: + * bits 0-7: redirected values from elsewhere? (similar to PLL_SETUP_CONTROL?) + * bits 20-23: (mpll) something to do with post divider? + * bits 28-31: related to single stage mode? 
(bit 8/12) + */ + +static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mode * mode, int dot_clock) +{ + struct drm_device *dev = crtc->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct nv04_mode_state *state = &dev_priv->mode_reg; + struct nv04_crtc_reg *regp = &state->crtc_reg[nv_crtc->index]; + struct nouveau_pll_vals *pv = ®p->pllvals; + struct pll_lims pll_lim; + + if (get_pll_limits(dev, nv_crtc->index ? VPLL2 : VPLL1, &pll_lim)) + return; + + /* NM2 == 0 is used to determine single stage mode on two stage plls */ + pv->NM2 = 0; + + /* for newer nv4x the blob uses only the first stage of the vpll below a + * certain clock. for a certain nv4b this is 150MHz. since the max + * output frequency of the first stage for this card is 300MHz, it is + * assumed the threshold is given by vco1 maxfreq/2 + */ + /* for early nv4x, specifically nv40 and *some* nv43 (devids 0 and 6, + * not 8, others unknown), the blob always uses both plls. no problem + * has yet been observed in allowing the use a single stage pll on all + * nv43 however. the behaviour of single stage use is untested on nv40 + */ + if (dev_priv->chipset > 0x40 && dot_clock <= (pll_lim.vco1.maxfreq / 2)) + memset(&pll_lim.vco2, 0, sizeof(pll_lim.vco2)); + + if (!nouveau_calc_pll_mnp(dev, &pll_lim, dot_clock, pv)) + return; + + state->pllsel &= PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK; + + /* The blob uses this always, so let's do the same */ + if (dev_priv->card_type == NV_40) + state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE; + /* again nv40 and some nv43 act more like nv3x as described above */ + if (dev_priv->chipset < 0x41) + state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL | + NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL; + state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK; + + if (pv->NM2) + NV_TRACE(dev, "vpll: n1 %d n2 %d m1 %d m2 %d log2p %d\n", + pv->N1, pv->N2, pv->M1, pv->M2, pv->log2P); + else + NV_TRACE(dev, "vpll: n %d m %d log2p %d\n", + pv->N1, pv->M1, pv->log2P); + + nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset); +} + +static void +nv_crtc_dpms(struct drm_crtc *crtc, int mode) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct drm_device *dev = crtc->dev; + unsigned char seq1 = 0, crtc17 = 0; + unsigned char crtc1A; + + NV_TRACE(dev, "Setting dpms mode %d on CRTC %d\n", mode, + nv_crtc->index); + + if (nv_crtc->last_dpms == mode) /* Don't do unnecesary mode changes. 
*/ + return; + + nv_crtc->last_dpms = mode; + + if (nv_two_heads(dev)) + NVSetOwner(dev, nv_crtc->index); + + /* nv4ref indicates these two RPC1 bits inhibit h/v sync */ + crtc1A = NVReadVgaCrtc(dev, nv_crtc->index, + NV_CIO_CRE_RPC1_INDEX) & ~0xC0; + switch (mode) { + case DRM_MODE_DPMS_STANDBY: + /* Screen: Off; HSync: Off, VSync: On -- Not Supported */ + seq1 = 0x20; + crtc17 = 0x80; + crtc1A |= 0x80; + break; + case DRM_MODE_DPMS_SUSPEND: + /* Screen: Off; HSync: On, VSync: Off -- Not Supported */ + seq1 = 0x20; + crtc17 = 0x80; + crtc1A |= 0x40; + break; + case DRM_MODE_DPMS_OFF: + /* Screen: Off; HSync: Off, VSync: Off */ + seq1 = 0x20; + crtc17 = 0x00; + crtc1A |= 0xC0; + break; + case DRM_MODE_DPMS_ON: + default: + /* Screen: On; HSync: On, VSync: On */ + seq1 = 0x00; + crtc17 = 0x80; + break; + } + + NVVgaSeqReset(dev, nv_crtc->index, true); + /* Each head has it's own sequencer, so we can turn it off when we want */ + seq1 |= (NVReadVgaSeq(dev, nv_crtc->index, NV_VIO_SR_CLOCK_INDEX) & ~0x20); + NVWriteVgaSeq(dev, nv_crtc->index, NV_VIO_SR_CLOCK_INDEX, seq1); + crtc17 |= (NVReadVgaCrtc(dev, nv_crtc->index, NV_CIO_CR_MODE_INDEX) & ~0x80); + mdelay(10); + NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CR_MODE_INDEX, crtc17); + NVVgaSeqReset(dev, nv_crtc->index, false); + + NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RPC1_INDEX, crtc1A); +} + +static bool +nv_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static void +nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode) +{ + struct drm_device *dev = crtc->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index]; + struct drm_framebuffer *fb = crtc->fb; + + /* Calculate our timings */ + int horizDisplay = (mode->crtc_hdisplay >> 3) - 1; + int horizStart = (mode->crtc_hsync_start >> 3) - 1; + int horizEnd = (mode->crtc_hsync_end >> 3) - 1; + int horizTotal = (mode->crtc_htotal >> 3) - 5; + int horizBlankStart = (mode->crtc_hdisplay >> 3) - 1; + int horizBlankEnd = (mode->crtc_htotal >> 3) - 1; + int vertDisplay = mode->crtc_vdisplay - 1; + int vertStart = mode->crtc_vsync_start - 1; + int vertEnd = mode->crtc_vsync_end - 1; + int vertTotal = mode->crtc_vtotal - 2; + int vertBlankStart = mode->crtc_vdisplay - 1; + int vertBlankEnd = mode->crtc_vtotal - 1; + + struct drm_encoder *encoder; + bool fp_output = false; + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + + if (encoder->crtc == crtc && + (nv_encoder->dcb->type == OUTPUT_LVDS || + nv_encoder->dcb->type == OUTPUT_TMDS)) + fp_output = true; + } + + if (fp_output) { + vertStart = vertTotal - 3; + vertEnd = vertTotal - 2; + vertBlankStart = vertStart; + horizStart = horizTotal - 5; + horizEnd = horizTotal - 2; + horizBlankEnd = horizTotal + 4; +#if 0 + if (dev->overlayAdaptor && dev_priv->card_type >= NV_10) + /* This reportedly works around some video overlay bandwidth problems */ + horizTotal += 2; +#endif + } + + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + vertTotal |= 1; + +#if 0 + ErrorF("horizDisplay: 0x%X \n", horizDisplay); + ErrorF("horizStart: 0x%X \n", horizStart); + ErrorF("horizEnd: 0x%X \n", horizEnd); + ErrorF("horizTotal: 0x%X \n", horizTotal); + ErrorF("horizBlankStart: 0x%X \n", horizBlankStart); + ErrorF("horizBlankEnd: 0x%X \n", 
horizBlankEnd); + ErrorF("vertDisplay: 0x%X \n", vertDisplay); + ErrorF("vertStart: 0x%X \n", vertStart); + ErrorF("vertEnd: 0x%X \n", vertEnd); + ErrorF("vertTotal: 0x%X \n", vertTotal); + ErrorF("vertBlankStart: 0x%X \n", vertBlankStart); + ErrorF("vertBlankEnd: 0x%X \n", vertBlankEnd); +#endif + + /* + * compute correct Hsync & Vsync polarity + */ + if ((mode->flags & (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)) + && (mode->flags & (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) { + + regp->MiscOutReg = 0x23; + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + regp->MiscOutReg |= 0x40; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + regp->MiscOutReg |= 0x80; + } else { + int vdisplay = mode->vdisplay; + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + vdisplay *= 2; + if (mode->vscan > 1) + vdisplay *= mode->vscan; + if (vdisplay < 400) + regp->MiscOutReg = 0xA3; /* +hsync -vsync */ + else if (vdisplay < 480) + regp->MiscOutReg = 0x63; /* -hsync +vsync */ + else if (vdisplay < 768) + regp->MiscOutReg = 0xE3; /* -hsync -vsync */ + else + regp->MiscOutReg = 0x23; /* +hsync +vsync */ + } + + regp->MiscOutReg |= (mode->clock_index & 0x03) << 2; + + /* + * Time Sequencer + */ + regp->Sequencer[NV_VIO_SR_RESET_INDEX] = 0x00; + /* 0x20 disables the sequencer */ + if (mode->flags & DRM_MODE_FLAG_CLKDIV2) + regp->Sequencer[NV_VIO_SR_CLOCK_INDEX] = 0x29; + else + regp->Sequencer[NV_VIO_SR_CLOCK_INDEX] = 0x21; + regp->Sequencer[NV_VIO_SR_PLANE_MASK_INDEX] = 0x0F; + regp->Sequencer[NV_VIO_SR_CHAR_MAP_INDEX] = 0x00; + regp->Sequencer[NV_VIO_SR_MEM_MODE_INDEX] = 0x0E; + + /* + * CRTC + */ + regp->CRTC[NV_CIO_CR_HDT_INDEX] = horizTotal; + regp->CRTC[NV_CIO_CR_HDE_INDEX] = horizDisplay; + regp->CRTC[NV_CIO_CR_HBS_INDEX] = horizBlankStart; + regp->CRTC[NV_CIO_CR_HBE_INDEX] = (1 << 7) | + XLATE(horizBlankEnd, 0, NV_CIO_CR_HBE_4_0); + regp->CRTC[NV_CIO_CR_HRS_INDEX] = horizStart; + regp->CRTC[NV_CIO_CR_HRE_INDEX] = XLATE(horizBlankEnd, 5, NV_CIO_CR_HRE_HBE_5) | + XLATE(horizEnd, 0, NV_CIO_CR_HRE_4_0); + regp->CRTC[NV_CIO_CR_VDT_INDEX] = vertTotal; + regp->CRTC[NV_CIO_CR_OVL_INDEX] = XLATE(vertStart, 9, NV_CIO_CR_OVL_VRS_9) | + XLATE(vertDisplay, 9, NV_CIO_CR_OVL_VDE_9) | + XLATE(vertTotal, 9, NV_CIO_CR_OVL_VDT_9) | + (1 << 4) | + XLATE(vertBlankStart, 8, NV_CIO_CR_OVL_VBS_8) | + XLATE(vertStart, 8, NV_CIO_CR_OVL_VRS_8) | + XLATE(vertDisplay, 8, NV_CIO_CR_OVL_VDE_8) | + XLATE(vertTotal, 8, NV_CIO_CR_OVL_VDT_8); + regp->CRTC[NV_CIO_CR_RSAL_INDEX] = 0x00; + regp->CRTC[NV_CIO_CR_CELL_HT_INDEX] = ((mode->flags & DRM_MODE_FLAG_DBLSCAN) ? MASK(NV_CIO_CR_CELL_HT_SCANDBL) : 0) | + 1 << 6 | + XLATE(vertBlankStart, 9, NV_CIO_CR_CELL_HT_VBS_9); + regp->CRTC[NV_CIO_CR_CURS_ST_INDEX] = 0x00; + regp->CRTC[NV_CIO_CR_CURS_END_INDEX] = 0x00; + regp->CRTC[NV_CIO_CR_SA_HI_INDEX] = 0x00; + regp->CRTC[NV_CIO_CR_SA_LO_INDEX] = 0x00; + regp->CRTC[NV_CIO_CR_TCOFF_HI_INDEX] = 0x00; + regp->CRTC[NV_CIO_CR_TCOFF_LO_INDEX] = 0x00; + regp->CRTC[NV_CIO_CR_VRS_INDEX] = vertStart; + regp->CRTC[NV_CIO_CR_VRE_INDEX] = 1 << 5 | XLATE(vertEnd, 0, NV_CIO_CR_VRE_3_0); + regp->CRTC[NV_CIO_CR_VDE_INDEX] = vertDisplay; + /* framebuffer can be larger than crtc scanout area. */ + regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = fb->pitch / 8; + regp->CRTC[NV_CIO_CR_ULINE_INDEX] = 0x00; + regp->CRTC[NV_CIO_CR_VBS_INDEX] = vertBlankStart; + regp->CRTC[NV_CIO_CR_VBE_INDEX] = vertBlankEnd; + regp->CRTC[NV_CIO_CR_MODE_INDEX] = 0x43; + regp->CRTC[NV_CIO_CR_LCOMP_INDEX] = 0xff; + + /* + * Some extended CRTC registers (they are not saved with the rest of the vga regs). 
+ */ + + /* framebuffer can be larger than crtc scanout area. */ + regp->CRTC[NV_CIO_CRE_RPC0_INDEX] = XLATE(fb->pitch / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8); + regp->CRTC[NV_CIO_CRE_RPC1_INDEX] = mode->crtc_hdisplay < 1280 ? + MASK(NV_CIO_CRE_RPC1_LARGE) : 0x00; + regp->CRTC[NV_CIO_CRE_LSR_INDEX] = XLATE(horizBlankEnd, 6, NV_CIO_CRE_LSR_HBE_6) | + XLATE(vertBlankStart, 10, NV_CIO_CRE_LSR_VBS_10) | + XLATE(vertStart, 10, NV_CIO_CRE_LSR_VRS_10) | + XLATE(vertDisplay, 10, NV_CIO_CRE_LSR_VDE_10) | + XLATE(vertTotal, 10, NV_CIO_CRE_LSR_VDT_10); + regp->CRTC[NV_CIO_CRE_HEB__INDEX] = XLATE(horizStart, 8, NV_CIO_CRE_HEB_HRS_8) | + XLATE(horizBlankStart, 8, NV_CIO_CRE_HEB_HBS_8) | + XLATE(horizDisplay, 8, NV_CIO_CRE_HEB_HDE_8) | + XLATE(horizTotal, 8, NV_CIO_CRE_HEB_HDT_8); + regp->CRTC[NV_CIO_CRE_EBR_INDEX] = XLATE(vertBlankStart, 11, NV_CIO_CRE_EBR_VBS_11) | + XLATE(vertStart, 11, NV_CIO_CRE_EBR_VRS_11) | + XLATE(vertDisplay, 11, NV_CIO_CRE_EBR_VDE_11) | + XLATE(vertTotal, 11, NV_CIO_CRE_EBR_VDT_11); + + if (mode->flags & DRM_MODE_FLAG_INTERLACE) { + horizTotal = (horizTotal >> 1) & ~1; + regp->CRTC[NV_CIO_CRE_ILACE__INDEX] = horizTotal; + regp->CRTC[NV_CIO_CRE_HEB__INDEX] |= XLATE(horizTotal, 8, NV_CIO_CRE_HEB_ILC_8); + } else + regp->CRTC[NV_CIO_CRE_ILACE__INDEX] = 0xff; /* interlace off */ + + /* + * Graphics Display Controller + */ + regp->Graphics[NV_VIO_GX_SR_INDEX] = 0x00; + regp->Graphics[NV_VIO_GX_SREN_INDEX] = 0x00; + regp->Graphics[NV_VIO_GX_CCOMP_INDEX] = 0x00; + regp->Graphics[NV_VIO_GX_ROP_INDEX] = 0x00; + regp->Graphics[NV_VIO_GX_READ_MAP_INDEX] = 0x00; + regp->Graphics[NV_VIO_GX_MODE_INDEX] = 0x40; /* 256 color mode */ + regp->Graphics[NV_VIO_GX_MISC_INDEX] = 0x05; /* map 64k mem + graphic mode */ + regp->Graphics[NV_VIO_GX_DONT_CARE_INDEX] = 0x0F; + regp->Graphics[NV_VIO_GX_BIT_MASK_INDEX] = 0xFF; + + regp->Attribute[0] = 0x00; /* standard colormap translation */ + regp->Attribute[1] = 0x01; + regp->Attribute[2] = 0x02; + regp->Attribute[3] = 0x03; + regp->Attribute[4] = 0x04; + regp->Attribute[5] = 0x05; + regp->Attribute[6] = 0x06; + regp->Attribute[7] = 0x07; + regp->Attribute[8] = 0x08; + regp->Attribute[9] = 0x09; + regp->Attribute[10] = 0x0A; + regp->Attribute[11] = 0x0B; + regp->Attribute[12] = 0x0C; + regp->Attribute[13] = 0x0D; + regp->Attribute[14] = 0x0E; + regp->Attribute[15] = 0x0F; + regp->Attribute[NV_CIO_AR_MODE_INDEX] = 0x01; /* Enable graphic mode */ + /* Non-vga */ + regp->Attribute[NV_CIO_AR_OSCAN_INDEX] = 0x00; + regp->Attribute[NV_CIO_AR_PLANE_INDEX] = 0x0F; /* enable all color planes */ + regp->Attribute[NV_CIO_AR_HPP_INDEX] = 0x00; + regp->Attribute[NV_CIO_AR_CSEL_INDEX] = 0x00; +} + +/** + * Sets up registers for the given mode/adjusted_mode pair. + * + * The clocks, CRTCs and outputs attached to this CRTC must be off. + * + * This shouldn't enable any clocks, CRTCs, or outputs, but they should + * be easily turned on/off after this. 
+ */ +static void +nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode) +{ + struct drm_device *dev = crtc->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index]; + struct nv04_crtc_reg *savep = &dev_priv->saved_reg.crtc_reg[nv_crtc->index]; + struct drm_encoder *encoder; + bool lvds_output = false, tmds_output = false, tv_output = false, + off_chip_digital = false; + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + bool digital = false; + + if (encoder->crtc != crtc) + continue; + + if (nv_encoder->dcb->type == OUTPUT_LVDS) + digital = lvds_output = true; + if (nv_encoder->dcb->type == OUTPUT_TV) + tv_output = true; + if (nv_encoder->dcb->type == OUTPUT_TMDS) + digital = tmds_output = true; + if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP && digital) + off_chip_digital = true; + } + + /* Registers not directly related to the (s)vga mode */ + + /* What is the meaning of this register? */ + /* A few popular values are 0x18, 0x1c, 0x38, 0x3c */ + regp->CRTC[NV_CIO_CRE_ENH_INDEX] = savep->CRTC[NV_CIO_CRE_ENH_INDEX] & ~(1<<5); + + regp->crtc_eng_ctrl = 0; + /* Except for rare conditions I2C is enabled on the primary crtc */ + if (nv_crtc->index == 0) + regp->crtc_eng_ctrl |= NV_CRTC_FSEL_I2C; +#if 0 + /* Set overlay to desired crtc. */ + if (dev->overlayAdaptor) { + NVPortPrivPtr pPriv = GET_OVERLAY_PRIVATE(dev); + if (pPriv->overlayCRTC == nv_crtc->index) + regp->crtc_eng_ctrl |= NV_CRTC_FSEL_OVERLAY; + } +#endif + + /* ADDRESS_SPACE_PNVM is the same as setting HCUR_ASI */ + regp->cursor_cfg = NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 | + NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 | + NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM; + if (dev_priv->chipset >= 0x11) + regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32; + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE; + + /* Unblock some timings */ + regp->CRTC[NV_CIO_CRE_53] = 0; + regp->CRTC[NV_CIO_CRE_54] = 0; + + /* 0x00 is disabled, 0x11 is lvds, 0x22 crt and 0x88 tmds */ + if (lvds_output) + regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x11; + else if (tmds_output) + regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x88; + else + regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x22; + + /* These values seem to vary */ + /* This register seems to be used by the bios to make certain decisions on some G70 cards? 
*/ + regp->CRTC[NV_CIO_CRE_SCRATCH4__INDEX] = savep->CRTC[NV_CIO_CRE_SCRATCH4__INDEX]; + + nv_crtc_set_digital_vibrance(crtc, nv_crtc->saturation); + + /* probably a scratch reg, but kept for cargo-cult purposes: + * bit0: crtc0?, head A + * bit6: lvds, head A + * bit7: (only in X), head A + */ + if (nv_crtc->index == 0) + regp->CRTC[NV_CIO_CRE_4B] = savep->CRTC[NV_CIO_CRE_4B] | 0x80; + + /* The blob seems to take the current value from crtc 0, add 4 to that + * and reuse the old value for crtc 1 */ + regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] = dev_priv->saved_reg.crtc_reg[0].CRTC[NV_CIO_CRE_TVOUT_LATENCY]; + if (!nv_crtc->index) + regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] += 4; + + /* the blob sometimes sets |= 0x10 (which is the same as setting |= + * 1 << 30 on 0x60.830), for no apparent reason */ + regp->CRTC[NV_CIO_CRE_59] = off_chip_digital; + + regp->crtc_830 = mode->crtc_vdisplay - 3; + regp->crtc_834 = mode->crtc_vdisplay - 1; + + if (dev_priv->card_type == NV_40) + /* This is what the blob does */ + regp->crtc_850 = NVReadCRTC(dev, 0, NV_PCRTC_850); + + if (dev_priv->card_type >= NV_30) + regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT); + + regp->crtc_cfg = NV_PCRTC_CONFIG_START_ADDRESS_HSYNC; + + /* Some misc regs */ + if (dev_priv->card_type == NV_40) { + regp->CRTC[NV_CIO_CRE_85] = 0xFF; + regp->CRTC[NV_CIO_CRE_86] = 0x1; + } + + regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] = (crtc->fb->depth + 1) / 8; + /* Enable slaved mode (called MODE_TV in nv4ref.h) */ + if (lvds_output || tmds_output || tv_output) + regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (1 << 7); + + /* Generic PRAMDAC regs */ + + if (dev_priv->card_type >= NV_10) + /* Only bit that bios and blob set. */ + regp->nv10_cursync = (1 << 25); + + regp->ramdac_gen_ctrl = NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS | + NV_PRAMDAC_GENERAL_CONTROL_VGA_STATE_SEL | + NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON; + if (crtc->fb->depth == 16) + regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL; + if (dev_priv->chipset >= 0x11) + regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG; + + regp->ramdac_630 = 0; /* turn off green mode (tv test pattern?) */ + regp->tv_setup = 0; + + nv_crtc_set_image_sharpening(crtc, nv_crtc->sharpness); + + /* Some values the blob sets */ + regp->ramdac_8c0 = 0x100; + regp->ramdac_a20 = 0x0; + regp->ramdac_a24 = 0xfffff; + regp->ramdac_a34 = 0x1; +} + +/** + * Sets up registers for the given mode/adjusted_mode pair. + * + * The clocks, CRTCs and outputs attached to this CRTC must be off. + * + * This shouldn't enable any clocks, CRTCs, or outputs, but they should + * be easily turned on/off after this. 
+ */ +static int +nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, + int x, int y, struct drm_framebuffer *old_fb) +{ + struct drm_device *dev = crtc->dev; + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct drm_nouveau_private *dev_priv = dev->dev_private; + + NV_DEBUG(dev, "CTRC mode on CRTC %d:\n", nv_crtc->index); + drm_mode_debug_printmodeline(adjusted_mode); + + /* unlock must come after turning off FP_TG_CONTROL in output_prepare */ + nv_lock_vga_crtc_shadow(dev, nv_crtc->index, -1); + + nv_crtc_mode_set_vga(crtc, adjusted_mode); + /* calculated in nv04_dfp_prepare, nv40 needs it written before calculating PLLs */ + if (dev_priv->card_type == NV_40) + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, dev_priv->mode_reg.sel_clk); + nv_crtc_mode_set_regs(crtc, adjusted_mode); + nv_crtc_calc_state_ext(crtc, mode, adjusted_mode->clock); + return 0; +} + +static void nv_crtc_save(struct drm_crtc *crtc) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct drm_nouveau_private *dev_priv = crtc->dev->dev_private; + struct nv04_mode_state *state = &dev_priv->mode_reg; + struct nv04_crtc_reg *crtc_state = &state->crtc_reg[nv_crtc->index]; + struct nv04_mode_state *saved = &dev_priv->saved_reg; + struct nv04_crtc_reg *crtc_saved = &saved->crtc_reg[nv_crtc->index]; + + if (nv_two_heads(crtc->dev)) + NVSetOwner(crtc->dev, nv_crtc->index); + + nouveau_hw_save_state(crtc->dev, nv_crtc->index, saved); + + /* init some state to saved value */ + state->sel_clk = saved->sel_clk & ~(0x5 << 16); + crtc_state->CRTC[NV_CIO_CRE_LCD__INDEX] = crtc_saved->CRTC[NV_CIO_CRE_LCD__INDEX]; + state->pllsel = saved->pllsel & ~(PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK); + crtc_state->gpio_ext = crtc_saved->gpio_ext; +} + +static void nv_crtc_restore(struct drm_crtc *crtc) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct drm_nouveau_private *dev_priv = crtc->dev->dev_private; + int head = nv_crtc->index; + uint8_t saved_cr21 = dev_priv->saved_reg.crtc_reg[head].CRTC[NV_CIO_CRE_21]; + + if (nv_two_heads(crtc->dev)) + NVSetOwner(crtc->dev, head); + + nouveau_hw_load_state(crtc->dev, head, &dev_priv->saved_reg); + nv_lock_vga_crtc_shadow(crtc->dev, head, saved_cr21); + + nv_crtc->last_dpms = NV_DPMS_CLEARED; +} + +static void nv_crtc_prepare(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct drm_crtc_helper_funcs *funcs = crtc->helper_private; + + if (nv_two_heads(dev)) + NVSetOwner(dev, nv_crtc->index); + + funcs->dpms(crtc, DRM_MODE_DPMS_OFF); + + NVBlankScreen(dev, nv_crtc->index, true); + + /* Some more preperation. 
*/ + NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_CONFIG, NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA); + if (dev_priv->card_type == NV_40) { + uint32_t reg900 = NVReadRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900); + NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900, reg900 & ~0x10000); + } +} + +static void nv_crtc_commit(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_crtc_helper_funcs *funcs = crtc->helper_private; + struct drm_nouveau_private *dev_priv = crtc->dev->dev_private; + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + + nouveau_hw_load_state(dev, nv_crtc->index, &dev_priv->mode_reg); + nv04_crtc_mode_set_base(crtc, crtc->x, crtc->y, NULL); + +#ifdef __BIG_ENDIAN + /* turn on LFB swapping */ + { + uint8_t tmp = NVReadVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RCR); + tmp |= MASK(NV_CIO_CRE_RCR_ENDIAN_BIG); + NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RCR, tmp); + } +#endif + + funcs->dpms(crtc, DRM_MODE_DPMS_ON); +} + +static void nv_crtc_destroy(struct drm_crtc *crtc) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + + NV_DEBUG(crtc->dev, "\n"); + + if (!nv_crtc) + return; + + drm_crtc_cleanup(crtc); + + nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); + kfree(nv_crtc); +} + +static void +nv_crtc_gamma_load(struct drm_crtc *crtc) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct drm_device *dev = nv_crtc->base.dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct rgb { uint8_t r, g, b; } __attribute__((packed)) *rgbs; + int i; + + rgbs = (struct rgb *)dev_priv->mode_reg.crtc_reg[nv_crtc->index].DAC; + for (i = 0; i < 256; i++) { + rgbs[i].r = nv_crtc->lut.r[i] >> 8; + rgbs[i].g = nv_crtc->lut.g[i] >> 8; + rgbs[i].b = nv_crtc->lut.b[i] >> 8; + } + + nouveau_hw_load_state_palette(dev, nv_crtc->index, &dev_priv->mode_reg); +} + +static void +nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t size) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + int i; + + if (size != 256) + return; + + for (i = 0; i < 256; i++) { + nv_crtc->lut.r[i] = r[i]; + nv_crtc->lut.g[i] = g[i]; + nv_crtc->lut.b[i] = b[i]; + } + + /* We need to know the depth before we upload, but it's possible to + * get called before a framebuffer is bound. If this is the case, + * mark the lut values as dirty by setting depth==0, and it'll be + * uploaded on the first mode_set_base() + */ + if (!nv_crtc->base.fb) { + nv_crtc->lut.depth = 0; + return; + } + + nv_crtc_gamma_load(crtc); +} + +static int +nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index]; + struct drm_framebuffer *drm_fb = nv_crtc->base.fb; + struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); + int arb_burst, arb_lwm; + int ret; + + ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM); + if (ret) + return ret; + + if (old_fb) { + struct nouveau_framebuffer *ofb = nouveau_framebuffer(old_fb); + nouveau_bo_unpin(ofb->nvbo); + } + + nv_crtc->fb.offset = fb->nvbo->bo.offset; + + if (nv_crtc->lut.depth != drm_fb->depth) { + nv_crtc->lut.depth = drm_fb->depth; + nv_crtc_gamma_load(crtc); + } + + /* Update the framebuffer format. 
*/ + regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] &= ~3; + regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (crtc->fb->depth + 1) / 8; + regp->ramdac_gen_ctrl &= ~NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL; + if (crtc->fb->depth == 16) + regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL; + crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_PIXEL_INDEX); + NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_GENERAL_CONTROL, + regp->ramdac_gen_ctrl); + + regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitch >> 3; + regp->CRTC[NV_CIO_CRE_RPC0_INDEX] = + XLATE(drm_fb->pitch >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8); + crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_RPC0_INDEX); + crtc_wr_cio_state(crtc, regp, NV_CIO_CR_OFFSET_INDEX); + + /* Update the framebuffer location. */ + regp->fb_start = nv_crtc->fb.offset & ~3; + regp->fb_start += (y * drm_fb->pitch) + (x * drm_fb->bits_per_pixel / 8); + NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_START, regp->fb_start); + + /* Update the arbitration parameters. */ + nouveau_calc_arb(dev, crtc->mode.clock, drm_fb->bits_per_pixel, + &arb_burst, &arb_lwm); + + regp->CRTC[NV_CIO_CRE_FF_INDEX] = arb_burst; + regp->CRTC[NV_CIO_CRE_FFLWM__INDEX] = arb_lwm & 0xff; + crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX); + crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX); + + if (dev_priv->card_type >= NV_30) { + regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8; + crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47); + } + + return 0; +} + +static void nv04_cursor_upload(struct drm_device *dev, struct nouveau_bo *src, + struct nouveau_bo *dst) +{ + int width = nv_cursor_width(dev); + uint32_t pixel; + int i, j; + + for (i = 0; i < width; i++) { + for (j = 0; j < width; j++) { + pixel = nouveau_bo_rd32(src, i*64 + j); + + nouveau_bo_wr16(dst, i*width + j, (pixel & 0x80000000) >> 16 + | (pixel & 0xf80000) >> 9 + | (pixel & 0xf800) >> 6 + | (pixel & 0xf8) >> 3); + } + } +} + +static void nv11_cursor_upload(struct drm_device *dev, struct nouveau_bo *src, + struct nouveau_bo *dst) +{ + uint32_t pixel; + int alpha, i; + + /* nv11+ supports premultiplied (PM), or non-premultiplied (NPM) alpha + * cursors (though NPM in combination with fp dithering may not work on + * nv11, from "nv" driver history) + * NPM mode needs NV_PCRTC_CURSOR_CONFIG_ALPHA_BLEND set and is what the + * blob uses, however we get given PM cursors so we use PM mode + */ + for (i = 0; i < 64 * 64; i++) { + pixel = nouveau_bo_rd32(src, i); + + /* hw gets unhappy if alpha <= rgb values. 
for a PM image "less + * than" shouldn't happen; fix "equal to" case by adding one to + * alpha channel (slightly inaccurate, but so is attempting to + * get back to NPM images, due to limits of integer precision) + */ + alpha = pixel >> 24; + if (alpha > 0 && alpha < 255) + pixel = (pixel & 0x00ffffff) | ((alpha + 1) << 24); + +#ifdef __BIG_ENDIAN + { + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (dev_priv->chipset == 0x11) { + pixel = ((pixel & 0x000000ff) << 24) | + ((pixel & 0x0000ff00) << 8) | + ((pixel & 0x00ff0000) >> 8) | + ((pixel & 0xff000000) >> 24); + } + } +#endif + + nouveau_bo_wr32(dst, i, pixel); + } +} + +static int +nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, + uint32_t buffer_handle, uint32_t width, uint32_t height) +{ + struct drm_nouveau_private *dev_priv = crtc->dev->dev_private; + struct drm_device *dev = dev_priv->dev; + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct nouveau_bo *cursor = NULL; + struct drm_gem_object *gem; + int ret = 0; + + if (width != 64 || height != 64) + return -EINVAL; + + if (!buffer_handle) { + nv_crtc->cursor.hide(nv_crtc, true); + return 0; + } + + gem = drm_gem_object_lookup(dev, file_priv, buffer_handle); + if (!gem) + return -EINVAL; + cursor = nouveau_gem_object(gem); + + ret = nouveau_bo_map(cursor); + if (ret) + goto out; + + if (dev_priv->chipset >= 0x11) + nv11_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo); + else + nv04_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo); + + nouveau_bo_unmap(cursor); + nv_crtc->cursor.offset = nv_crtc->cursor.nvbo->bo.offset; + nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset); + nv_crtc->cursor.show(nv_crtc, true); +out: + mutex_lock(&dev->struct_mutex); + drm_gem_object_unreference(gem); + mutex_unlock(&dev->struct_mutex); + return ret; +} + +static int +nv04_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + + nv_crtc->cursor.set_pos(nv_crtc, x, y); + return 0; +} + +static const struct drm_crtc_funcs nv04_crtc_funcs = { + .save = nv_crtc_save, + .restore = nv_crtc_restore, + .cursor_set = nv04_crtc_cursor_set, + .cursor_move = nv04_crtc_cursor_move, + .gamma_set = nv_crtc_gamma_set, + .set_config = drm_crtc_helper_set_config, + .destroy = nv_crtc_destroy, +}; + +static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = { + .dpms = nv_crtc_dpms, + .prepare = nv_crtc_prepare, + .commit = nv_crtc_commit, + .mode_fixup = nv_crtc_mode_fixup, + .mode_set = nv_crtc_mode_set, + .mode_set_base = nv04_crtc_mode_set_base, + .load_lut = nv_crtc_gamma_load, +}; + +int +nv04_crtc_create(struct drm_device *dev, int crtc_num) +{ + struct nouveau_crtc *nv_crtc; + int ret, i; + + nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL); + if (!nv_crtc) + return -ENOMEM; + + for (i = 0; i < 256; i++) { + nv_crtc->lut.r[i] = i << 8; + nv_crtc->lut.g[i] = i << 8; + nv_crtc->lut.b[i] = i << 8; + } + nv_crtc->lut.depth = 0; + + nv_crtc->index = crtc_num; + nv_crtc->last_dpms = NV_DPMS_CLEARED; + + drm_crtc_init(dev, &nv_crtc->base, &nv04_crtc_funcs); + drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs); + drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256); + + ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM, + 0, 0x0000, false, true, &nv_crtc->cursor.nvbo); + if (!ret) { + ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); + if (!ret) + ret = nouveau_bo_map(nv_crtc->cursor.nvbo); + if (ret) + nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); + } + + 
nv04_cursor_init(nv_crtc); + + return 0; +} + diff --git a/drivers/gpu/drm/nouveau/nv04_cursor.c b/drivers/gpu/drm/nouveau/nv04_cursor.c new file mode 100644 index 000000000000..89a91b9d8b25 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv04_cursor.c @@ -0,0 +1,70 @@ +#include "drmP.h" +#include "drm_mode.h" +#include "nouveau_reg.h" +#include "nouveau_drv.h" +#include "nouveau_crtc.h" +#include "nouveau_hw.h" + +static void +nv04_cursor_show(struct nouveau_crtc *nv_crtc, bool update) +{ + nv_show_cursor(nv_crtc->base.dev, nv_crtc->index, true); +} + +static void +nv04_cursor_hide(struct nouveau_crtc *nv_crtc, bool update) +{ + nv_show_cursor(nv_crtc->base.dev, nv_crtc->index, false); +} + +static void +nv04_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y) +{ + NVWriteRAMDAC(nv_crtc->base.dev, nv_crtc->index, + NV_PRAMDAC_CU_START_POS, + XLATE(y, 0, NV_PRAMDAC_CU_START_POS_Y) | + XLATE(x, 0, NV_PRAMDAC_CU_START_POS_X)); +} + +static void +crtc_wr_cio_state(struct drm_crtc *crtc, struct nv04_crtc_reg *crtcstate, int index) +{ + NVWriteVgaCrtc(crtc->dev, nouveau_crtc(crtc)->index, index, + crtcstate->CRTC[index]); +} + +static void +nv04_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset) +{ + struct drm_device *dev = nv_crtc->base.dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index]; + struct drm_crtc *crtc = &nv_crtc->base; + + regp->CRTC[NV_CIO_CRE_HCUR_ADDR0_INDEX] = + MASK(NV_CIO_CRE_HCUR_ASI) | + XLATE(offset, 17, NV_CIO_CRE_HCUR_ADDR0_ADR); + regp->CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX] = + XLATE(offset, 11, NV_CIO_CRE_HCUR_ADDR1_ADR); + if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN) + regp->CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX] |= + MASK(NV_CIO_CRE_HCUR_ADDR1_CUR_DBL); + regp->CRTC[NV_CIO_CRE_HCUR_ADDR2_INDEX] = offset >> 24; + + crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); + crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX); + crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); + if (dev_priv->card_type == NV_40) + nv_fix_nv40_hw_cursor(dev, nv_crtc->index); +} + +int +nv04_cursor_init(struct nouveau_crtc *crtc) +{ + crtc->cursor.set_offset = nv04_cursor_set_offset; + crtc->cursor.set_pos = nv04_cursor_set_pos; + crtc->cursor.hide = nv04_cursor_hide; + crtc->cursor.show = nv04_cursor_show; + return 0; +} + diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c new file mode 100644 index 000000000000..a5fa51714e87 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv04_dac.c @@ -0,0 +1,528 @@ +/* + * Copyright 2003 NVIDIA, Corporation + * Copyright 2006 Dave Airlie + * Copyright 2007 Maarten Maathuis + * Copyright 2007-2009 Stuart Bennett + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "drmP.h" +#include "drm_crtc_helper.h" + +#include "nouveau_drv.h" +#include "nouveau_encoder.h" +#include "nouveau_connector.h" +#include "nouveau_crtc.h" +#include "nouveau_hw.h" +#include "nvreg.h" + +int nv04_dac_output_offset(struct drm_encoder *encoder) +{ + struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; + int offset = 0; + + if (dcb->or & (8 | OUTPUT_C)) + offset += 0x68; + if (dcb->or & (8 | OUTPUT_B)) + offset += 0x2000; + + return offset; +} + +/* + * arbitrary limit to number of sense oscillations tolerated in one sample + * period (observed to be at least 13 in "nvidia") + */ +#define MAX_HBLANK_OSC 20 + +/* + * arbitrary limit to number of conflicting sample pairs to tolerate at a + * voltage step (observed to be at least 5 in "nvidia") + */ +#define MAX_SAMPLE_PAIRS 10 + +static int sample_load_twice(struct drm_device *dev, bool sense[2]) +{ + int i; + + for (i = 0; i < 2; i++) { + bool sense_a, sense_b, sense_b_prime; + int j = 0; + + /* + * wait for bit 0 clear -- out of hblank -- (say reg value 0x4), + * then wait for transition 0x4->0x5->0x4: enter hblank, leave + * hblank again + * use a 10ms timeout (guards against crtc being inactive, in + * which case blank state would never change) + */ + if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR, + 0x00000001, 0x00000000)) + return -EBUSY; + if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR, + 0x00000001, 0x00000001)) + return -EBUSY; + if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR, + 0x00000001, 0x00000000)) + return -EBUSY; + + udelay(100); + /* when level triggers, sense is _LO_ */ + sense_a = nv_rd08(dev, NV_PRMCIO_INP0) & 0x10; + + /* take another reading until it agrees with sense_a... */ + do { + udelay(100); + sense_b = nv_rd08(dev, NV_PRMCIO_INP0) & 0x10; + if (sense_a != sense_b) { + sense_b_prime = + nv_rd08(dev, NV_PRMCIO_INP0) & 0x10; + if (sense_b == sense_b_prime) { + /* ... unless two consecutive subsequent + * samples agree; sense_a is replaced */ + sense_a = sense_b; + /* force mis-match so we loop */ + sense_b = !sense_a; + } + } + } while ((sense_a != sense_b) && ++j < MAX_HBLANK_OSC); + + if (j == MAX_HBLANK_OSC) + /* with so much oscillation, default to sense:LO */ + sense[i] = false; + else + sense[i] = sense_a; + } + + return 0; +} + +static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder, + struct drm_connector *connector) +{ + struct drm_device *dev = encoder->dev; + uint8_t saved_seq1, saved_pi, saved_rpc1; + uint8_t saved_palette0[3], saved_palette_mask; + uint32_t saved_rtest_ctrl, saved_rgen_ctrl; + int i; + uint8_t blue; + bool sense = true; + + /* + * for this detection to work, there needs to be a mode set up on the + * CRTC. 
this is presumed to be the case + */ + + if (nv_two_heads(dev)) + /* only implemented for head A for now */ + NVSetOwner(dev, 0); + + saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX); + NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20); + + saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL); + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL, + saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF); + + msleep(10); + + saved_pi = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX); + NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, + saved_pi & ~(0x80 | MASK(NV_CIO_CRE_PIXEL_FORMAT))); + saved_rpc1 = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX); + NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1 & ~0xc0); + + nv_wr08(dev, NV_PRMDIO_READ_MODE_ADDRESS, 0x0); + for (i = 0; i < 3; i++) + saved_palette0[i] = nv_rd08(dev, NV_PRMDIO_PALETTE_DATA); + saved_palette_mask = nv_rd08(dev, NV_PRMDIO_PIXEL_MASK); + nv_wr08(dev, NV_PRMDIO_PIXEL_MASK, 0); + + saved_rgen_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL); + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL, + (saved_rgen_ctrl & ~(NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS | + NV_PRAMDAC_GENERAL_CONTROL_TERMINATION_75OHM)) | + NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON); + + blue = 8; /* start of test range */ + + do { + bool sense_pair[2]; + + nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS, 0); + nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, 0); + nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, 0); + /* testing blue won't find monochrome monitors. I don't care */ + nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, blue); + + i = 0; + /* take sample pairs until both samples in the pair agree */ + do { + if (sample_load_twice(dev, sense_pair)) + goto out; + } while ((sense_pair[0] != sense_pair[1]) && + ++i < MAX_SAMPLE_PAIRS); + + if (i == MAX_SAMPLE_PAIRS) + /* too much oscillation defaults to LO */ + sense = false; + else + sense = sense_pair[0]; + + /* + * if sense goes LO before blue ramps to 0x18, monitor is not connected. 
+ * ergo, if blue gets to 0x18, monitor must be connected + */ + } while (++blue < 0x18 && sense); + +out: + nv_wr08(dev, NV_PRMDIO_PIXEL_MASK, saved_palette_mask); + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL, saved_rgen_ctrl); + nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS, 0); + for (i = 0; i < 3; i++) + nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, saved_palette0[i]); + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL, saved_rtest_ctrl); + NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi); + NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1); + NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1); + + if (blue == 0x18) { + NV_TRACE(dev, "Load detected on head A\n"); + return connector_status_connected; + } + + return connector_status_disconnected; +} + +enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder, + struct drm_connector *connector) +{ + struct drm_device *dev = encoder->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; + uint32_t testval, regoffset = nv04_dac_output_offset(encoder); + uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput, + saved_rtest_ctrl, saved_gpio0, saved_gpio1, temp, routput; + int head, present = 0; + +#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20) + if (dcb->type == OUTPUT_TV) { + testval = RGB_TEST_DATA(0xa0, 0xa0, 0xa0); + + if (dev_priv->vbios->tvdactestval) + testval = dev_priv->vbios->tvdactestval; + } else { + testval = RGB_TEST_DATA(0x140, 0x140, 0x140); /* 0x94050140 */ + + if (dev_priv->vbios->dactestval) + testval = dev_priv->vbios->dactestval; + } + + saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset); + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, + saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF); + + saved_powerctrl_2 = nvReadMC(dev, NV_PBUS_POWERCTRL_2); + + nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2 & 0xd7ffffff); + if (regoffset == 0x68) { + saved_powerctrl_4 = nvReadMC(dev, NV_PBUS_POWERCTRL_4); + nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf); + } + + saved_gpio1 = nv17_gpio_get(dev, DCB_GPIO_TVDAC1); + saved_gpio0 = nv17_gpio_get(dev, DCB_GPIO_TVDAC0); + + nv17_gpio_set(dev, DCB_GPIO_TVDAC1, dcb->type == OUTPUT_TV); + nv17_gpio_set(dev, DCB_GPIO_TVDAC0, dcb->type == OUTPUT_TV); + + msleep(4); + + saved_routput = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset); + head = (saved_routput & 0x100) >> 8; +#if 0 + /* if there's a spare crtc, using it will minimise flicker for the case + * where the in-use crtc is in use by an off-chip tmds encoder */ + if (xf86_config->crtc[head]->enabled && !xf86_config->crtc[head ^ 1]->enabled) + head ^= 1; +#endif + /* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */ + routput = (saved_routput & 0xfffffece) | head << 8; + + if (dev_priv->card_type >= NV_40) { + if (dcb->type == OUTPUT_TV) + routput |= 0x1a << 16; + else + routput &= ~(0x1a << 16); + } + + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, routput); + msleep(1); + + temp = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset); + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, temp | 1); + + NVWriteRAMDAC(dev, head, NV_PRAMDAC_TESTPOINT_DATA, + NV_PRAMDAC_TESTPOINT_DATA_NOTBLANK | testval); + temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL, + temp | NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED); + msleep(5); + + temp = NVReadRAMDAC(dev, 0, 
NV_PRAMDAC_TEST_CONTROL + regoffset); + + if (dcb->type == OUTPUT_TV) + present = (nv17_tv_detect(encoder, connector, temp) + == connector_status_connected); + else + present = temp & NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI; + + temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL, + temp & ~NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_TESTPOINT_DATA, 0); + + /* bios does something more complex for restoring, but I think this is good enough */ + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, saved_routput); + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, saved_rtest_ctrl); + if (regoffset == 0x68) + nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4); + nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2); + + nv17_gpio_set(dev, DCB_GPIO_TVDAC1, saved_gpio1); + nv17_gpio_set(dev, DCB_GPIO_TVDAC0, saved_gpio0); + + if (present) { + NV_INFO(dev, "Load detected on output %c\n", '@' + ffs(dcb->or)); + return connector_status_connected; + } + + return connector_status_disconnected; +} + + +static bool nv04_dac_mode_fixup(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static void nv04_dac_prepare(struct drm_encoder *encoder) +{ + struct drm_encoder_helper_funcs *helper = encoder->helper_private; + struct drm_device *dev = encoder->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + int head = nouveau_crtc(encoder->crtc)->index; + struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg; + + helper->dpms(encoder, DRM_MODE_DPMS_OFF); + + nv04_dfp_disable(dev, head); + + /* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f) + * at LCD__INDEX which we don't alter + */ + if (!(crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] & 0x44)) + crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] = 0; +} + + +static void nv04_dac_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + int head = nouveau_crtc(encoder->crtc)->index; + + NV_TRACE(dev, "%s called for encoder %d\n", __func__, + nv_encoder->dcb->index); + + if (nv_gf4_disp_arch(dev)) { + struct drm_encoder *rebind; + uint32_t dac_offset = nv04_dac_output_offset(encoder); + uint32_t otherdac; + + /* bit 16-19 are bits that are set on some G70 cards, + * but don't seem to have much effect */ + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + dac_offset, + head << 8 | NV_PRAMDAC_DACCLK_SEL_DACCLK); + /* force any other vga encoders to bind to the other crtc */ + list_for_each_entry(rebind, &dev->mode_config.encoder_list, head) { + if (rebind == encoder + || nouveau_encoder(rebind)->dcb->type != OUTPUT_ANALOG) + continue; + + dac_offset = nv04_dac_output_offset(rebind); + otherdac = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + dac_offset); + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + dac_offset, + (otherdac & ~0x0100) | (head ^ 1) << 8); + } + } + + /* This could use refinement for flatpanels, but it should work this way */ + if (dev_priv->chipset < 0x44) + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000); + else + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000); +} + +static void nv04_dac_commit(struct drm_encoder *encoder) +{ + struct 
nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); + struct drm_encoder_helper_funcs *helper = encoder->helper_private; + + helper->dpms(encoder, DRM_MODE_DPMS_ON); + + NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n", + drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), + nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); +} + +void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable) +{ + struct drm_device *dev = encoder->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; + + if (nv_gf4_disp_arch(dev)) { + uint32_t *dac_users = &dev_priv->dac_users[ffs(dcb->or) - 1]; + int dacclk_off = NV_PRAMDAC_DACCLK + nv04_dac_output_offset(encoder); + uint32_t dacclk = NVReadRAMDAC(dev, 0, dacclk_off); + + if (enable) { + *dac_users |= 1 << dcb->index; + NVWriteRAMDAC(dev, 0, dacclk_off, dacclk | NV_PRAMDAC_DACCLK_SEL_DACCLK); + + } else { + *dac_users &= ~(1 << dcb->index); + if (!*dac_users) + NVWriteRAMDAC(dev, 0, dacclk_off, + dacclk & ~NV_PRAMDAC_DACCLK_SEL_DACCLK); + } + } +} + +static void nv04_dac_dpms(struct drm_encoder *encoder, int mode) +{ + struct drm_device *dev = encoder->dev; + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + + if (nv_encoder->last_dpms == mode) + return; + nv_encoder->last_dpms = mode; + + NV_INFO(dev, "Setting dpms mode %d on vga encoder (output %d)\n", + mode, nv_encoder->dcb->index); + + nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON); +} + +static void nv04_dac_save(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + + if (nv_gf4_disp_arch(dev)) + nv_encoder->restore.output = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + + nv04_dac_output_offset(encoder)); +} + +static void nv04_dac_restore(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + + if (nv_gf4_disp_arch(dev)) + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + nv04_dac_output_offset(encoder), + nv_encoder->restore.output); + + nv_encoder->last_dpms = NV_DPMS_CLEARED; +} + +static void nv04_dac_destroy(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + + NV_DEBUG(encoder->dev, "\n"); + + drm_encoder_cleanup(encoder); + kfree(nv_encoder); +} + +static const struct drm_encoder_helper_funcs nv04_dac_helper_funcs = { + .dpms = nv04_dac_dpms, + .save = nv04_dac_save, + .restore = nv04_dac_restore, + .mode_fixup = nv04_dac_mode_fixup, + .prepare = nv04_dac_prepare, + .commit = nv04_dac_commit, + .mode_set = nv04_dac_mode_set, + .detect = nv04_dac_detect +}; + +static const struct drm_encoder_helper_funcs nv17_dac_helper_funcs = { + .dpms = nv04_dac_dpms, + .save = nv04_dac_save, + .restore = nv04_dac_restore, + .mode_fixup = nv04_dac_mode_fixup, + .prepare = nv04_dac_prepare, + .commit = nv04_dac_commit, + .mode_set = nv04_dac_mode_set, + .detect = nv17_dac_detect +}; + +static const struct drm_encoder_funcs nv04_dac_funcs = { + .destroy = nv04_dac_destroy, +}; + +int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry) +{ + const struct drm_encoder_helper_funcs *helper; + struct drm_encoder *encoder; + struct nouveau_encoder *nv_encoder = NULL; + + nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); + if (!nv_encoder) + return -ENOMEM; + 
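nv04_dac_update_dacclk() above keeps DACCLK selected for as long as any DCB entry sharing the output resource is active: each encoder sets or clears its own bit in the per-OR dac_users mask, and the clock-select bit is dropped only once the mask is empty. A small sketch of that bitmask-as-refcount idiom (names are hypothetical, not the driver's API):

	#include <stdbool.h>
	#include <stdint.h>

	/* Illustrative only: track users of a shared enable bit by index,
	 * the way dac_users[] tracks DCB entries per output resource. */
	static uint32_t users;

	static bool shared_clock_update(int index, bool enable)
	{
		if (enable)
			users |= 1u << index;
		else
			users &= ~(1u << index);

		/* keep the clock running while anyone still holds a bit */
		return users != 0;
	}
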
+ encoder = to_drm_encoder(nv_encoder); + + nv_encoder->dcb = entry; + nv_encoder->or = ffs(entry->or) - 1; + + if (nv_gf4_disp_arch(dev)) + helper = &nv17_dac_helper_funcs; + else + helper = &nv04_dac_helper_funcs; + + drm_encoder_init(dev, encoder, &nv04_dac_funcs, DRM_MODE_ENCODER_DAC); + drm_encoder_helper_add(encoder, helper); + + encoder->possible_crtcs = entry->heads; + encoder->possible_clones = 0; + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c new file mode 100644 index 000000000000..e5b33339d595 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv04_dfp.c @@ -0,0 +1,621 @@ +/* + * Copyright 2003 NVIDIA, Corporation + * Copyright 2006 Dave Airlie + * Copyright 2007 Maarten Maathuis + * Copyright 2007-2009 Stuart Bennett + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "drmP.h" +#include "drm_crtc_helper.h" + +#include "nouveau_drv.h" +#include "nouveau_encoder.h" +#include "nouveau_connector.h" +#include "nouveau_crtc.h" +#include "nouveau_hw.h" +#include "nvreg.h" + +#define FP_TG_CONTROL_ON (NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS | \ + NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS | \ + NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS) +#define FP_TG_CONTROL_OFF (NV_PRAMDAC_FP_TG_CONTROL_DISPEN_DISABLE | \ + NV_PRAMDAC_FP_TG_CONTROL_HSYNC_DISABLE | \ + NV_PRAMDAC_FP_TG_CONTROL_VSYNC_DISABLE) + +static inline bool is_fpc_off(uint32_t fpc) +{ + return ((fpc & (FP_TG_CONTROL_ON | FP_TG_CONTROL_OFF)) == + FP_TG_CONTROL_OFF); +} + +int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_entry *dcbent) +{ + /* special case of nv_read_tmds to find crtc associated with an output. + * this does not give a correct answer for off-chip dvi, but there's no + * use for such an answer anyway + */ + int ramdac = (dcbent->or & OUTPUT_C) >> 2; + + NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL, + NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE | 0x4); + return ((NVReadRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA) & 0x8) >> 3) ^ ramdac; +} + +void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent, + int head, bool dl) +{ + /* The BIOS scripts don't do this for us, sadly + * Luckily we do know the values ;-) + * + * head < 0 indicates we wish to force a setting with the overrideval + * (for VT restore etc.) 
+ */ + + int ramdac = (dcbent->or & OUTPUT_C) >> 2; + uint8_t tmds04 = 0x80; + + if (head != ramdac) + tmds04 = 0x88; + + if (dcbent->type == OUTPUT_LVDS) + tmds04 |= 0x01; + + nv_write_tmds(dev, dcbent->or, 0, 0x04, tmds04); + + if (dl) /* dual link */ + nv_write_tmds(dev, dcbent->or, 1, 0x04, tmds04 ^ 0x08); +} + +void nv04_dfp_disable(struct drm_device *dev, int head) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg; + + if (NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL) & + FP_TG_CONTROL_ON) { + /* digital remnants must be cleaned before new crtc + * values programmed. delay is time for the vga stuff + * to realise it's in control again + */ + NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, + FP_TG_CONTROL_OFF); + msleep(50); + } + /* don't inadvertently turn it on when state written later */ + crtcstate[head].fp_control = FP_TG_CONTROL_OFF; +} + +void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode) +{ + struct drm_device *dev = encoder->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc; + struct nouveau_crtc *nv_crtc; + uint32_t *fpc; + + if (mode == DRM_MODE_DPMS_ON) { + nv_crtc = nouveau_crtc(encoder->crtc); + fpc = &dev_priv->mode_reg.crtc_reg[nv_crtc->index].fp_control; + + if (is_fpc_off(*fpc)) { + /* using saved value is ok, as (is_digital && dpms_on && + * fp_control==OFF) is (at present) *only* true when + * fpc's most recent change was by below "off" code + */ + *fpc = nv_crtc->dpms_saved_fp_control; + } + + nv_crtc->fp_users |= 1 << nouveau_encoder(encoder)->dcb->index; + NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_FP_TG_CONTROL, *fpc); + } else { + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + nv_crtc = nouveau_crtc(crtc); + fpc = &dev_priv->mode_reg.crtc_reg[nv_crtc->index].fp_control; + + nv_crtc->fp_users &= ~(1 << nouveau_encoder(encoder)->dcb->index); + if (!is_fpc_off(*fpc) && !nv_crtc->fp_users) { + nv_crtc->dpms_saved_fp_control = *fpc; + /* cut the FP output */ + *fpc &= ~FP_TG_CONTROL_ON; + *fpc |= FP_TG_CONTROL_OFF; + NVWriteRAMDAC(dev, nv_crtc->index, + NV_PRAMDAC_FP_TG_CONTROL, *fpc); + } + } + } +} + +static bool nv04_dfp_mode_fixup(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder); + + /* For internal panels and gpu scaling on DVI we need the native mode */ + if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) { + if (!nv_connector->native_mode) + return false; + nv_encoder->mode = *nv_connector->native_mode; + adjusted_mode->clock = nv_connector->native_mode->clock; + } else { + nv_encoder->mode = *adjusted_mode; + } + + return true; +} + +static void nv04_dfp_prepare_sel_clk(struct drm_device *dev, + struct nouveau_encoder *nv_encoder, int head) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nv04_mode_state *state = &dev_priv->mode_reg; + uint32_t bits1618 = nv_encoder->dcb->or & OUTPUT_A ? 
0x10000 : 0x40000; + + if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP) + return; + + /* SEL_CLK is only used on the primary ramdac + * It toggles spread spectrum PLL output and sets the bindings of PLLs + * to heads on digital outputs + */ + if (head) + state->sel_clk |= bits1618; + else + state->sel_clk &= ~bits1618; + + /* nv30: + * bit 0 NVClk spread spectrum on/off + * bit 2 MemClk spread spectrum on/off + * bit 4 PixClk1 spread spectrum on/off toggle + * bit 6 PixClk2 spread spectrum on/off toggle + * + * nv40 (observations from bios behaviour and mmio traces): + * bits 4&6 as for nv30 + * bits 5&7 head dependent as for bits 4&6, but do not appear with 4&6; + * maybe a different spread mode + * bits 8&10 seen on dual-link dvi outputs, purpose unknown (set by POST scripts) + * The logic behind turning spread spectrum on/off in the first place, + * and which bit-pair to use, is unclear on nv40 (for earlier cards, the fp table + * entry has the necessary info) + */ + if (nv_encoder->dcb->type == OUTPUT_LVDS && dev_priv->saved_reg.sel_clk & 0xf0) { + int shift = (dev_priv->saved_reg.sel_clk & 0x50) ? 0 : 1; + + state->sel_clk &= ~0xf0; + state->sel_clk |= (head ? 0x40 : 0x10) << shift; + } +} + +static void nv04_dfp_prepare(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_encoder_helper_funcs *helper = encoder->helper_private; + struct drm_device *dev = encoder->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + int head = nouveau_crtc(encoder->crtc)->index; + struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg; + uint8_t *cr_lcd = &crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX]; + uint8_t *cr_lcd_oth = &crtcstate[head ^ 1].CRTC[NV_CIO_CRE_LCD__INDEX]; + + helper->dpms(encoder, DRM_MODE_DPMS_OFF); + + nv04_dfp_prepare_sel_clk(dev, nv_encoder, head); + + /* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f) + * at LCD__INDEX which we don't alter + */ + if (!(*cr_lcd & 0x44)) { + *cr_lcd = 0x3; + + if (nv_two_heads(dev)) { + if (nv_encoder->dcb->location == DCB_LOC_ON_CHIP) + *cr_lcd |= head ? 0x0 : 0x8; + else { + *cr_lcd |= (nv_encoder->dcb->or << 4) & 0x30; + if (nv_encoder->dcb->type == OUTPUT_LVDS) + *cr_lcd |= 0x30; + if ((*cr_lcd & 0x30) == (*cr_lcd_oth & 0x30)) { + /* avoid being connected to both crtcs */ + *cr_lcd_oth &= ~0x30; + NVWriteVgaCrtc(dev, head ^ 1, + NV_CIO_CRE_LCD__INDEX, + *cr_lcd_oth); + } + } + } + } +} + + +static void nv04_dfp_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_device *dev = encoder->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); + struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index]; + struct nv04_crtc_reg *savep = &dev_priv->saved_reg.crtc_reg[nv_crtc->index]; + struct nouveau_connector *nv_connector = nouveau_crtc_connector_get(nv_crtc); + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_display_mode *output_mode = &nv_encoder->mode; + uint32_t mode_ratio, panel_ratio; + + NV_DEBUG(dev, "Output mode on CRTC %d:\n", nv_crtc->index); + drm_mode_debug_printmodeline(output_mode); + + /* Initialize the FP registers in this CRTC. 
*/ + regp->fp_horiz_regs[FP_DISPLAY_END] = output_mode->hdisplay - 1; + regp->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1; + if (!nv_gf4_disp_arch(dev) || + (output_mode->hsync_start - output_mode->hdisplay) >= + dev_priv->vbios->digital_min_front_porch) + regp->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay; + else + regp->fp_horiz_regs[FP_CRTC] = output_mode->hsync_start - dev_priv->vbios->digital_min_front_porch - 1; + regp->fp_horiz_regs[FP_SYNC_START] = output_mode->hsync_start - 1; + regp->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1; + regp->fp_horiz_regs[FP_VALID_START] = output_mode->hskew; + regp->fp_horiz_regs[FP_VALID_END] = output_mode->hdisplay - 1; + + regp->fp_vert_regs[FP_DISPLAY_END] = output_mode->vdisplay - 1; + regp->fp_vert_regs[FP_TOTAL] = output_mode->vtotal - 1; + regp->fp_vert_regs[FP_CRTC] = output_mode->vtotal - 5 - 1; + regp->fp_vert_regs[FP_SYNC_START] = output_mode->vsync_start - 1; + regp->fp_vert_regs[FP_SYNC_END] = output_mode->vsync_end - 1; + regp->fp_vert_regs[FP_VALID_START] = 0; + regp->fp_vert_regs[FP_VALID_END] = output_mode->vdisplay - 1; + + /* bit26: a bit seen on some g7x, no as yet discernable purpose */ + regp->fp_control = NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS | + (savep->fp_control & (1 << 26 | NV_PRAMDAC_FP_TG_CONTROL_READ_PROG)); + /* Deal with vsync/hsync polarity */ + /* LVDS screens do set this, but modes with +ve syncs are very rare */ + if (output_mode->flags & DRM_MODE_FLAG_PVSYNC) + regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS; + if (output_mode->flags & DRM_MODE_FLAG_PHSYNC) + regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS; + /* panel scaling first, as native would get set otherwise */ + if (nv_connector->scaling_mode == DRM_MODE_SCALE_NONE || + nv_connector->scaling_mode == DRM_MODE_SCALE_CENTER) /* panel handles it */ + regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_CENTER; + else if (adjusted_mode->hdisplay == output_mode->hdisplay && + adjusted_mode->vdisplay == output_mode->vdisplay) /* native mode */ + regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE; + else /* gpu needs to scale */ + regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_SCALE; + if (nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) & NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT) + regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12; + if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP && + output_mode->clock > 165000) + regp->fp_control |= (2 << 24); + if (nv_encoder->dcb->type == OUTPUT_LVDS) { + bool duallink, dummy; + + nouveau_bios_parse_lvds_table(dev, nv_connector->native_mode-> + clock, &duallink, &dummy); + if (duallink) + regp->fp_control |= (8 << 28); + } else + if (output_mode->clock > 165000) + regp->fp_control |= (8 << 28); + + regp->fp_debug_0 = NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND | + NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND | + NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR | + NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR | + NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED | + NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE | + NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE; + + /* We want automatic scaling */ + regp->fp_debug_1 = 0; + /* This can override HTOTAL and VTOTAL */ + regp->fp_debug_2 = 0; + + /* Use 20.12 fixed point format to avoid floats */ + mode_ratio = (1 << 12) * adjusted_mode->hdisplay / adjusted_mode->vdisplay; + panel_ratio = (1 << 12) * output_mode->hdisplay / output_mode->vdisplay; + /* if ratios are equal, SCALE_ASPECT will automatically (and correctly) + * get treated the same as SCALE_FULLSCREEN */ + if (nv_connector->scaling_mode == 
DRM_MODE_SCALE_ASPECT && + mode_ratio != panel_ratio) { + uint32_t diff, scale; + bool divide_by_2 = nv_gf4_disp_arch(dev); + + if (mode_ratio < panel_ratio) { + /* vertical needs to expand to glass size (automatic) + * horizontal needs to be scaled at vertical scale factor + * to maintain aspect */ + + scale = (1 << 12) * adjusted_mode->vdisplay / output_mode->vdisplay; + regp->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE | + XLATE(scale, divide_by_2, NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE); + + /* restrict area of screen used, horizontally */ + diff = output_mode->hdisplay - + output_mode->vdisplay * mode_ratio / (1 << 12); + regp->fp_horiz_regs[FP_VALID_START] += diff / 2; + regp->fp_horiz_regs[FP_VALID_END] -= diff / 2; + } + + if (mode_ratio > panel_ratio) { + /* horizontal needs to expand to glass size (automatic) + * vertical needs to be scaled at horizontal scale factor + * to maintain aspect */ + + scale = (1 << 12) * adjusted_mode->hdisplay / output_mode->hdisplay; + regp->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE | + XLATE(scale, divide_by_2, NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE); + + /* restrict area of screen used, vertically */ + diff = output_mode->vdisplay - + (1 << 12) * output_mode->hdisplay / mode_ratio; + regp->fp_vert_regs[FP_VALID_START] += diff / 2; + regp->fp_vert_regs[FP_VALID_END] -= diff / 2; + } + } + + /* Output property. */ + if (nv_connector->use_dithering) { + if (dev_priv->chipset == 0x11) + regp->dither = savep->dither | 0x00010000; + else { + int i; + regp->dither = savep->dither | 0x00000001; + for (i = 0; i < 3; i++) { + regp->dither_regs[i] = 0xe4e4e4e4; + regp->dither_regs[i + 3] = 0x44444444; + } + } + } else { + if (dev_priv->chipset != 0x11) { + /* reset them */ + int i; + for (i = 0; i < 3; i++) { + regp->dither_regs[i] = savep->dither_regs[i]; + regp->dither_regs[i + 3] = savep->dither_regs[i + 3]; + } + } + regp->dither = savep->dither; + } + + regp->fp_margin_color = 0; +} + +static void nv04_dfp_commit(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_encoder_helper_funcs *helper = encoder->helper_private; + struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct dcb_entry *dcbe = nv_encoder->dcb; + int head = nouveau_crtc(encoder->crtc)->index; + + NV_TRACE(dev, "%s called for encoder %d\n", __func__, nv_encoder->dcb->index); + + if (dcbe->type == OUTPUT_TMDS) + run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock); + else if (dcbe->type == OUTPUT_LVDS) + call_lvds_script(dev, dcbe, head, LVDS_RESET, nv_encoder->mode.clock); + + /* update fp_control state for any changes made by scripts, + * so correct value is written at DPMS on */ + dev_priv->mode_reg.crtc_reg[head].fp_control = + NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL); + + /* This could use refinement for flatpanels, but it should work this way */ + if (dev_priv->chipset < 0x44) + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000); + else + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000); + + helper->dpms(encoder, DRM_MODE_DPMS_ON); + + NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n", + drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), + nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); +} + +static inline bool is_powersaving_dpms(int mode) +{ + return (mode 
!= DRM_MODE_DPMS_ON); +} + +static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode) +{ + struct drm_device *dev = encoder->dev; + struct drm_crtc *crtc = encoder->crtc; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + bool was_powersaving = is_powersaving_dpms(nv_encoder->last_dpms); + + if (nv_encoder->last_dpms == mode) + return; + nv_encoder->last_dpms = mode; + + NV_INFO(dev, "Setting dpms mode %d on lvds encoder (output %d)\n", + mode, nv_encoder->dcb->index); + + if (was_powersaving && is_powersaving_dpms(mode)) + return; + + if (nv_encoder->dcb->lvdsconf.use_power_scripts) { + struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder); + + /* when removing an output, crtc may not be set, but PANEL_OFF + * must still be run + */ + int head = crtc ? nouveau_crtc(crtc)->index : + nv04_dfp_get_bound_head(dev, nv_encoder->dcb); + + if (mode == DRM_MODE_DPMS_ON) { + if (!nv_connector->native_mode) { + NV_ERROR(dev, "Not turning on LVDS without native mode\n"); + return; + } + call_lvds_script(dev, nv_encoder->dcb, head, + LVDS_PANEL_ON, nv_connector->native_mode->clock); + } else + /* pxclk of 0 is fine for PANEL_OFF, and for a + * disconnected LVDS encoder there is no native_mode + */ + call_lvds_script(dev, nv_encoder->dcb, head, + LVDS_PANEL_OFF, 0); + } + + nv04_dfp_update_fp_control(encoder, mode); + + if (mode == DRM_MODE_DPMS_ON) + nv04_dfp_prepare_sel_clk(dev, nv_encoder, nouveau_crtc(crtc)->index); + else { + dev_priv->mode_reg.sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK); + dev_priv->mode_reg.sel_clk &= ~0xf0; + } + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, dev_priv->mode_reg.sel_clk); +} + +static void nv04_tmds_dpms(struct drm_encoder *encoder, int mode) +{ + struct drm_device *dev = encoder->dev; + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + + if (nv_encoder->last_dpms == mode) + return; + nv_encoder->last_dpms = mode; + + NV_INFO(dev, "Setting dpms mode %d on tmds encoder (output %d)\n", + mode, nv_encoder->dcb->index); + + nv04_dfp_update_fp_control(encoder, mode); +} + +static void nv04_dfp_save(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + + if (nv_two_heads(dev)) + nv_encoder->restore.head = + nv04_dfp_get_bound_head(dev, nv_encoder->dcb); +} + +static void nv04_dfp_restore(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + int head = nv_encoder->restore.head; + + if (nv_encoder->dcb->type == OUTPUT_LVDS) { + struct drm_display_mode *native_mode = nouveau_encoder_connector_get(nv_encoder)->native_mode; + if (native_mode) + call_lvds_script(dev, nv_encoder->dcb, head, LVDS_PANEL_ON, + native_mode->clock); + else + NV_ERROR(dev, "Not restoring LVDS without native mode\n"); + + } else if (nv_encoder->dcb->type == OUTPUT_TMDS) { + int clock = nouveau_hw_pllvals_to_clk + (&dev_priv->saved_reg.crtc_reg[head].pllvals); + + run_tmds_table(dev, nv_encoder->dcb, head, clock); + } + + nv_encoder->last_dpms = NV_DPMS_CLEARED; +} + +static void nv04_dfp_destroy(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + + NV_DEBUG(encoder->dev, "\n"); + + drm_encoder_cleanup(encoder); + kfree(nv_encoder); +} + +static const struct drm_encoder_helper_funcs 
nv04_lvds_helper_funcs = { + .dpms = nv04_lvds_dpms, + .save = nv04_dfp_save, + .restore = nv04_dfp_restore, + .mode_fixup = nv04_dfp_mode_fixup, + .prepare = nv04_dfp_prepare, + .commit = nv04_dfp_commit, + .mode_set = nv04_dfp_mode_set, + .detect = NULL, +}; + +static const struct drm_encoder_helper_funcs nv04_tmds_helper_funcs = { + .dpms = nv04_tmds_dpms, + .save = nv04_dfp_save, + .restore = nv04_dfp_restore, + .mode_fixup = nv04_dfp_mode_fixup, + .prepare = nv04_dfp_prepare, + .commit = nv04_dfp_commit, + .mode_set = nv04_dfp_mode_set, + .detect = NULL, +}; + +static const struct drm_encoder_funcs nv04_dfp_funcs = { + .destroy = nv04_dfp_destroy, +}; + +int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry) +{ + const struct drm_encoder_helper_funcs *helper; + struct drm_encoder *encoder; + struct nouveau_encoder *nv_encoder = NULL; + int type; + + switch (entry->type) { + case OUTPUT_TMDS: + type = DRM_MODE_ENCODER_TMDS; + helper = &nv04_tmds_helper_funcs; + break; + case OUTPUT_LVDS: + type = DRM_MODE_ENCODER_LVDS; + helper = &nv04_lvds_helper_funcs; + break; + default: + return -EINVAL; + } + + nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); + if (!nv_encoder) + return -ENOMEM; + + encoder = to_drm_encoder(nv_encoder); + + nv_encoder->dcb = entry; + nv_encoder->or = ffs(entry->or) - 1; + + drm_encoder_init(dev, encoder, &nv04_dfp_funcs, type); + drm_encoder_helper_add(encoder, helper); + + encoder->possible_crtcs = entry->heads; + encoder->possible_clones = 0; + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c new file mode 100644 index 000000000000..b47c757ff48b --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv04_display.c @@ -0,0 +1,288 @@ +/* + * Copyright 2009 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Author: Ben Skeggs + */ + +#include "drmP.h" +#include "drm.h" +#include "drm_crtc_helper.h" + +#include "nouveau_drv.h" +#include "nouveau_fb.h" +#include "nouveau_hw.h" +#include "nouveau_encoder.h" +#include "nouveau_connector.h" + +#define MULTIPLE_ENCODERS(e) (e & (e - 1)) + +static void +nv04_display_store_initial_head_owner(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (dev_priv->chipset != 0x11) { + dev_priv->crtc_owner = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44); + goto ownerknown; + } + + /* reading CR44 is broken on nv11, so we attempt to infer it */ + if (nvReadMC(dev, NV_PBUS_DEBUG_1) & (1 << 28)) /* heads tied, restore both */ + dev_priv->crtc_owner = 0x4; + else { + uint8_t slaved_on_A, slaved_on_B; + bool tvA = false; + bool tvB = false; + + NVLockVgaCrtcs(dev, false); + + slaved_on_B = NVReadVgaCrtc(dev, 1, NV_CIO_CRE_PIXEL_INDEX) & + 0x80; + if (slaved_on_B) + tvB = !(NVReadVgaCrtc(dev, 1, NV_CIO_CRE_LCD__INDEX) & + MASK(NV_CIO_CRE_LCD_LCD_SELECT)); + + slaved_on_A = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX) & + 0x80; + if (slaved_on_A) + tvA = !(NVReadVgaCrtc(dev, 0, NV_CIO_CRE_LCD__INDEX) & + MASK(NV_CIO_CRE_LCD_LCD_SELECT)); + + NVLockVgaCrtcs(dev, true); + + if (slaved_on_A && !tvA) + dev_priv->crtc_owner = 0x0; + else if (slaved_on_B && !tvB) + dev_priv->crtc_owner = 0x3; + else if (slaved_on_A) + dev_priv->crtc_owner = 0x0; + else if (slaved_on_B) + dev_priv->crtc_owner = 0x3; + else + dev_priv->crtc_owner = 0x0; + } + +ownerknown: + NV_INFO(dev, "Initial CRTC_OWNER is %d\n", dev_priv->crtc_owner); + + /* we need to ensure the heads are not tied henceforth, or reading any + * 8 bit reg on head B will fail + * setting a single arbitrary head solves that */ + NVSetOwner(dev, 0); +} + +int +nv04_display_create(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct parsed_dcb *dcb = dev_priv->vbios->dcb; + struct drm_encoder *encoder; + struct drm_crtc *crtc; + uint16_t connector[16] = { 0 }; + int i, ret; + + NV_DEBUG(dev, "\n"); + + if (nv_two_heads(dev)) + nv04_display_store_initial_head_owner(dev); + + drm_mode_config_init(dev); + drm_mode_create_scaling_mode_property(dev); + drm_mode_create_dithering_property(dev); + + dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs; + + dev->mode_config.min_width = 0; + dev->mode_config.min_height = 0; + switch (dev_priv->card_type) { + case NV_04: + dev->mode_config.max_width = 2048; + dev->mode_config.max_height = 2048; + break; + default: + dev->mode_config.max_width = 4096; + dev->mode_config.max_height = 4096; + break; + } + + dev->mode_config.fb_base = dev_priv->fb_phys; + + nv04_crtc_create(dev, 0); + if (nv_two_heads(dev)) + nv04_crtc_create(dev, 1); + + for (i = 0; i < dcb->entries; i++) { + struct dcb_entry *dcbent = &dcb->entry[i]; + + switch (dcbent->type) { + case OUTPUT_ANALOG: + ret = nv04_dac_create(dev, dcbent); + break; + case OUTPUT_LVDS: + case OUTPUT_TMDS: + ret = nv04_dfp_create(dev, dcbent); + break; + case OUTPUT_TV: + if (dcbent->location == DCB_LOC_ON_CHIP) + ret = nv17_tv_create(dev, dcbent); + else + ret = nv04_tv_create(dev, dcbent); + break; + default: + NV_WARN(dev, "DCB type %d not known\n", dcbent->type); + continue; + } + + if (ret) + continue; + + connector[dcbent->connector] |= (1 << dcbent->type); + } + + for (i = 0; i < dcb->entries; i++) { + struct dcb_entry *dcbent = &dcb->entry[i]; + uint16_t encoders; + int type; + + encoders = connector[dcbent->connector]; + if (!(encoders & (1 << 
dcbent->type))) + continue; + connector[dcbent->connector] = 0; + + switch (dcbent->type) { + case OUTPUT_ANALOG: + if (!MULTIPLE_ENCODERS(encoders)) + type = DRM_MODE_CONNECTOR_VGA; + else + type = DRM_MODE_CONNECTOR_DVII; + break; + case OUTPUT_TMDS: + if (!MULTIPLE_ENCODERS(encoders)) + type = DRM_MODE_CONNECTOR_DVID; + else + type = DRM_MODE_CONNECTOR_DVII; + break; + case OUTPUT_LVDS: + type = DRM_MODE_CONNECTOR_LVDS; +#if 0 + /* don't create i2c adapter when lvds ddc not allowed */ + if (dcbent->lvdsconf.use_straps_for_mode || + dev_priv->vbios->fp_no_ddc) + i2c_index = 0xf; +#endif + break; + case OUTPUT_TV: + type = DRM_MODE_CONNECTOR_TV; + break; + default: + type = DRM_MODE_CONNECTOR_Unknown; + continue; + } + + nouveau_connector_create(dev, dcbent->connector, type); + } + + /* Save previous state */ + NVLockVgaCrtcs(dev, false); + + nouveau_hw_save_vga_fonts(dev, 1); + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) + crtc->funcs->save(crtc); + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + struct drm_encoder_helper_funcs *func = encoder->helper_private; + + func->save(encoder); + } + + return 0; +} + +void +nv04_display_destroy(struct drm_device *dev) +{ + struct drm_encoder *encoder; + struct drm_crtc *crtc; + + NV_DEBUG(dev, "\n"); + + /* Turn every CRTC off. */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct drm_mode_set modeset = { + .crtc = crtc, + }; + + crtc->funcs->set_config(&modeset); + } + + /* Restore state */ + NVLockVgaCrtcs(dev, false); + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + struct drm_encoder_helper_funcs *func = encoder->helper_private; + + func->restore(encoder); + } + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) + crtc->funcs->restore(crtc); + + nouveau_hw_save_vga_fonts(dev, 0); + + drm_mode_config_cleanup(dev); +} + +void +nv04_display_restore(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_encoder *encoder; + struct drm_crtc *crtc; + + NVLockVgaCrtcs(dev, false); + + /* meh.. modeset apparently doesn't setup all the regs and depends + * on pre-existing state, for now load the state of the card *before* + * nouveau was loaded, and then do a modeset. + * + * best thing to do probably is to make save/restore routines not + * save/restore "pre-load" state, but more general so we can save + * on suspend too. + */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + struct drm_encoder_helper_funcs *func = encoder->helper_private; + + func->restore(encoder); + } + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) + crtc->funcs->restore(crtc); + + if (nv_two_heads(dev)) { + NV_INFO(dev, "Restoring CRTC_OWNER to %d.\n", + dev_priv->crtc_owner); + NVSetOwner(dev, dev_priv->crtc_owner); + } + + NVLockVgaCrtcs(dev, true); +} + diff --git a/drivers/gpu/drm/nouveau/nv04_fb.c b/drivers/gpu/drm/nouveau/nv04_fb.c new file mode 100644 index 000000000000..638cf601c427 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv04_fb.c @@ -0,0 +1,21 @@ +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" +#include "nouveau_drm.h" + +int +nv04_fb_init(struct drm_device *dev) +{ + /* This is what the DDX did for NV_ARCH_04, but a mmio-trace shows + * nvidia reading PFB_CFG_0, then writing back its original value. 
+ * (which was 0x701114 in this case) + */ + + nv_wr32(dev, NV04_PFB_CFG0, 0x1114); + return 0; +} + +void +nv04_fb_takedown(struct drm_device *dev) +{ +} diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c new file mode 100644 index 000000000000..09a31071ee58 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c @@ -0,0 +1,316 @@ +/* + * Copyright 2009 Ben Skeggs + * Copyright 2008 Stuart Bennett + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "drmP.h" +#include "nouveau_drv.h" +#include "nouveau_dma.h" +#include "nouveau_fbcon.h" + +static void +nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) +{ + struct nouveau_fbcon_par *par = info->par; + struct drm_device *dev = par->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = dev_priv->channel; + + if (info->state != FBINFO_STATE_RUNNING) + return; + + if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) { + NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); + info->flags |= FBINFO_HWACCEL_DISABLED; + } + + if (info->flags & FBINFO_HWACCEL_DISABLED) { + cfb_copyarea(info, region); + return; + } + + BEGIN_RING(chan, NvSubImageBlit, 0x0300, 3); + OUT_RING(chan, (region->sy << 16) | region->sx); + OUT_RING(chan, (region->dy << 16) | region->dx); + OUT_RING(chan, (region->height << 16) | region->width); + FIRE_RING(chan); +} + +static void +nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) +{ + struct nouveau_fbcon_par *par = info->par; + struct drm_device *dev = par->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = dev_priv->channel; + uint32_t color = ((uint32_t *) info->pseudo_palette)[rect->color]; + + if (info->state != FBINFO_STATE_RUNNING) + return; + + if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) { + NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); + info->flags |= FBINFO_HWACCEL_DISABLED; + } + + if (info->flags & FBINFO_HWACCEL_DISABLED) { + cfb_fillrect(info, rect); + return; + } + + BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1); + OUT_RING(chan, (rect->rop != ROP_COPY) ? 
1 : 3); + BEGIN_RING(chan, NvSubGdiRect, 0x03fc, 1); + OUT_RING(chan, color); + BEGIN_RING(chan, NvSubGdiRect, 0x0400, 2); + OUT_RING(chan, (rect->dx << 16) | rect->dy); + OUT_RING(chan, (rect->width << 16) | rect->height); + FIRE_RING(chan); +} + +static void +nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) +{ + struct nouveau_fbcon_par *par = info->par; + struct drm_device *dev = par->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = dev_priv->channel; + uint32_t fg; + uint32_t bg; + uint32_t dsize; + uint32_t width; + uint32_t *data = (uint32_t *)image->data; + + if (info->state != FBINFO_STATE_RUNNING) + return; + + if (image->depth != 1) { + cfb_imageblit(info, image); + return; + } + + if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) { + NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); + info->flags |= FBINFO_HWACCEL_DISABLED; + } + + if (info->flags & FBINFO_HWACCEL_DISABLED) { + cfb_imageblit(info, image); + return; + } + + width = (image->width + 31) & ~31; + dsize = (width * image->height) >> 5; + + if (info->fix.visual == FB_VISUAL_TRUECOLOR || + info->fix.visual == FB_VISUAL_DIRECTCOLOR) { + fg = ((uint32_t *) info->pseudo_palette)[image->fg_color]; + bg = ((uint32_t *) info->pseudo_palette)[image->bg_color]; + } else { + fg = image->fg_color; + bg = image->bg_color; + } + + BEGIN_RING(chan, NvSubGdiRect, 0x0be4, 7); + OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); + OUT_RING(chan, ((image->dy + image->height) << 16) | + ((image->dx + image->width) & 0xffff)); + OUT_RING(chan, bg); + OUT_RING(chan, fg); + OUT_RING(chan, (image->height << 16) | image->width); + OUT_RING(chan, (image->height << 16) | width); + OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); + + while (dsize) { + int iter_len = dsize > 128 ? 128 : dsize; + + if (RING_SPACE(chan, iter_len + 1)) { + NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); + info->flags |= FBINFO_HWACCEL_DISABLED; + cfb_imageblit(info, image); + return; + } + + BEGIN_RING(chan, NvSubGdiRect, 0x0c00, iter_len); + OUT_RINGp(chan, data, iter_len); + data += iter_len; + dsize -= iter_len; + } + + FIRE_RING(chan); +} + +static int +nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *obj = NULL; + int ret; + + ret = nouveau_gpuobj_gr_new(dev_priv->channel, class, &obj); + if (ret) + return ret; + + ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, handle, obj, NULL); + if (ret) + return ret; + + return 0; +} + +int +nv04_fbcon_accel_init(struct fb_info *info) +{ + struct nouveau_fbcon_par *par = info->par; + struct drm_device *dev = par->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = dev_priv->channel; + int surface_fmt, pattern_fmt, rect_fmt; + int ret; + + switch (info->var.bits_per_pixel) { + case 8: + surface_fmt = 1; + pattern_fmt = 3; + rect_fmt = 3; + break; + case 16: + surface_fmt = 4; + pattern_fmt = 1; + rect_fmt = 1; + break; + case 32: + switch (info->var.transp.length) { + case 0: /* depth 24 */ + case 8: /* depth 32 */ + break; + default: + return -EINVAL; + } + + surface_fmt = 6; + pattern_fmt = 3; + rect_fmt = 3; + break; + default: + return -EINVAL; + } + + ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ? 
+ 0x0062 : 0x0042, NvCtxSurf2D); + if (ret) + return ret; + + ret = nv04_fbcon_grobj_new(dev, 0x0019, NvClipRect); + if (ret) + return ret; + + ret = nv04_fbcon_grobj_new(dev, 0x0043, NvRop); + if (ret) + return ret; + + ret = nv04_fbcon_grobj_new(dev, 0x0044, NvImagePatt); + if (ret) + return ret; + + ret = nv04_fbcon_grobj_new(dev, 0x004a, NvGdiRect); + if (ret) + return ret; + + ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ? + 0x009f : 0x005f, NvImageBlit); + if (ret) + return ret; + + if (RING_SPACE(chan, 49)) { + NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); + info->flags |= FBINFO_HWACCEL_DISABLED; + return 0; + } + + BEGIN_RING(chan, 1, 0x0000, 1); + OUT_RING(chan, NvCtxSurf2D); + BEGIN_RING(chan, 1, 0x0184, 2); + OUT_RING(chan, NvDmaFB); + OUT_RING(chan, NvDmaFB); + BEGIN_RING(chan, 1, 0x0300, 4); + OUT_RING(chan, surface_fmt); + OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16)); + OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base); + OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base); + + BEGIN_RING(chan, 1, 0x0000, 1); + OUT_RING(chan, NvRop); + BEGIN_RING(chan, 1, 0x0300, 1); + OUT_RING(chan, 0x55); + + BEGIN_RING(chan, 1, 0x0000, 1); + OUT_RING(chan, NvImagePatt); + BEGIN_RING(chan, 1, 0x0300, 8); + OUT_RING(chan, pattern_fmt); +#ifdef __BIG_ENDIAN + OUT_RING(chan, 2); +#else + OUT_RING(chan, 1); +#endif + OUT_RING(chan, 0); + OUT_RING(chan, 1); + OUT_RING(chan, ~0); + OUT_RING(chan, ~0); + OUT_RING(chan, ~0); + OUT_RING(chan, ~0); + + BEGIN_RING(chan, 1, 0x0000, 1); + OUT_RING(chan, NvClipRect); + BEGIN_RING(chan, 1, 0x0300, 2); + OUT_RING(chan, 0); + OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual); + + BEGIN_RING(chan, NvSubImageBlit, 0x0000, 1); + OUT_RING(chan, NvImageBlit); + BEGIN_RING(chan, NvSubImageBlit, 0x019c, 1); + OUT_RING(chan, NvCtxSurf2D); + BEGIN_RING(chan, NvSubImageBlit, 0x02fc, 1); + OUT_RING(chan, 3); + + BEGIN_RING(chan, NvSubGdiRect, 0x0000, 1); + OUT_RING(chan, NvGdiRect); + BEGIN_RING(chan, NvSubGdiRect, 0x0198, 1); + OUT_RING(chan, NvCtxSurf2D); + BEGIN_RING(chan, NvSubGdiRect, 0x0188, 2); + OUT_RING(chan, NvImagePatt); + OUT_RING(chan, NvRop); + BEGIN_RING(chan, NvSubGdiRect, 0x0304, 1); + OUT_RING(chan, 1); + BEGIN_RING(chan, NvSubGdiRect, 0x0300, 1); + OUT_RING(chan, rect_fmt); + BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1); + OUT_RING(chan, 3); + + FIRE_RING(chan); + + info->fbops->fb_fillrect = nv04_fbcon_fillrect; + info->fbops->fb_copyarea = nv04_fbcon_copyarea; + info->fbops->fb_imageblit = nv04_fbcon_imageblit; + return 0; +} + diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c new file mode 100644 index 000000000000..0c3cd53c7313 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv04_fifo.c @@ -0,0 +1,271 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" + +#define NV04_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV04_RAMFC__SIZE)) +#define NV04_RAMFC__SIZE 32 +#define NV04_RAMFC_DMA_PUT 0x00 +#define NV04_RAMFC_DMA_GET 0x04 +#define NV04_RAMFC_DMA_INSTANCE 0x08 +#define NV04_RAMFC_DMA_STATE 0x0C +#define NV04_RAMFC_DMA_FETCH 0x10 +#define NV04_RAMFC_ENGINE 0x14 +#define NV04_RAMFC_PULL1_ENGINE 0x18 + +#define RAMFC_WR(offset, val) nv_wo32(dev, chan->ramfc->gpuobj, \ + NV04_RAMFC_##offset/4, (val)) +#define RAMFC_RD(offset) nv_ro32(dev, chan->ramfc->gpuobj, \ + NV04_RAMFC_##offset/4) + +void +nv04_fifo_disable(struct drm_device *dev) +{ + uint32_t tmp; + + tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH); + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, tmp & ~1); + nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0); + tmp = nv_rd32(dev, NV03_PFIFO_CACHE1_PULL1); + nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, tmp & ~1); +} + +void +nv04_fifo_enable(struct drm_device *dev) +{ + nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1); + nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); +} + +bool +nv04_fifo_reassign(struct drm_device *dev, bool enable) +{ + uint32_t reassign = nv_rd32(dev, NV03_PFIFO_CACHES); + + nv_wr32(dev, NV03_PFIFO_CACHES, enable ? 1 : 0); + return (reassign == 1); +} + +int +nv04_fifo_channel_id(struct drm_device *dev) +{ + return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & + NV03_PFIFO_CACHE1_PUSH1_CHID_MASK; +} + +int +nv04_fifo_create_context(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + int ret; + + ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0, + NV04_RAMFC__SIZE, + NVOBJ_FLAG_ZERO_ALLOC | + NVOBJ_FLAG_ZERO_FREE, + NULL, &chan->ramfc); + if (ret) + return ret; + + /* Setup initial state */ + dev_priv->engine.instmem.prepare_access(dev, true); + RAMFC_WR(DMA_PUT, chan->pushbuf_base); + RAMFC_WR(DMA_GET, chan->pushbuf_base); + RAMFC_WR(DMA_INSTANCE, chan->pushbuf->instance >> 4); + RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | + NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | + NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | +#ifdef __BIG_ENDIAN + NV_PFIFO_CACHE1_BIG_ENDIAN | +#endif + 0)); + dev_priv->engine.instmem.finish_access(dev); + + /* enable the fifo dma operation */ + nv_wr32(dev, NV04_PFIFO_MODE, + nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id)); + return 0; +} + +void +nv04_fifo_destroy_context(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + + nv_wr32(dev, NV04_PFIFO_MODE, + nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id)); + + nouveau_gpuobj_ref_del(dev, &chan->ramfc); +} + +static void +nv04_fifo_do_load_context(struct drm_device *dev, int chid) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t fc = NV04_RAMFC(chid), tmp; + + dev_priv->engine.instmem.prepare_access(dev, false); + + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0)); + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4)); + tmp = nv_ri32(dev, fc + 8); 
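+	/* fc+8 (NV04_RAMFC_DMA_INSTANCE) packs the pushbuf DMA instance in its low 16 bits and the DMA dword count in its high 16 bits; the two writes below split it back out into the CACHE1 registers. */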
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF); + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16); + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 12)); + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 16)); + nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 20)); + nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 24)); + + dev_priv->engine.instmem.finish_access(dev); + + nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0); + nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0); +} + +int +nv04_fifo_load_context(struct nouveau_channel *chan) +{ + uint32_t tmp; + + nv_wr32(chan->dev, NV03_PFIFO_CACHE1_PUSH1, + NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id); + nv04_fifo_do_load_context(chan->dev, chan->id); + nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1); + + /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */ + tmp = nv_rd32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31); + nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp); + + return 0; +} + +int +nv04_fifo_unload_context(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; + struct nouveau_channel *chan = NULL; + uint32_t tmp; + int chid; + + chid = pfifo->channel_id(dev); + if (chid < 0 || chid >= dev_priv->engine.fifo.channels) + return 0; + + chan = dev_priv->fifos[chid]; + if (!chan) { + NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid); + return -EINVAL; + } + + dev_priv->engine.instmem.prepare_access(dev, true); + RAMFC_WR(DMA_PUT, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT)); + RAMFC_WR(DMA_GET, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET)); + tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16; + tmp |= nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE); + RAMFC_WR(DMA_INSTANCE, tmp); + RAMFC_WR(DMA_STATE, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE)); + RAMFC_WR(DMA_FETCH, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH)); + RAMFC_WR(ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE)); + RAMFC_WR(PULL1_ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1)); + dev_priv->engine.instmem.finish_access(dev); + + nv04_fifo_do_load_context(dev, pfifo->channels - 1); + nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1); + return 0; +} + +static void +nv04_fifo_init_reset(struct drm_device *dev) +{ + nv_wr32(dev, NV03_PMC_ENABLE, + nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO); + nv_wr32(dev, NV03_PMC_ENABLE, + nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO); + + nv_wr32(dev, 0x003224, 0x000f0078); + nv_wr32(dev, 0x002044, 0x0101ffff); + nv_wr32(dev, 0x002040, 0x000000ff); + nv_wr32(dev, 0x002500, 0x00000000); + nv_wr32(dev, 0x003000, 0x00000000); + nv_wr32(dev, 0x003050, 0x00000000); + nv_wr32(dev, 0x003200, 0x00000000); + nv_wr32(dev, 0x003250, 0x00000000); + nv_wr32(dev, 0x003220, 0x00000000); + + nv_wr32(dev, 0x003250, 0x00000000); + nv_wr32(dev, 0x003270, 0x00000000); + nv_wr32(dev, 0x003210, 0x00000000); +} + +static void +nv04_fifo_init_ramxx(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | + ((dev_priv->ramht_bits - 9) << 16) | + (dev_priv->ramht_offset >> 8)); + nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8); + nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc_offset >> 8); +} + +static void +nv04_fifo_init_intr(struct drm_device *dev) +{ + nv_wr32(dev, 0x002100, 0xffffffff); + nv_wr32(dev, 0x002140, 0xffffffff); +} + +int +nv04_fifo_init(struct drm_device *dev) +{ + struct drm_nouveau_private 
*dev_priv = dev->dev_private; + struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; + int i; + + nv04_fifo_init_reset(dev); + nv04_fifo_init_ramxx(dev); + + nv04_fifo_do_load_context(dev, pfifo->channels - 1); + nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1); + + nv04_fifo_init_intr(dev); + pfifo->enable(dev); + + for (i = 0; i < dev_priv->engine.fifo.channels; i++) { + if (dev_priv->fifos[i]) { + uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE); + nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i)); + } + } + + return 0; +} + diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c new file mode 100644 index 000000000000..396ee92118f6 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv04_graph.c @@ -0,0 +1,579 @@ +/* + * Copyright 2007 Stephane Marchesin + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drm.h" +#include "nouveau_drv.h" + +static uint32_t nv04_graph_ctx_regs[] = { + NV04_PGRAPH_CTX_SWITCH1, + NV04_PGRAPH_CTX_SWITCH2, + NV04_PGRAPH_CTX_SWITCH3, + NV04_PGRAPH_CTX_SWITCH4, + NV04_PGRAPH_CTX_CACHE1, + NV04_PGRAPH_CTX_CACHE2, + NV04_PGRAPH_CTX_CACHE3, + NV04_PGRAPH_CTX_CACHE4, + 0x00400184, + 0x004001a4, + 0x004001c4, + 0x004001e4, + 0x00400188, + 0x004001a8, + 0x004001c8, + 0x004001e8, + 0x0040018c, + 0x004001ac, + 0x004001cc, + 0x004001ec, + 0x00400190, + 0x004001b0, + 0x004001d0, + 0x004001f0, + 0x00400194, + 0x004001b4, + 0x004001d4, + 0x004001f4, + 0x00400198, + 0x004001b8, + 0x004001d8, + 0x004001f8, + 0x0040019c, + 0x004001bc, + 0x004001dc, + 0x004001fc, + 0x00400174, + NV04_PGRAPH_DMA_START_0, + NV04_PGRAPH_DMA_START_1, + NV04_PGRAPH_DMA_LENGTH, + NV04_PGRAPH_DMA_MISC, + NV04_PGRAPH_DMA_PITCH, + NV04_PGRAPH_BOFFSET0, + NV04_PGRAPH_BBASE0, + NV04_PGRAPH_BLIMIT0, + NV04_PGRAPH_BOFFSET1, + NV04_PGRAPH_BBASE1, + NV04_PGRAPH_BLIMIT1, + NV04_PGRAPH_BOFFSET2, + NV04_PGRAPH_BBASE2, + NV04_PGRAPH_BLIMIT2, + NV04_PGRAPH_BOFFSET3, + NV04_PGRAPH_BBASE3, + NV04_PGRAPH_BLIMIT3, + NV04_PGRAPH_BOFFSET4, + NV04_PGRAPH_BBASE4, + NV04_PGRAPH_BLIMIT4, + NV04_PGRAPH_BOFFSET5, + NV04_PGRAPH_BBASE5, + NV04_PGRAPH_BLIMIT5, + NV04_PGRAPH_BPITCH0, + NV04_PGRAPH_BPITCH1, + NV04_PGRAPH_BPITCH2, + NV04_PGRAPH_BPITCH3, + NV04_PGRAPH_BPITCH4, + NV04_PGRAPH_SURFACE, + NV04_PGRAPH_STATE, + NV04_PGRAPH_BSWIZZLE2, + NV04_PGRAPH_BSWIZZLE5, + NV04_PGRAPH_BPIXEL, + NV04_PGRAPH_NOTIFY, + NV04_PGRAPH_PATT_COLOR0, + NV04_PGRAPH_PATT_COLOR1, + NV04_PGRAPH_PATT_COLORRAM+0x00, + NV04_PGRAPH_PATT_COLORRAM+0x01, + NV04_PGRAPH_PATT_COLORRAM+0x02, + NV04_PGRAPH_PATT_COLORRAM+0x03, + NV04_PGRAPH_PATT_COLORRAM+0x04, + NV04_PGRAPH_PATT_COLORRAM+0x05, + NV04_PGRAPH_PATT_COLORRAM+0x06, + NV04_PGRAPH_PATT_COLORRAM+0x07, + NV04_PGRAPH_PATT_COLORRAM+0x08, + NV04_PGRAPH_PATT_COLORRAM+0x09, + NV04_PGRAPH_PATT_COLORRAM+0x0A, + NV04_PGRAPH_PATT_COLORRAM+0x0B, + NV04_PGRAPH_PATT_COLORRAM+0x0C, + NV04_PGRAPH_PATT_COLORRAM+0x0D, + NV04_PGRAPH_PATT_COLORRAM+0x0E, + NV04_PGRAPH_PATT_COLORRAM+0x0F, + NV04_PGRAPH_PATT_COLORRAM+0x10, + NV04_PGRAPH_PATT_COLORRAM+0x11, + NV04_PGRAPH_PATT_COLORRAM+0x12, + NV04_PGRAPH_PATT_COLORRAM+0x13, + NV04_PGRAPH_PATT_COLORRAM+0x14, + NV04_PGRAPH_PATT_COLORRAM+0x15, + NV04_PGRAPH_PATT_COLORRAM+0x16, + NV04_PGRAPH_PATT_COLORRAM+0x17, + NV04_PGRAPH_PATT_COLORRAM+0x18, + NV04_PGRAPH_PATT_COLORRAM+0x19, + NV04_PGRAPH_PATT_COLORRAM+0x1A, + NV04_PGRAPH_PATT_COLORRAM+0x1B, + NV04_PGRAPH_PATT_COLORRAM+0x1C, + NV04_PGRAPH_PATT_COLORRAM+0x1D, + NV04_PGRAPH_PATT_COLORRAM+0x1E, + NV04_PGRAPH_PATT_COLORRAM+0x1F, + NV04_PGRAPH_PATT_COLORRAM+0x20, + NV04_PGRAPH_PATT_COLORRAM+0x21, + NV04_PGRAPH_PATT_COLORRAM+0x22, + NV04_PGRAPH_PATT_COLORRAM+0x23, + NV04_PGRAPH_PATT_COLORRAM+0x24, + NV04_PGRAPH_PATT_COLORRAM+0x25, + NV04_PGRAPH_PATT_COLORRAM+0x26, + NV04_PGRAPH_PATT_COLORRAM+0x27, + NV04_PGRAPH_PATT_COLORRAM+0x28, + NV04_PGRAPH_PATT_COLORRAM+0x29, + NV04_PGRAPH_PATT_COLORRAM+0x2A, + NV04_PGRAPH_PATT_COLORRAM+0x2B, + NV04_PGRAPH_PATT_COLORRAM+0x2C, + NV04_PGRAPH_PATT_COLORRAM+0x2D, + NV04_PGRAPH_PATT_COLORRAM+0x2E, + NV04_PGRAPH_PATT_COLORRAM+0x2F, + NV04_PGRAPH_PATT_COLORRAM+0x30, + NV04_PGRAPH_PATT_COLORRAM+0x31, + NV04_PGRAPH_PATT_COLORRAM+0x32, + NV04_PGRAPH_PATT_COLORRAM+0x33, + NV04_PGRAPH_PATT_COLORRAM+0x34, + NV04_PGRAPH_PATT_COLORRAM+0x35, + NV04_PGRAPH_PATT_COLORRAM+0x36, + NV04_PGRAPH_PATT_COLORRAM+0x37, + NV04_PGRAPH_PATT_COLORRAM+0x38, + 
NV04_PGRAPH_PATT_COLORRAM+0x39, + NV04_PGRAPH_PATT_COLORRAM+0x3A, + NV04_PGRAPH_PATT_COLORRAM+0x3B, + NV04_PGRAPH_PATT_COLORRAM+0x3C, + NV04_PGRAPH_PATT_COLORRAM+0x3D, + NV04_PGRAPH_PATT_COLORRAM+0x3E, + NV04_PGRAPH_PATT_COLORRAM+0x3F, + NV04_PGRAPH_PATTERN, + 0x0040080c, + NV04_PGRAPH_PATTERN_SHAPE, + 0x00400600, + NV04_PGRAPH_ROP3, + NV04_PGRAPH_CHROMA, + NV04_PGRAPH_BETA_AND, + NV04_PGRAPH_BETA_PREMULT, + NV04_PGRAPH_CONTROL0, + NV04_PGRAPH_CONTROL1, + NV04_PGRAPH_CONTROL2, + NV04_PGRAPH_BLEND, + NV04_PGRAPH_STORED_FMT, + NV04_PGRAPH_SOURCE_COLOR, + 0x00400560, + 0x00400568, + 0x00400564, + 0x0040056c, + 0x00400400, + 0x00400480, + 0x00400404, + 0x00400484, + 0x00400408, + 0x00400488, + 0x0040040c, + 0x0040048c, + 0x00400410, + 0x00400490, + 0x00400414, + 0x00400494, + 0x00400418, + 0x00400498, + 0x0040041c, + 0x0040049c, + 0x00400420, + 0x004004a0, + 0x00400424, + 0x004004a4, + 0x00400428, + 0x004004a8, + 0x0040042c, + 0x004004ac, + 0x00400430, + 0x004004b0, + 0x00400434, + 0x004004b4, + 0x00400438, + 0x004004b8, + 0x0040043c, + 0x004004bc, + 0x00400440, + 0x004004c0, + 0x00400444, + 0x004004c4, + 0x00400448, + 0x004004c8, + 0x0040044c, + 0x004004cc, + 0x00400450, + 0x004004d0, + 0x00400454, + 0x004004d4, + 0x00400458, + 0x004004d8, + 0x0040045c, + 0x004004dc, + 0x00400460, + 0x004004e0, + 0x00400464, + 0x004004e4, + 0x00400468, + 0x004004e8, + 0x0040046c, + 0x004004ec, + 0x00400470, + 0x004004f0, + 0x00400474, + 0x004004f4, + 0x00400478, + 0x004004f8, + 0x0040047c, + 0x004004fc, + 0x0040053c, + 0x00400544, + 0x00400540, + 0x00400548, + 0x00400560, + 0x00400568, + 0x00400564, + 0x0040056c, + 0x00400534, + 0x00400538, + 0x00400514, + 0x00400518, + 0x0040051c, + 0x00400520, + 0x00400524, + 0x00400528, + 0x0040052c, + 0x00400530, + 0x00400d00, + 0x00400d40, + 0x00400d80, + 0x00400d04, + 0x00400d44, + 0x00400d84, + 0x00400d08, + 0x00400d48, + 0x00400d88, + 0x00400d0c, + 0x00400d4c, + 0x00400d8c, + 0x00400d10, + 0x00400d50, + 0x00400d90, + 0x00400d14, + 0x00400d54, + 0x00400d94, + 0x00400d18, + 0x00400d58, + 0x00400d98, + 0x00400d1c, + 0x00400d5c, + 0x00400d9c, + 0x00400d20, + 0x00400d60, + 0x00400da0, + 0x00400d24, + 0x00400d64, + 0x00400da4, + 0x00400d28, + 0x00400d68, + 0x00400da8, + 0x00400d2c, + 0x00400d6c, + 0x00400dac, + 0x00400d30, + 0x00400d70, + 0x00400db0, + 0x00400d34, + 0x00400d74, + 0x00400db4, + 0x00400d38, + 0x00400d78, + 0x00400db8, + 0x00400d3c, + 0x00400d7c, + 0x00400dbc, + 0x00400590, + 0x00400594, + 0x00400598, + 0x0040059c, + 0x004005a8, + 0x004005ac, + 0x004005b0, + 0x004005b4, + 0x004005c0, + 0x004005c4, + 0x004005c8, + 0x004005cc, + 0x004005d0, + 0x004005d4, + 0x004005d8, + 0x004005dc, + 0x004005e0, + NV04_PGRAPH_PASSTHRU_0, + NV04_PGRAPH_PASSTHRU_1, + NV04_PGRAPH_PASSTHRU_2, + NV04_PGRAPH_DVD_COLORFMT, + NV04_PGRAPH_SCALED_FORMAT, + NV04_PGRAPH_MISC24_0, + NV04_PGRAPH_MISC24_1, + NV04_PGRAPH_MISC24_2, + 0x00400500, + 0x00400504, + NV04_PGRAPH_VALID1, + NV04_PGRAPH_VALID2 + + +}; + +struct graph_state { + int nv04[ARRAY_SIZE(nv04_graph_ctx_regs)]; +}; + +struct nouveau_channel * +nv04_graph_channel(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int chid = dev_priv->engine.fifo.channels; + + if (nv_rd32(dev, NV04_PGRAPH_CTX_CONTROL) & 0x00010000) + chid = nv_rd32(dev, NV04_PGRAPH_CTX_USER) >> 24; + + if (chid >= dev_priv->engine.fifo.channels) + return NULL; + + return dev_priv->fifos[chid]; +} + +void +nv04_graph_context_switch(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct 
nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + struct nouveau_channel *chan = NULL; + int chid; + + pgraph->fifo_access(dev, false); + nouveau_wait_for_idle(dev); + + /* If previous context is valid, we need to save it */ + pgraph->unload_context(dev); + + /* Load context for next channel */ + chid = dev_priv->engine.fifo.channel_id(dev); + chan = dev_priv->fifos[chid]; + if (chan) + nv04_graph_load_context(chan); + + pgraph->fifo_access(dev, true); +} + +int nv04_graph_create_context(struct nouveau_channel *chan) +{ + struct graph_state *pgraph_ctx; + NV_DEBUG(chan->dev, "nv04_graph_context_create %d\n", chan->id); + + chan->pgraph_ctx = pgraph_ctx = kzalloc(sizeof(*pgraph_ctx), + GFP_KERNEL); + if (pgraph_ctx == NULL) + return -ENOMEM; + + /* dev_priv->fifos[channel].pgraph_ctx_user = channel << 24; */ + pgraph_ctx->nv04[0] = 0x0001ffff; + /* is it really needed ??? */ +#if 0 + dev_priv->fifos[channel].pgraph_ctx[1] = + nv_rd32(dev, NV_PGRAPH_DEBUG_4); + dev_priv->fifos[channel].pgraph_ctx[2] = + nv_rd32(dev, 0x004006b0); +#endif + return 0; +} + +void nv04_graph_destroy_context(struct nouveau_channel *chan) +{ + struct graph_state *pgraph_ctx = chan->pgraph_ctx; + + kfree(pgraph_ctx); + chan->pgraph_ctx = NULL; +} + +int nv04_graph_load_context(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + struct graph_state *pgraph_ctx = chan->pgraph_ctx; + uint32_t tmp; + int i; + + for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) + nv_wr32(dev, nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]); + + nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10010100); + nv_wr32(dev, NV04_PGRAPH_CTX_USER, chan->id << 24); + tmp = nv_rd32(dev, NV04_PGRAPH_FFINTFC_ST2); + nv_wr32(dev, NV04_PGRAPH_FFINTFC_ST2, tmp & 0x000fffff); + return 0; +} + +int +nv04_graph_unload_context(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + struct nouveau_channel *chan = NULL; + struct graph_state *ctx; + uint32_t tmp; + int i; + + chan = pgraph->channel(dev); + if (!chan) + return 0; + ctx = chan->pgraph_ctx; + + for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) + ctx->nv04[i] = nv_rd32(dev, nv04_graph_ctx_regs[i]); + + nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10000000); + tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff; + tmp |= (dev_priv->engine.fifo.channels - 1) << 24; + nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp); + return 0; +} + +int nv04_graph_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t tmp; + + nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & + ~NV_PMC_ENABLE_PGRAPH); + nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | + NV_PMC_ENABLE_PGRAPH); + + /* Enable PGRAPH interrupts */ + nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF); + nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); + + nv_wr32(dev, NV04_PGRAPH_VALID1, 0); + nv_wr32(dev, NV04_PGRAPH_VALID2, 0); + /*nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x000001FF); + nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/ + nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x1231c000); + /*1231C000 blob, 001 haiku*/ + //*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/ + nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x72111100); + /*0x72111100 blob , 01 haiku*/ + /*nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/ + nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f071); + /*haiku same*/ + + /*nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/ + nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31); + /*haiku and blob 10d4*/ + 
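+	/* Bits 31:24 of CTX_USER are set to fifo.channels below, one past the last valid channel id, so nv04_graph_channel() reports that no channel is currently resident. */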
+ nv_wr32(dev, NV04_PGRAPH_STATE , 0xFFFFFFFF); + nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL , 0x10000100); + tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff; + tmp |= dev_priv->engine.fifo.channels << 24; + nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp); + + /* These don't belong here, they're part of a per-channel context */ + nv_wr32(dev, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000); + nv_wr32(dev, NV04_PGRAPH_BETA_AND , 0xFFFFFFFF); + + return 0; +} + +void nv04_graph_takedown(struct drm_device *dev) +{ +} + +void +nv04_graph_fifo_access(struct drm_device *dev, bool enabled) +{ + if (enabled) + nv_wr32(dev, NV04_PGRAPH_FIFO, + nv_rd32(dev, NV04_PGRAPH_FIFO) | 1); + else + nv_wr32(dev, NV04_PGRAPH_FIFO, + nv_rd32(dev, NV04_PGRAPH_FIFO) & ~1); +} + +static int +nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass, + int mthd, uint32_t data) +{ + chan->fence.last_sequence_irq = data; + nouveau_fence_handler(chan->dev, chan->id); + return 0; +} + +static int +nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass, + int mthd, uint32_t data) +{ + struct drm_device *dev = chan->dev; + uint32_t instance = nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff; + int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7; + uint32_t tmp; + + tmp = nv_ri32(dev, instance); + tmp &= ~0x00038000; + tmp |= ((data & 7) << 15); + + nv_wi32(dev, instance, tmp); + nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp); + nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + subc, tmp); + return 0; +} + +static struct nouveau_pgraph_object_method nv04_graph_mthds_m2mf[] = { + { 0x0150, nv04_graph_mthd_set_ref }, + {} +}; + +static struct nouveau_pgraph_object_method nv04_graph_mthds_set_operation[] = { + { 0x02fc, nv04_graph_mthd_set_operation }, + {}, +}; + +struct nouveau_pgraph_object_class nv04_graph_grclass[] = { + { 0x0039, false, nv04_graph_mthds_m2mf }, + { 0x004a, false, nv04_graph_mthds_set_operation }, /* gdirect */ + { 0x005f, false, nv04_graph_mthds_set_operation }, /* imageblit */ + { 0x0061, false, nv04_graph_mthds_set_operation }, /* ifc */ + { 0x0077, false, nv04_graph_mthds_set_operation }, /* sifm */ + { 0x0030, false, NULL }, /* null */ + { 0x0042, false, NULL }, /* surf2d */ + { 0x0043, false, NULL }, /* rop */ + { 0x0012, false, NULL }, /* beta1 */ + { 0x0072, false, NULL }, /* beta4 */ + { 0x0019, false, NULL }, /* cliprect */ + { 0x0044, false, NULL }, /* pattern */ + { 0x0052, false, NULL }, /* swzsurf */ + { 0x0053, false, NULL }, /* surf3d */ + { 0x0054, false, NULL }, /* tex_tri */ + { 0x0055, false, NULL }, /* multitex_tri */ + {} +}; + diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c new file mode 100644 index 000000000000..a20c206625a2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv04_instmem.c @@ -0,0 +1,208 @@ +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" + +/* returns the size of fifo context */ +static int +nouveau_fifo_ctx_size(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (dev_priv->chipset >= 0x40) + return 128; + else + if (dev_priv->chipset >= 0x17) + return 64; + + return 32; +} + +static void +nv04_instmem_determine_amount(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int i; + + /* Figure out how much instance memory we need */ + if (dev_priv->card_type >= NV_40) { + /* We'll want more instance memory than this on some NV4x cards. + * There's a 16MB aperture to play with that maps onto the end + * of vram. 
For now, only reserve a small piece until we know + * more about what each chipset requires. + */ + switch (dev_priv->chipset & 0xf0) { + case 0x40: + case 0x47: + case 0x49: + case 0x4b: + dev_priv->ramin_rsvd_vram = (2 * 1024 * 1024); + break; + default: + dev_priv->ramin_rsvd_vram = (1 * 1024 * 1024); + break; + } + } else { + /*XXX: what *are* the limits on <nv40 cards? + */ + dev_priv->ramin_rsvd_vram = (512 * 1024); + } + NV_DEBUG(dev, "RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram >> 10); + + /* Clear all of it, except the BIOS image that's in the first 64KiB */ + dev_priv->engine.instmem.prepare_access(dev, true); + for (i = 64 * 1024; i < dev_priv->ramin_rsvd_vram; i += 4) + nv_wi32(dev, i, 0x00000000); + dev_priv->engine.instmem.finish_access(dev); +} + +static void +nv04_instmem_configure_fixed_tables(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_engine *engine = &dev_priv->engine; + + /* FIFO hash table (RAMHT) + * use 4k hash table at RAMIN+0x10000 + * TODO: extend the hash table + */ + dev_priv->ramht_offset = 0x10000; + dev_priv->ramht_bits = 9; + dev_priv->ramht_size = (1 << dev_priv->ramht_bits); /* nr entries */ + dev_priv->ramht_size *= 8; /* 2 32-bit values per entry in RAMHT */ + NV_DEBUG(dev, "RAMHT offset=0x%x, size=%d\n", dev_priv->ramht_offset, + dev_priv->ramht_size); + + /* FIFO runout table (RAMRO) - 512k at 0x11200 */ + dev_priv->ramro_offset = 0x11200; + dev_priv->ramro_size = 512; + NV_DEBUG(dev, "RAMRO offset=0x%x, size=%d\n", dev_priv->ramro_offset, + dev_priv->ramro_size); + + /* FIFO context table (RAMFC) + * NV40 : Not sure exactly how to position RAMFC on some cards, + * 0x30002 seems to position it at RAMIN+0x20000 on these + * cards. RAMFC is 4kb (32 fifos, 128byte entries). + * Others: Position RAMFC at RAMIN+0x11400 + */ + dev_priv->ramfc_size = engine->fifo.channels * + nouveau_fifo_ctx_size(dev); + switch (dev_priv->card_type) { + case NV_40: + dev_priv->ramfc_offset = 0x20000; + break; + case NV_30: + case NV_20: + case NV_10: + case NV_04: + default: + dev_priv->ramfc_offset = 0x11400; + break; + } + NV_DEBUG(dev, "RAMFC offset=0x%x, size=%d\n", dev_priv->ramfc_offset, + dev_priv->ramfc_size); +} + +int nv04_instmem_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t offset; + int ret = 0; + + nv04_instmem_determine_amount(dev); + nv04_instmem_configure_fixed_tables(dev); + + /* Create a heap to manage RAMIN allocations, we don't allocate + * the space that was reserved for RAMHT/FC/RO. + */ + offset = dev_priv->ramfc_offset + dev_priv->ramfc_size; + + /* It appears RAMRO (or something?) is controlled by 0x2220/0x2230 + * on certain NV4x chipsets as well as RAMFC. When 0x2230 == 0 + * ("new style" control) the upper 16-bits of 0x2220 points at this + * other mysterious table that's clobbering important things. + * + * We're now pointing this at RAMIN+0x30000 to avoid RAMFC getting + * smashed to pieces on us, so reserve 0x30000-0x40000 too..
+ */ + if (dev_priv->card_type >= NV_40) { + if (offset < 0x40000) + offset = 0x40000; + } + + ret = nouveau_mem_init_heap(&dev_priv->ramin_heap, + offset, dev_priv->ramin_rsvd_vram - offset); + if (ret) { + dev_priv->ramin_heap = NULL; + NV_ERROR(dev, "Failed to init RAMIN heap\n"); + } + + return ret; +} + +void +nv04_instmem_takedown(struct drm_device *dev) +{ +} + +int +nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uint32_t *sz) +{ + if (gpuobj->im_backing) + return -EINVAL; + + return 0; +} + +void +nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (gpuobj && gpuobj->im_backing) { + if (gpuobj->im_bound) + dev_priv->engine.instmem.unbind(dev, gpuobj); + gpuobj->im_backing = NULL; + } +} + +int +nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) +{ + if (!gpuobj->im_pramin || gpuobj->im_bound) + return -EINVAL; + + gpuobj->im_bound = 1; + return 0; +} + +int +nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) +{ + if (gpuobj->im_bound == 0) + return -EINVAL; + + gpuobj->im_bound = 0; + return 0; +} + +void +nv04_instmem_prepare_access(struct drm_device *dev, bool write) +{ +} + +void +nv04_instmem_finish_access(struct drm_device *dev) +{ +} + +int +nv04_instmem_suspend(struct drm_device *dev) +{ + return 0; +} + +void +nv04_instmem_resume(struct drm_device *dev) +{ +} + diff --git a/drivers/gpu/drm/nouveau/nv04_mc.c b/drivers/gpu/drm/nouveau/nv04_mc.c new file mode 100644 index 000000000000..617ed1e05269 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv04_mc.c @@ -0,0 +1,20 @@ +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" +#include "nouveau_drm.h" + +int +nv04_mc_init(struct drm_device *dev) +{ + /* Power up everything, resetting each individual unit will + * be done later if needed. + */ + + nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF); + return 0; +} + +void +nv04_mc_takedown(struct drm_device *dev) +{ +} diff --git a/drivers/gpu/drm/nouveau/nv04_timer.c b/drivers/gpu/drm/nouveau/nv04_timer.c new file mode 100644 index 000000000000..1d09ddd57399 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv04_timer.c @@ -0,0 +1,51 @@ +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" +#include "nouveau_drm.h" + +int +nv04_timer_init(struct drm_device *dev) +{ + nv_wr32(dev, NV04_PTIMER_INTR_EN_0, 0x00000000); + nv_wr32(dev, NV04_PTIMER_INTR_0, 0xFFFFFFFF); + + /* Just use the pre-existing values when possible for now; these regs + * are not written in nv (driver writer missed a /4 on the address), and + * writing 8 and 3 to the correct regs breaks the timings on the LVDS + * hardware sequencing microcode. + * A correct solution (involving calculations with the GPU PLL) can + * be done when kernel modesetting lands + */ + if (!nv_rd32(dev, NV04_PTIMER_NUMERATOR) || + !nv_rd32(dev, NV04_PTIMER_DENOMINATOR)) { + nv_wr32(dev, NV04_PTIMER_NUMERATOR, 0x00000008); + nv_wr32(dev, NV04_PTIMER_DENOMINATOR, 0x00000003); + } + + return 0; +} + +uint64_t +nv04_timer_read(struct drm_device *dev) +{ + uint32_t low; + /* From kmmio dumps on nv28 this looks like how the blob does this. + * It reads the high dword twice, before and after. + * The only explanation seems to be that the 64-bit timer counter + * advances between high and low dword reads and may corrupt the + * result. Not confirmed. 
+ */ + uint32_t high2 = nv_rd32(dev, NV04_PTIMER_TIME_1); + uint32_t high1; + do { + high1 = high2; + low = nv_rd32(dev, NV04_PTIMER_TIME_0); + high2 = nv_rd32(dev, NV04_PTIMER_TIME_1); + } while (high1 != high2); + return (((uint64_t)high2) << 32) | (uint64_t)low; +} + +void +nv04_timer_takedown(struct drm_device *dev) +{ +} diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c new file mode 100644 index 000000000000..9c63099e9c42 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv04_tv.c @@ -0,0 +1,305 @@ +/* + * Copyright (C) 2009 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "drmP.h" +#include "nouveau_drv.h" +#include "nouveau_encoder.h" +#include "nouveau_connector.h" +#include "nouveau_crtc.h" +#include "nouveau_hw.h" +#include "drm_crtc_helper.h" + +#include "i2c/ch7006.h" + +static struct { + struct i2c_board_info board_info; + struct drm_encoder_funcs funcs; + struct drm_encoder_helper_funcs hfuncs; + void *params; + +} nv04_tv_encoder_info[] = { + { + .board_info = { I2C_BOARD_INFO("ch7006", 0x75) }, + .params = &(struct ch7006_encoder_params) { + CH7006_FORMAT_RGB24m12I, CH7006_CLOCK_MASTER, + 0, 0, 0, + CH7006_SYNC_SLAVE, CH7006_SYNC_SEPARATED, + CH7006_POUT_3_3V, CH7006_ACTIVE_HSYNC + }, + }, +}; + +static bool probe_i2c_addr(struct i2c_adapter *adapter, int addr) +{ + struct i2c_msg msg = { + .addr = addr, + .len = 0, + }; + + return i2c_transfer(adapter, &msg, 1) == 1; +} + +int nv04_tv_identify(struct drm_device *dev, int i2c_index) +{ + struct nouveau_i2c_chan *i2c; + bool was_locked; + int i, ret; + + NV_TRACE(dev, "Probing TV encoders on I2C bus: %d\n", i2c_index); + + i2c = nouveau_i2c_find(dev, i2c_index); + if (!i2c) + return -ENODEV; + + was_locked = NVLockVgaCrtcs(dev, false); + + for (i = 0; i < ARRAY_SIZE(nv04_tv_encoder_info); i++) { + if (probe_i2c_addr(&i2c->adapter, + nv04_tv_encoder_info[i].board_info.addr)) { + ret = i; + break; + } + } + + if (i < ARRAY_SIZE(nv04_tv_encoder_info)) { + NV_TRACE(dev, "Detected TV encoder: %s\n", + nv04_tv_encoder_info[i].board_info.type); + + } else { + NV_TRACE(dev, "No TV encoders found.\n"); + i = -ENODEV; + } + + NVLockVgaCrtcs(dev, was_locked); + return i; +} + +#define PLLSEL_TV_CRTC1_MASK \ + (NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1 \ + | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1) +#define PLLSEL_TV_CRTC2_MASK \ + (NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK2 \ + | 
NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK2) + +static void nv04_tv_dpms(struct drm_encoder *encoder, int mode) +{ + struct drm_device *dev = encoder->dev; + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nv04_mode_state *state = &dev_priv->mode_reg; + uint8_t crtc1A; + + NV_INFO(dev, "Setting dpms mode %d on TV encoder (output %d)\n", + mode, nv_encoder->dcb->index); + + state->pllsel &= ~(PLLSEL_TV_CRTC1_MASK | PLLSEL_TV_CRTC2_MASK); + + if (mode == DRM_MODE_DPMS_ON) { + int head = nouveau_crtc(encoder->crtc)->index; + crtc1A = NVReadVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX); + + state->pllsel |= head ? PLLSEL_TV_CRTC2_MASK : + PLLSEL_TV_CRTC1_MASK; + + /* Inhibit hsync */ + crtc1A |= 0x80; + + NVWriteVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX, crtc1A); + } + + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel); + + to_encoder_slave(encoder)->slave_funcs->dpms(encoder, mode); +} + +static void nv04_tv_bind(struct drm_device *dev, int head, bool bind) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nv04_crtc_reg *state = &dev_priv->mode_reg.crtc_reg[head]; + + state->tv_setup = 0; + + if (bind) { + state->CRTC[NV_CIO_CRE_LCD__INDEX] = 0; + state->CRTC[NV_CIO_CRE_49] |= 0x10; + } else { + state->CRTC[NV_CIO_CRE_49] &= ~0x10; + } + + NVWriteVgaCrtc(dev, head, NV_CIO_CRE_LCD__INDEX, + state->CRTC[NV_CIO_CRE_LCD__INDEX]); + NVWriteVgaCrtc(dev, head, NV_CIO_CRE_49, + state->CRTC[NV_CIO_CRE_49]); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, + state->tv_setup); +} + +static void nv04_tv_prepare(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + int head = nouveau_crtc(encoder->crtc)->index; + struct drm_encoder_helper_funcs *helper = encoder->helper_private; + + helper->dpms(encoder, DRM_MODE_DPMS_OFF); + + nv04_dfp_disable(dev, head); + + if (nv_two_heads(dev)) + nv04_tv_bind(dev, head ^ 1, false); + + nv04_tv_bind(dev, head, true); +} + +static void nv04_tv_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_device *dev = encoder->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); + struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index]; + + regp->tv_htotal = adjusted_mode->htotal; + regp->tv_vtotal = adjusted_mode->vtotal; + + /* These delay the TV signals with respect to the VGA port, + * they might be useful if we ever allow a CRTC to drive + * multiple outputs. 
+ */ + regp->tv_hskew = 1; + regp->tv_hsync_delay = 1; + regp->tv_hsync_delay2 = 64; + regp->tv_vskew = 1; + regp->tv_vsync_delay = 1; + + to_encoder_slave(encoder)->slave_funcs->mode_set(encoder, mode, adjusted_mode); +} + +static void nv04_tv_commit(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); + struct drm_encoder_helper_funcs *helper = encoder->helper_private; + + helper->dpms(encoder, DRM_MODE_DPMS_ON); + + NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n", + drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index, + '@' + ffs(nv_encoder->dcb->or)); +} + +static void nv04_tv_destroy(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + + to_encoder_slave(encoder)->slave_funcs->destroy(encoder); + + drm_encoder_cleanup(encoder); + + kfree(nv_encoder); +} + +int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry) +{ + struct nouveau_encoder *nv_encoder; + struct drm_encoder *encoder; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct i2c_adapter *adap; + struct drm_encoder_funcs *funcs = NULL; + struct drm_encoder_helper_funcs *hfuncs = NULL; + struct drm_encoder_slave_funcs *sfuncs = NULL; + int i2c_index = entry->i2c_index; + int type, ret; + bool was_locked; + + /* Ensure that we can talk to this encoder */ + type = nv04_tv_identify(dev, i2c_index); + if (type < 0) + return type; + + /* Allocate the necessary memory */ + nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); + if (!nv_encoder) + return -ENOMEM; + + /* Initialize the common members */ + encoder = to_drm_encoder(nv_encoder); + + funcs = &nv04_tv_encoder_info[type].funcs; + hfuncs = &nv04_tv_encoder_info[type].hfuncs; + + drm_encoder_init(dev, encoder, funcs, DRM_MODE_ENCODER_TVDAC); + drm_encoder_helper_add(encoder, hfuncs); + + encoder->possible_crtcs = entry->heads; + encoder->possible_clones = 0; + + nv_encoder->dcb = entry; + nv_encoder->or = ffs(entry->or) - 1; + + /* Run the slave-specific initialization */ + adap = &dev_priv->vbios->dcb->i2c[i2c_index].chan->adapter; + + was_locked = NVLockVgaCrtcs(dev, false); + + ret = drm_i2c_encoder_init(encoder->dev, to_encoder_slave(encoder), adap, + &nv04_tv_encoder_info[type].board_info); + + NVLockVgaCrtcs(dev, was_locked); + + if (ret < 0) + goto fail; + + /* Fill the function pointers */ + sfuncs = to_encoder_slave(encoder)->slave_funcs; + + *funcs = (struct drm_encoder_funcs) { + .destroy = nv04_tv_destroy, + }; + + *hfuncs = (struct drm_encoder_helper_funcs) { + .dpms = nv04_tv_dpms, + .save = sfuncs->save, + .restore = sfuncs->restore, + .mode_fixup = sfuncs->mode_fixup, + .prepare = nv04_tv_prepare, + .commit = nv04_tv_commit, + .mode_set = nv04_tv_mode_set, + .detect = sfuncs->detect, + }; + + /* Set the slave encoder configuration */ + sfuncs->set_config(encoder, nv04_tv_encoder_info[type].params); + + return 0; + +fail: + drm_encoder_cleanup(encoder); + + kfree(nv_encoder); + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c new file mode 100644 index 000000000000..79e2d104d70a --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv10_fb.c @@ -0,0 +1,24 @@ +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" +#include "nouveau_drm.h" + +int +nv10_fb_init(struct drm_device *dev) +{ + uint32_t fb_bar_size; + int i; + + fb_bar_size = 
drm_get_resource_len(dev, 0) - 1; + for (i = 0; i < NV10_PFB_TILE__SIZE; i++) { + nv_wr32(dev, NV10_PFB_TILE(i), 0); + nv_wr32(dev, NV10_PFB_TLIMIT(i), fb_bar_size); + } + + return 0; +} + +void +nv10_fb_takedown(struct drm_device *dev) +{ +} diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c new file mode 100644 index 000000000000..7aeabf262bc0 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv10_fifo.c @@ -0,0 +1,260 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" + +#define NV10_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV10_RAMFC__SIZE)) +#define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 
64 : 32) + +int +nv10_fifo_channel_id(struct drm_device *dev) +{ + return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & + NV10_PFIFO_CACHE1_PUSH1_CHID_MASK; +} + +int +nv10_fifo_create_context(struct nouveau_channel *chan) +{ + struct drm_nouveau_private *dev_priv = chan->dev->dev_private; + struct drm_device *dev = chan->dev; + uint32_t fc = NV10_RAMFC(chan->id); + int ret; + + ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0, + NV10_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC | + NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc); + if (ret) + return ret; + + /* Fill entries that are seen filled in dumps of nvidia driver just + * after channel's is put into DMA mode + */ + dev_priv->engine.instmem.prepare_access(dev, true); + nv_wi32(dev, fc + 0, chan->pushbuf_base); + nv_wi32(dev, fc + 4, chan->pushbuf_base); + nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4); + nv_wi32(dev, fc + 20, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | + NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | + NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | +#ifdef __BIG_ENDIAN + NV_PFIFO_CACHE1_BIG_ENDIAN | +#endif + 0); + dev_priv->engine.instmem.finish_access(dev); + + /* enable the fifo dma operation */ + nv_wr32(dev, NV04_PFIFO_MODE, + nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id)); + return 0; +} + +void +nv10_fifo_destroy_context(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + + nv_wr32(dev, NV04_PFIFO_MODE, + nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id)); + + nouveau_gpuobj_ref_del(dev, &chan->ramfc); +} + +static void +nv10_fifo_do_load_context(struct drm_device *dev, int chid) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t fc = NV10_RAMFC(chid), tmp; + + dev_priv->engine.instmem.prepare_access(dev, false); + + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0)); + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4)); + nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8)); + + tmp = nv_ri32(dev, fc + 12); + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF); + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16); + + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 16)); + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 20)); + nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 24)); + nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 28)); + + if (dev_priv->chipset < 0x17) + goto out; + + nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE, nv_ri32(dev, fc + 32)); + tmp = nv_ri32(dev, fc + 36); + nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, tmp); + nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, nv_ri32(dev, fc + 40)); + nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, nv_ri32(dev, fc + 44)); + nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 48)); + +out: + dev_priv->engine.instmem.finish_access(dev); + + nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0); + nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0); +} + +int +nv10_fifo_load_context(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + uint32_t tmp; + + nv10_fifo_do_load_context(dev, chan->id); + + nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, + NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id); + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1); + + /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */ + tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31); + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp); + + return 0; +} + +int +nv10_fifo_unload_context(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct 
nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; + uint32_t fc, tmp; + int chid; + + chid = pfifo->channel_id(dev); + if (chid < 0 || chid >= dev_priv->engine.fifo.channels) + return 0; + fc = NV10_RAMFC(chid); + + dev_priv->engine.instmem.prepare_access(dev, true); + + nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT)); + nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET)); + nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT)); + tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE) & 0xFFFF; + tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16); + nv_wi32(dev, fc + 12, tmp); + nv_wi32(dev, fc + 16, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE)); + nv_wi32(dev, fc + 20, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH)); + nv_wi32(dev, fc + 24, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE)); + nv_wi32(dev, fc + 28, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1)); + + if (dev_priv->chipset < 0x17) + goto out; + + nv_wi32(dev, fc + 32, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE)); + tmp = nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP); + nv_wi32(dev, fc + 36, tmp); + nv_wi32(dev, fc + 40, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT)); + nv_wi32(dev, fc + 44, nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE)); + nv_wi32(dev, fc + 48, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET)); + +out: + dev_priv->engine.instmem.finish_access(dev); + + nv10_fifo_do_load_context(dev, pfifo->channels - 1); + nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1); + return 0; +} + +static void +nv10_fifo_init_reset(struct drm_device *dev) +{ + nv_wr32(dev, NV03_PMC_ENABLE, + nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO); + nv_wr32(dev, NV03_PMC_ENABLE, + nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO); + + nv_wr32(dev, 0x003224, 0x000f0078); + nv_wr32(dev, 0x002044, 0x0101ffff); + nv_wr32(dev, 0x002040, 0x000000ff); + nv_wr32(dev, 0x002500, 0x00000000); + nv_wr32(dev, 0x003000, 0x00000000); + nv_wr32(dev, 0x003050, 0x00000000); + + nv_wr32(dev, 0x003258, 0x00000000); + nv_wr32(dev, 0x003210, 0x00000000); + nv_wr32(dev, 0x003270, 0x00000000); +} + +static void +nv10_fifo_init_ramxx(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | + ((dev_priv->ramht_bits - 9) << 16) | + (dev_priv->ramht_offset >> 8)); + nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8); + + if (dev_priv->chipset < 0x17) { + nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc_offset >> 8); + } else { + nv_wr32(dev, NV03_PFIFO_RAMFC, (dev_priv->ramfc_offset >> 8) | + (1 << 16) /* 64 Bytes entry*/); + /* XXX nvidia blob set bit 18, 21,23 for nv20 & nv30 */ + } +} + +static void +nv10_fifo_init_intr(struct drm_device *dev) +{ + nv_wr32(dev, 0x002100, 0xffffffff); + nv_wr32(dev, 0x002140, 0xffffffff); +} + +int +nv10_fifo_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; + int i; + + nv10_fifo_init_reset(dev); + nv10_fifo_init_ramxx(dev); + + nv10_fifo_do_load_context(dev, pfifo->channels - 1); + nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1); + + nv10_fifo_init_intr(dev); + pfifo->enable(dev); + pfifo->reassign(dev, true); + + for (i = 0; i < dev_priv->engine.fifo.channels; i++) { + if (dev_priv->fifos[i]) { + uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE); + nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i)); + } + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c 
b/drivers/gpu/drm/nouveau/nv10_graph.c new file mode 100644 index 000000000000..6bf6804bb0ef --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv10_graph.c @@ -0,0 +1,892 @@ +/* + * Copyright 2007 Matthieu CASTET + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drm.h" +#include "nouveau_drv.h" + +#define NV10_FIFO_NUMBER 32 + +struct pipe_state { + uint32_t pipe_0x0000[0x040/4]; + uint32_t pipe_0x0040[0x010/4]; + uint32_t pipe_0x0200[0x0c0/4]; + uint32_t pipe_0x4400[0x080/4]; + uint32_t pipe_0x6400[0x3b0/4]; + uint32_t pipe_0x6800[0x2f0/4]; + uint32_t pipe_0x6c00[0x030/4]; + uint32_t pipe_0x7000[0x130/4]; + uint32_t pipe_0x7400[0x0c0/4]; + uint32_t pipe_0x7800[0x0c0/4]; +}; + +static int nv10_graph_ctx_regs[] = { + NV10_PGRAPH_CTX_SWITCH1, + NV10_PGRAPH_CTX_SWITCH2, + NV10_PGRAPH_CTX_SWITCH3, + NV10_PGRAPH_CTX_SWITCH4, + NV10_PGRAPH_CTX_SWITCH5, + NV10_PGRAPH_CTX_CACHE1, /* 8 values from 0x400160 to 0x40017c */ + NV10_PGRAPH_CTX_CACHE2, /* 8 values from 0x400180 to 0x40019c */ + NV10_PGRAPH_CTX_CACHE3, /* 8 values from 0x4001a0 to 0x4001bc */ + NV10_PGRAPH_CTX_CACHE4, /* 8 values from 0x4001c0 to 0x4001dc */ + NV10_PGRAPH_CTX_CACHE5, /* 8 values from 0x4001e0 to 0x4001fc */ + 0x00400164, + 0x00400184, + 0x004001a4, + 0x004001c4, + 0x004001e4, + 0x00400168, + 0x00400188, + 0x004001a8, + 0x004001c8, + 0x004001e8, + 0x0040016c, + 0x0040018c, + 0x004001ac, + 0x004001cc, + 0x004001ec, + 0x00400170, + 0x00400190, + 0x004001b0, + 0x004001d0, + 0x004001f0, + 0x00400174, + 0x00400194, + 0x004001b4, + 0x004001d4, + 0x004001f4, + 0x00400178, + 0x00400198, + 0x004001b8, + 0x004001d8, + 0x004001f8, + 0x0040017c, + 0x0040019c, + 0x004001bc, + 0x004001dc, + 0x004001fc, + NV10_PGRAPH_CTX_USER, + NV04_PGRAPH_DMA_START_0, + NV04_PGRAPH_DMA_START_1, + NV04_PGRAPH_DMA_LENGTH, + NV04_PGRAPH_DMA_MISC, + NV10_PGRAPH_DMA_PITCH, + NV04_PGRAPH_BOFFSET0, + NV04_PGRAPH_BBASE0, + NV04_PGRAPH_BLIMIT0, + NV04_PGRAPH_BOFFSET1, + NV04_PGRAPH_BBASE1, + NV04_PGRAPH_BLIMIT1, + NV04_PGRAPH_BOFFSET2, + NV04_PGRAPH_BBASE2, + NV04_PGRAPH_BLIMIT2, + NV04_PGRAPH_BOFFSET3, + NV04_PGRAPH_BBASE3, + NV04_PGRAPH_BLIMIT3, + NV04_PGRAPH_BOFFSET4, + NV04_PGRAPH_BBASE4, + NV04_PGRAPH_BLIMIT4, + NV04_PGRAPH_BOFFSET5, + NV04_PGRAPH_BBASE5, + NV04_PGRAPH_BLIMIT5, + NV04_PGRAPH_BPITCH0, + NV04_PGRAPH_BPITCH1, + NV04_PGRAPH_BPITCH2, + NV04_PGRAPH_BPITCH3, + NV04_PGRAPH_BPITCH4, + NV10_PGRAPH_SURFACE, 
+ NV10_PGRAPH_STATE, + NV04_PGRAPH_BSWIZZLE2, + NV04_PGRAPH_BSWIZZLE5, + NV04_PGRAPH_BPIXEL, + NV10_PGRAPH_NOTIFY, + NV04_PGRAPH_PATT_COLOR0, + NV04_PGRAPH_PATT_COLOR1, + NV04_PGRAPH_PATT_COLORRAM, /* 64 values from 0x400900 to 0x4009fc */ + 0x00400904, + 0x00400908, + 0x0040090c, + 0x00400910, + 0x00400914, + 0x00400918, + 0x0040091c, + 0x00400920, + 0x00400924, + 0x00400928, + 0x0040092c, + 0x00400930, + 0x00400934, + 0x00400938, + 0x0040093c, + 0x00400940, + 0x00400944, + 0x00400948, + 0x0040094c, + 0x00400950, + 0x00400954, + 0x00400958, + 0x0040095c, + 0x00400960, + 0x00400964, + 0x00400968, + 0x0040096c, + 0x00400970, + 0x00400974, + 0x00400978, + 0x0040097c, + 0x00400980, + 0x00400984, + 0x00400988, + 0x0040098c, + 0x00400990, + 0x00400994, + 0x00400998, + 0x0040099c, + 0x004009a0, + 0x004009a4, + 0x004009a8, + 0x004009ac, + 0x004009b0, + 0x004009b4, + 0x004009b8, + 0x004009bc, + 0x004009c0, + 0x004009c4, + 0x004009c8, + 0x004009cc, + 0x004009d0, + 0x004009d4, + 0x004009d8, + 0x004009dc, + 0x004009e0, + 0x004009e4, + 0x004009e8, + 0x004009ec, + 0x004009f0, + 0x004009f4, + 0x004009f8, + 0x004009fc, + NV04_PGRAPH_PATTERN, /* 2 values from 0x400808 to 0x40080c */ + 0x0040080c, + NV04_PGRAPH_PATTERN_SHAPE, + NV03_PGRAPH_MONO_COLOR0, + NV04_PGRAPH_ROP3, + NV04_PGRAPH_CHROMA, + NV04_PGRAPH_BETA_AND, + NV04_PGRAPH_BETA_PREMULT, + 0x00400e70, + 0x00400e74, + 0x00400e78, + 0x00400e7c, + 0x00400e80, + 0x00400e84, + 0x00400e88, + 0x00400e8c, + 0x00400ea0, + 0x00400ea4, + 0x00400ea8, + 0x00400e90, + 0x00400e94, + 0x00400e98, + 0x00400e9c, + NV10_PGRAPH_WINDOWCLIP_HORIZONTAL, /* 8 values from 0x400f00-0x400f1c */ + NV10_PGRAPH_WINDOWCLIP_VERTICAL, /* 8 values from 0x400f20-0x400f3c */ + 0x00400f04, + 0x00400f24, + 0x00400f08, + 0x00400f28, + 0x00400f0c, + 0x00400f2c, + 0x00400f10, + 0x00400f30, + 0x00400f14, + 0x00400f34, + 0x00400f18, + 0x00400f38, + 0x00400f1c, + 0x00400f3c, + NV10_PGRAPH_XFMODE0, + NV10_PGRAPH_XFMODE1, + NV10_PGRAPH_GLOBALSTATE0, + NV10_PGRAPH_GLOBALSTATE1, + NV04_PGRAPH_STORED_FMT, + NV04_PGRAPH_SOURCE_COLOR, + NV03_PGRAPH_ABS_X_RAM, /* 32 values from 0x400400 to 0x40047c */ + NV03_PGRAPH_ABS_Y_RAM, /* 32 values from 0x400480 to 0x4004fc */ + 0x00400404, + 0x00400484, + 0x00400408, + 0x00400488, + 0x0040040c, + 0x0040048c, + 0x00400410, + 0x00400490, + 0x00400414, + 0x00400494, + 0x00400418, + 0x00400498, + 0x0040041c, + 0x0040049c, + 0x00400420, + 0x004004a0, + 0x00400424, + 0x004004a4, + 0x00400428, + 0x004004a8, + 0x0040042c, + 0x004004ac, + 0x00400430, + 0x004004b0, + 0x00400434, + 0x004004b4, + 0x00400438, + 0x004004b8, + 0x0040043c, + 0x004004bc, + 0x00400440, + 0x004004c0, + 0x00400444, + 0x004004c4, + 0x00400448, + 0x004004c8, + 0x0040044c, + 0x004004cc, + 0x00400450, + 0x004004d0, + 0x00400454, + 0x004004d4, + 0x00400458, + 0x004004d8, + 0x0040045c, + 0x004004dc, + 0x00400460, + 0x004004e0, + 0x00400464, + 0x004004e4, + 0x00400468, + 0x004004e8, + 0x0040046c, + 0x004004ec, + 0x00400470, + 0x004004f0, + 0x00400474, + 0x004004f4, + 0x00400478, + 0x004004f8, + 0x0040047c, + 0x004004fc, + NV03_PGRAPH_ABS_UCLIP_XMIN, + NV03_PGRAPH_ABS_UCLIP_XMAX, + NV03_PGRAPH_ABS_UCLIP_YMIN, + NV03_PGRAPH_ABS_UCLIP_YMAX, + 0x00400550, + 0x00400558, + 0x00400554, + 0x0040055c, + NV03_PGRAPH_ABS_UCLIPA_XMIN, + NV03_PGRAPH_ABS_UCLIPA_XMAX, + NV03_PGRAPH_ABS_UCLIPA_YMIN, + NV03_PGRAPH_ABS_UCLIPA_YMAX, + NV03_PGRAPH_ABS_ICLIP_XMAX, + NV03_PGRAPH_ABS_ICLIP_YMAX, + NV03_PGRAPH_XY_LOGIC_MISC0, + NV03_PGRAPH_XY_LOGIC_MISC1, + NV03_PGRAPH_XY_LOGIC_MISC2, + NV03_PGRAPH_XY_LOGIC_MISC3, + 
NV03_PGRAPH_CLIPX_0, + NV03_PGRAPH_CLIPX_1, + NV03_PGRAPH_CLIPY_0, + NV03_PGRAPH_CLIPY_1, + NV10_PGRAPH_COMBINER0_IN_ALPHA, + NV10_PGRAPH_COMBINER1_IN_ALPHA, + NV10_PGRAPH_COMBINER0_IN_RGB, + NV10_PGRAPH_COMBINER1_IN_RGB, + NV10_PGRAPH_COMBINER_COLOR0, + NV10_PGRAPH_COMBINER_COLOR1, + NV10_PGRAPH_COMBINER0_OUT_ALPHA, + NV10_PGRAPH_COMBINER1_OUT_ALPHA, + NV10_PGRAPH_COMBINER0_OUT_RGB, + NV10_PGRAPH_COMBINER1_OUT_RGB, + NV10_PGRAPH_COMBINER_FINAL0, + NV10_PGRAPH_COMBINER_FINAL1, + 0x00400e00, + 0x00400e04, + 0x00400e08, + 0x00400e0c, + 0x00400e10, + 0x00400e14, + 0x00400e18, + 0x00400e1c, + 0x00400e20, + 0x00400e24, + 0x00400e28, + 0x00400e2c, + 0x00400e30, + 0x00400e34, + 0x00400e38, + 0x00400e3c, + NV04_PGRAPH_PASSTHRU_0, + NV04_PGRAPH_PASSTHRU_1, + NV04_PGRAPH_PASSTHRU_2, + NV10_PGRAPH_DIMX_TEXTURE, + NV10_PGRAPH_WDIMX_TEXTURE, + NV10_PGRAPH_DVD_COLORFMT, + NV10_PGRAPH_SCALED_FORMAT, + NV04_PGRAPH_MISC24_0, + NV04_PGRAPH_MISC24_1, + NV04_PGRAPH_MISC24_2, + NV03_PGRAPH_X_MISC, + NV03_PGRAPH_Y_MISC, + NV04_PGRAPH_VALID1, + NV04_PGRAPH_VALID2, +}; + +static int nv17_graph_ctx_regs[] = { + NV10_PGRAPH_DEBUG_4, + 0x004006b0, + 0x00400eac, + 0x00400eb0, + 0x00400eb4, + 0x00400eb8, + 0x00400ebc, + 0x00400ec0, + 0x00400ec4, + 0x00400ec8, + 0x00400ecc, + 0x00400ed0, + 0x00400ed4, + 0x00400ed8, + 0x00400edc, + 0x00400ee0, + 0x00400a00, + 0x00400a04, +}; + +struct graph_state { + int nv10[ARRAY_SIZE(nv10_graph_ctx_regs)]; + int nv17[ARRAY_SIZE(nv17_graph_ctx_regs)]; + struct pipe_state pipe_state; +}; + +static void nv10_graph_save_pipe(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + struct graph_state *pgraph_ctx = chan->pgraph_ctx; + struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state; + int i; +#define PIPE_SAVE(addr) \ + do { \ + nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \ + for (i = 0; i < ARRAY_SIZE(fifo_pipe_state->pipe_##addr); i++) \ + fifo_pipe_state->pipe_##addr[i] = nv_rd32(dev, NV10_PGRAPH_PIPE_DATA); \ + } while (0) + + PIPE_SAVE(0x4400); + PIPE_SAVE(0x0200); + PIPE_SAVE(0x6400); + PIPE_SAVE(0x6800); + PIPE_SAVE(0x6c00); + PIPE_SAVE(0x7000); + PIPE_SAVE(0x7400); + PIPE_SAVE(0x7800); + PIPE_SAVE(0x0040); + PIPE_SAVE(0x0000); + +#undef PIPE_SAVE +} + +static void nv10_graph_load_pipe(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + struct graph_state *pgraph_ctx = chan->pgraph_ctx; + struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state; + int i; + uint32_t xfmode0, xfmode1; +#define PIPE_RESTORE(addr) \ + do { \ + nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \ + for (i = 0; i < ARRAY_SIZE(fifo_pipe_state->pipe_##addr); i++) \ + nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, fifo_pipe_state->pipe_##addr[i]); \ + } while (0) + + + nouveau_wait_for_idle(dev); + /* XXX check haiku comments */ + xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0); + xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1); + nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000); + nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000); + nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0); + for (i = 0; i < 4; i++) + nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000); + for (i = 0; i < 4; i++) + nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000); + + nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0); + for (i = 0; i < 3; i++) + nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000); + + nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80); + for (i = 0; i < 3; i++) + nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000); + + nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040); + nv_wr32(dev, 
NV10_PGRAPH_PIPE_DATA, 0x00000008); + + + PIPE_RESTORE(0x0200); + nouveau_wait_for_idle(dev); + + /* restore XFMODE */ + nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0); + nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1); + PIPE_RESTORE(0x6400); + PIPE_RESTORE(0x6800); + PIPE_RESTORE(0x6c00); + PIPE_RESTORE(0x7000); + PIPE_RESTORE(0x7400); + PIPE_RESTORE(0x7800); + PIPE_RESTORE(0x4400); + PIPE_RESTORE(0x0000); + PIPE_RESTORE(0x0040); + nouveau_wait_for_idle(dev); + +#undef PIPE_RESTORE +} + +static void nv10_graph_create_pipe(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + struct graph_state *pgraph_ctx = chan->pgraph_ctx; + struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state; + uint32_t *fifo_pipe_state_addr; + int i; +#define PIPE_INIT(addr) \ + do { \ + fifo_pipe_state_addr = fifo_pipe_state->pipe_##addr; \ + } while (0) +#define PIPE_INIT_END(addr) \ + do { \ + uint32_t *__end_addr = fifo_pipe_state->pipe_##addr + \ + ARRAY_SIZE(fifo_pipe_state->pipe_##addr); \ + if (fifo_pipe_state_addr != __end_addr) \ + NV_ERROR(dev, "incomplete pipe init for 0x%x : %p/%p\n", \ + addr, fifo_pipe_state_addr, __end_addr); \ + } while (0) +#define NV_WRITE_PIPE_INIT(value) *(fifo_pipe_state_addr++) = value + + PIPE_INIT(0x0200); + for (i = 0; i < 48; i++) + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x0200); + + PIPE_INIT(0x6400); + for (i = 0; i < 211; i++) + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x3f800000); + NV_WRITE_PIPE_INIT(0x40000000); + NV_WRITE_PIPE_INIT(0x40000000); + NV_WRITE_PIPE_INIT(0x40000000); + NV_WRITE_PIPE_INIT(0x40000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x3f800000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x3f000000); + NV_WRITE_PIPE_INIT(0x3f000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x3f800000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x3f800000); + NV_WRITE_PIPE_INIT(0x3f800000); + NV_WRITE_PIPE_INIT(0x3f800000); + NV_WRITE_PIPE_INIT(0x3f800000); + PIPE_INIT_END(0x6400); + + PIPE_INIT(0x6800); + for (i = 0; i < 162; i++) + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x3f800000); + for (i = 0; i < 25; i++) + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x6800); + + PIPE_INIT(0x6c00); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0xbf800000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x6c00); + + PIPE_INIT(0x7000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x7149f2ca); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + 
NV_WRITE_PIPE_INIT(0x7149f2ca); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x7149f2ca); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x7149f2ca); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x7149f2ca); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x7149f2ca); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x7149f2ca); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x7149f2ca); + for (i = 0; i < 35; i++) + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x7000); + + PIPE_INIT(0x7400); + for (i = 0; i < 48; i++) + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x7400); + + PIPE_INIT(0x7800); + for (i = 0; i < 48; i++) + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x7800); + + PIPE_INIT(0x4400); + for (i = 0; i < 32; i++) + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x4400); + + PIPE_INIT(0x0000); + for (i = 0; i < 16; i++) + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x0000); + + PIPE_INIT(0x0040); + for (i = 0; i < 4; i++) + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x0040); + +#undef PIPE_INIT +#undef PIPE_INIT_END +#undef NV_WRITE_PIPE_INIT +} + +static int nv10_graph_ctx_regs_find_offset(struct drm_device *dev, int reg) +{ + int i; + for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++) { + if (nv10_graph_ctx_regs[i] == reg) + return i; + } + NV_ERROR(dev, "unknow offset nv10_ctx_regs %d\n", reg); + return -1; +} + +static int nv17_graph_ctx_regs_find_offset(struct drm_device *dev, int reg) +{ + int i; + for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++) { + if (nv17_graph_ctx_regs[i] == reg) + return i; + } + NV_ERROR(dev, "unknow offset nv17_ctx_regs %d\n", reg); + return -1; +} + +int nv10_graph_load_context(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct graph_state *pgraph_ctx = chan->pgraph_ctx; + uint32_t tmp; + int i; + + for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++) + nv_wr32(dev, nv10_graph_ctx_regs[i], pgraph_ctx->nv10[i]); + if (dev_priv->chipset >= 0x17) { + for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++) + nv_wr32(dev, nv17_graph_ctx_regs[i], + pgraph_ctx->nv17[i]); + } + + nv10_graph_load_pipe(chan); + + nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100); + tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER); + nv_wr32(dev, NV10_PGRAPH_CTX_USER, (tmp & 0xffffff) | chan->id << 24); + tmp = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2); + nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, tmp & 0xcfffffff); + return 0; +} + +int +nv10_graph_unload_context(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; + struct nouveau_channel *chan; + struct graph_state *ctx; + uint32_t tmp; + int i; + + chan = pgraph->channel(dev); + if (!chan) + return 0; + ctx = chan->pgraph_ctx; + + for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++) + ctx->nv10[i] = nv_rd32(dev, nv10_graph_ctx_regs[i]); + + if (dev_priv->chipset >= 0x17) { + for (i = 0; i < 
ARRAY_SIZE(nv17_graph_ctx_regs); i++) + ctx->nv17[i] = nv_rd32(dev, nv17_graph_ctx_regs[i]); + } + + nv10_graph_save_pipe(chan); + + nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000); + tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff; + tmp |= (pfifo->channels - 1) << 24; + nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp); + return 0; +} + +void +nv10_graph_context_switch(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + struct nouveau_channel *chan = NULL; + int chid; + + pgraph->fifo_access(dev, false); + nouveau_wait_for_idle(dev); + + /* If previous context is valid, we need to save it */ + nv10_graph_unload_context(dev); + + /* Load context for next channel */ + chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f; + chan = dev_priv->fifos[chid]; + if (chan) + nv10_graph_load_context(chan); + + pgraph->fifo_access(dev, true); +} + +#define NV_WRITE_CTX(reg, val) do { \ + int offset = nv10_graph_ctx_regs_find_offset(dev, reg); \ + if (offset > 0) \ + pgraph_ctx->nv10[offset] = val; \ + } while (0) + +#define NV17_WRITE_CTX(reg, val) do { \ + int offset = nv17_graph_ctx_regs_find_offset(dev, reg); \ + if (offset > 0) \ + pgraph_ctx->nv17[offset] = val; \ + } while (0) + +struct nouveau_channel * +nv10_graph_channel(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int chid = dev_priv->engine.fifo.channels; + + if (nv_rd32(dev, NV10_PGRAPH_CTX_CONTROL) & 0x00010000) + chid = nv_rd32(dev, NV10_PGRAPH_CTX_USER) >> 24; + + if (chid >= dev_priv->engine.fifo.channels) + return NULL; + + return dev_priv->fifos[chid]; +} + +int nv10_graph_create_context(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct graph_state *pgraph_ctx; + + NV_DEBUG(dev, "nv10_graph_context_create %d\n", chan->id); + + chan->pgraph_ctx = pgraph_ctx = kzalloc(sizeof(*pgraph_ctx), + GFP_KERNEL); + if (pgraph_ctx == NULL) + return -ENOMEM; + + + NV_WRITE_CTX(0x00400e88, 0x08000000); + NV_WRITE_CTX(0x00400e9c, 0x4b7fffff); + NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff); + NV_WRITE_CTX(0x00400e10, 0x00001000); + NV_WRITE_CTX(0x00400e14, 0x00001000); + NV_WRITE_CTX(0x00400e30, 0x00080008); + NV_WRITE_CTX(0x00400e34, 0x00080008); + if (dev_priv->chipset >= 0x17) { + /* is it really needed ??? 
*/ + NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4, + nv_rd32(dev, NV10_PGRAPH_DEBUG_4)); + NV17_WRITE_CTX(0x004006b0, nv_rd32(dev, 0x004006b0)); + NV17_WRITE_CTX(0x00400eac, 0x0fff0000); + NV17_WRITE_CTX(0x00400eb0, 0x0fff0000); + NV17_WRITE_CTX(0x00400ec0, 0x00000080); + NV17_WRITE_CTX(0x00400ed0, 0x00000080); + } + NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->id << 24); + + nv10_graph_create_pipe(chan); + return 0; +} + +void nv10_graph_destroy_context(struct nouveau_channel *chan) +{ + struct graph_state *pgraph_ctx = chan->pgraph_ctx; + + kfree(pgraph_ctx); + chan->pgraph_ctx = NULL; +} + +int nv10_graph_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t tmp; + int i; + + nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & + ~NV_PMC_ENABLE_PGRAPH); + nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | + NV_PMC_ENABLE_PGRAPH); + + nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); + nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); + + nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); + nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000); + nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700); + /* nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */ + nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x25f92ad9); + nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0x55DE0830 | + (1<<29) | + (1<<31)); + if (dev_priv->chipset >= 0x17) { + nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x1f000000); + nv_wr32(dev, 0x004006b0, 0x40000020); + } else + nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000); + + /* copy tile info from PFB */ + for (i = 0; i < NV10_PFB_TILE__SIZE; i++) { + nv_wr32(dev, NV10_PGRAPH_TILE(i), + nv_rd32(dev, NV10_PFB_TILE(i))); + nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), + nv_rd32(dev, NV10_PFB_TLIMIT(i))); + nv_wr32(dev, NV10_PGRAPH_TSIZE(i), + nv_rd32(dev, NV10_PFB_TSIZE(i))); + nv_wr32(dev, NV10_PGRAPH_TSTATUS(i), + nv_rd32(dev, NV10_PFB_TSTATUS(i))); + } + + nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH1, 0x00000000); + nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH2, 0x00000000); + nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH3, 0x00000000); + nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH4, 0x00000000); + nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF); + + tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff; + tmp |= (dev_priv->engine.fifo.channels - 1) << 24; + nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp); + nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100); + nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, 0x08000000); + + return 0; +} + +void nv10_graph_takedown(struct drm_device *dev) +{ +} + +struct nouveau_pgraph_object_class nv10_graph_grclass[] = { + { 0x0030, false, NULL }, /* null */ + { 0x0039, false, NULL }, /* m2mf */ + { 0x004a, false, NULL }, /* gdirect */ + { 0x005f, false, NULL }, /* imageblit */ + { 0x009f, false, NULL }, /* imageblit (nv12) */ + { 0x008a, false, NULL }, /* ifc */ + { 0x0089, false, NULL }, /* sifm */ + { 0x0062, false, NULL }, /* surf2d */ + { 0x0043, false, NULL }, /* rop */ + { 0x0012, false, NULL }, /* beta1 */ + { 0x0072, false, NULL }, /* beta4 */ + { 0x0019, false, NULL }, /* cliprect */ + { 0x0044, false, NULL }, /* pattern */ + { 0x0052, false, NULL }, /* swzsurf */ + { 0x0093, false, NULL }, /* surf3d */ + { 0x0094, false, NULL }, /* tex_tri */ + { 0x0095, false, NULL }, /* multitex_tri */ + { 0x0056, false, NULL }, /* celcius (nv10) */ + { 0x0096, false, NULL }, /* celcius (nv11) */ + { 0x0099, false, NULL }, /* celcius (nv17) */ + {} +}; diff --git a/drivers/gpu/drm/nouveau/nv17_gpio.c b/drivers/gpu/drm/nouveau/nv17_gpio.c new file mode 100644 index 000000000000..2e58c331e9b7 --- 
/dev/null +++ b/drivers/gpu/drm/nouveau/nv17_gpio.c @@ -0,0 +1,92 @@ +/* + * Copyright (C) 2009 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "drmP.h" +#include "nouveau_drv.h" +#include "nouveau_hw.h" + +static bool +get_gpio_location(struct dcb_gpio_entry *ent, uint32_t *reg, uint32_t *shift, + uint32_t *mask) +{ + if (ent->line < 2) { + *reg = NV_PCRTC_GPIO; + *shift = ent->line * 16; + *mask = 0x11; + + } else if (ent->line < 10) { + *reg = NV_PCRTC_GPIO_EXT; + *shift = (ent->line - 2) * 4; + *mask = 0x3; + + } else if (ent->line < 14) { + *reg = NV_PCRTC_850; + *shift = (ent->line - 10) * 4; + *mask = 0x3; + + } else { + return false; + } + + return true; +} + +int +nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag) +{ + struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag); + uint32_t reg, shift, mask, value; + + if (!ent) + return -ENODEV; + + if (!get_gpio_location(ent, ®, &shift, &mask)) + return -ENODEV; + + value = NVReadCRTC(dev, 0, reg) >> shift; + + return (ent->invert ? 1 : 0) ^ (value & 1); +} + +int +nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state) +{ + struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag); + uint32_t reg, shift, mask, value; + + if (!ent) + return -ENODEV; + + if (!get_gpio_location(ent, ®, &shift, &mask)) + return -ENODEV; + + value = ((ent->invert ? 1 : 0) ^ (state ? 1 : 0)) << shift; + mask = ~(mask << shift); + + NVWriteCRTC(dev, 0, reg, value | (NVReadCRTC(dev, 0, reg) & mask)); + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c new file mode 100644 index 000000000000..46cfd9c60478 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv17_tv.c @@ -0,0 +1,681 @@ +/* + * Copyright (C) 2009 Francisco Jerez. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "drmP.h" +#include "drm_crtc_helper.h" +#include "nouveau_drv.h" +#include "nouveau_encoder.h" +#include "nouveau_connector.h" +#include "nouveau_crtc.h" +#include "nouveau_hw.h" +#include "nv17_tv.h" + +enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder, + struct drm_connector *connector, + uint32_t pin_mask) +{ + struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); + + tv_enc->pin_mask = pin_mask >> 28 & 0xe; + + switch (tv_enc->pin_mask) { + case 0x2: + case 0x4: + tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Composite; + break; + case 0xc: + tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SVIDEO; + break; + case 0xe: + if (nouveau_encoder(encoder)->dcb->tvconf.has_component_output) + tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Component; + else + tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SCART; + break; + default: + tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Unknown; + break; + } + + drm_connector_property_set_value(connector, + encoder->dev->mode_config.tv_subconnector_property, + tv_enc->subconnector); + + return tv_enc->subconnector ? 
connector_status_connected : + connector_status_disconnected; +} + +static const struct { + int hdisplay; + int vdisplay; +} modes[] = { + { 640, 400 }, + { 640, 480 }, + { 720, 480 }, + { 720, 576 }, + { 800, 600 }, + { 1024, 768 }, + { 1280, 720 }, + { 1280, 1024 }, + { 1920, 1080 } +}; + +static int nv17_tv_get_modes(struct drm_encoder *encoder, + struct drm_connector *connector) +{ + struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); + struct drm_display_mode *mode; + struct drm_display_mode *output_mode; + int n = 0; + int i; + + if (tv_norm->kind != CTV_ENC_MODE) { + struct drm_display_mode *tv_mode; + + for (tv_mode = nv17_tv_modes; tv_mode->hdisplay; tv_mode++) { + mode = drm_mode_duplicate(encoder->dev, tv_mode); + + mode->clock = tv_norm->tv_enc_mode.vrefresh * + mode->htotal / 1000 * + mode->vtotal / 1000; + + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + mode->clock *= 2; + + if (mode->hdisplay == tv_norm->tv_enc_mode.hdisplay && + mode->vdisplay == tv_norm->tv_enc_mode.vdisplay) + mode->type |= DRM_MODE_TYPE_PREFERRED; + + drm_mode_probed_add(connector, mode); + n++; + } + return n; + } + + /* tv_norm->kind == CTV_ENC_MODE */ + output_mode = &tv_norm->ctv_enc_mode.mode; + for (i = 0; i < ARRAY_SIZE(modes); i++) { + if (modes[i].hdisplay > output_mode->hdisplay || + modes[i].vdisplay > output_mode->vdisplay) + continue; + + if (modes[i].hdisplay == output_mode->hdisplay && + modes[i].vdisplay == output_mode->vdisplay) { + mode = drm_mode_duplicate(encoder->dev, output_mode); + mode->type |= DRM_MODE_TYPE_PREFERRED; + } else { + mode = drm_cvt_mode(encoder->dev, modes[i].hdisplay, + modes[i].vdisplay, 60, false, + output_mode->flags & DRM_MODE_FLAG_INTERLACE, + false); + } + + /* CVT modes are sometimes unsuitable... */ + if (output_mode->hdisplay <= 720 + || output_mode->hdisplay >= 1920) { + mode->htotal = output_mode->htotal; + mode->hsync_start = (mode->hdisplay + (mode->htotal + - mode->hdisplay) * 9 / 10) & ~7; + mode->hsync_end = mode->hsync_start + 8; + } + if (output_mode->vdisplay >= 1024) { + mode->vtotal = output_mode->vtotal; + mode->vsync_start = output_mode->vsync_start; + mode->vsync_end = output_mode->vsync_end; + } + + mode->type |= DRM_MODE_TYPE_DRIVER; + drm_mode_probed_add(connector, mode); + n++; + } + return n; +} + +static int nv17_tv_mode_valid(struct drm_encoder *encoder, + struct drm_display_mode *mode) +{ + struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); + + if (tv_norm->kind == CTV_ENC_MODE) { + struct drm_display_mode *output_mode = + &tv_norm->ctv_enc_mode.mode; + + if (mode->clock > 400000) + return MODE_CLOCK_HIGH; + + if (mode->hdisplay > output_mode->hdisplay || + mode->vdisplay > output_mode->vdisplay) + return MODE_BAD; + + if ((mode->flags & DRM_MODE_FLAG_INTERLACE) != + (output_mode->flags & DRM_MODE_FLAG_INTERLACE)) + return MODE_NO_INTERLACE; + + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + return MODE_NO_DBLESCAN; + + } else { + const int vsync_tolerance = 600; + + if (mode->clock > 70000) + return MODE_CLOCK_HIGH; + + if (abs(drm_mode_vrefresh(mode) * 1000 - + tv_norm->tv_enc_mode.vrefresh) > vsync_tolerance) + return MODE_VSYNC; + + /* The encoder takes care of the actual interlacing */ + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + return MODE_NO_INTERLACE; + } + + return MODE_OK; +} + +static bool nv17_tv_mode_fixup(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); + + if (tv_norm->kind == 
CTV_ENC_MODE) + adjusted_mode->clock = tv_norm->ctv_enc_mode.mode.clock; + else + adjusted_mode->clock = 90000; + + return true; +} + +static void nv17_tv_dpms(struct drm_encoder *encoder, int mode) +{ + struct drm_device *dev = encoder->dev; + struct nv17_tv_state *regs = &to_tv_enc(encoder)->state; + struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); + + if (nouveau_encoder(encoder)->last_dpms == mode) + return; + nouveau_encoder(encoder)->last_dpms = mode; + + NV_TRACE(dev, "Setting dpms mode %d on TV encoder (output %d)\n", + mode, nouveau_encoder(encoder)->dcb->index); + + regs->ptv_200 &= ~1; + + if (tv_norm->kind == CTV_ENC_MODE) { + nv04_dfp_update_fp_control(encoder, mode); + + } else { + nv04_dfp_update_fp_control(encoder, DRM_MODE_DPMS_OFF); + + if (mode == DRM_MODE_DPMS_ON) + regs->ptv_200 |= 1; + } + + nv_load_ptv(dev, regs, 200); + + nv17_gpio_set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON); + nv17_gpio_set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON); + + nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON); +} + +static void nv17_tv_prepare(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_encoder_helper_funcs *helper = encoder->helper_private; + struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); + int head = nouveau_crtc(encoder->crtc)->index; + uint8_t *cr_lcd = &dev_priv->mode_reg.crtc_reg[head].CRTC[ + NV_CIO_CRE_LCD__INDEX]; + uint32_t dacclk_off = NV_PRAMDAC_DACCLK + + nv04_dac_output_offset(encoder); + uint32_t dacclk; + + helper->dpms(encoder, DRM_MODE_DPMS_OFF); + + nv04_dfp_disable(dev, head); + + /* Unbind any FP encoders from this head if we need the FP + * stuff enabled. */ + if (tv_norm->kind == CTV_ENC_MODE) { + struct drm_encoder *enc; + + list_for_each_entry(enc, &dev->mode_config.encoder_list, head) { + struct dcb_entry *dcb = nouveau_encoder(enc)->dcb; + + if ((dcb->type == OUTPUT_TMDS || + dcb->type == OUTPUT_LVDS) && + !enc->crtc && + nv04_dfp_get_bound_head(dev, dcb) == head) { + nv04_dfp_bind_head(dev, dcb, head ^ 1, + dev_priv->VBIOS.fp.dual_link); + } + } + + } + + /* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f) + * at LCD__INDEX which we don't alter + */ + if (!(*cr_lcd & 0x44)) { + if (tv_norm->kind == CTV_ENC_MODE) + *cr_lcd = 0x1 | (head ? 0x0 : 0x8); + else + *cr_lcd = 0; + } + + /* Set the DACCLK register */ + dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1; + + if (dev_priv->card_type == NV_40) + dacclk |= 0x1a << 16; + + if (tv_norm->kind == CTV_ENC_MODE) { + dacclk |= 0x20; + + if (head) + dacclk |= 0x100; + else + dacclk &= ~0x100; + + } else { + dacclk |= 0x10; + + } + + NVWriteRAMDAC(dev, 0, dacclk_off, dacclk); +} + +static void nv17_tv_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *drm_mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_device *dev = encoder->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + int head = nouveau_crtc(encoder->crtc)->index; + struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head]; + struct nv17_tv_state *tv_regs = &to_tv_enc(encoder)->state; + struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); + int i; + + regs->CRTC[NV_CIO_CRE_53] = 0x40; /* FP_HTIMING */ + regs->CRTC[NV_CIO_CRE_54] = 0; /* FP_VTIMING */ + regs->ramdac_630 = 0x2; /* turn off green mode (tv test pattern?) 
*/ + regs->tv_setup = 1; + regs->ramdac_8c0 = 0x0; + + if (tv_norm->kind == TV_ENC_MODE) { + tv_regs->ptv_200 = 0x13111100; + if (head) + tv_regs->ptv_200 |= 0x10; + + tv_regs->ptv_20c = 0x808010; + tv_regs->ptv_304 = 0x2d00000; + tv_regs->ptv_600 = 0x0; + tv_regs->ptv_60c = 0x0; + tv_regs->ptv_610 = 0x1e00000; + + if (tv_norm->tv_enc_mode.vdisplay == 576) { + tv_regs->ptv_508 = 0x1200000; + tv_regs->ptv_614 = 0x33; + + } else if (tv_norm->tv_enc_mode.vdisplay == 480) { + tv_regs->ptv_508 = 0xf00000; + tv_regs->ptv_614 = 0x13; + } + + if (dev_priv->card_type >= NV_30) { + tv_regs->ptv_500 = 0xe8e0; + tv_regs->ptv_504 = 0x1710; + tv_regs->ptv_604 = 0x0; + tv_regs->ptv_608 = 0x0; + } else { + if (tv_norm->tv_enc_mode.vdisplay == 576) { + tv_regs->ptv_604 = 0x20; + tv_regs->ptv_608 = 0x10; + tv_regs->ptv_500 = 0x19710; + tv_regs->ptv_504 = 0x68f0; + + } else if (tv_norm->tv_enc_mode.vdisplay == 480) { + tv_regs->ptv_604 = 0x10; + tv_regs->ptv_608 = 0x20; + tv_regs->ptv_500 = 0x4b90; + tv_regs->ptv_504 = 0x1b480; + } + } + + for (i = 0; i < 0x40; i++) + tv_regs->tv_enc[i] = tv_norm->tv_enc_mode.tv_enc[i]; + + } else { + struct drm_display_mode *output_mode = + &tv_norm->ctv_enc_mode.mode; + + /* The registers in PRAMDAC+0xc00 control some timings and CSC + * parameters for the CTV encoder (It's only used for "HD" TV + * modes, I don't think I have enough working to guess what + * they exactly mean...), it's probably connected at the + * output of the FP encoder, but it also needs the analog + * encoder in its OR enabled and routed to the head it's + * using. It's enabled with the DACCLK register, bits [5:4]. + */ + for (i = 0; i < 38; i++) + regs->ctv_regs[i] = tv_norm->ctv_enc_mode.ctv_regs[i]; + + regs->fp_horiz_regs[FP_DISPLAY_END] = output_mode->hdisplay - 1; + regs->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1; + regs->fp_horiz_regs[FP_SYNC_START] = + output_mode->hsync_start - 1; + regs->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1; + regs->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay + + max((output_mode->hdisplay-600)/40 - 1, 1); + + regs->fp_vert_regs[FP_DISPLAY_END] = output_mode->vdisplay - 1; + regs->fp_vert_regs[FP_TOTAL] = output_mode->vtotal - 1; + regs->fp_vert_regs[FP_SYNC_START] = + output_mode->vsync_start - 1; + regs->fp_vert_regs[FP_SYNC_END] = output_mode->vsync_end - 1; + regs->fp_vert_regs[FP_CRTC] = output_mode->vdisplay - 1; + + regs->fp_control = NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS | + NV_PRAMDAC_FP_TG_CONTROL_READ_PROG | + NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12; + + if (output_mode->flags & DRM_MODE_FLAG_PVSYNC) + regs->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS; + if (output_mode->flags & DRM_MODE_FLAG_PHSYNC) + regs->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS; + + regs->fp_debug_0 = NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND | + NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND | + NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR | + NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR | + NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED | + NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE | + NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE; + + regs->fp_debug_2 = 0; + + regs->fp_margin_color = 0x801080; + + } +} + +static void nv17_tv_commit(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_encoder_helper_funcs *helper = encoder->helper_private; + + if (get_tv_norm(encoder)->kind == TV_ENC_MODE) { + 
nv17_tv_update_rescaler(encoder); + nv17_tv_update_properties(encoder); + } else { + nv17_ctv_update_rescaler(encoder); + } + + nv17_tv_state_load(dev, &to_tv_enc(encoder)->state); + + /* This could use refinement for flatpanels, but it should work */ + if (dev_priv->chipset < 0x44) + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + + nv04_dac_output_offset(encoder), + 0xf0000000); + else + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + + nv04_dac_output_offset(encoder), + 0x00100000); + + helper->dpms(encoder, DRM_MODE_DPMS_ON); + + NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n", + drm_get_connector_name( + &nouveau_encoder_connector_get(nv_encoder)->base), + nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); +} + +static void nv17_tv_save(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); + + nouveau_encoder(encoder)->restore.output = + NVReadRAMDAC(dev, 0, + NV_PRAMDAC_DACCLK + + nv04_dac_output_offset(encoder)); + + nv17_tv_state_save(dev, &tv_enc->saved_state); + + tv_enc->state.ptv_200 = tv_enc->saved_state.ptv_200; +} + +static void nv17_tv_restore(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + + nv04_dac_output_offset(encoder), + nouveau_encoder(encoder)->restore.output); + + nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state); +} + +static int nv17_tv_create_resources(struct drm_encoder *encoder, + struct drm_connector *connector) +{ + struct drm_device *dev = encoder->dev; + struct drm_mode_config *conf = &dev->mode_config; + struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); + struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; + int num_tv_norms = dcb->tvconf.has_component_output ? 
NUM_TV_NORMS : + NUM_LD_TV_NORMS; + int i; + + if (nouveau_tv_norm) { + for (i = 0; i < num_tv_norms; i++) { + if (!strcmp(nv17_tv_norm_names[i], nouveau_tv_norm)) { + tv_enc->tv_norm = i; + break; + } + } + + if (i == num_tv_norms) + NV_WARN(dev, "Invalid TV norm setting \"%s\"\n", + nouveau_tv_norm); + } + + drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names); + + drm_connector_attach_property(connector, + conf->tv_select_subconnector_property, + tv_enc->select_subconnector); + drm_connector_attach_property(connector, + conf->tv_subconnector_property, + tv_enc->subconnector); + drm_connector_attach_property(connector, + conf->tv_mode_property, + tv_enc->tv_norm); + drm_connector_attach_property(connector, + conf->tv_flicker_reduction_property, + tv_enc->flicker); + drm_connector_attach_property(connector, + conf->tv_saturation_property, + tv_enc->saturation); + drm_connector_attach_property(connector, + conf->tv_hue_property, + tv_enc->hue); + drm_connector_attach_property(connector, + conf->tv_overscan_property, + tv_enc->overscan); + + return 0; +} + +static int nv17_tv_set_property(struct drm_encoder *encoder, + struct drm_connector *connector, + struct drm_property *property, + uint64_t val) +{ + struct drm_mode_config *conf = &encoder->dev->mode_config; + struct drm_crtc *crtc = encoder->crtc; + struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); + struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); + bool modes_changed = false; + + if (property == conf->tv_overscan_property) { + tv_enc->overscan = val; + if (encoder->crtc) { + if (tv_norm->kind == CTV_ENC_MODE) + nv17_ctv_update_rescaler(encoder); + else + nv17_tv_update_rescaler(encoder); + } + + } else if (property == conf->tv_saturation_property) { + if (tv_norm->kind != TV_ENC_MODE) + return -EINVAL; + + tv_enc->saturation = val; + nv17_tv_update_properties(encoder); + + } else if (property == conf->tv_hue_property) { + if (tv_norm->kind != TV_ENC_MODE) + return -EINVAL; + + tv_enc->hue = val; + nv17_tv_update_properties(encoder); + + } else if (property == conf->tv_flicker_reduction_property) { + if (tv_norm->kind != TV_ENC_MODE) + return -EINVAL; + + tv_enc->flicker = val; + if (encoder->crtc) + nv17_tv_update_rescaler(encoder); + + } else if (property == conf->tv_mode_property) { + if (connector->dpms != DRM_MODE_DPMS_OFF) + return -EINVAL; + + tv_enc->tv_norm = val; + + modes_changed = true; + + } else if (property == conf->tv_select_subconnector_property) { + if (tv_norm->kind != TV_ENC_MODE) + return -EINVAL; + + tv_enc->select_subconnector = val; + nv17_tv_update_properties(encoder); + + } else { + return -EINVAL; + } + + if (modes_changed) { + drm_helper_probe_single_connector_modes(connector, 0, 0); + + /* Disable the crtc to ensure a full modeset is + * performed whenever it's turned on again. 
*/ + if (crtc) { + struct drm_mode_set modeset = { + .crtc = crtc, + }; + + crtc->funcs->set_config(&modeset); + } + } + + return 0; +} + +static void nv17_tv_destroy(struct drm_encoder *encoder) +{ + struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); + + NV_DEBUG(encoder->dev, "\n"); + + drm_encoder_cleanup(encoder); + kfree(tv_enc); +} + +static struct drm_encoder_helper_funcs nv17_tv_helper_funcs = { + .dpms = nv17_tv_dpms, + .save = nv17_tv_save, + .restore = nv17_tv_restore, + .mode_fixup = nv17_tv_mode_fixup, + .prepare = nv17_tv_prepare, + .commit = nv17_tv_commit, + .mode_set = nv17_tv_mode_set, + .detect = nv17_dac_detect, +}; + +static struct drm_encoder_slave_funcs nv17_tv_slave_funcs = { + .get_modes = nv17_tv_get_modes, + .mode_valid = nv17_tv_mode_valid, + .create_resources = nv17_tv_create_resources, + .set_property = nv17_tv_set_property, +}; + +static struct drm_encoder_funcs nv17_tv_funcs = { + .destroy = nv17_tv_destroy, +}; + +int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry) +{ + struct drm_encoder *encoder; + struct nv17_tv_encoder *tv_enc = NULL; + + tv_enc = kzalloc(sizeof(*tv_enc), GFP_KERNEL); + if (!tv_enc) + return -ENOMEM; + + tv_enc->overscan = 50; + tv_enc->flicker = 50; + tv_enc->saturation = 50; + tv_enc->hue = 0; + tv_enc->tv_norm = TV_NORM_PAL; + tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Unknown; + tv_enc->select_subconnector = DRM_MODE_SUBCONNECTOR_Automatic; + tv_enc->pin_mask = 0; + + encoder = to_drm_encoder(&tv_enc->base); + + tv_enc->base.dcb = entry; + tv_enc->base.or = ffs(entry->or) - 1; + + drm_encoder_init(dev, encoder, &nv17_tv_funcs, DRM_MODE_ENCODER_TVDAC); + drm_encoder_helper_add(encoder, &nv17_tv_helper_funcs); + to_encoder_slave(encoder)->slave_funcs = &nv17_tv_slave_funcs; + + encoder->possible_crtcs = entry->heads; + encoder->possible_clones = 0; + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nv17_tv.h b/drivers/gpu/drm/nouveau/nv17_tv.h new file mode 100644 index 000000000000..c00977cedabd --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv17_tv.h @@ -0,0 +1,156 @@ +/* + * Copyright (C) 2009 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __NV17_TV_H__ +#define __NV17_TV_H__ + +struct nv17_tv_state { + uint8_t tv_enc[0x40]; + + uint32_t hfilter[4][7]; + uint32_t hfilter2[4][7]; + uint32_t vfilter[4][7]; + + uint32_t ptv_200; + uint32_t ptv_204; + uint32_t ptv_208; + uint32_t ptv_20c; + uint32_t ptv_304; + uint32_t ptv_500; + uint32_t ptv_504; + uint32_t ptv_508; + uint32_t ptv_600; + uint32_t ptv_604; + uint32_t ptv_608; + uint32_t ptv_60c; + uint32_t ptv_610; + uint32_t ptv_614; +}; + +enum nv17_tv_norm{ + TV_NORM_PAL, + TV_NORM_PAL_M, + TV_NORM_PAL_N, + TV_NORM_PAL_NC, + TV_NORM_NTSC_M, + TV_NORM_NTSC_J, + NUM_LD_TV_NORMS, + TV_NORM_HD480I = NUM_LD_TV_NORMS, + TV_NORM_HD480P, + TV_NORM_HD576I, + TV_NORM_HD576P, + TV_NORM_HD720P, + TV_NORM_HD1080I, + NUM_TV_NORMS +}; + +struct nv17_tv_encoder { + struct nouveau_encoder base; + + struct nv17_tv_state state; + struct nv17_tv_state saved_state; + + int overscan; + int flicker; + int saturation; + int hue; + enum nv17_tv_norm tv_norm; + int subconnector; + int select_subconnector; + uint32_t pin_mask; +}; +#define to_tv_enc(x) container_of(nouveau_encoder(x), \ + struct nv17_tv_encoder, base) + +extern char *nv17_tv_norm_names[NUM_TV_NORMS]; + +extern struct nv17_tv_norm_params { + enum { + TV_ENC_MODE, + CTV_ENC_MODE, + } kind; + + union { + struct { + int hdisplay; + int vdisplay; + int vrefresh; /* mHz */ + + uint8_t tv_enc[0x40]; + } tv_enc_mode; + + struct { + struct drm_display_mode mode; + + uint32_t ctv_regs[38]; + } ctv_enc_mode; + }; + +} nv17_tv_norms[NUM_TV_NORMS]; +#define get_tv_norm(enc) (&nv17_tv_norms[to_tv_enc(enc)->tv_norm]) + +extern struct drm_display_mode nv17_tv_modes[]; + +static inline int interpolate(int y0, int y1, int y2, int x) +{ + return y1 + (x < 50 ? y1 - y0 : y2 - y1) * (x - 50) / 50; +} + +void nv17_tv_state_save(struct drm_device *dev, struct nv17_tv_state *state); +void nv17_tv_state_load(struct drm_device *dev, struct nv17_tv_state *state); +void nv17_tv_update_properties(struct drm_encoder *encoder); +void nv17_tv_update_rescaler(struct drm_encoder *encoder); +void nv17_ctv_update_rescaler(struct drm_encoder *encoder); + +/* TV hardware access functions */ + +static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg, uint32_t val) +{ + nv_wr32(dev, reg, val); +} + +static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg) +{ + return nv_rd32(dev, reg); +} + +static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg, uint8_t val) +{ + nv_write_ptv(dev, NV_PTV_TV_INDEX, reg); + nv_write_ptv(dev, NV_PTV_TV_DATA, val); +} + +static inline uint8_t nv_read_tv_enc(struct drm_device *dev, uint8_t reg) +{ + nv_write_ptv(dev, NV_PTV_TV_INDEX, reg); + return nv_read_ptv(dev, NV_PTV_TV_DATA); +} + +#define nv_load_ptv(dev, state, reg) nv_write_ptv(dev, NV_PTV_OFFSET + 0x##reg, state->ptv_##reg) +#define nv_save_ptv(dev, state, reg) state->ptv_##reg = nv_read_ptv(dev, NV_PTV_OFFSET + 0x##reg) +#define nv_load_tv_enc(dev, state, reg) nv_write_tv_enc(dev, 0x##reg, state->tv_enc[0x##reg]) + +#endif diff --git a/drivers/gpu/drm/nouveau/nv17_tv_modes.c b/drivers/gpu/drm/nouveau/nv17_tv_modes.c new file mode 100644 index 000000000000..d64683d97e0d --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv17_tv_modes.c @@ -0,0 +1,583 @@ +/* + * Copyright (C) 2009 Francisco Jerez. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "drmP.h" +#include "drm_crtc_helper.h" +#include "nouveau_drv.h" +#include "nouveau_encoder.h" +#include "nouveau_crtc.h" +#include "nouveau_hw.h" +#include "nv17_tv.h" + +char *nv17_tv_norm_names[NUM_TV_NORMS] = { + [TV_NORM_PAL] = "PAL", + [TV_NORM_PAL_M] = "PAL-M", + [TV_NORM_PAL_N] = "PAL-N", + [TV_NORM_PAL_NC] = "PAL-Nc", + [TV_NORM_NTSC_M] = "NTSC-M", + [TV_NORM_NTSC_J] = "NTSC-J", + [TV_NORM_HD480I] = "hd480i", + [TV_NORM_HD480P] = "hd480p", + [TV_NORM_HD576I] = "hd576i", + [TV_NORM_HD576P] = "hd576p", + [TV_NORM_HD720P] = "hd720p", + [TV_NORM_HD1080I] = "hd1080i" +}; + +/* TV standard specific parameters */ + +struct nv17_tv_norm_params nv17_tv_norms[NUM_TV_NORMS] = { + [TV_NORM_PAL] = { TV_ENC_MODE, { + .tv_enc_mode = { 720, 576, 50000, { + 0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18, + 0x7e, 0x40, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3, + 0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c, + 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3, + 0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5, + 0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0, + 0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b, + 0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0 + } } } }, + + [TV_NORM_PAL_M] = { TV_ENC_MODE, { + .tv_enc_mode = { 720, 480, 59940, { + 0x21, 0xe6, 0xef, 0xe3, 0x0, 0x0, 0xb, 0x18, + 0x7e, 0x44, 0x76, 0x32, 0x25, 0x0, 0x3c, 0x0, + 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83, + 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1, + 0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5, + 0x0, 0x18, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0, + 0x0, 0xb4, 0x0, 0x15, 0x40, 0x10, 0x0, 0x9c, + 0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0 + } } } }, + + [TV_NORM_PAL_N] = { TV_ENC_MODE, { + .tv_enc_mode = { 720, 576, 50000, { + 0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18, + 0x7e, 0x40, 0x8a, 0x32, 0x25, 0x0, 0x3c, 0x0, + 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c, + 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1, + 0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5, + 0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0, + 0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b, + 0xbd, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0 + } } } }, + + [TV_NORM_PAL_NC] = { TV_ENC_MODE, { + .tv_enc_mode = { 720, 576, 50000, { + 0x21, 0xf6, 0x94, 0x46, 0x0, 0x0, 0xb, 0x18, + 0x7e, 0x44, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3, + 0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c, + 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3, + 0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5, + 0x0, 0x1a, 0xff, 0x3, 
0x18, 0xf, 0x78, 0x0, + 0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b, + 0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0 + } } } }, + + [TV_NORM_NTSC_M] = { TV_ENC_MODE, { + .tv_enc_mode = { 720, 480, 59940, { + 0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18, + 0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x3c, 0x0, + 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83, + 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1, + 0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5, + 0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0, + 0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0x9c, + 0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0 + } } } }, + + [TV_NORM_NTSC_J] = { TV_ENC_MODE, { + .tv_enc_mode = { 720, 480, 59940, { + 0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18, + 0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x32, 0x0, + 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83, + 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1, + 0xcf, 0x4, 0xcf, 0x1, 0x2, 0x0, 0xa, 0x5, + 0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0, + 0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0xa4, + 0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0 + } } } }, + + [TV_NORM_HD480I] = { TV_ENC_MODE, { + .tv_enc_mode = { 720, 480, 59940, { + 0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18, + 0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x32, 0x0, + 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83, + 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1, + 0xcf, 0x4, 0xcf, 0x1, 0x2, 0x0, 0xa, 0x5, + 0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0, + 0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0xa4, + 0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0 + } } } }, + + [TV_NORM_HD576I] = { TV_ENC_MODE, { + .tv_enc_mode = { 720, 576, 50000, { + 0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18, + 0x7e, 0x40, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3, + 0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c, + 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3, + 0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5, + 0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0, + 0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b, + 0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0 + } } } }, + + + [TV_NORM_HD480P] = { CTV_ENC_MODE, { + .ctv_enc_mode = { + .mode = { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, + 720, 735, 743, 858, 0, 480, 490, 494, 525, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + .ctv_regs = { 0x3540000, 0x0, 0x0, 0x314, + 0x354003a, 0x40000, 0x6f0344, 0x18100000, + 0x10160004, 0x10060005, 0x1006000c, 0x10060020, + 0x10060021, 0x140e0022, 0x10060202, 0x1802020a, + 0x1810020b, 0x10000fff, 0x10000fff, 0x10000fff, + 0x10000fff, 0x10000fff, 0x10000fff, 0x70, + 0x3ff0000, 0x57, 0x2e001e, 0x258012c, + 0xa0aa04ec, 0x30, 0x80960019, 0x12c0300, + 0x2019, 0x600, 0x32060019, 0x0, 0x0, 0x400 + } } } }, + + [TV_NORM_HD576P] = { CTV_ENC_MODE, { + .ctv_enc_mode = { + .mode = { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, + 720, 730, 738, 864, 0, 576, 581, 585, 625, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + .ctv_regs = { 0x3540000, 0x0, 0x0, 0x314, + 0x354003a, 0x40000, 0x6f0344, 0x18100000, + 0x10060001, 0x10060009, 0x10060026, 0x10060027, + 0x140e0028, 0x10060268, 0x1810026d, 0x10000fff, + 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff, + 0x10000fff, 0x10000fff, 0x10000fff, 0x69, + 0x3ff0000, 0x57, 0x2e001e, 0x258012c, + 0xa0aa04ec, 0x30, 0x80960019, 0x12c0300, + 0x2019, 0x600, 0x32060019, 0x0, 0x0, 0x400 + } } } }, + + [TV_NORM_HD720P] = { CTV_ENC_MODE, { + .ctv_enc_mode = { + .mode = { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, + 1280, 1349, 1357, 1650, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + .ctv_regs = { 0x1260394, 0x0, 0x0, 0x622, + 0x66b0021, 0x6004a, 0x1210626, 0x8170000, + 
0x70004, 0x70016, 0x70017, 0x40f0018, + 0x702e8, 0x81702ed, 0xfff, 0xfff, + 0xfff, 0xfff, 0xfff, 0xfff, + 0xfff, 0xfff, 0xfff, 0x0, + 0x2e40001, 0x58, 0x2e001e, 0x258012c, + 0xa0aa04ec, 0x30, 0x810c0039, 0x12c0300, + 0xc0002039, 0x600, 0x32060039, 0x0, 0x0, 0x0 + } } } }, + + [TV_NORM_HD1080I] = { CTV_ENC_MODE, { + .ctv_enc_mode = { + .mode = { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, + 1920, 1961, 2049, 2200, 0, 1080, 1084, 1088, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC + | DRM_MODE_FLAG_INTERLACE) }, + .ctv_regs = { 0xac0420, 0x44c0478, 0x4a4, 0x4fc0868, + 0x8940028, 0x60054, 0xe80870, 0xbf70000, + 0xbc70004, 0x70005, 0x70012, 0x70013, + 0x40f0014, 0x70230, 0xbf70232, 0xbf70233, + 0x1c70237, 0x70238, 0x70244, 0x70245, + 0x40f0246, 0x70462, 0x1f70464, 0x0, + 0x2e40001, 0x58, 0x2e001e, 0x258012c, + 0xa0aa04ec, 0x30, 0x815f004c, 0x12c0300, + 0xc000204c, 0x600, 0x3206004c, 0x0, 0x0, 0x0 + } } } } +}; + +/* + * The following is some guesswork on how the TV encoder flicker + * filter/rescaler works: + * + * It seems to use some sort of resampling filter, it is controlled + * through the registers at NV_PTV_HFILTER and NV_PTV_VFILTER, they + * control the horizontal and vertical stage respectively, there is + * also NV_PTV_HFILTER2 the blob fills identically to NV_PTV_HFILTER, + * but they seem to do nothing. A rough guess might be that they could + * be used to independently control the filtering of each interlaced + * field, but I don't know how they are enabled. The whole filtering + * process seems to be disabled with bits 26:27 of PTV_200, but we + * aren't doing that. + * + * The layout of both register sets is the same: + * + * A: [BASE+0x18]...[BASE+0x0] [BASE+0x58]..[BASE+0x40] + * B: [BASE+0x34]...[BASE+0x1c] [BASE+0x74]..[BASE+0x5c] + * + * Each coefficient is stored in bits [31],[15:9] in two's complement + * format. They seem to be some kind of weights used in a low-pass + * filter. Both A and B coefficients are applied to the 14 nearest + * samples on each side (Listed from nearest to furthermost. They + * roughly cover 2 framebuffer pixels on each side). They are + * probably multiplied with some more hardwired weights before being + * used: B-coefficients are applied the same on both sides, + * A-coefficients are inverted before being applied to the opposite + * side. + * + * After all the hassle, I got the following formula by empirical + * means... 
+ */ + +#define calc_overscan(o) interpolate(0x100, 0xe1, 0xc1, o) + +#define id1 (1LL << 8) +#define id2 (1LL << 16) +#define id3 (1LL << 24) +#define id4 (1LL << 32) +#define id5 (1LL << 48) + +static struct filter_params{ + int64_t k1; + int64_t ki; + int64_t ki2; + int64_t ki3; + int64_t kr; + int64_t kir; + int64_t ki2r; + int64_t ki3r; + int64_t kf; + int64_t kif; + int64_t ki2f; + int64_t ki3f; + int64_t krf; + int64_t kirf; + int64_t ki2rf; + int64_t ki3rf; +} fparams[2][4] = { + /* Horizontal filter parameters */ + { + {64.311690 * id5, -39.516924 * id5, 6.586143 * id5, 0.000002 * id5, + 0.051285 * id4, 26.168746 * id4, -4.361449 * id4, -0.000001 * id4, + 9.308169 * id3, 78.180965 * id3, -13.030158 * id3, -0.000001 * id3, + -8.801540 * id1, -46.572890 * id1, 7.762145 * id1, -0.000000 * id1}, + {-44.565569 * id5, -68.081246 * id5, 39.812074 * id5, -4.009316 * id5, + 29.832207 * id4, 50.047322 * id4, -25.380017 * id4, 2.546422 * id4, + 104.605622 * id3, 141.908641 * id3, -74.322319 * id3, 7.484316 * id3, + -37.081621 * id1, -90.397510 * id1, 42.784229 * id1, -4.289952 * id1}, + {-56.793244 * id5, 31.153584 * id5, -5.192247 * id5, -0.000003 * id5, + 33.541131 * id4, -34.149302 * id4, 5.691537 * id4, 0.000002 * id4, + 87.196610 * id3, -88.995169 * id3, 14.832456 * id3, 0.000012 * id3, + 17.288138 * id1, 71.864786 * id1, -11.977408 * id1, -0.000009 * id1}, + {51.787796 * id5, 21.211771 * id5, -18.993730 * id5, 1.853310 * id5, + -41.470726 * id4, -17.775823 * id4, 13.057821 * id4, -1.15823 * id4, + -154.235673 * id3, -44.878641 * id3, 40.656077 * id3, -3.695595 * id3, + 112.201065 * id1, 39.992155 * id1, -25.155714 * id1, 2.113984 * id1}, + }, + + /* Vertical filter parameters */ + { + {67.601979 * id5, 0.428319 * id5, -0.071318 * id5, -0.000012 * id5, + -3.402339 * id4, 0.000209 * id4, -0.000092 * id4, 0.000010 * id4, + -9.180996 * id3, 6.111270 * id3, -1.024457 * id3, 0.001043 * id3, + 6.060315 * id1, -0.017425 * id1, 0.007830 * id1, -0.000869 * id1}, + {6.755647 * id5, 5.841348 * id5, 1.469734 * id5, -0.149656 * id5, + 8.293120 * id4, -1.192888 * id4, -0.947652 * id4, 0.094507 * id4, + 37.526655 * id3, 10.257875 * id3, -10.823275 * id3, 1.081497 * id3, + -2.361928 * id1, -2.059432 * id1, 1.840671 * id1, -0.168100 * id1}, + {-14.780391 * id5, -16.042148 * id5, 2.673692 * id5, -0.000000 * id5, + 39.541978 * id4, 5.680053 * id4, -0.946676 * id4, 0.000000 * id4, + 152.994486 * id3, 12.625439 * id3, -2.119579 * id3, 0.002708 * id3, + -38.125089 * id1, -0.855880 * id1, 0.155359 * id1, -0.002245 * id1}, + {-27.476193 * id5, -1.454976 * id5, 1.286557 * id5, 0.025346 * id5, + 20.687300 * id4, 3.014003 * id4, -0.557786 * id4, -0.01311 * id4, + 60.008737 * id3, -0.738273 * id3, 5.408217 * id3, -0.796798 * id3, + -17.296835 * id1, 4.438577 * id1, -2.809420 * id1, 0.385491 * id1}, + } +}; + +static void tv_setup_filter(struct drm_encoder *encoder) +{ + struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); + struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); + struct drm_display_mode *mode = &encoder->crtc->mode; + uint32_t (*filters[])[4][7] = {&tv_enc->state.hfilter, + &tv_enc->state.vfilter}; + int i, j, k; + int32_t overscan = calc_overscan(tv_enc->overscan); + int64_t flicker = (tv_enc->flicker - 50) * (id3 / 100); + uint64_t rs[] = {mode->hdisplay * id3, + mode->vdisplay * id3}; + + do_div(rs[0], overscan * tv_norm->tv_enc_mode.hdisplay); + do_div(rs[1], overscan * tv_norm->tv_enc_mode.vdisplay); + + for (k = 0; k < 2; k++) { + rs[k] = max((int64_t)rs[k], id2); + + for (j = 0; j < 4; 
j++) { + struct filter_params *p = &fparams[k][j]; + + for (i = 0; i < 7; i++) { + int64_t c = (p->k1 + p->ki*i + p->ki2*i*i + p->ki3*i*i*i) + + (p->kr + p->kir*i + p->ki2r*i*i + p->ki3r*i*i*i)*rs[k] + + (p->kf + p->kif*i + p->ki2f*i*i + p->ki3f*i*i*i)*flicker + + (p->krf + p->kirf*i + p->ki2rf*i*i + p->ki3rf*i*i*i)*flicker*rs[k]; + + (*filters[k])[j][i] = (c + id5/2) >> 39 & (0x1 << 31 | 0x7f << 9); + } + } + } +} + +/* Hardware state saving/restoring */ + +static void tv_save_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7]) +{ + int i, j; + uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c }; + + for (i = 0; i < 4; i++) { + for (j = 0; j < 7; j++) + regs[i][j] = nv_read_ptv(dev, offsets[i]+4*j); + } +} + +static void tv_load_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7]) +{ + int i, j; + uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c }; + + for (i = 0; i < 4; i++) { + for (j = 0; j < 7; j++) + nv_write_ptv(dev, offsets[i]+4*j, regs[i][j]); + } +} + +void nv17_tv_state_save(struct drm_device *dev, struct nv17_tv_state *state) +{ + int i; + + for (i = 0; i < 0x40; i++) + state->tv_enc[i] = nv_read_tv_enc(dev, i); + + tv_save_filter(dev, NV_PTV_HFILTER, state->hfilter); + tv_save_filter(dev, NV_PTV_HFILTER2, state->hfilter2); + tv_save_filter(dev, NV_PTV_VFILTER, state->vfilter); + + nv_save_ptv(dev, state, 200); + nv_save_ptv(dev, state, 204); + nv_save_ptv(dev, state, 208); + nv_save_ptv(dev, state, 20c); + nv_save_ptv(dev, state, 304); + nv_save_ptv(dev, state, 500); + nv_save_ptv(dev, state, 504); + nv_save_ptv(dev, state, 508); + nv_save_ptv(dev, state, 600); + nv_save_ptv(dev, state, 604); + nv_save_ptv(dev, state, 608); + nv_save_ptv(dev, state, 60c); + nv_save_ptv(dev, state, 610); + nv_save_ptv(dev, state, 614); +} + +void nv17_tv_state_load(struct drm_device *dev, struct nv17_tv_state *state) +{ + int i; + + for (i = 0; i < 0x40; i++) + nv_write_tv_enc(dev, i, state->tv_enc[i]); + + tv_load_filter(dev, NV_PTV_HFILTER, state->hfilter); + tv_load_filter(dev, NV_PTV_HFILTER2, state->hfilter2); + tv_load_filter(dev, NV_PTV_VFILTER, state->vfilter); + + nv_load_ptv(dev, state, 200); + nv_load_ptv(dev, state, 204); + nv_load_ptv(dev, state, 208); + nv_load_ptv(dev, state, 20c); + nv_load_ptv(dev, state, 304); + nv_load_ptv(dev, state, 500); + nv_load_ptv(dev, state, 504); + nv_load_ptv(dev, state, 508); + nv_load_ptv(dev, state, 600); + nv_load_ptv(dev, state, 604); + nv_load_ptv(dev, state, 608); + nv_load_ptv(dev, state, 60c); + nv_load_ptv(dev, state, 610); + nv_load_ptv(dev, state, 614); + + /* This is required for some settings to kick in. 
*/ + nv_write_tv_enc(dev, 0x3e, 1); + nv_write_tv_enc(dev, 0x3e, 0); +} + +/* Timings similar to the ones the blob sets */ + +struct drm_display_mode nv17_tv_modes[] = { + { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 0, + 320, 344, 392, 560, 0, 200, 200, 202, 220, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC + | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) }, + { DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 0, + 320, 344, 392, 560, 0, 240, 240, 246, 263, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC + | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) }, + { DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 0, + 400, 432, 496, 640, 0, 300, 300, 303, 314, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC + | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) }, + { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 0, + 640, 672, 768, 880, 0, 480, 480, 492, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 0, + 720, 752, 872, 960, 0, 480, 480, 493, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 0, + 720, 776, 856, 960, 0, 576, 576, 588, 597, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 0, + 800, 840, 920, 1040, 0, 600, 600, 604, 618, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 0, + 1024, 1064, 1200, 1344, 0, 768, 768, 777, 806, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + {} +}; + +void nv17_tv_update_properties(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); + struct nv17_tv_state *regs = &tv_enc->state; + struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); + int subconnector = tv_enc->select_subconnector ? + tv_enc->select_subconnector : + tv_enc->subconnector; + + switch (subconnector) { + case DRM_MODE_SUBCONNECTOR_Composite: + { + regs->ptv_204 = 0x2; + + /* The composite connector may be found on either pin. 
*/ + if (tv_enc->pin_mask & 0x4) + regs->ptv_204 |= 0x010000; + else if (tv_enc->pin_mask & 0x2) + regs->ptv_204 |= 0x100000; + else + regs->ptv_204 |= 0x110000; + + regs->tv_enc[0x7] = 0x10; + break; + } + case DRM_MODE_SUBCONNECTOR_SVIDEO: + regs->ptv_204 = 0x11012; + regs->tv_enc[0x7] = 0x18; + break; + + case DRM_MODE_SUBCONNECTOR_Component: + regs->ptv_204 = 0x111333; + regs->tv_enc[0x7] = 0x14; + break; + + case DRM_MODE_SUBCONNECTOR_SCART: + regs->ptv_204 = 0x111012; + regs->tv_enc[0x7] = 0x18; + break; + } + + regs->tv_enc[0x20] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x20], 255, + tv_enc->saturation); + regs->tv_enc[0x22] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x22], 255, + tv_enc->saturation); + regs->tv_enc[0x25] = tv_enc->hue * 255 / 100; + + nv_load_ptv(dev, regs, 204); + nv_load_tv_enc(dev, regs, 7); + nv_load_tv_enc(dev, regs, 20); + nv_load_tv_enc(dev, regs, 22); + nv_load_tv_enc(dev, regs, 25); +} + +void nv17_tv_update_rescaler(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); + struct nv17_tv_state *regs = &tv_enc->state; + + regs->ptv_208 = 0x40 | (calc_overscan(tv_enc->overscan) << 8); + + tv_setup_filter(encoder); + + nv_load_ptv(dev, regs, 208); + tv_load_filter(dev, NV_PTV_HFILTER, regs->hfilter); + tv_load_filter(dev, NV_PTV_HFILTER2, regs->hfilter2); + tv_load_filter(dev, NV_PTV_VFILTER, regs->vfilter); +} + +void nv17_ctv_update_rescaler(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); + int head = nouveau_crtc(encoder->crtc)->index; + struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head]; + struct drm_display_mode *crtc_mode = &encoder->crtc->mode; + struct drm_display_mode *output_mode = &get_tv_norm(encoder)->ctv_enc_mode.mode; + int overscan, hmargin, vmargin, hratio, vratio; + + /* The rescaler doesn't do the right thing for interlaced modes. 
*/ + if (output_mode->flags & DRM_MODE_FLAG_INTERLACE) + overscan = 100; + else + overscan = tv_enc->overscan; + + hmargin = (output_mode->hdisplay - crtc_mode->hdisplay) / 2; + vmargin = (output_mode->vdisplay - crtc_mode->vdisplay) / 2; + + hmargin = interpolate(0, min(hmargin, output_mode->hdisplay/20), hmargin, + overscan); + vmargin = interpolate(0, min(vmargin, output_mode->vdisplay/20), vmargin, + overscan); + + hratio = crtc_mode->hdisplay * 0x800 / (output_mode->hdisplay - 2*hmargin); + vratio = crtc_mode->vdisplay * 0x800 / (output_mode->vdisplay - 2*vmargin) & ~3; + + regs->fp_horiz_regs[FP_VALID_START] = hmargin; + regs->fp_horiz_regs[FP_VALID_END] = output_mode->hdisplay - hmargin - 1; + regs->fp_vert_regs[FP_VALID_START] = vmargin; + regs->fp_vert_regs[FP_VALID_END] = output_mode->vdisplay - vmargin - 1; + + regs->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE | + XLATE(vratio, 0, NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE) | + NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE | + XLATE(hratio, 0, NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE); + + NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HVALID_START, + regs->fp_horiz_regs[FP_VALID_START]); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HVALID_END, + regs->fp_horiz_regs[FP_VALID_END]); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_VVALID_START, + regs->fp_vert_regs[FP_VALID_START]); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_VVALID_END, + regs->fp_vert_regs[FP_VALID_END]); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regs->fp_debug_1); +} diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c new file mode 100644 index 000000000000..18ba74f19703 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv20_graph.c @@ -0,0 +1,780 @@ +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" +#include "nouveau_drm.h" + +/* + * NV20 + * ----- + * There are 3 families : + * NV20 is 0x10de:0x020* + * NV25/28 is 0x10de:0x025* / 0x10de:0x028* + * NV2A is 0x10de:0x02A0 + * + * NV30 + * ----- + * There are 3 families : + * NV30/31 is 0x10de:0x030* / 0x10de:0x031* + * NV34 is 0x10de:0x032* + * NV35/36 is 0x10de:0x033* / 0x10de:0x034* + * + * Not seen in the wild, no dumps (probably NV35) : + * NV37 is 0x10de:0x00fc, 0x10de:0x00fd + * NV38 is 0x10de:0x0333, 0x10de:0x00fe + * + */ + +#define NV20_GRCTX_SIZE (3580*4) +#define NV25_GRCTX_SIZE (3529*4) +#define NV2A_GRCTX_SIZE (3500*4) + +#define NV30_31_GRCTX_SIZE (24392) +#define NV34_GRCTX_SIZE (18140) +#define NV35_36_GRCTX_SIZE (22396) + +static void +nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) +{ + int i; + + nv_wo32(dev, ctx, 0x033c/4, 0xffff0000); + nv_wo32(dev, ctx, 0x03a0/4, 0x0fff0000); + nv_wo32(dev, ctx, 0x03a4/4, 0x0fff0000); + nv_wo32(dev, ctx, 0x047c/4, 0x00000101); + nv_wo32(dev, ctx, 0x0490/4, 0x00000111); + nv_wo32(dev, ctx, 0x04a8/4, 0x44400000); + for (i = 0x04d4; i <= 0x04e0; i += 4) + nv_wo32(dev, ctx, i/4, 0x00030303); + for (i = 0x04f4; i <= 0x0500; i += 4) + nv_wo32(dev, ctx, i/4, 0x00080000); + for (i = 0x050c; i <= 0x0518; i += 4) + nv_wo32(dev, ctx, i/4, 0x01012000); + for (i = 0x051c; i <= 0x0528; i += 4) + nv_wo32(dev, ctx, i/4, 0x000105b8); + for (i = 0x052c; i <= 0x0538; i += 4) + nv_wo32(dev, ctx, i/4, 0x00080008); + for (i = 0x055c; i <= 0x0598; i += 4) + nv_wo32(dev, ctx, i/4, 0x07ff0000); + nv_wo32(dev, ctx, 0x05a4/4, 0x4b7fffff); + nv_wo32(dev, ctx, 0x05fc/4, 0x00000001); + nv_wo32(dev, ctx, 0x0604/4, 0x00004000); + nv_wo32(dev, ctx, 0x0610/4, 0x00000001); + nv_wo32(dev, ctx, 0x0618/4, 0x00040000); + nv_wo32(dev, ctx, 
0x061c/4, 0x00010000); + for (i = 0x1c1c; i <= 0x248c; i += 16) { + nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9); + nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c); + nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b); + } + nv_wo32(dev, ctx, 0x281c/4, 0x3f800000); + nv_wo32(dev, ctx, 0x2830/4, 0x3f800000); + nv_wo32(dev, ctx, 0x285c/4, 0x40000000); + nv_wo32(dev, ctx, 0x2860/4, 0x3f800000); + nv_wo32(dev, ctx, 0x2864/4, 0x3f000000); + nv_wo32(dev, ctx, 0x286c/4, 0x40000000); + nv_wo32(dev, ctx, 0x2870/4, 0x3f800000); + nv_wo32(dev, ctx, 0x2878/4, 0xbf800000); + nv_wo32(dev, ctx, 0x2880/4, 0xbf800000); + nv_wo32(dev, ctx, 0x34a4/4, 0x000fe000); + nv_wo32(dev, ctx, 0x3530/4, 0x000003f8); + nv_wo32(dev, ctx, 0x3540/4, 0x002fe000); + for (i = 0x355c; i <= 0x3578; i += 4) + nv_wo32(dev, ctx, i/4, 0x001c527c); +} + +static void +nv25_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) +{ + int i; + + nv_wo32(dev, ctx, 0x035c/4, 0xffff0000); + nv_wo32(dev, ctx, 0x03c0/4, 0x0fff0000); + nv_wo32(dev, ctx, 0x03c4/4, 0x0fff0000); + nv_wo32(dev, ctx, 0x049c/4, 0x00000101); + nv_wo32(dev, ctx, 0x04b0/4, 0x00000111); + nv_wo32(dev, ctx, 0x04c8/4, 0x00000080); + nv_wo32(dev, ctx, 0x04cc/4, 0xffff0000); + nv_wo32(dev, ctx, 0x04d0/4, 0x00000001); + nv_wo32(dev, ctx, 0x04e4/4, 0x44400000); + nv_wo32(dev, ctx, 0x04fc/4, 0x4b800000); + for (i = 0x0510; i <= 0x051c; i += 4) + nv_wo32(dev, ctx, i/4, 0x00030303); + for (i = 0x0530; i <= 0x053c; i += 4) + nv_wo32(dev, ctx, i/4, 0x00080000); + for (i = 0x0548; i <= 0x0554; i += 4) + nv_wo32(dev, ctx, i/4, 0x01012000); + for (i = 0x0558; i <= 0x0564; i += 4) + nv_wo32(dev, ctx, i/4, 0x000105b8); + for (i = 0x0568; i <= 0x0574; i += 4) + nv_wo32(dev, ctx, i/4, 0x00080008); + for (i = 0x0598; i <= 0x05d4; i += 4) + nv_wo32(dev, ctx, i/4, 0x07ff0000); + nv_wo32(dev, ctx, 0x05e0/4, 0x4b7fffff); + nv_wo32(dev, ctx, 0x0620/4, 0x00000080); + nv_wo32(dev, ctx, 0x0624/4, 0x30201000); + nv_wo32(dev, ctx, 0x0628/4, 0x70605040); + nv_wo32(dev, ctx, 0x062c/4, 0xb0a09080); + nv_wo32(dev, ctx, 0x0630/4, 0xf0e0d0c0); + nv_wo32(dev, ctx, 0x0664/4, 0x00000001); + nv_wo32(dev, ctx, 0x066c/4, 0x00004000); + nv_wo32(dev, ctx, 0x0678/4, 0x00000001); + nv_wo32(dev, ctx, 0x0680/4, 0x00040000); + nv_wo32(dev, ctx, 0x0684/4, 0x00010000); + for (i = 0x1b04; i <= 0x2374; i += 16) { + nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9); + nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c); + nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b); + } + nv_wo32(dev, ctx, 0x2704/4, 0x3f800000); + nv_wo32(dev, ctx, 0x2718/4, 0x3f800000); + nv_wo32(dev, ctx, 0x2744/4, 0x40000000); + nv_wo32(dev, ctx, 0x2748/4, 0x3f800000); + nv_wo32(dev, ctx, 0x274c/4, 0x3f000000); + nv_wo32(dev, ctx, 0x2754/4, 0x40000000); + nv_wo32(dev, ctx, 0x2758/4, 0x3f800000); + nv_wo32(dev, ctx, 0x2760/4, 0xbf800000); + nv_wo32(dev, ctx, 0x2768/4, 0xbf800000); + nv_wo32(dev, ctx, 0x308c/4, 0x000fe000); + nv_wo32(dev, ctx, 0x3108/4, 0x000003f8); + nv_wo32(dev, ctx, 0x3468/4, 0x002fe000); + for (i = 0x3484; i <= 0x34a0; i += 4) + nv_wo32(dev, ctx, i/4, 0x001c527c); +} + +static void +nv2a_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) +{ + int i; + + nv_wo32(dev, ctx, 0x033c/4, 0xffff0000); + nv_wo32(dev, ctx, 0x03a0/4, 0x0fff0000); + nv_wo32(dev, ctx, 0x03a4/4, 0x0fff0000); + nv_wo32(dev, ctx, 0x047c/4, 0x00000101); + nv_wo32(dev, ctx, 0x0490/4, 0x00000111); + nv_wo32(dev, ctx, 0x04a8/4, 0x44400000); + for (i = 0x04d4; i <= 0x04e0; i += 4) + nv_wo32(dev, ctx, i/4, 0x00030303); + for (i = 0x04f4; i <= 0x0500; i += 4) + nv_wo32(dev, ctx, 
i/4, 0x00080000); + for (i = 0x050c; i <= 0x0518; i += 4) + nv_wo32(dev, ctx, i/4, 0x01012000); + for (i = 0x051c; i <= 0x0528; i += 4) + nv_wo32(dev, ctx, i/4, 0x000105b8); + for (i = 0x052c; i <= 0x0538; i += 4) + nv_wo32(dev, ctx, i/4, 0x00080008); + for (i = 0x055c; i <= 0x0598; i += 4) + nv_wo32(dev, ctx, i/4, 0x07ff0000); + nv_wo32(dev, ctx, 0x05a4/4, 0x4b7fffff); + nv_wo32(dev, ctx, 0x05fc/4, 0x00000001); + nv_wo32(dev, ctx, 0x0604/4, 0x00004000); + nv_wo32(dev, ctx, 0x0610/4, 0x00000001); + nv_wo32(dev, ctx, 0x0618/4, 0x00040000); + nv_wo32(dev, ctx, 0x061c/4, 0x00010000); + for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */ + nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9); + nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c); + nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b); + } + nv_wo32(dev, ctx, 0x269c/4, 0x3f800000); + nv_wo32(dev, ctx, 0x26b0/4, 0x3f800000); + nv_wo32(dev, ctx, 0x26dc/4, 0x40000000); + nv_wo32(dev, ctx, 0x26e0/4, 0x3f800000); + nv_wo32(dev, ctx, 0x26e4/4, 0x3f000000); + nv_wo32(dev, ctx, 0x26ec/4, 0x40000000); + nv_wo32(dev, ctx, 0x26f0/4, 0x3f800000); + nv_wo32(dev, ctx, 0x26f8/4, 0xbf800000); + nv_wo32(dev, ctx, 0x2700/4, 0xbf800000); + nv_wo32(dev, ctx, 0x3024/4, 0x000fe000); + nv_wo32(dev, ctx, 0x30a0/4, 0x000003f8); + nv_wo32(dev, ctx, 0x33fc/4, 0x002fe000); + for (i = 0x341c; i <= 0x3438; i += 4) + nv_wo32(dev, ctx, i/4, 0x001c527c); +} + +static void +nv30_31_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) +{ + int i; + + nv_wo32(dev, ctx, 0x0410/4, 0x00000101); + nv_wo32(dev, ctx, 0x0424/4, 0x00000111); + nv_wo32(dev, ctx, 0x0428/4, 0x00000060); + nv_wo32(dev, ctx, 0x0444/4, 0x00000080); + nv_wo32(dev, ctx, 0x0448/4, 0xffff0000); + nv_wo32(dev, ctx, 0x044c/4, 0x00000001); + nv_wo32(dev, ctx, 0x0460/4, 0x44400000); + nv_wo32(dev, ctx, 0x048c/4, 0xffff0000); + for (i = 0x04e0; i < 0x04e8; i += 4) + nv_wo32(dev, ctx, i/4, 0x0fff0000); + nv_wo32(dev, ctx, 0x04ec/4, 0x00011100); + for (i = 0x0508; i < 0x0548; i += 4) + nv_wo32(dev, ctx, i/4, 0x07ff0000); + nv_wo32(dev, ctx, 0x0550/4, 0x4b7fffff); + nv_wo32(dev, ctx, 0x058c/4, 0x00000080); + nv_wo32(dev, ctx, 0x0590/4, 0x30201000); + nv_wo32(dev, ctx, 0x0594/4, 0x70605040); + nv_wo32(dev, ctx, 0x0598/4, 0xb8a89888); + nv_wo32(dev, ctx, 0x059c/4, 0xf8e8d8c8); + nv_wo32(dev, ctx, 0x05b0/4, 0xb0000000); + for (i = 0x0600; i < 0x0640; i += 4) + nv_wo32(dev, ctx, i/4, 0x00010588); + for (i = 0x0640; i < 0x0680; i += 4) + nv_wo32(dev, ctx, i/4, 0x00030303); + for (i = 0x06c0; i < 0x0700; i += 4) + nv_wo32(dev, ctx, i/4, 0x0008aae4); + for (i = 0x0700; i < 0x0740; i += 4) + nv_wo32(dev, ctx, i/4, 0x01012000); + for (i = 0x0740; i < 0x0780; i += 4) + nv_wo32(dev, ctx, i/4, 0x00080008); + nv_wo32(dev, ctx, 0x085c/4, 0x00040000); + nv_wo32(dev, ctx, 0x0860/4, 0x00010000); + for (i = 0x0864; i < 0x0874; i += 4) + nv_wo32(dev, ctx, i/4, 0x00040004); + for (i = 0x1f18; i <= 0x3088 ; i += 16) { + nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9); + nv_wo32(dev, ctx, i/4 + 1, 0x0436086c); + nv_wo32(dev, ctx, i/4 + 2, 0x000c001b); + } + for (i = 0x30b8; i < 0x30c8; i += 4) + nv_wo32(dev, ctx, i/4, 0x0000ffff); + nv_wo32(dev, ctx, 0x344c/4, 0x3f800000); + nv_wo32(dev, ctx, 0x3808/4, 0x3f800000); + nv_wo32(dev, ctx, 0x381c/4, 0x3f800000); + nv_wo32(dev, ctx, 0x3848/4, 0x40000000); + nv_wo32(dev, ctx, 0x384c/4, 0x3f800000); + nv_wo32(dev, ctx, 0x3850/4, 0x3f000000); + nv_wo32(dev, ctx, 0x3858/4, 0x40000000); + nv_wo32(dev, ctx, 0x385c/4, 0x3f800000); + nv_wo32(dev, ctx, 0x3864/4, 0xbf800000); + nv_wo32(dev, ctx, 0x386c/4, 
0xbf800000); +} + +static void +nv34_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) +{ + int i; + + nv_wo32(dev, ctx, 0x040c/4, 0x01000101); + nv_wo32(dev, ctx, 0x0420/4, 0x00000111); + nv_wo32(dev, ctx, 0x0424/4, 0x00000060); + nv_wo32(dev, ctx, 0x0440/4, 0x00000080); + nv_wo32(dev, ctx, 0x0444/4, 0xffff0000); + nv_wo32(dev, ctx, 0x0448/4, 0x00000001); + nv_wo32(dev, ctx, 0x045c/4, 0x44400000); + nv_wo32(dev, ctx, 0x0480/4, 0xffff0000); + for (i = 0x04d4; i < 0x04dc; i += 4) + nv_wo32(dev, ctx, i/4, 0x0fff0000); + nv_wo32(dev, ctx, 0x04e0/4, 0x00011100); + for (i = 0x04fc; i < 0x053c; i += 4) + nv_wo32(dev, ctx, i/4, 0x07ff0000); + nv_wo32(dev, ctx, 0x0544/4, 0x4b7fffff); + nv_wo32(dev, ctx, 0x057c/4, 0x00000080); + nv_wo32(dev, ctx, 0x0580/4, 0x30201000); + nv_wo32(dev, ctx, 0x0584/4, 0x70605040); + nv_wo32(dev, ctx, 0x0588/4, 0xb8a89888); + nv_wo32(dev, ctx, 0x058c/4, 0xf8e8d8c8); + nv_wo32(dev, ctx, 0x05a0/4, 0xb0000000); + for (i = 0x05f0; i < 0x0630; i += 4) + nv_wo32(dev, ctx, i/4, 0x00010588); + for (i = 0x0630; i < 0x0670; i += 4) + nv_wo32(dev, ctx, i/4, 0x00030303); + for (i = 0x06b0; i < 0x06f0; i += 4) + nv_wo32(dev, ctx, i/4, 0x0008aae4); + for (i = 0x06f0; i < 0x0730; i += 4) + nv_wo32(dev, ctx, i/4, 0x01012000); + for (i = 0x0730; i < 0x0770; i += 4) + nv_wo32(dev, ctx, i/4, 0x00080008); + nv_wo32(dev, ctx, 0x0850/4, 0x00040000); + nv_wo32(dev, ctx, 0x0854/4, 0x00010000); + for (i = 0x0858; i < 0x0868; i += 4) + nv_wo32(dev, ctx, i/4, 0x00040004); + for (i = 0x15ac; i <= 0x271c ; i += 16) { + nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9); + nv_wo32(dev, ctx, i/4 + 1, 0x0436086c); + nv_wo32(dev, ctx, i/4 + 2, 0x000c001b); + } + for (i = 0x274c; i < 0x275c; i += 4) + nv_wo32(dev, ctx, i/4, 0x0000ffff); + nv_wo32(dev, ctx, 0x2ae0/4, 0x3f800000); + nv_wo32(dev, ctx, 0x2e9c/4, 0x3f800000); + nv_wo32(dev, ctx, 0x2eb0/4, 0x3f800000); + nv_wo32(dev, ctx, 0x2edc/4, 0x40000000); + nv_wo32(dev, ctx, 0x2ee0/4, 0x3f800000); + nv_wo32(dev, ctx, 0x2ee4/4, 0x3f000000); + nv_wo32(dev, ctx, 0x2eec/4, 0x40000000); + nv_wo32(dev, ctx, 0x2ef0/4, 0x3f800000); + nv_wo32(dev, ctx, 0x2ef8/4, 0xbf800000); + nv_wo32(dev, ctx, 0x2f00/4, 0xbf800000); +} + +static void +nv35_36_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) +{ + int i; + + nv_wo32(dev, ctx, 0x040c/4, 0x00000101); + nv_wo32(dev, ctx, 0x0420/4, 0x00000111); + nv_wo32(dev, ctx, 0x0424/4, 0x00000060); + nv_wo32(dev, ctx, 0x0440/4, 0x00000080); + nv_wo32(dev, ctx, 0x0444/4, 0xffff0000); + nv_wo32(dev, ctx, 0x0448/4, 0x00000001); + nv_wo32(dev, ctx, 0x045c/4, 0x44400000); + nv_wo32(dev, ctx, 0x0488/4, 0xffff0000); + for (i = 0x04dc; i < 0x04e4; i += 4) + nv_wo32(dev, ctx, i/4, 0x0fff0000); + nv_wo32(dev, ctx, 0x04e8/4, 0x00011100); + for (i = 0x0504; i < 0x0544; i += 4) + nv_wo32(dev, ctx, i/4, 0x07ff0000); + nv_wo32(dev, ctx, 0x054c/4, 0x4b7fffff); + nv_wo32(dev, ctx, 0x0588/4, 0x00000080); + nv_wo32(dev, ctx, 0x058c/4, 0x30201000); + nv_wo32(dev, ctx, 0x0590/4, 0x70605040); + nv_wo32(dev, ctx, 0x0594/4, 0xb8a89888); + nv_wo32(dev, ctx, 0x0598/4, 0xf8e8d8c8); + nv_wo32(dev, ctx, 0x05ac/4, 0xb0000000); + for (i = 0x0604; i < 0x0644; i += 4) + nv_wo32(dev, ctx, i/4, 0x00010588); + for (i = 0x0644; i < 0x0684; i += 4) + nv_wo32(dev, ctx, i/4, 0x00030303); + for (i = 0x06c4; i < 0x0704; i += 4) + nv_wo32(dev, ctx, i/4, 0x0008aae4); + for (i = 0x0704; i < 0x0744; i += 4) + nv_wo32(dev, ctx, i/4, 0x01012000); + for (i = 0x0744; i < 0x0784; i += 4) + nv_wo32(dev, ctx, i/4, 0x00080008); + nv_wo32(dev, ctx, 
0x0860/4, 0x00040000); + nv_wo32(dev, ctx, 0x0864/4, 0x00010000); + for (i = 0x0868; i < 0x0878; i += 4) + nv_wo32(dev, ctx, i/4, 0x00040004); + for (i = 0x1f1c; i <= 0x308c ; i += 16) { + nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9); + nv_wo32(dev, ctx, i/4 + 1, 0x0436086c); + nv_wo32(dev, ctx, i/4 + 2, 0x000c001b); + } + for (i = 0x30bc; i < 0x30cc; i += 4) + nv_wo32(dev, ctx, i/4, 0x0000ffff); + nv_wo32(dev, ctx, 0x3450/4, 0x3f800000); + nv_wo32(dev, ctx, 0x380c/4, 0x3f800000); + nv_wo32(dev, ctx, 0x3820/4, 0x3f800000); + nv_wo32(dev, ctx, 0x384c/4, 0x40000000); + nv_wo32(dev, ctx, 0x3850/4, 0x3f800000); + nv_wo32(dev, ctx, 0x3854/4, 0x3f000000); + nv_wo32(dev, ctx, 0x385c/4, 0x40000000); + nv_wo32(dev, ctx, 0x3860/4, 0x3f800000); + nv_wo32(dev, ctx, 0x3868/4, 0xbf800000); + nv_wo32(dev, ctx, 0x3870/4, 0xbf800000); +} + +int +nv20_graph_create_context(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *); + unsigned int ctx_size; + unsigned int idoffs = 0x28/4; + int ret; + + switch (dev_priv->chipset) { + case 0x20: + ctx_size = NV20_GRCTX_SIZE; + ctx_init = nv20_graph_context_init; + idoffs = 0; + break; + case 0x25: + case 0x28: + ctx_size = NV25_GRCTX_SIZE; + ctx_init = nv25_graph_context_init; + break; + case 0x2a: + ctx_size = NV2A_GRCTX_SIZE; + ctx_init = nv2a_graph_context_init; + idoffs = 0; + break; + case 0x30: + case 0x31: + ctx_size = NV30_31_GRCTX_SIZE; + ctx_init = nv30_31_graph_context_init; + break; + case 0x34: + ctx_size = NV34_GRCTX_SIZE; + ctx_init = nv34_graph_context_init; + break; + case 0x35: + case 0x36: + ctx_size = NV35_36_GRCTX_SIZE; + ctx_init = nv35_36_graph_context_init; + break; + default: + ctx_size = 0; + ctx_init = nv35_36_graph_context_init; + NV_ERROR(dev, "Please contact the devs if you want your NV%x" + " card to work\n", dev_priv->chipset); + return -ENOSYS; + break; + } + + ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, ctx_size, 16, + NVOBJ_FLAG_ZERO_ALLOC, + &chan->ramin_grctx); + if (ret) + return ret; + + /* Initialise default context values */ + dev_priv->engine.instmem.prepare_access(dev, true); + ctx_init(dev, chan->ramin_grctx->gpuobj); + + /* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */ + nv_wo32(dev, chan->ramin_grctx->gpuobj, idoffs, + (chan->id << 24) | 0x1); /* CTX_USER */ + + nv_wo32(dev, dev_priv->ctx_table->gpuobj, chan->id, + chan->ramin_grctx->instance >> 4); + + dev_priv->engine.instmem.finish_access(dev); + return 0; +} + +void +nv20_graph_destroy_context(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (chan->ramin_grctx) + nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); + + dev_priv->engine.instmem.prepare_access(dev, true); + nv_wo32(dev, dev_priv->ctx_table->gpuobj, chan->id, 0); + dev_priv->engine.instmem.finish_access(dev); +} + +int +nv20_graph_load_context(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + uint32_t inst; + + if (!chan->ramin_grctx) + return -EINVAL; + inst = chan->ramin_grctx->instance >> 4; + + nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); + nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER, + NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD); + nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100); + + nouveau_wait_for_idle(dev); + return 0; +} + +int +nv20_graph_unload_context(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = 
dev->dev_private; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; + struct nouveau_channel *chan; + uint32_t inst, tmp; + + chan = pgraph->channel(dev); + if (!chan) + return 0; + inst = chan->ramin_grctx->instance >> 4; + + nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); + nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER, + NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE); + + nouveau_wait_for_idle(dev); + + nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000); + tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff; + tmp |= (pfifo->channels - 1) << 24; + nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp); + return 0; +} + +static void +nv20_graph_rdi(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int i, writecount = 32; + uint32_t rdi_index = 0x2c80000; + + if (dev_priv->chipset == 0x20) { + rdi_index = 0x3d0000; + writecount = 15; + } + + nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, rdi_index); + for (i = 0; i < writecount; i++) + nv_wr32(dev, NV10_PGRAPH_RDI_DATA, 0); + + nouveau_wait_for_idle(dev); +} + +int +nv20_graph_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = + (struct drm_nouveau_private *)dev->dev_private; + uint32_t tmp, vramsz; + int ret, i; + + nv_wr32(dev, NV03_PMC_ENABLE, + nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH); + nv_wr32(dev, NV03_PMC_ENABLE, + nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH); + + if (!dev_priv->ctx_table) { + /* Create Context Pointer Table */ + dev_priv->ctx_table_size = 32 * 4; + ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, + dev_priv->ctx_table_size, 16, + NVOBJ_FLAG_ZERO_ALLOC, + &dev_priv->ctx_table); + if (ret) + return ret; + } + + nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, + dev_priv->ctx_table->instance >> 4); + + nv20_graph_rdi(dev); + + nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); + nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); + + nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); + nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000); + nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700); + nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */ + nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000); + nv_wr32(dev, 0x40009C , 0x00000040); + + if (dev_priv->chipset >= 0x25) { + nv_wr32(dev, 0x400890, 0x00080000); + nv_wr32(dev, 0x400610, 0x304B1FB6); + nv_wr32(dev, 0x400B80, 0x18B82880); + nv_wr32(dev, 0x400B84, 0x44000000); + nv_wr32(dev, 0x400098, 0x40000080); + nv_wr32(dev, 0x400B88, 0x000000ff); + } else { + nv_wr32(dev, 0x400880, 0x00080000); /* 0x0008c7df */ + nv_wr32(dev, 0x400094, 0x00000005); + nv_wr32(dev, 0x400B80, 0x45CAA208); /* 0x45eae20e */ + nv_wr32(dev, 0x400B84, 0x24000000); + nv_wr32(dev, 0x400098, 0x00000040); + nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038); + nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030); + nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E10038); + nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030); + } + + /* copy tile info from PFB */ + for (i = 0; i < NV10_PFB_TILE__SIZE; i++) { + nv_wr32(dev, 0x00400904 + i * 0x10, + nv_rd32(dev, NV10_PFB_TLIMIT(i))); + /* which is NV40_PGRAPH_TLIMIT0(i) ?? */ + nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + i * 4); + nv_wr32(dev, NV10_PGRAPH_RDI_DATA, + nv_rd32(dev, NV10_PFB_TLIMIT(i))); + nv_wr32(dev, 0x00400908 + i * 0x10, + nv_rd32(dev, NV10_PFB_TSIZE(i))); + /* which is NV40_PGRAPH_TSIZE0(i) ?? 
*/ + nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + i * 4); + nv_wr32(dev, NV10_PGRAPH_RDI_DATA, + nv_rd32(dev, NV10_PFB_TSIZE(i))); + nv_wr32(dev, 0x00400900 + i * 0x10, + nv_rd32(dev, NV10_PFB_TILE(i))); + /* which is NV40_PGRAPH_TILE0(i) ?? */ + nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + i * 4); + nv_wr32(dev, NV10_PGRAPH_RDI_DATA, + nv_rd32(dev, NV10_PFB_TILE(i))); + } + for (i = 0; i < 8; i++) { + nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4)); + nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4); + nv_wr32(dev, NV10_PGRAPH_RDI_DATA, + nv_rd32(dev, 0x100300 + i * 4)); + } + nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324)); + nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C); + nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324)); + + nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100); + nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF); + + tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) & 0x0007ff00; + nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp); + tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) | 0x00020100; + nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp); + + /* begin RAM config */ + vramsz = drm_get_resource_len(dev, 0) - 1; + nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0)); + nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1)); + nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000); + nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG0)); + nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004); + nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG1)); + nv_wr32(dev, 0x400820, 0); + nv_wr32(dev, 0x400824, 0); + nv_wr32(dev, 0x400864, vramsz - 1); + nv_wr32(dev, 0x400868, vramsz - 1); + + /* interesting.. the below overwrites some of the tile setup above.. */ + nv_wr32(dev, 0x400B20, 0x00000000); + nv_wr32(dev, 0x400B04, 0xFFFFFFFF); + + nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMIN, 0); + nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMIN, 0); + nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff); + nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff); + + return 0; +} + +void +nv20_graph_takedown(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + nouveau_gpuobj_ref_del(dev, &dev_priv->ctx_table); +} + +int +nv30_graph_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int ret, i; + + nv_wr32(dev, NV03_PMC_ENABLE, + nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH); + nv_wr32(dev, NV03_PMC_ENABLE, + nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH); + + if (!dev_priv->ctx_table) { + /* Create Context Pointer Table */ + dev_priv->ctx_table_size = 32 * 4; + ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, + dev_priv->ctx_table_size, 16, + NVOBJ_FLAG_ZERO_ALLOC, + &dev_priv->ctx_table); + if (ret) + return ret; + } + + nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, + dev_priv->ctx_table->instance >> 4); + + nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); + nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); + + nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); + nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000); + nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0); + nv_wr32(dev, 0x400890, 0x01b463ff); + nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf2de0475); + nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000); + nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6); + nv_wr32(dev, 0x400B80, 0x1003d888); + nv_wr32(dev, 0x400B84, 0x0c000000); + nv_wr32(dev, 0x400098, 0x00000000); + nv_wr32(dev, 0x40009C, 0x0005ad00); + nv_wr32(dev, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */ + nv_wr32(dev, 0x4000a0, 0x00000000); + nv_wr32(dev, 
0x4000a4, 0x00000008); + nv_wr32(dev, 0x4008a8, 0xb784a400); + nv_wr32(dev, 0x400ba0, 0x002f8685); + nv_wr32(dev, 0x400ba4, 0x00231f3f); + nv_wr32(dev, 0x4008a4, 0x40000020); + + if (dev_priv->chipset == 0x34) { + nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004); + nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00200201); + nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0008); + nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000008); + nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000); + nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000032); + nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00004); + nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000002); + } + + nv_wr32(dev, 0x4000c0, 0x00000016); + + /* copy tile info from PFB */ + for (i = 0; i < NV10_PFB_TILE__SIZE; i++) { + nv_wr32(dev, 0x00400904 + i * 0x10, + nv_rd32(dev, NV10_PFB_TLIMIT(i))); + /* which is NV40_PGRAPH_TLIMIT0(i) ?? */ + nv_wr32(dev, 0x00400908 + i * 0x10, + nv_rd32(dev, NV10_PFB_TSIZE(i))); + /* which is NV40_PGRAPH_TSIZE0(i) ?? */ + nv_wr32(dev, 0x00400900 + i * 0x10, + nv_rd32(dev, NV10_PFB_TILE(i))); + /* which is NV40_PGRAPH_TILE0(i) ?? */ + } + + nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100); + nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF); + nv_wr32(dev, 0x0040075c , 0x00000001); + + /* begin RAM config */ + /* vramsz = drm_get_resource_len(dev, 0) - 1; */ + nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0)); + nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1)); + if (dev_priv->chipset != 0x34) { + nv_wr32(dev, 0x400750, 0x00EA0000); + nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG0)); + nv_wr32(dev, 0x400750, 0x00EA0004); + nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG1)); + } + + return 0; +} + +struct nouveau_pgraph_object_class nv20_graph_grclass[] = { + { 0x0030, false, NULL }, /* null */ + { 0x0039, false, NULL }, /* m2mf */ + { 0x004a, false, NULL }, /* gdirect */ + { 0x009f, false, NULL }, /* imageblit (nv12) */ + { 0x008a, false, NULL }, /* ifc */ + { 0x0089, false, NULL }, /* sifm */ + { 0x0062, false, NULL }, /* surf2d */ + { 0x0043, false, NULL }, /* rop */ + { 0x0012, false, NULL }, /* beta1 */ + { 0x0072, false, NULL }, /* beta4 */ + { 0x0019, false, NULL }, /* cliprect */ + { 0x0044, false, NULL }, /* pattern */ + { 0x009e, false, NULL }, /* swzsurf */ + { 0x0096, false, NULL }, /* celcius */ + { 0x0097, false, NULL }, /* kelvin (nv20) */ + { 0x0597, false, NULL }, /* kelvin (nv25) */ + {} +}; + +struct nouveau_pgraph_object_class nv30_graph_grclass[] = { + { 0x0030, false, NULL }, /* null */ + { 0x0039, false, NULL }, /* m2mf */ + { 0x004a, false, NULL }, /* gdirect */ + { 0x009f, false, NULL }, /* imageblit (nv12) */ + { 0x008a, false, NULL }, /* ifc */ + { 0x038a, false, NULL }, /* ifc (nv30) */ + { 0x0089, false, NULL }, /* sifm */ + { 0x0389, false, NULL }, /* sifm (nv30) */ + { 0x0062, false, NULL }, /* surf2d */ + { 0x0362, false, NULL }, /* surf2d (nv30) */ + { 0x0043, false, NULL }, /* rop */ + { 0x0012, false, NULL }, /* beta1 */ + { 0x0072, false, NULL }, /* beta4 */ + { 0x0019, false, NULL }, /* cliprect */ + { 0x0044, false, NULL }, /* pattern */ + { 0x039e, false, NULL }, /* swzsurf */ + { 0x0397, false, NULL }, /* rankine (nv30) */ + { 0x0497, false, NULL }, /* rankine (nv35) */ + { 0x0697, false, NULL }, /* rankine (nv34) */ + {} +}; + diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c new file mode 100644 index 000000000000..ca1d27107a8e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv40_fb.c @@ -0,0 +1,62 @@ +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" +#include 
"nouveau_drm.h" + +int +nv40_fb_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t fb_bar_size, tmp; + int num_tiles; + int i; + + /* This is strictly a NV4x register (don't know about NV5x). */ + /* The blob sets these to all kinds of values, and they mess up our setup. */ + /* I got value 0x52802 instead. For some cards the blob even sets it back to 0x1. */ + /* Note: the blob doesn't read this value, so i'm pretty sure this is safe for all cards. */ + /* Any idea what this is? */ + nv_wr32(dev, NV40_PFB_UNK_800, 0x1); + + switch (dev_priv->chipset) { + case 0x40: + case 0x45: + tmp = nv_rd32(dev, NV10_PFB_CLOSE_PAGE2); + nv_wr32(dev, NV10_PFB_CLOSE_PAGE2, tmp & ~(1 << 15)); + num_tiles = NV10_PFB_TILE__SIZE; + break; + case 0x46: /* G72 */ + case 0x47: /* G70 */ + case 0x49: /* G71 */ + case 0x4b: /* G73 */ + case 0x4c: /* C51 (G7X version) */ + num_tiles = NV40_PFB_TILE__SIZE_1; + break; + default: + num_tiles = NV40_PFB_TILE__SIZE_0; + break; + } + + fb_bar_size = drm_get_resource_len(dev, 0) - 1; + switch (dev_priv->chipset) { + case 0x40: + for (i = 0; i < num_tiles; i++) { + nv_wr32(dev, NV10_PFB_TILE(i), 0); + nv_wr32(dev, NV10_PFB_TLIMIT(i), fb_bar_size); + } + break; + default: + for (i = 0; i < num_tiles; i++) { + nv_wr32(dev, NV40_PFB_TILE(i), 0); + nv_wr32(dev, NV40_PFB_TLIMIT(i), fb_bar_size); + } + break; + } + + return 0; +} + +void +nv40_fb_takedown(struct drm_device *dev) +{ +} diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c new file mode 100644 index 000000000000..b4f19ccb8b41 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv40_fifo.c @@ -0,0 +1,314 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "drmP.h" +#include "nouveau_drv.h" +#include "nouveau_drm.h" + +#define NV40_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV40_RAMFC__SIZE)) +#define NV40_RAMFC__SIZE 128 + +int +nv40_fifo_create_context(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t fc = NV40_RAMFC(chan->id); + int ret; + + ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0, + NV40_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC | + NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc); + if (ret) + return ret; + + dev_priv->engine.instmem.prepare_access(dev, true); + nv_wi32(dev, fc + 0, chan->pushbuf_base); + nv_wi32(dev, fc + 4, chan->pushbuf_base); + nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4); + nv_wi32(dev, fc + 24, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | + NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | + NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | +#ifdef __BIG_ENDIAN + NV_PFIFO_CACHE1_BIG_ENDIAN | +#endif + 0x30000000 /* no idea.. */); + nv_wi32(dev, fc + 56, chan->ramin_grctx->instance >> 4); + nv_wi32(dev, fc + 60, 0x0001FFFF); + dev_priv->engine.instmem.finish_access(dev); + + /* enable the fifo dma operation */ + nv_wr32(dev, NV04_PFIFO_MODE, + nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id)); + return 0; +} + +void +nv40_fifo_destroy_context(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + + nv_wr32(dev, NV04_PFIFO_MODE, + nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id)); + + if (chan->ramfc) + nouveau_gpuobj_ref_del(dev, &chan->ramfc); +} + +static void +nv40_fifo_do_load_context(struct drm_device *dev, int chid) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t fc = NV40_RAMFC(chid), tmp, tmp2; + + dev_priv->engine.instmem.prepare_access(dev, false); + + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0)); + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4)); + nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8)); + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, nv_ri32(dev, fc + 12)); + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, nv_ri32(dev, fc + 16)); + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 20)); + + /* No idea what 0x2058 is.. */ + tmp = nv_ri32(dev, fc + 24); + tmp2 = nv_rd32(dev, 0x2058) & 0xFFF; + tmp2 |= (tmp & 0x30000000); + nv_wr32(dev, 0x2058, tmp2); + tmp &= ~0x30000000; + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, tmp); + + nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 28)); + nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 32)); + nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE, nv_ri32(dev, fc + 36)); + tmp = nv_ri32(dev, fc + 40); + nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, tmp); + nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, nv_ri32(dev, fc + 44)); + nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, nv_ri32(dev, fc + 48)); + nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 52)); + nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, nv_ri32(dev, fc + 56)); + + /* Don't clobber the TIMEOUT_ENABLED flag when restoring from RAMFC */ + tmp = nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & ~0x1FFFF; + tmp |= nv_ri32(dev, fc + 60) & 0x1FFFF; + nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, tmp); + + nv_wr32(dev, 0x32e4, nv_ri32(dev, fc + 64)); + /* NVIDIA does this next line twice... 
*/ + nv_wr32(dev, 0x32e8, nv_ri32(dev, fc + 68)); + nv_wr32(dev, 0x2088, nv_ri32(dev, fc + 76)); + nv_wr32(dev, 0x3300, nv_ri32(dev, fc + 80)); + + dev_priv->engine.instmem.finish_access(dev); + + nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0); + nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0); +} + +int +nv40_fifo_load_context(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + uint32_t tmp; + + nv40_fifo_do_load_context(dev, chan->id); + + /* Set channel active, and in DMA mode */ + nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, + NV40_PFIFO_CACHE1_PUSH1_DMA | chan->id); + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1); + + /* Reset DMA_CTL_AT_INFO to INVALID */ + tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31); + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp); + + return 0; +} + +int +nv40_fifo_unload_context(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; + uint32_t fc, tmp; + int chid; + + chid = pfifo->channel_id(dev); + if (chid < 0 || chid >= dev_priv->engine.fifo.channels) + return 0; + fc = NV40_RAMFC(chid); + + dev_priv->engine.instmem.prepare_access(dev, true); + nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT)); + nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET)); + nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT)); + nv_wi32(dev, fc + 12, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE)); + nv_wi32(dev, fc + 16, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT)); + nv_wi32(dev, fc + 20, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE)); + tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH); + tmp |= nv_rd32(dev, 0x2058) & 0x30000000; + nv_wi32(dev, fc + 24, tmp); + nv_wi32(dev, fc + 28, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE)); + nv_wi32(dev, fc + 32, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1)); + nv_wi32(dev, fc + 36, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE)); + tmp = nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP); + nv_wi32(dev, fc + 40, tmp); + nv_wi32(dev, fc + 44, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT)); + nv_wi32(dev, fc + 48, nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE)); + /* NVIDIA read 0x3228 first, then write DMA_GET here.. maybe something + * more involved depending on the value of 0x3228? + */ + nv_wi32(dev, fc + 52, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET)); + nv_wi32(dev, fc + 56, nv_rd32(dev, NV40_PFIFO_GRCTX_INSTANCE)); + nv_wi32(dev, fc + 60, nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & 0x1ffff); + /* No idea what the below is for exactly, ripped from a mmio-trace */ + nv_wi32(dev, fc + 64, nv_rd32(dev, NV40_PFIFO_UNK32E4)); + /* NVIDIA do this next line twice.. bug? */ + nv_wi32(dev, fc + 68, nv_rd32(dev, 0x32e8)); + nv_wi32(dev, fc + 76, nv_rd32(dev, 0x2088)); + nv_wi32(dev, fc + 80, nv_rd32(dev, 0x3300)); +#if 0 /* no real idea which is PUT/GET in UNK_48.. 
*/ + tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_GET); + tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_PUT) << 16); + nv_wi32(dev, fc + 72, tmp); +#endif + dev_priv->engine.instmem.finish_access(dev); + + nv40_fifo_do_load_context(dev, pfifo->channels - 1); + nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, + NV40_PFIFO_CACHE1_PUSH1_DMA | (pfifo->channels - 1)); + return 0; +} + +static void +nv40_fifo_init_reset(struct drm_device *dev) +{ + int i; + + nv_wr32(dev, NV03_PMC_ENABLE, + nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO); + nv_wr32(dev, NV03_PMC_ENABLE, + nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO); + + nv_wr32(dev, 0x003224, 0x000f0078); + nv_wr32(dev, 0x003210, 0x00000000); + nv_wr32(dev, 0x003270, 0x00000000); + nv_wr32(dev, 0x003240, 0x00000000); + nv_wr32(dev, 0x003244, 0x00000000); + nv_wr32(dev, 0x003258, 0x00000000); + nv_wr32(dev, 0x002504, 0x00000000); + for (i = 0; i < 16; i++) + nv_wr32(dev, 0x002510 + (i * 4), 0x00000000); + nv_wr32(dev, 0x00250c, 0x0000ffff); + nv_wr32(dev, 0x002048, 0x00000000); + nv_wr32(dev, 0x003228, 0x00000000); + nv_wr32(dev, 0x0032e8, 0x00000000); + nv_wr32(dev, 0x002410, 0x00000000); + nv_wr32(dev, 0x002420, 0x00000000); + nv_wr32(dev, 0x002058, 0x00000001); + nv_wr32(dev, 0x00221c, 0x00000000); + /* something with 0x2084, read/modify/write, no change */ + nv_wr32(dev, 0x002040, 0x000000ff); + nv_wr32(dev, 0x002500, 0x00000000); + nv_wr32(dev, 0x003200, 0x00000000); + + nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x2101ffff); +} + +static void +nv40_fifo_init_ramxx(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | + ((dev_priv->ramht_bits - 9) << 16) | + (dev_priv->ramht_offset >> 8)); + nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8); + + switch (dev_priv->chipset) { + case 0x47: + case 0x49: + case 0x4b: + nv_wr32(dev, 0x2230, 1); + break; + default: + break; + } + + switch (dev_priv->chipset) { + case 0x40: + case 0x41: + case 0x42: + case 0x43: + case 0x45: + case 0x47: + case 0x48: + case 0x49: + case 0x4b: + nv_wr32(dev, NV40_PFIFO_RAMFC, 0x30002); + break; + default: + nv_wr32(dev, 0x2230, 0); + nv_wr32(dev, NV40_PFIFO_RAMFC, + ((nouveau_mem_fb_amount(dev) - 512 * 1024 + + dev_priv->ramfc_offset) >> 16) | (3 << 16)); + break; + } +} + +static void +nv40_fifo_init_intr(struct drm_device *dev) +{ + nv_wr32(dev, 0x002100, 0xffffffff); + nv_wr32(dev, 0x002140, 0xffffffff); +} + +int +nv40_fifo_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; + int i; + + nv40_fifo_init_reset(dev); + nv40_fifo_init_ramxx(dev); + + nv40_fifo_do_load_context(dev, pfifo->channels - 1); + nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1); + + nv40_fifo_init_intr(dev); + pfifo->enable(dev); + pfifo->reassign(dev, true); + + for (i = 0; i < dev_priv->engine.fifo.channels; i++) { + if (dev_priv->fifos[i]) { + uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE); + nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i)); + } + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c new file mode 100644 index 000000000000..d3e0a2a6acf8 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv40_graph.c @@ -0,0 +1,560 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" + +MODULE_FIRMWARE("nouveau/nv40.ctxprog"); +MODULE_FIRMWARE("nouveau/nv40.ctxvals"); +MODULE_FIRMWARE("nouveau/nv41.ctxprog"); +MODULE_FIRMWARE("nouveau/nv41.ctxvals"); +MODULE_FIRMWARE("nouveau/nv42.ctxprog"); +MODULE_FIRMWARE("nouveau/nv42.ctxvals"); +MODULE_FIRMWARE("nouveau/nv43.ctxprog"); +MODULE_FIRMWARE("nouveau/nv43.ctxvals"); +MODULE_FIRMWARE("nouveau/nv44.ctxprog"); +MODULE_FIRMWARE("nouveau/nv44.ctxvals"); +MODULE_FIRMWARE("nouveau/nv46.ctxprog"); +MODULE_FIRMWARE("nouveau/nv46.ctxvals"); +MODULE_FIRMWARE("nouveau/nv47.ctxprog"); +MODULE_FIRMWARE("nouveau/nv47.ctxvals"); +MODULE_FIRMWARE("nouveau/nv49.ctxprog"); +MODULE_FIRMWARE("nouveau/nv49.ctxvals"); +MODULE_FIRMWARE("nouveau/nv4a.ctxprog"); +MODULE_FIRMWARE("nouveau/nv4a.ctxvals"); +MODULE_FIRMWARE("nouveau/nv4b.ctxprog"); +MODULE_FIRMWARE("nouveau/nv4b.ctxvals"); +MODULE_FIRMWARE("nouveau/nv4c.ctxprog"); +MODULE_FIRMWARE("nouveau/nv4c.ctxvals"); +MODULE_FIRMWARE("nouveau/nv4e.ctxprog"); +MODULE_FIRMWARE("nouveau/nv4e.ctxvals"); + +struct nouveau_channel * +nv40_graph_channel(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t inst; + int i; + + inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR); + if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED)) + return NULL; + inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4; + + for (i = 0; i < dev_priv->engine.fifo.channels; i++) { + struct nouveau_channel *chan = dev_priv->fifos[i]; + + if (chan && chan->ramin_grctx && + chan->ramin_grctx->instance == inst) + return chan; + } + + return NULL; +} + +int +nv40_graph_create_context(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *ctx; + int ret; + + /* Allocate a 175KiB block of PRAMIN to store the context. This + * is massive overkill for a lot of chipsets, but it should be safe + * until we're able to implement this properly (will happen at more + * or less the same time we're able to write our own context programs. 
+ */ + ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 175*1024, 16, + NVOBJ_FLAG_ZERO_ALLOC, + &chan->ramin_grctx); + if (ret) + return ret; + ctx = chan->ramin_grctx->gpuobj; + + /* Initialise default context values */ + dev_priv->engine.instmem.prepare_access(dev, true); + nv40_grctx_vals_load(dev, ctx); + nv_wo32(dev, ctx, 0, ctx->im_pramin->start); + dev_priv->engine.instmem.finish_access(dev); + + return 0; +} + +void +nv40_graph_destroy_context(struct nouveau_channel *chan) +{ + nouveau_gpuobj_ref_del(chan->dev, &chan->ramin_grctx); +} + +static int +nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save) +{ + uint32_t old_cp, tv = 1000, tmp; + int i; + + old_cp = nv_rd32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER); + nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); + + tmp = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0310); + tmp |= save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE : + NV40_PGRAPH_CTXCTL_0310_XFER_LOAD; + nv_wr32(dev, NV40_PGRAPH_CTXCTL_0310, tmp); + + tmp = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0304); + tmp |= NV40_PGRAPH_CTXCTL_0304_XFER_CTX; + nv_wr32(dev, NV40_PGRAPH_CTXCTL_0304, tmp); + + nouveau_wait_for_idle(dev); + + for (i = 0; i < tv; i++) { + if (nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C) == 0) + break; + } + + nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp); + + if (i == tv) { + uint32_t ucstat = nv_rd32(dev, NV40_PGRAPH_CTXCTL_UCODE_STAT); + NV_ERROR(dev, "Failed: Instance=0x%08x Save=%d\n", inst, save); + NV_ERROR(dev, "IP: 0x%02x, Opcode: 0x%08x\n", + ucstat >> NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT, + ucstat & NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK); + NV_ERROR(dev, "0x40030C = 0x%08x\n", + nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C)); + return -EBUSY; + } + + return 0; +} + +/* Restore the context for a specific channel into PGRAPH */ +int +nv40_graph_load_context(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + uint32_t inst; + int ret; + + if (!chan->ramin_grctx) + return -EINVAL; + inst = chan->ramin_grctx->instance >> 4; + + ret = nv40_graph_transfer_context(dev, inst, 0); + if (ret) + return ret; + + /* 0x40032C, no idea of it's exact function. Could simply be a + * record of the currently active PGRAPH context. It's currently + * unknown as to what bit 24 does. The nv ddx has it set, so we will + * set it here too. + */ + nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); + nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, + (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) | + NV40_PGRAPH_CTXCTL_CUR_LOADED); + /* 0x32E0 records the instance address of the active FIFO's PGRAPH + * context. 
If at any time this doesn't match 0x40032C, you will + * receive PGRAPH_INTR_CONTEXT_SWITCH + */ + nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, inst); + return 0; +} + +int +nv40_graph_unload_context(struct drm_device *dev) +{ + uint32_t inst; + int ret; + + inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR); + if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED)) + return 0; + inst &= NV40_PGRAPH_CTXCTL_CUR_INSTANCE; + + ret = nv40_graph_transfer_context(dev, inst, 1); + + nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, inst); + return ret; +} + +struct nouveau_ctxprog { + uint32_t signature; + uint8_t version; + uint16_t length; + uint32_t data[]; +} __attribute__ ((packed)); + +struct nouveau_ctxvals { + uint32_t signature; + uint8_t version; + uint32_t length; + struct { + uint32_t offset; + uint32_t value; + } data[]; +} __attribute__ ((packed)); + +int +nv40_grctx_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + const int chipset = dev_priv->chipset; + const struct firmware *fw; + const struct nouveau_ctxprog *cp; + const struct nouveau_ctxvals *cv; + char name[32]; + int ret, i; + + pgraph->accel_blocked = true; + + if (!pgraph->ctxprog) { + sprintf(name, "nouveau/nv%02x.ctxprog", chipset); + ret = request_firmware(&fw, name, &dev->pdev->dev); + if (ret) { + NV_ERROR(dev, "No ctxprog for NV%02x\n", chipset); + return ret; + } + + pgraph->ctxprog = kmalloc(fw->size, GFP_KERNEL); + if (!pgraph->ctxprog) { + NV_ERROR(dev, "OOM copying ctxprog\n"); + release_firmware(fw); + return -ENOMEM; + } + memcpy(pgraph->ctxprog, fw->data, fw->size); + + cp = pgraph->ctxprog; + if (cp->signature != 0x5043564e || cp->version != 0 || + cp->length != ((fw->size - 7) / 4)) { + NV_ERROR(dev, "ctxprog invalid\n"); + release_firmware(fw); + nv40_grctx_fini(dev); + return -EINVAL; + } + release_firmware(fw); + } + + if (!pgraph->ctxvals) { + sprintf(name, "nouveau/nv%02x.ctxvals", chipset); + ret = request_firmware(&fw, name, &dev->pdev->dev); + if (ret) { + NV_ERROR(dev, "No ctxvals for NV%02x\n", chipset); + nv40_grctx_fini(dev); + return ret; + } + + pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL); + if (!pgraph->ctxvals) { + NV_ERROR(dev, "OOM copying ctxvals\n"); + release_firmware(fw); + nv40_grctx_fini(dev); + return -ENOMEM; + } + memcpy(pgraph->ctxvals, fw->data, fw->size); + + cv = (void *)pgraph->ctxvals; + if (cv->signature != 0x5643564e || cv->version != 0 || + cv->length != ((fw->size - 9) / 8)) { + NV_ERROR(dev, "ctxvals invalid\n"); + release_firmware(fw); + nv40_grctx_fini(dev); + return -EINVAL; + } + release_firmware(fw); + } + + cp = pgraph->ctxprog; + + nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); + for (i = 0; i < cp->length; i++) + nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp->data[i]); + + pgraph->accel_blocked = false; + return 0; +} + +void +nv40_grctx_fini(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + + if (pgraph->ctxprog) { + kfree(pgraph->ctxprog); + pgraph->ctxprog = NULL; + } + + if (pgraph->ctxvals) { + kfree(pgraph->ctxvals); + pgraph->ctxvals = NULL; + } +} + +void +nv40_grctx_vals_load(struct drm_device *dev, struct nouveau_gpuobj *ctx) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + struct nouveau_ctxvals *cv = pgraph->ctxvals; + int i; + + if (!cv) + return; + + for (i = 0; i < cv->length; i++) + 
nv_wo32(dev, ctx, cv->data[i].offset, cv->data[i].value); +} + +/* + * G70 0x47 + * G71 0x49 + * NV45 0x48 + * G72[M] 0x46 + * G73 0x4b + * C51_G7X 0x4c + * C51 0x4e + */ +int +nv40_graph_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = + (struct drm_nouveau_private *)dev->dev_private; + uint32_t vramsz, tmp; + int i, j; + + nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & + ~NV_PMC_ENABLE_PGRAPH); + nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | + NV_PMC_ENABLE_PGRAPH); + + nv40_grctx_init(dev); + + /* No context present currently */ + nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000); + + nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); + nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF); + + nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); + nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000); + nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0); + nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xe0de8055); + nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000); + nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f); + + nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100); + nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF); + + j = nv_rd32(dev, 0x1540) & 0xff; + if (j) { + for (i = 0; !(j & 1); j >>= 1, i++) + ; + nv_wr32(dev, 0x405000, i); + } + + if (dev_priv->chipset == 0x40) { + nv_wr32(dev, 0x4009b0, 0x83280fff); + nv_wr32(dev, 0x4009b4, 0x000000a0); + } else { + nv_wr32(dev, 0x400820, 0x83280eff); + nv_wr32(dev, 0x400824, 0x000000a0); + } + + switch (dev_priv->chipset) { + case 0x40: + case 0x45: + nv_wr32(dev, 0x4009b8, 0x0078e366); + nv_wr32(dev, 0x4009bc, 0x0000014c); + break; + case 0x41: + case 0x42: /* pciid also 0x00Cx */ + /* case 0x0120: XXX (pciid) */ + nv_wr32(dev, 0x400828, 0x007596ff); + nv_wr32(dev, 0x40082c, 0x00000108); + break; + case 0x43: + nv_wr32(dev, 0x400828, 0x0072cb77); + nv_wr32(dev, 0x40082c, 0x00000108); + break; + case 0x44: + case 0x46: /* G72 */ + case 0x4a: + case 0x4c: /* G7x-based C51 */ + case 0x4e: + nv_wr32(dev, 0x400860, 0); + nv_wr32(dev, 0x400864, 0); + break; + case 0x47: /* G70 */ + case 0x49: /* G71 */ + case 0x4b: /* G73 */ + nv_wr32(dev, 0x400828, 0x07830610); + nv_wr32(dev, 0x40082c, 0x0000016A); + break; + default: + break; + } + + nv_wr32(dev, 0x400b38, 0x2ffff800); + nv_wr32(dev, 0x400b3c, 0x00006000); + + /* copy tile info from PFB */ + switch (dev_priv->chipset) { + case 0x40: /* vanilla NV40 */ + for (i = 0; i < NV10_PFB_TILE__SIZE; i++) { + tmp = nv_rd32(dev, NV10_PFB_TILE(i)); + nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp); + nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp); + tmp = nv_rd32(dev, NV10_PFB_TLIMIT(i)); + nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp); + nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp); + tmp = nv_rd32(dev, NV10_PFB_TSIZE(i)); + nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp); + nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp); + tmp = nv_rd32(dev, NV10_PFB_TSTATUS(i)); + nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp); + nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp); + } + break; + case 0x44: + case 0x4a: + case 0x4e: /* NV44-based cores don't have 0x406900? 
*/ + for (i = 0; i < NV40_PFB_TILE__SIZE_0; i++) { + tmp = nv_rd32(dev, NV40_PFB_TILE(i)); + nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp); + tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i)); + nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp); + tmp = nv_rd32(dev, NV40_PFB_TSIZE(i)); + nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp); + tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i)); + nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp); + } + break; + case 0x46: + case 0x47: + case 0x49: + case 0x4b: /* G7X-based cores */ + for (i = 0; i < NV40_PFB_TILE__SIZE_1; i++) { + tmp = nv_rd32(dev, NV40_PFB_TILE(i)); + nv_wr32(dev, NV47_PGRAPH_TILE0(i), tmp); + nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp); + tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i)); + nv_wr32(dev, NV47_PGRAPH_TLIMIT0(i), tmp); + nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp); + tmp = nv_rd32(dev, NV40_PFB_TSIZE(i)); + nv_wr32(dev, NV47_PGRAPH_TSIZE0(i), tmp); + nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp); + tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i)); + nv_wr32(dev, NV47_PGRAPH_TSTATUS0(i), tmp); + nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp); + } + break; + default: /* everything else */ + for (i = 0; i < NV40_PFB_TILE__SIZE_0; i++) { + tmp = nv_rd32(dev, NV40_PFB_TILE(i)); + nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp); + nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp); + tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i)); + nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp); + nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp); + tmp = nv_rd32(dev, NV40_PFB_TSIZE(i)); + nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp); + nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp); + tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i)); + nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp); + nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp); + } + break; + } + + /* begin RAM config */ + vramsz = drm_get_resource_len(dev, 0) - 1; + switch (dev_priv->chipset) { + case 0x40: + nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0)); + nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1)); + nv_wr32(dev, 0x4069A4, nv_rd32(dev, NV04_PFB_CFG0)); + nv_wr32(dev, 0x4069A8, nv_rd32(dev, NV04_PFB_CFG1)); + nv_wr32(dev, 0x400820, 0); + nv_wr32(dev, 0x400824, 0); + nv_wr32(dev, 0x400864, vramsz); + nv_wr32(dev, 0x400868, vramsz); + break; + default: + switch (dev_priv->chipset) { + case 0x46: + case 0x47: + case 0x49: + case 0x4b: + nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0)); + nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1)); + break; + default: + nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0)); + nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1)); + break; + } + nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0)); + nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1)); + nv_wr32(dev, 0x400840, 0); + nv_wr32(dev, 0x400844, 0); + nv_wr32(dev, 0x4008A0, vramsz); + nv_wr32(dev, 0x4008A4, vramsz); + break; + } + + return 0; +} + +void nv40_graph_takedown(struct drm_device *dev) +{ +} + +struct nouveau_pgraph_object_class nv40_graph_grclass[] = { + { 0x0030, false, NULL }, /* null */ + { 0x0039, false, NULL }, /* m2mf */ + { 0x004a, false, NULL }, /* gdirect */ + { 0x009f, false, NULL }, /* imageblit (nv12) */ + { 0x008a, false, NULL }, /* ifc */ + { 0x0089, false, NULL }, /* sifm */ + { 0x3089, false, NULL }, /* sifm (nv40) */ + { 0x0062, false, NULL }, /* surf2d */ + { 0x3062, false, NULL }, /* surf2d (nv40) */ + { 0x0043, false, NULL }, /* rop */ + { 0x0012, false, NULL }, /* beta1 */ + { 0x0072, false, NULL }, /* beta4 */ + { 0x0019, false, NULL }, /* cliprect */ + { 0x0044, false, NULL }, /* pattern */ + { 0x309e, false, NULL }, /* swzsurf */ + { 0x4097, false, NULL }, /* curie (nv40) */ + { 
0x4497, false, NULL }, /* curie (nv44) */ + {} +}; + diff --git a/drivers/gpu/drm/nouveau/nv40_mc.c b/drivers/gpu/drm/nouveau/nv40_mc.c new file mode 100644 index 000000000000..2a3495e848e9 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv40_mc.c @@ -0,0 +1,38 @@ +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" +#include "nouveau_drm.h" + +int +nv40_mc_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t tmp; + + /* Power up everything, resetting each individual unit will + * be done later if needed. + */ + nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF); + + switch (dev_priv->chipset) { + case 0x44: + case 0x46: /* G72 */ + case 0x4e: + case 0x4c: /* C51_G7X */ + tmp = nv_rd32(dev, NV40_PFB_020C); + nv_wr32(dev, NV40_PMC_1700, tmp); + nv_wr32(dev, NV40_PMC_1704, 0); + nv_wr32(dev, NV40_PMC_1708, 0); + nv_wr32(dev, NV40_PMC_170C, tmp); + break; + default: + break; + } + + return 0; +} + +void +nv40_mc_takedown(struct drm_device *dev) +{ +} diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c new file mode 100644 index 000000000000..f8e28a1e44e7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c @@ -0,0 +1,769 @@ +/* + * Copyright (C) 2008 Maarten Maathuis. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "drmP.h" +#include "drm_mode.h" +#include "drm_crtc_helper.h" + +#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) +#include "nouveau_reg.h" +#include "nouveau_drv.h" +#include "nouveau_hw.h" +#include "nouveau_encoder.h" +#include "nouveau_crtc.h" +#include "nouveau_fb.h" +#include "nouveau_connector.h" +#include "nv50_display.h" + +static void +nv50_crtc_lut_load(struct drm_crtc *crtc) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo); + int i; + + NV_DEBUG(crtc->dev, "\n"); + + for (i = 0; i < 256; i++) { + writew(nv_crtc->lut.r[i] >> 2, lut + 8*i + 0); + writew(nv_crtc->lut.g[i] >> 2, lut + 8*i + 2); + writew(nv_crtc->lut.b[i] >> 2, lut + 8*i + 4); + } + + if (nv_crtc->lut.depth == 30) { + writew(nv_crtc->lut.r[i - 1] >> 2, lut + 8*i + 0); + writew(nv_crtc->lut.g[i - 1] >> 2, lut + 8*i + 2); + writew(nv_crtc->lut.b[i - 1] >> 2, lut + 8*i + 4); + } +} + +int +nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked) +{ + struct drm_device *dev = nv_crtc->base.dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *evo = dev_priv->evo; + int index = nv_crtc->index, ret; + + NV_DEBUG(dev, "index %d\n", nv_crtc->index); + NV_DEBUG(dev, "%s\n", blanked ? "blanked" : "unblanked"); + + if (blanked) { + nv_crtc->cursor.hide(nv_crtc, false); + + ret = RING_SPACE(evo, dev_priv->chipset != 0x50 ? 7 : 5); + if (ret) { + NV_ERROR(dev, "no space while blanking crtc\n"); + return ret; + } + BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2); + OUT_RING(evo, NV50_EVO_CRTC_CLUT_MODE_BLANK); + OUT_RING(evo, 0); + if (dev_priv->chipset != 0x50) { + BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1); + OUT_RING(evo, NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE); + } + + BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1); + OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE); + } else { + if (nv_crtc->cursor.visible) + nv_crtc->cursor.show(nv_crtc, false); + else + nv_crtc->cursor.hide(nv_crtc, false); + + ret = RING_SPACE(evo, dev_priv->chipset != 0x50 ? 10 : 8); + if (ret) { + NV_ERROR(dev, "no space while unblanking crtc\n"); + return ret; + } + BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2); + OUT_RING(evo, nv_crtc->lut.depth == 8 ? + NV50_EVO_CRTC_CLUT_MODE_OFF : + NV50_EVO_CRTC_CLUT_MODE_ON); + OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.mm_node->start << + PAGE_SHIFT) >> 8); + if (dev_priv->chipset != 0x50) { + BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1); + OUT_RING(evo, NvEvoVRAM); + } + + BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_OFFSET), 2); + OUT_RING(evo, nv_crtc->fb.offset >> 8); + OUT_RING(evo, 0); + BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1); + if (dev_priv->chipset != 0x50) + if (nv_crtc->fb.tile_flags == 0x7a00) + OUT_RING(evo, NvEvoFB32); + else + if (nv_crtc->fb.tile_flags == 0x7000) + OUT_RING(evo, NvEvoFB16); + else + OUT_RING(evo, NvEvoVRAM); + else + OUT_RING(evo, NvEvoVRAM); + } + + nv_crtc->fb.blanked = blanked; + return 0; +} + +static int +nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update) +{ + struct drm_device *dev = nv_crtc->base.dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *evo = dev_priv->evo; + int ret; + + NV_DEBUG(dev, "\n"); + + ret = RING_SPACE(evo, 2 + (update ? 
2 : 0)); + if (ret) { + NV_ERROR(dev, "no space while setting dither\n"); + return ret; + } + + BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DITHER_CTRL), 1); + if (on) + OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_ON); + else + OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_OFF); + + if (update) { + BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1); + OUT_RING(evo, 0); + FIRE_RING(evo); + } + + return 0; +} + +struct nouveau_connector * +nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc) +{ + struct drm_device *dev = nv_crtc->base.dev; + struct drm_connector *connector; + struct drm_crtc *crtc = to_drm_crtc(nv_crtc); + + /* The safest approach is to find an encoder with the right crtc, that + * is also linked to a connector. */ + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if (connector->encoder) + if (connector->encoder->crtc == crtc) + return nouveau_connector(connector); + } + + return NULL; +} + +static int +nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update) +{ + struct nouveau_connector *nv_connector = + nouveau_crtc_connector_get(nv_crtc); + struct drm_device *dev = nv_crtc->base.dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *evo = dev_priv->evo; + struct drm_display_mode *native_mode = NULL; + struct drm_display_mode *mode = &nv_crtc->base.mode; + uint32_t outX, outY, horiz, vert; + int ret; + + NV_DEBUG(dev, "\n"); + + switch (scaling_mode) { + case DRM_MODE_SCALE_NONE: + break; + default: + if (!nv_connector || !nv_connector->native_mode) { + NV_ERROR(dev, "No native mode, forcing panel scaling\n"); + scaling_mode = DRM_MODE_SCALE_NONE; + } else { + native_mode = nv_connector->native_mode; + } + break; + } + + switch (scaling_mode) { + case DRM_MODE_SCALE_ASPECT: + horiz = (native_mode->hdisplay << 19) / mode->hdisplay; + vert = (native_mode->vdisplay << 19) / mode->vdisplay; + + if (vert > horiz) { + outX = (mode->hdisplay * horiz) >> 19; + outY = (mode->vdisplay * horiz) >> 19; + } else { + outX = (mode->hdisplay * vert) >> 19; + outY = (mode->vdisplay * vert) >> 19; + } + break; + case DRM_MODE_SCALE_FULLSCREEN: + outX = native_mode->hdisplay; + outY = native_mode->vdisplay; + break; + case DRM_MODE_SCALE_CENTER: + case DRM_MODE_SCALE_NONE: + default: + outX = mode->hdisplay; + outY = mode->vdisplay; + break; + } + + ret = RING_SPACE(evo, update ? 7 : 5); + if (ret) + return ret; + + /* Got a better name for SCALER_ACTIVE? */ + /* One day i've got to really figure out why this is needed. 
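+	 * In practice the scaler is forced active below for doublescan or
+	 * interlaced modes, or whenever the programmed output size differs
+	 * from the mode itself. The aspect path above works in 19-bit fixed
+	 * point; e.g. a 1280x1024 native panel fed a 1024x768 mode gives
+	 * horiz = 1.25 and vert = 1.33 (times 2^19), the smaller ratio wins,
+	 * and the image is scaled to 1280x960.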
*/ + BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1); + if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) || + (mode->flags & DRM_MODE_FLAG_INTERLACE) || + mode->hdisplay != outX || mode->vdisplay != outY) { + OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_ACTIVE); + } else { + OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_INACTIVE); + } + + BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2); + OUT_RING(evo, outY << 16 | outX); + OUT_RING(evo, outY << 16 | outX); + + if (update) { + BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1); + OUT_RING(evo, 0); + FIRE_RING(evo); + } + + return 0; +} + +int +nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk) +{ + uint32_t pll_reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head); + struct nouveau_pll_vals pll; + struct pll_lims limits; + uint32_t reg1, reg2; + int ret; + + ret = get_pll_limits(dev, pll_reg, &limits); + if (ret) + return ret; + + ret = nouveau_calc_pll_mnp(dev, &limits, pclk, &pll); + if (ret <= 0) + return ret; + + if (limits.vco2.maxfreq) { + reg1 = nv_rd32(dev, pll_reg + 4) & 0xff00ff00; + reg2 = nv_rd32(dev, pll_reg + 8) & 0x8000ff00; + nv_wr32(dev, pll_reg, 0x10000611); + nv_wr32(dev, pll_reg + 4, reg1 | (pll.M1 << 16) | pll.N1); + nv_wr32(dev, pll_reg + 8, + reg2 | (pll.log2P << 28) | (pll.M2 << 16) | pll.N2); + } else { + reg1 = nv_rd32(dev, pll_reg + 4) & 0xffc00000; + nv_wr32(dev, pll_reg, 0x50000610); + nv_wr32(dev, pll_reg + 4, reg1 | + (pll.log2P << 16) | (pll.M1 << 8) | pll.N1); + } + + return 0; +} + +static void +nv50_crtc_destroy(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + + NV_DEBUG(dev, "\n"); + + if (!crtc) + return; + + drm_crtc_cleanup(&nv_crtc->base); + + nv50_cursor_fini(nv_crtc); + + nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); + nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); + kfree(nv_crtc->mode); + kfree(nv_crtc); +} + +int +nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, + uint32_t buffer_handle, uint32_t width, uint32_t height) +{ + struct drm_device *dev = crtc->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct nouveau_bo *cursor = NULL; + struct drm_gem_object *gem; + int ret = 0, i; + + if (width != 64 || height != 64) + return -EINVAL; + + if (!buffer_handle) { + nv_crtc->cursor.hide(nv_crtc, true); + return 0; + } + + gem = drm_gem_object_lookup(dev, file_priv, buffer_handle); + if (!gem) + return -EINVAL; + cursor = nouveau_gem_object(gem); + + ret = nouveau_bo_map(cursor); + if (ret) + goto out; + + /* The simple will do for now. 
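+	 * i.e. the 64x64 cursor image is copied into the per-CRTC cursor
+	 * buffer one 32-bit word at a time rather than via an accelerated
+	 * blit.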
*/ + for (i = 0; i < 64 * 64; i++) + nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, nouveau_bo_rd32(cursor, i)); + + nouveau_bo_unmap(cursor); + + nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset - + dev_priv->vm_vram_base); + nv_crtc->cursor.show(nv_crtc, true); + +out: + mutex_lock(&dev->struct_mutex); + drm_gem_object_unreference(gem); + mutex_unlock(&dev->struct_mutex); + return ret; +} + +int +nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + + nv_crtc->cursor.set_pos(nv_crtc, x, y); + return 0; +} + +static void +nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, + uint32_t size) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + int i; + + if (size != 256) + return; + + for (i = 0; i < 256; i++) { + nv_crtc->lut.r[i] = r[i]; + nv_crtc->lut.g[i] = g[i]; + nv_crtc->lut.b[i] = b[i]; + } + + /* We need to know the depth before we upload, but it's possible to + * get called before a framebuffer is bound. If this is the case, + * mark the lut values as dirty by setting depth==0, and it'll be + * uploaded on the first mode_set_base() + */ + if (!nv_crtc->base.fb) { + nv_crtc->lut.depth = 0; + return; + } + + nv50_crtc_lut_load(crtc); +} + +static void +nv50_crtc_save(struct drm_crtc *crtc) +{ + NV_ERROR(crtc->dev, "!!\n"); +} + +static void +nv50_crtc_restore(struct drm_crtc *crtc) +{ + NV_ERROR(crtc->dev, "!!\n"); +} + +static const struct drm_crtc_funcs nv50_crtc_funcs = { + .save = nv50_crtc_save, + .restore = nv50_crtc_restore, + .cursor_set = nv50_crtc_cursor_set, + .cursor_move = nv50_crtc_cursor_move, + .gamma_set = nv50_crtc_gamma_set, + .set_config = drm_crtc_helper_set_config, + .destroy = nv50_crtc_destroy, +}; + +static void +nv50_crtc_dpms(struct drm_crtc *crtc, int mode) +{ +} + +static void +nv50_crtc_prepare(struct drm_crtc *crtc) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct drm_encoder *encoder; + + NV_DEBUG(dev, "index %d\n", nv_crtc->index); + + /* Disconnect all unused encoders. */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + + if (drm_helper_encoder_in_use(encoder)) + continue; + + nv_encoder->disconnect(nv_encoder); + } + + nv50_crtc_blank(nv_crtc, true); +} + +static void +nv50_crtc_commit(struct drm_crtc *crtc) +{ + struct drm_crtc *crtc2; + struct drm_device *dev = crtc->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *evo = dev_priv->evo; + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + int ret; + + NV_DEBUG(dev, "index %d\n", nv_crtc->index); + + nv50_crtc_blank(nv_crtc, false); + + /* Explicitly blank all unused crtc's. 
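+	 * Presumably this stops a head that was lit by a previous
+	 * configuration, but is no longer in use, from continuing to scan
+	 * out once the update below is fired.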
*/ + list_for_each_entry(crtc2, &dev->mode_config.crtc_list, head) { + if (!drm_helper_crtc_in_use(crtc2)) + nv50_crtc_blank(nouveau_crtc(crtc2), true); + } + + ret = RING_SPACE(evo, 2); + if (ret) { + NV_ERROR(dev, "no space while committing crtc\n"); + return; + } + BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1); + OUT_RING(evo, 0); + FIRE_RING(evo); +} + +static bool +nv50_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static int +nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb, bool update) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct drm_device *dev = nv_crtc->base.dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *evo = dev_priv->evo; + struct drm_framebuffer *drm_fb = nv_crtc->base.fb; + struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); + int ret, format; + + NV_DEBUG(dev, "index %d\n", nv_crtc->index); + + switch (drm_fb->depth) { + case 8: + format = NV50_EVO_CRTC_FB_DEPTH_8; + break; + case 15: + format = NV50_EVO_CRTC_FB_DEPTH_15; + break; + case 16: + format = NV50_EVO_CRTC_FB_DEPTH_16; + break; + case 24: + case 32: + format = NV50_EVO_CRTC_FB_DEPTH_24; + break; + case 30: + format = NV50_EVO_CRTC_FB_DEPTH_30; + break; + default: + NV_ERROR(dev, "unknown depth %d\n", drm_fb->depth); + return -EINVAL; + } + + ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM); + if (ret) + return ret; + + if (old_fb) { + struct nouveau_framebuffer *ofb = nouveau_framebuffer(old_fb); + nouveau_bo_unpin(ofb->nvbo); + } + + nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base; + nv_crtc->fb.tile_flags = fb->nvbo->tile_flags; + nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8; + if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) { + ret = RING_SPACE(evo, 2); + if (ret) + return ret; + + BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1); + if (nv_crtc->fb.tile_flags == 0x7a00) + OUT_RING(evo, NvEvoFB32); + else + if (nv_crtc->fb.tile_flags == 0x7000) + OUT_RING(evo, NvEvoFB16); + else + OUT_RING(evo, NvEvoVRAM); + } + + ret = RING_SPACE(evo, 12); + if (ret) + return ret; + + BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5); + OUT_RING(evo, nv_crtc->fb.offset >> 8); + OUT_RING(evo, 0); + OUT_RING(evo, (drm_fb->height << 16) | drm_fb->width); + if (!nv_crtc->fb.tile_flags) { + OUT_RING(evo, drm_fb->pitch | (1 << 20)); + } else { + OUT_RING(evo, ((drm_fb->pitch / 4) << 4) | + fb->nvbo->tile_mode); + } + if (dev_priv->chipset == 0x50) + OUT_RING(evo, (fb->nvbo->tile_flags << 8) | format); + else + OUT_RING(evo, format); + + BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1); + OUT_RING(evo, fb->base.depth == 8 ? 
+ NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON); + + BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1); + OUT_RING(evo, NV50_EVO_CRTC_COLOR_CTRL_COLOR); + BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1); + OUT_RING(evo, (y << 16) | x); + + if (nv_crtc->lut.depth != fb->base.depth) { + nv_crtc->lut.depth = fb->base.depth; + nv50_crtc_lut_load(crtc); + } + + if (update) { + ret = RING_SPACE(evo, 2); + if (ret) + return ret; + BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1); + OUT_RING(evo, 0); + FIRE_RING(evo); + } + + return 0; +} + +static int +nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, int x, int y, + struct drm_framebuffer *old_fb) +{ + struct drm_device *dev = crtc->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *evo = dev_priv->evo; + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct nouveau_connector *nv_connector = NULL; + uint32_t hsync_dur, vsync_dur, hsync_start_to_end, vsync_start_to_end; + uint32_t hunk1, vunk1, vunk2a, vunk2b; + int ret; + + /* Find the connector attached to this CRTC */ + nv_connector = nouveau_crtc_connector_get(nv_crtc); + + *nv_crtc->mode = *adjusted_mode; + + NV_DEBUG(dev, "index %d\n", nv_crtc->index); + + hsync_dur = adjusted_mode->hsync_end - adjusted_mode->hsync_start; + vsync_dur = adjusted_mode->vsync_end - adjusted_mode->vsync_start; + hsync_start_to_end = adjusted_mode->htotal - adjusted_mode->hsync_start; + vsync_start_to_end = adjusted_mode->vtotal - adjusted_mode->vsync_start; + /* I can't give this a proper name, anyone else can? */ + hunk1 = adjusted_mode->htotal - + adjusted_mode->hsync_start + adjusted_mode->hdisplay; + vunk1 = adjusted_mode->vtotal - + adjusted_mode->vsync_start + adjusted_mode->vdisplay; + /* Another strange value, this time only for interlaced adjusted_modes. */ + vunk2a = 2 * adjusted_mode->vtotal - + adjusted_mode->vsync_start + adjusted_mode->vdisplay; + vunk2b = adjusted_mode->vtotal - + adjusted_mode->vsync_start + adjusted_mode->vtotal; + + if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { + vsync_dur /= 2; + vsync_start_to_end /= 2; + vunk1 /= 2; + vunk2a /= 2; + vunk2b /= 2; + /* magic */ + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) { + vsync_start_to_end -= 1; + vunk1 -= 1; + vunk2a -= 1; + vunk2b -= 1; + } + } + + ret = RING_SPACE(evo, 17); + if (ret) + return ret; + + BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLOCK), 2); + OUT_RING(evo, adjusted_mode->clock | 0x800000); + OUT_RING(evo, (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 0); + + BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DISPLAY_START), 5); + OUT_RING(evo, 0); + OUT_RING(evo, (adjusted_mode->vtotal << 16) | adjusted_mode->htotal); + OUT_RING(evo, (vsync_dur - 1) << 16 | (hsync_dur - 1)); + OUT_RING(evo, (vsync_start_to_end - 1) << 16 | + (hsync_start_to_end - 1)); + OUT_RING(evo, (vunk1 - 1) << 16 | (hunk1 - 1)); + + if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { + BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK0824), 1); + OUT_RING(evo, (vunk2b - 1) << 16 | (vunk2a - 1)); + } else { + OUT_RING(evo, 0); + OUT_RING(evo, 0); + } + + BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK082C), 1); + OUT_RING(evo, 0); + + /* This is the actual resolution of the mode. 
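+	 * i.e. the unscaled hdisplay/vdisplay of the requested mode, as
+	 * opposed to the scaled output size programmed by
+	 * nv50_crtc_set_scale().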
*/ + BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, REAL_RES), 1); + OUT_RING(evo, (mode->vdisplay << 16) | mode->hdisplay); + BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CENTER_OFFSET), 1); + OUT_RING(evo, NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(0, 0)); + + nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering, false); + nv_crtc->set_scale(nv_crtc, nv_connector->scaling_mode, false); + + return nv50_crtc_do_mode_set_base(crtc, x, y, old_fb, false); +} + +static int +nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) +{ + return nv50_crtc_do_mode_set_base(crtc, x, y, old_fb, true); +} + +static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = { + .dpms = nv50_crtc_dpms, + .prepare = nv50_crtc_prepare, + .commit = nv50_crtc_commit, + .mode_fixup = nv50_crtc_mode_fixup, + .mode_set = nv50_crtc_mode_set, + .mode_set_base = nv50_crtc_mode_set_base, + .load_lut = nv50_crtc_lut_load, +}; + +int +nv50_crtc_create(struct drm_device *dev, int index) +{ + struct nouveau_crtc *nv_crtc = NULL; + int ret, i; + + NV_DEBUG(dev, "\n"); + + nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL); + if (!nv_crtc) + return -ENOMEM; + + nv_crtc->mode = kzalloc(sizeof(*nv_crtc->mode), GFP_KERNEL); + if (!nv_crtc->mode) { + kfree(nv_crtc); + return -ENOMEM; + } + + /* Default CLUT parameters, will be activated on the hw upon + * first mode set. + */ + for (i = 0; i < 256; i++) { + nv_crtc->lut.r[i] = i << 8; + nv_crtc->lut.g[i] = i << 8; + nv_crtc->lut.b[i] = i << 8; + } + nv_crtc->lut.depth = 0; + + ret = nouveau_bo_new(dev, NULL, 4096, 0x100, TTM_PL_FLAG_VRAM, + 0, 0x0000, false, true, &nv_crtc->lut.nvbo); + if (!ret) { + ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM); + if (!ret) + ret = nouveau_bo_map(nv_crtc->lut.nvbo); + if (ret) + nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); + } + + if (ret) { + kfree(nv_crtc->mode); + kfree(nv_crtc); + return ret; + } + + nv_crtc->index = index; + + /* set function pointers */ + nv_crtc->set_dither = nv50_crtc_set_dither; + nv_crtc->set_scale = nv50_crtc_set_scale; + + drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs); + drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs); + drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256); + + ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM, + 0, 0x0000, false, true, &nv_crtc->cursor.nvbo); + if (!ret) { + ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); + if (!ret) + ret = nouveau_bo_map(nv_crtc->cursor.nvbo); + if (ret) + nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); + } + + nv50_cursor_init(nv_crtc); + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c new file mode 100644 index 000000000000..e2e79a8f220d --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_cursor.c @@ -0,0 +1,156 @@ +/* + * Copyright (C) 2008 Maarten Maathuis. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "drmP.h" +#include "drm_mode.h" + +#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) +#include "nouveau_reg.h" +#include "nouveau_drv.h" +#include "nouveau_crtc.h" +#include "nv50_display.h" + +static void +nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update) +{ + struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private; + struct nouveau_channel *evo = dev_priv->evo; + struct drm_device *dev = nv_crtc->base.dev; + int ret; + + NV_DEBUG(dev, "\n"); + + if (update && nv_crtc->cursor.visible) + return; + + ret = RING_SPACE(evo, (dev_priv->chipset != 0x50 ? 5 : 3) + update * 2); + if (ret) { + NV_ERROR(dev, "no space while unhiding cursor\n"); + return; + } + + if (dev_priv->chipset != 0x50) { + BEGIN_RING(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1); + OUT_RING(evo, NvEvoVRAM); + } + BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2); + OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_SHOW); + OUT_RING(evo, nv_crtc->cursor.offset >> 8); + + if (update) { + BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1); + OUT_RING(evo, 0); + FIRE_RING(evo); + nv_crtc->cursor.visible = true; + } +} + +static void +nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update) +{ + struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private; + struct nouveau_channel *evo = dev_priv->evo; + struct drm_device *dev = nv_crtc->base.dev; + int ret; + + NV_DEBUG(dev, "\n"); + + if (update && !nv_crtc->cursor.visible) + return; + + ret = RING_SPACE(evo, (dev_priv->chipset != 0x50 ? 5 : 3) + update * 2); + if (ret) { + NV_ERROR(dev, "no space while hiding cursor\n"); + return; + } + BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2); + OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_HIDE); + OUT_RING(evo, 0); + if (dev_priv->chipset != 0x50) { + BEGIN_RING(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1); + OUT_RING(evo, NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE); + } + + if (update) { + BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1); + OUT_RING(evo, 0); + FIRE_RING(evo); + nv_crtc->cursor.visible = false; + } +} + +static void +nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y) +{ + struct drm_device *dev = nv_crtc->base.dev; + + nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index), + ((y & 0xFFFF) << 16) | (x & 0xFFFF)); + /* Needed to make the cursor move. 
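+	 * Writing 0 to the _CTRL register below apparently latches the
+	 * position written above.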
*/ + nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS_CTRL(nv_crtc->index), 0); +} + +static void +nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset) +{ + NV_DEBUG(nv_crtc->base.dev, "\n"); + if (offset == nv_crtc->cursor.offset) + return; + + nv_crtc->cursor.offset = offset; + if (nv_crtc->cursor.visible) { + nv_crtc->cursor.visible = false; + nv_crtc->cursor.show(nv_crtc, true); + } +} + +int +nv50_cursor_init(struct nouveau_crtc *nv_crtc) +{ + nv_crtc->cursor.set_offset = nv50_cursor_set_offset; + nv_crtc->cursor.set_pos = nv50_cursor_set_pos; + nv_crtc->cursor.hide = nv50_cursor_hide; + nv_crtc->cursor.show = nv50_cursor_show; + return 0; +} + +void +nv50_cursor_fini(struct nouveau_crtc *nv_crtc) +{ + struct drm_device *dev = nv_crtc->base.dev; + int idx = nv_crtc->index; + + NV_DEBUG(dev, "\n"); + + nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), 0); + if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), + NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) { + NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n"); + NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n", + nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx))); + } +} + diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c new file mode 100644 index 000000000000..fb5838e3be24 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_dac.c @@ -0,0 +1,304 @@ +/* + * Copyright (C) 2008 Maarten Maathuis. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "drmP.h" +#include "drm_crtc_helper.h" + +#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) +#include "nouveau_reg.h" +#include "nouveau_drv.h" +#include "nouveau_dma.h" +#include "nouveau_encoder.h" +#include "nouveau_connector.h" +#include "nouveau_crtc.h" +#include "nv50_display.h" + +static void +nv50_dac_disconnect(struct nouveau_encoder *nv_encoder) +{ + struct drm_device *dev = to_drm_encoder(nv_encoder)->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *evo = dev_priv->evo; + int ret; + + NV_DEBUG(dev, "Disconnecting DAC %d\n", nv_encoder->or); + + ret = RING_SPACE(evo, 2); + if (ret) { + NV_ERROR(dev, "no space while disconnecting DAC\n"); + return; + } + BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1); + OUT_RING(evo, 0); +} + +static enum drm_connector_status +nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + enum drm_connector_status status = connector_status_disconnected; + uint32_t dpms_state, load_pattern, load_state; + int or = nv_encoder->or; + + nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(or), 0x00000001); + dpms_state = nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)); + + nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), + 0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING); + if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or), + NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) { + NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or); + NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or, + nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or))); + return status; + } + + /* Use bios provided value if possible. 
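+	 * The VBIOS-provided dactestval, when present, replaces the default
+	 * DAC load-detect test pattern of 340 used below.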
*/ + if (dev_priv->vbios->dactestval) { + load_pattern = dev_priv->vbios->dactestval; + NV_DEBUG(dev, "Using bios provided load_pattern of %d\n", + load_pattern); + } else { + load_pattern = 340; + NV_DEBUG(dev, "Using default load_pattern of %d\n", + load_pattern); + } + + nv_wr32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or), + NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE | load_pattern); + mdelay(45); /* give it some time to process */ + load_state = nv_rd32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or)); + + nv_wr32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 0); + nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), dpms_state | + NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING); + + if ((load_state & NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) == + NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) + status = connector_status_connected; + + if (status == connector_status_connected) + NV_DEBUG(dev, "Load was detected on output with or %d\n", or); + else + NV_DEBUG(dev, "Load was not detected on output with or %d\n", or); + + return status; +} + +static void +nv50_dac_dpms(struct drm_encoder *encoder, int mode) +{ + struct drm_device *dev = encoder->dev; + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + uint32_t val; + int or = nv_encoder->or; + + NV_DEBUG(dev, "or %d mode %d\n", or, mode); + + /* wait for it to be done */ + if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or), + NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) { + NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or); + NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or, + nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or))); + return; + } + + val = nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)) & ~0x7F; + + if (mode != DRM_MODE_DPMS_ON) + val |= NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED; + + switch (mode) { + case DRM_MODE_DPMS_STANDBY: + val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF; + break; + case DRM_MODE_DPMS_SUSPEND: + val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF; + break; + case DRM_MODE_DPMS_OFF: + val |= NV50_PDISPLAY_DAC_DPMS_CTRL_OFF; + val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF; + val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF; + break; + default: + break; + } + + nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), val | + NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING); +} + +static void +nv50_dac_save(struct drm_encoder *encoder) +{ + NV_ERROR(encoder->dev, "!!\n"); +} + +static void +nv50_dac_restore(struct drm_encoder *encoder) +{ + NV_ERROR(encoder->dev, "!!\n"); +} + +static bool +nv50_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_connector *connector; + + NV_DEBUG(encoder->dev, "or %d\n", nv_encoder->or); + + connector = nouveau_encoder_connector_get(nv_encoder); + if (!connector) { + NV_ERROR(encoder->dev, "Encoder has no connector\n"); + return false; + } + + if (connector->scaling_mode != DRM_MODE_SCALE_NONE && + connector->native_mode) { + int id = adjusted_mode->base.id; + *adjusted_mode = *connector->native_mode; + adjusted_mode->base.id = id; + } + + return true; +} + +static void +nv50_dac_prepare(struct drm_encoder *encoder) +{ +} + +static void +nv50_dac_commit(struct drm_encoder *encoder) +{ +} + +static void +nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *evo = 
dev_priv->evo; + struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc); + uint32_t mode_ctl = 0, mode_ctl2 = 0; + int ret; + + NV_DEBUG(dev, "or %d\n", nv_encoder->or); + + nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON); + + if (crtc->index == 1) + mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC1; + else + mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC0; + + /* Lacking a working tv-out, this is not a 100% sure. */ + if (nv_encoder->dcb->type == OUTPUT_ANALOG) + mode_ctl |= 0x40; + else + if (nv_encoder->dcb->type == OUTPUT_TV) + mode_ctl |= 0x100; + + if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) + mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NHSYNC; + + if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) + mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NVSYNC; + + ret = RING_SPACE(evo, 3); + if (ret) { + NV_ERROR(dev, "no space while connecting DAC\n"); + return; + } + BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2); + OUT_RING(evo, mode_ctl); + OUT_RING(evo, mode_ctl2); +} + +static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = { + .dpms = nv50_dac_dpms, + .save = nv50_dac_save, + .restore = nv50_dac_restore, + .mode_fixup = nv50_dac_mode_fixup, + .prepare = nv50_dac_prepare, + .commit = nv50_dac_commit, + .mode_set = nv50_dac_mode_set, + .detect = nv50_dac_detect +}; + +static void +nv50_dac_destroy(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + + if (!encoder) + return; + + NV_DEBUG(encoder->dev, "\n"); + + drm_encoder_cleanup(encoder); + kfree(nv_encoder); +} + +static const struct drm_encoder_funcs nv50_dac_encoder_funcs = { + .destroy = nv50_dac_destroy, +}; + +int +nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry) +{ + struct nouveau_encoder *nv_encoder; + struct drm_encoder *encoder; + + NV_DEBUG(dev, "\n"); + NV_INFO(dev, "Detected a DAC output\n"); + + nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); + if (!nv_encoder) + return -ENOMEM; + encoder = to_drm_encoder(nv_encoder); + + nv_encoder->dcb = entry; + nv_encoder->or = ffs(entry->or) - 1; + + nv_encoder->disconnect = nv50_dac_disconnect; + + drm_encoder_init(dev, encoder, &nv50_dac_encoder_funcs, + DRM_MODE_ENCODER_DAC); + drm_encoder_helper_add(encoder, &nv50_dac_helper_funcs); + + encoder->possible_crtcs = entry->heads; + encoder->possible_clones = 0; + return 0; +} + diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c new file mode 100644 index 000000000000..12c5ee63495b --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -0,0 +1,1015 @@ +/* + * Copyright (C) 2008 Maarten Maathuis. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "nv50_display.h" +#include "nouveau_crtc.h" +#include "nouveau_encoder.h" +#include "nouveau_connector.h" +#include "nouveau_fb.h" +#include "drm_crtc_helper.h" + +static void +nv50_evo_channel_del(struct nouveau_channel **pchan) +{ + struct nouveau_channel *chan = *pchan; + + if (!chan) + return; + *pchan = NULL; + + nouveau_gpuobj_channel_takedown(chan); + nouveau_bo_ref(NULL, &chan->pushbuf_bo); + + if (chan->user) + iounmap(chan->user); + + kfree(chan); +} + +static int +nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name, + uint32_t tile_flags, uint32_t magic_flags, + uint32_t offset, uint32_t limit) +{ + struct drm_nouveau_private *dev_priv = evo->dev->dev_private; + struct drm_device *dev = evo->dev; + struct nouveau_gpuobj *obj = NULL; + int ret; + + ret = nouveau_gpuobj_new(dev, evo, 6*4, 32, 0, &obj); + if (ret) + return ret; + obj->engine = NVOBJ_ENGINE_DISPLAY; + + ret = nouveau_gpuobj_ref_add(dev, evo, name, obj, NULL); + if (ret) { + nouveau_gpuobj_del(dev, &obj); + return ret; + } + + dev_priv->engine.instmem.prepare_access(dev, true); + nv_wo32(dev, obj, 0, (tile_flags << 22) | (magic_flags << 16) | class); + nv_wo32(dev, obj, 1, limit); + nv_wo32(dev, obj, 2, offset); + nv_wo32(dev, obj, 3, 0x00000000); + nv_wo32(dev, obj, 4, 0x00000000); + nv_wo32(dev, obj, 5, 0x00010000); + dev_priv->engine.instmem.finish_access(dev); + + return 0; +} + +static int +nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan; + int ret; + + chan = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL); + if (!chan) + return -ENOMEM; + *pchan = chan; + + chan->id = -1; + chan->dev = dev; + chan->user_get = 4; + chan->user_put = 0; + + INIT_LIST_HEAD(&chan->ramht_refs); + + ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32768, 0x1000, + NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin); + if (ret) { + NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret); + nv50_evo_channel_del(pchan); + return ret; + } + + ret = nouveau_mem_init_heap(&chan->ramin_heap, chan->ramin->gpuobj-> + im_pramin->start, 32768); + if (ret) { + NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret); + nv50_evo_channel_del(pchan); + return ret; + } + + ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 4096, 16, + 0, &chan->ramht); + if (ret) { + NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret); + nv50_evo_channel_del(pchan); + return ret; + } + + if (dev_priv->chipset != 0x50) { + ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19, + 0, 0xffffffff); + if (ret) { + nv50_evo_channel_del(pchan); + return ret; + } + + + ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB32, 0x7a, 0x19, + 0, 0xffffffff); + if (ret) { + nv50_evo_channel_del(pchan); + return ret; + } + } + + ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19, + 0, nouveau_mem_fb_amount(dev)); + if (ret) { + nv50_evo_channel_del(pchan); + return ret; + } + + ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, + false, true, &chan->pushbuf_bo); + if (ret == 0) + ret = nouveau_bo_pin(chan->pushbuf_bo, TTM_PL_FLAG_VRAM); + if (ret) { + NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", 
ret); + nv50_evo_channel_del(pchan); + return ret; + } + + ret = nouveau_bo_map(chan->pushbuf_bo); + if (ret) { + NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret); + nv50_evo_channel_del(pchan); + return ret; + } + + chan->user = ioremap(pci_resource_start(dev->pdev, 0) + + NV50_PDISPLAY_USER(0), PAGE_SIZE); + if (!chan->user) { + NV_ERROR(dev, "Error mapping EVO control regs.\n"); + nv50_evo_channel_del(pchan); + return -ENOMEM; + } + + return 0; +} + +int +nv50_display_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; + struct nouveau_channel *evo = dev_priv->evo; + struct drm_connector *connector; + uint32_t val, ram_amount, hpd_en[2]; + uint64_t start; + int ret, i; + + NV_DEBUG(dev, "\n"); + + nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004)); + /* + * I think the 0x006101XX range is some kind of main control area + * that enables things. + */ + /* CRTC? */ + for (i = 0; i < 2; i++) { + val = nv_rd32(dev, 0x00616100 + (i * 0x800)); + nv_wr32(dev, 0x00610190 + (i * 0x10), val); + val = nv_rd32(dev, 0x00616104 + (i * 0x800)); + nv_wr32(dev, 0x00610194 + (i * 0x10), val); + val = nv_rd32(dev, 0x00616108 + (i * 0x800)); + nv_wr32(dev, 0x00610198 + (i * 0x10), val); + val = nv_rd32(dev, 0x0061610c + (i * 0x800)); + nv_wr32(dev, 0x0061019c + (i * 0x10), val); + } + /* DAC */ + for (i = 0; i < 3; i++) { + val = nv_rd32(dev, 0x0061a000 + (i * 0x800)); + nv_wr32(dev, 0x006101d0 + (i * 0x04), val); + } + /* SOR */ + for (i = 0; i < 4; i++) { + val = nv_rd32(dev, 0x0061c000 + (i * 0x800)); + nv_wr32(dev, 0x006101e0 + (i * 0x04), val); + } + /* Something not yet in use, tv-out maybe. */ + for (i = 0; i < 3; i++) { + val = nv_rd32(dev, 0x0061e000 + (i * 0x800)); + nv_wr32(dev, 0x006101f0 + (i * 0x04), val); + } + + for (i = 0; i < 3; i++) { + nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(i), 0x00550000 | + NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING); + nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001); + } + + /* This used to be in crtc unblank, but seems out of place there. */ + nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0); + /* RAM is clamped to 256 MiB. */ + ram_amount = nouveau_mem_fb_amount(dev); + NV_DEBUG(dev, "ram_amount %d\n", ram_amount); + if (ram_amount > 256*1024*1024) + ram_amount = 256*1024*1024; + nv_wr32(dev, NV50_PDISPLAY_RAM_AMOUNT, ram_amount - 1); + nv_wr32(dev, NV50_PDISPLAY_UNK_388, 0x150000); + nv_wr32(dev, NV50_PDISPLAY_UNK_38C, 0); + + /* The precise purpose is unknown, i suspect it has something to do + * with text mode. 
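+	 * The handshake below acks the interrupt, clears bit 0 of 0x6194e8
+	 * and then waits for bit 1 to drop before continuing.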
+ */ + if (nv_rd32(dev, NV50_PDISPLAY_INTR_1) & 0x100) { + nv_wr32(dev, NV50_PDISPLAY_INTR_1, 0x100); + nv_wr32(dev, 0x006194e8, nv_rd32(dev, 0x006194e8) & ~1); + if (!nv_wait(0x006194e8, 2, 0)) { + NV_ERROR(dev, "timeout: (0x6194e8 & 2) != 0\n"); + NV_ERROR(dev, "0x6194e8 = 0x%08x\n", + nv_rd32(dev, 0x6194e8)); + return -EBUSY; + } + } + + /* taken from nv bug #12637, attempts to un-wedge the hw if it's + * stuck in some unspecified state + */ + start = ptimer->read(dev); + nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x2b00); + while ((val = nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))) & 0x1e0000) { + if ((val & 0x9f0000) == 0x20000) + nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), + val | 0x800000); + + if ((val & 0x3f0000) == 0x30000) + nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), + val | 0x200000); + + if (ptimer->read(dev) - start > 1000000000ULL) { + NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) != 0\n"); + NV_ERROR(dev, "0x610200 = 0x%08x\n", val); + return -EBUSY; + } + } + + nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, NV50_PDISPLAY_CTRL_STATE_ENABLE); + nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1000b03); + if (!nv_wait(NV50_PDISPLAY_CHANNEL_STAT(0), 0x40000000, 0x40000000)) { + NV_ERROR(dev, "timeout: (0x610200 & 0x40000000) == 0x40000000\n"); + NV_ERROR(dev, "0x610200 = 0x%08x\n", + nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))); + return -EBUSY; + } + + for (i = 0; i < 2; i++) { + nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000); + if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), + NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) { + NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n"); + NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n", + nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i))); + return -EBUSY; + } + + nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), + NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON); + if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), + NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, + NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) { + NV_ERROR(dev, "timeout: " + "CURSOR_CTRL2_STATUS_ACTIVE(%d)\n", i); + NV_ERROR(dev, "CURSOR_CTRL2(%d) = 0x%08x\n", i, + nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i))); + return -EBUSY; + } + } + + nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->instance >> 8) | 9); + + /* initialise fifo */ + nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0), + ((evo->pushbuf_bo->bo.mem.mm_node->start << PAGE_SHIFT) >> 8) | + NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM | + NV50_PDISPLAY_CHANNEL_DMA_CB_VALID); + nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK2(0), 0x00010000); + nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK3(0), 0x00000002); + if (!nv_wait(0x610200, 0x80000000, 0x00000000)) { + NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n"); + NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, 0x610200)); + return -EBUSY; + } + nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), + (nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)) & ~0x00000003) | + NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED); + nv_wr32(dev, NV50_PDISPLAY_USER_PUT(0), 0); + nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x01000003 | + NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED); + nv_wr32(dev, 0x610300, nv_rd32(dev, 0x610300) & ~1); + + evo->dma.max = (4096/4) - 2; + evo->dma.put = 0; + evo->dma.cur = evo->dma.put; + evo->dma.free = evo->dma.max - evo->dma.cur; + + ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS); + if (ret) + return ret; + + for (i = 0; i < NOUVEAU_DMA_SKIPS; i++) + OUT_RING(evo, 0); + + ret = RING_SPACE(evo, 11); + if (ret) + return ret; + BEGIN_RING(evo, 0, NV50_EVO_UNK84, 2); + OUT_RING(evo, 
NV50_EVO_UNK84_NOTIFY_DISABLED); + OUT_RING(evo, NV50_EVO_DMA_NOTIFY_HANDLE_NONE); + BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, FB_DMA), 1); + OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE); + BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK0800), 1); + OUT_RING(evo, 0); + BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, DISPLAY_START), 1); + OUT_RING(evo, 0); + BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK082C), 1); + OUT_RING(evo, 0); + FIRE_RING(evo); + if (!nv_wait(0x640004, 0xffffffff, evo->dma.put << 2)) + NV_ERROR(dev, "evo pushbuf stalled\n"); + + /* enable clock change interrupts. */ + nv_wr32(dev, 0x610028, 0x00010001); + nv_wr32(dev, NV50_PDISPLAY_INTR_EN, (NV50_PDISPLAY_INTR_EN_CLK_UNK10 | + NV50_PDISPLAY_INTR_EN_CLK_UNK20 | + NV50_PDISPLAY_INTR_EN_CLK_UNK40)); + + /* enable hotplug interrupts */ + hpd_en[0] = hpd_en[1] = 0; + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct nouveau_connector *conn = nouveau_connector(connector); + struct dcb_gpio_entry *gpio; + + if (connector->connector_type != DRM_MODE_CONNECTOR_DVII && + connector->connector_type != DRM_MODE_CONNECTOR_DVID && + connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) + continue; + + gpio = nouveau_bios_gpio_entry(dev, conn->dcb->gpio_tag); + if (!gpio) + continue; + + hpd_en[gpio->line >> 4] |= (0x00010001 << (gpio->line & 0xf)); + } + + nv_wr32(dev, 0xe054, 0xffffffff); + nv_wr32(dev, 0xe050, hpd_en[0]); + if (dev_priv->chipset >= 0x90) { + nv_wr32(dev, 0xe074, 0xffffffff); + nv_wr32(dev, 0xe070, hpd_en[1]); + } + + return 0; +} + +static int nv50_display_disable(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_crtc *drm_crtc; + int ret, i; + + NV_DEBUG(dev, "\n"); + + list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) { + struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc); + + nv50_crtc_blank(crtc, true); + } + + ret = RING_SPACE(dev_priv->evo, 2); + if (ret == 0) { + BEGIN_RING(dev_priv->evo, 0, NV50_EVO_UPDATE, 1); + OUT_RING(dev_priv->evo, 0); + } + FIRE_RING(dev_priv->evo); + + /* Almost like ack'ing a vblank interrupt, maybe in the spirit of + * cleaning up? + */ + list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) { + struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc); + uint32_t mask = NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(crtc->index); + + if (!crtc->base.enabled) + continue; + + nv_wr32(dev, NV50_PDISPLAY_INTR_1, mask); + if (!nv_wait(NV50_PDISPLAY_INTR_1, mask, mask)) { + NV_ERROR(dev, "timeout: (0x610024 & 0x%08x) == " + "0x%08x\n", mask, mask); + NV_ERROR(dev, "0x610024 = 0x%08x\n", + nv_rd32(dev, NV50_PDISPLAY_INTR_1)); + } + } + + nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0); + nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, 0); + if (!nv_wait(NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) { + NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n"); + NV_ERROR(dev, "0x610200 = 0x%08x\n", + nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))); + } + + for (i = 0; i < 3; i++) { + if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_STATE(i), + NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) { + NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i); + NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", i, + nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i))); + } + } + + /* disable interrupts. 
*/ + nv_wr32(dev, NV50_PDISPLAY_INTR_EN, 0x00000000); + + /* disable hotplug interrupts */ + nv_wr32(dev, 0xe054, 0xffffffff); + nv_wr32(dev, 0xe050, 0x00000000); + if (dev_priv->chipset >= 0x90) { + nv_wr32(dev, 0xe074, 0xffffffff); + nv_wr32(dev, 0xe070, 0x00000000); + } + return 0; +} + +int nv50_display_create(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct parsed_dcb *dcb = dev_priv->vbios->dcb; + uint32_t connector[16] = {}; + int ret, i; + + NV_DEBUG(dev, "\n"); + + /* init basic kernel modesetting */ + drm_mode_config_init(dev); + + /* Initialise some optional connector properties. */ + drm_mode_create_scaling_mode_property(dev); + drm_mode_create_dithering_property(dev); + + dev->mode_config.min_width = 0; + dev->mode_config.min_height = 0; + + dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs; + + dev->mode_config.max_width = 8192; + dev->mode_config.max_height = 8192; + + dev->mode_config.fb_base = dev_priv->fb_phys; + + /* Create EVO channel */ + ret = nv50_evo_channel_new(dev, &dev_priv->evo); + if (ret) { + NV_ERROR(dev, "Error creating EVO channel: %d\n", ret); + return ret; + } + + /* Create CRTC objects */ + for (i = 0; i < 2; i++) + nv50_crtc_create(dev, i); + + /* We setup the encoders from the BIOS table */ + for (i = 0 ; i < dcb->entries; i++) { + struct dcb_entry *entry = &dcb->entry[i]; + + if (entry->location != DCB_LOC_ON_CHIP) { + NV_WARN(dev, "Off-chip encoder %d/%d unsupported\n", + entry->type, ffs(entry->or) - 1); + continue; + } + + switch (entry->type) { + case OUTPUT_TMDS: + case OUTPUT_LVDS: + case OUTPUT_DP: + nv50_sor_create(dev, entry); + break; + case OUTPUT_ANALOG: + nv50_dac_create(dev, entry); + break; + default: + NV_WARN(dev, "DCB encoder %d unknown\n", entry->type); + continue; + } + + connector[entry->connector] |= (1 << entry->type); + } + + /* It appears that DCB 3.0+ VBIOS has a connector table, however, + * I'm not 100% certain how to decode it correctly yet so just + * look at what encoders are present on each connector index and + * attempt to derive the connector type from that. 
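+	 * For example, a DVI-I connector normally has both a TMDS and an
+	 * ANALOG encoder on the same connector index, so its mask becomes
+	 * (1 << OUTPUT_TMDS) | (1 << OUTPUT_ANALOG) and the code below picks
+	 * DRM_MODE_CONNECTOR_DVII.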
+ */ + for (i = 0 ; i < dcb->entries; i++) { + struct dcb_entry *entry = &dcb->entry[i]; + uint16_t encoders; + int type; + + encoders = connector[entry->connector]; + if (!(encoders & (1 << entry->type))) + continue; + connector[entry->connector] = 0; + + if (encoders & (1 << OUTPUT_DP)) { + type = DRM_MODE_CONNECTOR_DisplayPort; + } else if (encoders & (1 << OUTPUT_TMDS)) { + if (encoders & (1 << OUTPUT_ANALOG)) + type = DRM_MODE_CONNECTOR_DVII; + else + type = DRM_MODE_CONNECTOR_DVID; + } else if (encoders & (1 << OUTPUT_ANALOG)) { + type = DRM_MODE_CONNECTOR_VGA; + } else if (encoders & (1 << OUTPUT_LVDS)) { + type = DRM_MODE_CONNECTOR_LVDS; + } else { + type = DRM_MODE_CONNECTOR_Unknown; + } + + if (type == DRM_MODE_CONNECTOR_Unknown) + continue; + + nouveau_connector_create(dev, entry->connector, type); + } + + ret = nv50_display_init(dev); + if (ret) + return ret; + + return 0; +} + +int nv50_display_destroy(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + NV_DEBUG(dev, "\n"); + + drm_mode_config_cleanup(dev); + + nv50_display_disable(dev); + nv50_evo_channel_del(&dev_priv->evo); + + return 0; +} + +static inline uint32_t +nv50_display_mode_ctrl(struct drm_device *dev, bool sor, int or) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t mc; + + if (sor) { + if (dev_priv->chipset < 0x90 || + dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0) + mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_P(or)); + else + mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_P(or)); + } else { + mc = nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_P(or)); + } + + return mc; +} + +static int +nv50_display_irq_head(struct drm_device *dev, int *phead, + struct dcb_entry **pdcbent) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t unk30 = nv_rd32(dev, NV50_PDISPLAY_UNK30_CTRL); + uint32_t dac = 0, sor = 0; + int head, i, or = 0, type = OUTPUT_ANY; + + /* We're assuming that head 0 *or* head 1 will be active here, + * and not both. I'm not sure if the hw will even signal both + * ever, but it definitely shouldn't for us as we commit each + * CRTC separately, and submission will be blocked by the GPU + * until we handle each in turn. + */ + NV_DEBUG(dev, "0x610030: 0x%08x\n", unk30); + head = ffs((unk30 >> 9) & 3) - 1; + if (head < 0) + return -EINVAL; + + /* This assumes CRTCs are never bound to multiple encoders, which + * should be the case. 
+ */ + for (i = 0; i < 3 && type == OUTPUT_ANY; i++) { + uint32_t mc = nv50_display_mode_ctrl(dev, false, i); + if (!(mc & (1 << head))) + continue; + + switch ((mc >> 8) & 0xf) { + case 0: type = OUTPUT_ANALOG; break; + case 1: type = OUTPUT_TV; break; + default: + NV_ERROR(dev, "unknown dac mode_ctrl: 0x%08x\n", dac); + return -1; + } + + or = i; + } + + for (i = 0; i < 4 && type == OUTPUT_ANY; i++) { + uint32_t mc = nv50_display_mode_ctrl(dev, true, i); + if (!(mc & (1 << head))) + continue; + + switch ((mc >> 8) & 0xf) { + case 0: type = OUTPUT_LVDS; break; + case 1: type = OUTPUT_TMDS; break; + case 2: type = OUTPUT_TMDS; break; + case 5: type = OUTPUT_TMDS; break; + case 8: type = OUTPUT_DP; break; + case 9: type = OUTPUT_DP; break; + default: + NV_ERROR(dev, "unknown sor mode_ctrl: 0x%08x\n", sor); + return -1; + } + + or = i; + } + + NV_DEBUG(dev, "type %d, or %d\n", type, or); + if (type == OUTPUT_ANY) { + NV_ERROR(dev, "unknown encoder!!\n"); + return -1; + } + + for (i = 0; i < dev_priv->vbios->dcb->entries; i++) { + struct dcb_entry *dcbent = &dev_priv->vbios->dcb->entry[i]; + + if (dcbent->type != type) + continue; + + if (!(dcbent->or & (1 << or))) + continue; + + *phead = head; + *pdcbent = dcbent; + return 0; + } + + NV_ERROR(dev, "no DCB entry for %d %d\n", dac != 0, or); + return 0; +} + +static uint32_t +nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent, + int pxclk) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvbios *bios = &dev_priv->VBIOS; + uint32_t mc, script = 0, or; + + or = ffs(dcbent->or) - 1; + mc = nv50_display_mode_ctrl(dev, dcbent->type != OUTPUT_ANALOG, or); + switch (dcbent->type) { + case OUTPUT_LVDS: + script = (mc >> 8) & 0xf; + if (bios->pub.fp_no_ddc) { + if (bios->fp.dual_link) + script |= 0x0100; + if (bios->fp.if_is_24bit) + script |= 0x0200; + } else { + if (pxclk >= bios->fp.duallink_transition_clk) { + script |= 0x0100; + if (bios->fp.strapless_is_24bit & 2) + script |= 0x0200; + } else + if (bios->fp.strapless_is_24bit & 1) + script |= 0x0200; + } + + if (nouveau_uscript_lvds >= 0) { + NV_INFO(dev, "override script 0x%04x with 0x%04x " + "for output LVDS-%d\n", script, + nouveau_uscript_lvds, or); + script = nouveau_uscript_lvds; + } + break; + case OUTPUT_TMDS: + script = (mc >> 8) & 0xf; + if (pxclk >= 165000) + script |= 0x0100; + + if (nouveau_uscript_tmds >= 0) { + NV_INFO(dev, "override script 0x%04x with 0x%04x " + "for output TMDS-%d\n", script, + nouveau_uscript_tmds, or); + script = nouveau_uscript_tmds; + } + break; + case OUTPUT_DP: + script = (mc >> 8) & 0xf; + break; + case OUTPUT_ANALOG: + script = 0xff; + break; + default: + NV_ERROR(dev, "modeset on unsupported output type!\n"); + break; + } + + return script; +} + +static void +nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan; + struct list_head *entry, *tmp; + + list_for_each_safe(entry, tmp, &dev_priv->vbl_waiting) { + chan = list_entry(entry, struct nouveau_channel, nvsw.vbl_wait); + + nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset, + chan->nvsw.vblsem_rval); + list_del(&chan->nvsw.vbl_wait); + } +} + +static void +nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr) +{ + intr &= NV50_PDISPLAY_INTR_1_VBLANK_CRTC; + + if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0) + nv50_display_vblank_crtc_handler(dev, 0); + + if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1) + 
nv50_display_vblank_crtc_handler(dev, 1); + + nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev, + NV50_PDISPLAY_INTR_EN) & ~intr); + nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr); +} + +static void +nv50_display_unk10_handler(struct drm_device *dev) +{ + struct dcb_entry *dcbent; + int head, ret; + + ret = nv50_display_irq_head(dev, &head, &dcbent); + if (ret) + goto ack; + + nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) & ~8); + + nouveau_bios_run_display_table(dev, dcbent, 0, -1); + +ack: + nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK10); + nv_wr32(dev, 0x610030, 0x80000000); +} + +static void +nv50_display_unk20_handler(struct drm_device *dev) +{ + struct dcb_entry *dcbent; + uint32_t tmp, pclk, script; + int head, or, ret; + + ret = nv50_display_irq_head(dev, &head, &dcbent); + if (ret) + goto ack; + or = ffs(dcbent->or) - 1; + pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff; + script = nv50_display_script_select(dev, dcbent, pclk); + + NV_DEBUG(dev, "head %d pxclk: %dKHz\n", head, pclk); + + if (dcbent->type != OUTPUT_DP) + nouveau_bios_run_display_table(dev, dcbent, 0, -2); + + nv50_crtc_set_clock(dev, head, pclk); + + nouveau_bios_run_display_table(dev, dcbent, script, pclk); + + tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head)); + tmp &= ~0x000000f; + nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head), tmp); + + if (dcbent->type != OUTPUT_ANALOG) { + tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or)); + tmp &= ~0x00000f0f; + if (script & 0x0100) + tmp |= 0x00000101; + nv_wr32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or), tmp); + } else { + nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0); + } + +ack: + nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20); + nv_wr32(dev, 0x610030, 0x80000000); +} + +static void +nv50_display_unk40_handler(struct drm_device *dev) +{ + struct dcb_entry *dcbent; + int head, pclk, script, ret; + + ret = nv50_display_irq_head(dev, &head, &dcbent); + if (ret) + goto ack; + pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff; + script = nv50_display_script_select(dev, dcbent, pclk); + + nouveau_bios_run_display_table(dev, dcbent, script, -pclk); + +ack: + nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK40); + nv_wr32(dev, 0x610030, 0x80000000); + nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) | 8); +} + +void +nv50_display_irq_handler_bh(struct work_struct *work) +{ + struct drm_nouveau_private *dev_priv = + container_of(work, struct drm_nouveau_private, irq_work); + struct drm_device *dev = dev_priv->dev; + + for (;;) { + uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0); + uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1); + + NV_DEBUG(dev, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1); + + if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10) + nv50_display_unk10_handler(dev); + else + if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK20) + nv50_display_unk20_handler(dev); + else + if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK40) + nv50_display_unk40_handler(dev); + else + break; + } + + nv_wr32(dev, NV03_PMC_INTR_EN_0, 1); +} + +static void +nv50_display_error_handler(struct drm_device *dev) +{ + uint32_t addr, data; + + nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000); + addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR); + data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA); + + NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x (0x%04x 0x%02x)\n", + 0, addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf); + + nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000); +} + +static void +nv50_display_irq_hotplug(struct 
drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_connector *connector; + const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; + uint32_t unplug_mask, plug_mask, change_mask; + uint32_t hpd0, hpd1 = 0; + + hpd0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050); + if (dev_priv->chipset >= 0x90) + hpd1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070); + + plug_mask = (hpd0 & 0x0000ffff) | (hpd1 << 16); + unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000); + change_mask = plug_mask | unplug_mask; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct drm_encoder_helper_funcs *helper; + struct nouveau_connector *nv_connector = + nouveau_connector(connector); + struct nouveau_encoder *nv_encoder; + struct dcb_gpio_entry *gpio; + uint32_t reg; + bool plugged; + + if (!nv_connector->dcb) + continue; + + gpio = nouveau_bios_gpio_entry(dev, nv_connector->dcb->gpio_tag); + if (!gpio || !(change_mask & (1 << gpio->line))) + continue; + + reg = nv_rd32(dev, gpio_reg[gpio->line >> 3]); + plugged = !!(reg & (4 << ((gpio->line & 7) << 2))); + NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un", + drm_get_connector_name(connector)) ; + + if (!connector->encoder || !connector->encoder->crtc || + !connector->encoder->crtc->enabled) + continue; + nv_encoder = nouveau_encoder(connector->encoder); + helper = connector->encoder->helper_private; + + if (nv_encoder->dcb->type != OUTPUT_DP) + continue; + + if (plugged) + helper->dpms(connector->encoder, DRM_MODE_DPMS_ON); + else + helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF); + } + + nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054)); + if (dev_priv->chipset >= 0x90) + nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074)); +} + +void +nv50_display_irq_handler(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t delayed = 0; + + while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) + nv50_display_irq_hotplug(dev); + + while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) { + uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0); + uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1); + uint32_t clock; + + NV_DEBUG(dev, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1); + + if (!intr0 && !(intr1 & ~delayed)) + break; + + if (intr0 & 0x00010000) { + nv50_display_error_handler(dev); + intr0 &= ~0x00010000; + } + + if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) { + nv50_display_vblank_handler(dev, intr1); + intr1 &= ~NV50_PDISPLAY_INTR_1_VBLANK_CRTC; + } + + clock = (intr1 & (NV50_PDISPLAY_INTR_1_CLK_UNK10 | + NV50_PDISPLAY_INTR_1_CLK_UNK20 | + NV50_PDISPLAY_INTR_1_CLK_UNK40)); + if (clock) { + nv_wr32(dev, NV03_PMC_INTR_EN_0, 0); + if (!work_pending(&dev_priv->irq_work)) + queue_work(dev_priv->wq, &dev_priv->irq_work); + delayed |= clock; + intr1 &= ~clock; + } + + if (intr0) { + NV_ERROR(dev, "unknown PDISPLAY_INTR_0: 0x%08x\n", intr0); + nv_wr32(dev, NV50_PDISPLAY_INTR_0, intr0); + } + + if (intr1) { + NV_ERROR(dev, + "unknown PDISPLAY_INTR_1: 0x%08x\n", intr1); + nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr1); + } + } +} + diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h new file mode 100644 index 000000000000..3ae8d0725f63 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_display.h @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2008 Maarten Maathuis. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __NV50_DISPLAY_H__ +#define __NV50_DISPLAY_H__ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" +#include "nouveau_dma.h" +#include "nouveau_reg.h" +#include "nouveau_crtc.h" +#include "nv50_evo.h" + +void nv50_display_irq_handler(struct drm_device *dev); +void nv50_display_irq_handler_bh(struct work_struct *work); +int nv50_display_init(struct drm_device *dev); +int nv50_display_create(struct drm_device *dev); +int nv50_display_destroy(struct drm_device *dev); +int nv50_crtc_blank(struct nouveau_crtc *, bool blank); +int nv50_crtc_set_clock(struct drm_device *, int head, int pclk); + +#endif /* __NV50_DISPLAY_H__ */ diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h new file mode 100644 index 000000000000..aae13343bcec --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_evo.h @@ -0,0 +1,113 @@ +/* + * Copyright (C) 2008 Maarten Maathuis. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#define NV50_EVO_UPDATE 0x00000080 +#define NV50_EVO_UNK84 0x00000084 +#define NV50_EVO_UNK84_NOTIFY 0x40000000 +#define NV50_EVO_UNK84_NOTIFY_DISABLED 0x00000000 +#define NV50_EVO_UNK84_NOTIFY_ENABLED 0x40000000 +#define NV50_EVO_DMA_NOTIFY 0x00000088 +#define NV50_EVO_DMA_NOTIFY_HANDLE 0xffffffff +#define NV50_EVO_DMA_NOTIFY_HANDLE_NONE 0x00000000 +#define NV50_EVO_UNK8C 0x0000008C + +#define NV50_EVO_DAC(n, r) ((n) * 0x80 + NV50_EVO_DAC_##r) +#define NV50_EVO_DAC_MODE_CTRL 0x00000400 +#define NV50_EVO_DAC_MODE_CTRL_CRTC0 0x00000001 +#define NV50_EVO_DAC_MODE_CTRL_CRTC1 0x00000002 +#define NV50_EVO_DAC_MODE_CTRL2 0x00000404 +#define NV50_EVO_DAC_MODE_CTRL2_NHSYNC 0x00000001 +#define NV50_EVO_DAC_MODE_CTRL2_NVSYNC 0x00000002 + +#define NV50_EVO_SOR(n, r) ((n) * 0x40 + NV50_EVO_SOR_##r) +#define NV50_EVO_SOR_MODE_CTRL 0x00000600 +#define NV50_EVO_SOR_MODE_CTRL_CRTC0 0x00000001 +#define NV50_EVO_SOR_MODE_CTRL_CRTC1 0x00000002 +#define NV50_EVO_SOR_MODE_CTRL_TMDS 0x00000100 +#define NV50_EVO_SOR_MODE_CTRL_TMDS_DUAL_LINK 0x00000400 +#define NV50_EVO_SOR_MODE_CTRL_NHSYNC 0x00001000 +#define NV50_EVO_SOR_MODE_CTRL_NVSYNC 0x00002000 + +#define NV50_EVO_CRTC(n, r) ((n) * 0x400 + NV50_EVO_CRTC_##r) +#define NV84_EVO_CRTC(n, r) ((n) * 0x400 + NV84_EVO_CRTC_##r) +#define NV50_EVO_CRTC_UNK0800 0x00000800 +#define NV50_EVO_CRTC_CLOCK 0x00000804 +#define NV50_EVO_CRTC_INTERLACE 0x00000808 +#define NV50_EVO_CRTC_DISPLAY_START 0x00000810 +#define NV50_EVO_CRTC_DISPLAY_TOTAL 0x00000814 +#define NV50_EVO_CRTC_SYNC_DURATION 0x00000818 +#define NV50_EVO_CRTC_SYNC_START_TO_BLANK_END 0x0000081c +#define NV50_EVO_CRTC_UNK0820 0x00000820 +#define NV50_EVO_CRTC_UNK0824 0x00000824 +#define NV50_EVO_CRTC_UNK082C 0x0000082c +#define NV50_EVO_CRTC_CLUT_MODE 0x00000840 +/* You can't have a palette in 8 bit mode (=OFF) */ +#define NV50_EVO_CRTC_CLUT_MODE_BLANK 0x00000000 +#define NV50_EVO_CRTC_CLUT_MODE_OFF 0x80000000 +#define NV50_EVO_CRTC_CLUT_MODE_ON 0xC0000000 +#define NV50_EVO_CRTC_CLUT_OFFSET 0x00000844 +#define NV84_EVO_CRTC_CLUT_DMA 0x0000085C +#define NV84_EVO_CRTC_CLUT_DMA_HANDLE 0xffffffff +#define NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE 0x00000000 +#define NV50_EVO_CRTC_FB_OFFSET 0x00000860 +#define NV50_EVO_CRTC_FB_SIZE 0x00000868 +#define NV50_EVO_CRTC_FB_CONFIG 0x0000086c +#define NV50_EVO_CRTC_FB_CONFIG_MODE 0x00100000 +#define NV50_EVO_CRTC_FB_CONFIG_MODE_TILE 0x00000000 +#define NV50_EVO_CRTC_FB_CONFIG_MODE_PITCH 0x00100000 +#define NV50_EVO_CRTC_FB_DEPTH 0x00000870 +#define NV50_EVO_CRTC_FB_DEPTH_8 0x00001e00 +#define NV50_EVO_CRTC_FB_DEPTH_15 0x0000e900 +#define NV50_EVO_CRTC_FB_DEPTH_16 0x0000e800 +#define NV50_EVO_CRTC_FB_DEPTH_24 0x0000cf00 +#define NV50_EVO_CRTC_FB_DEPTH_30 0x0000d100 +#define NV50_EVO_CRTC_FB_DMA 0x00000874 +#define NV50_EVO_CRTC_FB_DMA_HANDLE 0xffffffff +#define NV50_EVO_CRTC_FB_DMA_HANDLE_NONE 0x00000000 +#define NV50_EVO_CRTC_CURSOR_CTRL 0x00000880 +#define NV50_EVO_CRTC_CURSOR_CTRL_HIDE 0x05000000 +#define NV50_EVO_CRTC_CURSOR_CTRL_SHOW 0x85000000 +#define NV50_EVO_CRTC_CURSOR_OFFSET 0x00000884 +#define NV84_EVO_CRTC_CURSOR_DMA 0x0000089c +#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE 0xffffffff +#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE 0x00000000 +#define NV50_EVO_CRTC_DITHER_CTRL 0x000008a0 +#define NV50_EVO_CRTC_DITHER_CTRL_OFF 0x00000000 +#define NV50_EVO_CRTC_DITHER_CTRL_ON 0x00000011 +#define NV50_EVO_CRTC_SCALE_CTRL 0x000008a4 +#define NV50_EVO_CRTC_SCALE_CTRL_INACTIVE 0x00000000 +#define NV50_EVO_CRTC_SCALE_CTRL_ACTIVE 0x00000009 +#define 
NV50_EVO_CRTC_COLOR_CTRL 0x000008a8 +#define NV50_EVO_CRTC_COLOR_CTRL_COLOR 0x00040000 +#define NV50_EVO_CRTC_FB_POS 0x000008c0 +#define NV50_EVO_CRTC_REAL_RES 0x000008c8 +#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET 0x000008d4 +#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(x, y) \ + ((((unsigned)y << 16) & 0xFFFF0000) | (((unsigned)x) & 0x0000FFFF)) +/* Both of these are needed, otherwise nothing happens. */ +#define NV50_EVO_CRTC_SCALE_RES1 0x000008d8 +#define NV50_EVO_CRTC_SCALE_RES2 0x000008dc + diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c new file mode 100644 index 000000000000..6bcc6d39e9b0 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c @@ -0,0 +1,273 @@ +#include "drmP.h" +#include "nouveau_drv.h" +#include "nouveau_dma.h" +#include "nouveau_fbcon.h" + +static void +nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) +{ + struct nouveau_fbcon_par *par = info->par; + struct drm_device *dev = par->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = dev_priv->channel; + + if (info->state != FBINFO_STATE_RUNNING) + return; + + if (!(info->flags & FBINFO_HWACCEL_DISABLED) && + RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11)) { + NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); + + info->flags |= FBINFO_HWACCEL_DISABLED; + } + + if (info->flags & FBINFO_HWACCEL_DISABLED) { + cfb_fillrect(info, rect); + return; + } + + if (rect->rop != ROP_COPY) { + BEGIN_RING(chan, NvSub2D, 0x02ac, 1); + OUT_RING(chan, 1); + } + BEGIN_RING(chan, NvSub2D, 0x0588, 1); + OUT_RING(chan, rect->color); + BEGIN_RING(chan, NvSub2D, 0x0600, 4); + OUT_RING(chan, rect->dx); + OUT_RING(chan, rect->dy); + OUT_RING(chan, rect->dx + rect->width); + OUT_RING(chan, rect->dy + rect->height); + if (rect->rop != ROP_COPY) { + BEGIN_RING(chan, NvSub2D, 0x02ac, 1); + OUT_RING(chan, 3); + } + FIRE_RING(chan); +} + +static void +nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) +{ + struct nouveau_fbcon_par *par = info->par; + struct drm_device *dev = par->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = dev_priv->channel; + + if (info->state != FBINFO_STATE_RUNNING) + return; + + if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 12)) { + NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); + + info->flags |= FBINFO_HWACCEL_DISABLED; + } + + if (info->flags & FBINFO_HWACCEL_DISABLED) { + cfb_copyarea(info, region); + return; + } + + BEGIN_RING(chan, NvSub2D, 0x0110, 1); + OUT_RING(chan, 0); + BEGIN_RING(chan, NvSub2D, 0x08b0, 4); + OUT_RING(chan, region->dx); + OUT_RING(chan, region->dy); + OUT_RING(chan, region->width); + OUT_RING(chan, region->height); + BEGIN_RING(chan, NvSub2D, 0x08d0, 4); + OUT_RING(chan, 0); + OUT_RING(chan, region->sx); + OUT_RING(chan, 0); + OUT_RING(chan, region->sy); + FIRE_RING(chan); +} + +static void +nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) +{ + struct nouveau_fbcon_par *par = info->par; + struct drm_device *dev = par->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = dev_priv->channel; + uint32_t width, dwords, *data = (uint32_t *)image->data; + uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); + uint32_t *palette = info->pseudo_palette; + + if (info->state != FBINFO_STATE_RUNNING) + return; + + if (image->depth != 1) { + cfb_imageblit(info, image); + return; + } + + if (!(info->flags & 
FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 11)) { + NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); + info->flags |= FBINFO_HWACCEL_DISABLED; + } + + if (info->flags & FBINFO_HWACCEL_DISABLED) { + cfb_imageblit(info, image); + return; + } + + width = (image->width + 31) & ~31; + dwords = (width * image->height) >> 5; + + BEGIN_RING(chan, NvSub2D, 0x0814, 2); + if (info->fix.visual == FB_VISUAL_TRUECOLOR || + info->fix.visual == FB_VISUAL_DIRECTCOLOR) { + OUT_RING(chan, palette[image->bg_color] | mask); + OUT_RING(chan, palette[image->fg_color] | mask); + } else { + OUT_RING(chan, image->bg_color); + OUT_RING(chan, image->fg_color); + } + BEGIN_RING(chan, NvSub2D, 0x0838, 2); + OUT_RING(chan, image->width); + OUT_RING(chan, image->height); + BEGIN_RING(chan, NvSub2D, 0x0850, 4); + OUT_RING(chan, 0); + OUT_RING(chan, image->dx); + OUT_RING(chan, 0); + OUT_RING(chan, image->dy); + + while (dwords) { + int push = dwords > 2047 ? 2047 : dwords; + + if (RING_SPACE(chan, push + 1)) { + NV_ERROR(dev, + "GPU lockup - switching to software fbcon\n"); + info->flags |= FBINFO_HWACCEL_DISABLED; + cfb_imageblit(info, image); + return; + } + + dwords -= push; + + BEGIN_RING(chan, NvSub2D, 0x40000860, push); + OUT_RINGp(chan, data, push); + data += push; + } + + FIRE_RING(chan); +} + +int +nv50_fbcon_accel_init(struct fb_info *info) +{ + struct nouveau_fbcon_par *par = info->par; + struct drm_device *dev = par->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = dev_priv->channel; + struct nouveau_gpuobj *eng2d = NULL; + int ret, format; + + switch (info->var.bits_per_pixel) { + case 8: + format = 0xf3; + break; + case 15: + format = 0xf8; + break; + case 16: + format = 0xe8; + break; + case 32: + switch (info->var.transp.length) { + case 0: /* depth 24 */ + case 8: /* depth 32, just use 24.. 
*/ + format = 0xe6; + break; + case 2: /* depth 30 */ + format = 0xd1; + break; + default: + return -EINVAL; + } + break; + default: + return -EINVAL; + } + + ret = nouveau_gpuobj_gr_new(dev_priv->channel, 0x502d, &eng2d); + if (ret) + return ret; + + ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, Nv2D, eng2d, NULL); + if (ret) + return ret; + + ret = RING_SPACE(chan, 59); + if (ret) { + NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); + return ret; + } + + BEGIN_RING(chan, NvSub2D, 0x0000, 1); + OUT_RING(chan, Nv2D); + BEGIN_RING(chan, NvSub2D, 0x0180, 4); + OUT_RING(chan, NvNotify0); + OUT_RING(chan, chan->vram_handle); + OUT_RING(chan, chan->vram_handle); + OUT_RING(chan, chan->vram_handle); + BEGIN_RING(chan, NvSub2D, 0x0290, 1); + OUT_RING(chan, 0); + BEGIN_RING(chan, NvSub2D, 0x0888, 1); + OUT_RING(chan, 1); + BEGIN_RING(chan, NvSub2D, 0x02ac, 1); + OUT_RING(chan, 3); + BEGIN_RING(chan, NvSub2D, 0x02a0, 1); + OUT_RING(chan, 0x55); + BEGIN_RING(chan, NvSub2D, 0x08c0, 4); + OUT_RING(chan, 0); + OUT_RING(chan, 1); + OUT_RING(chan, 0); + OUT_RING(chan, 1); + BEGIN_RING(chan, NvSub2D, 0x0580, 2); + OUT_RING(chan, 4); + OUT_RING(chan, format); + BEGIN_RING(chan, NvSub2D, 0x02e8, 2); + OUT_RING(chan, 2); + OUT_RING(chan, 1); + BEGIN_RING(chan, NvSub2D, 0x0804, 1); + OUT_RING(chan, format); + BEGIN_RING(chan, NvSub2D, 0x0800, 1); + OUT_RING(chan, 1); + BEGIN_RING(chan, NvSub2D, 0x0808, 3); + OUT_RING(chan, 0); + OUT_RING(chan, 0); + OUT_RING(chan, 0); + BEGIN_RING(chan, NvSub2D, 0x081c, 1); + OUT_RING(chan, 1); + BEGIN_RING(chan, NvSub2D, 0x0840, 4); + OUT_RING(chan, 0); + OUT_RING(chan, 1); + OUT_RING(chan, 0); + OUT_RING(chan, 1); + BEGIN_RING(chan, NvSub2D, 0x0200, 2); + OUT_RING(chan, format); + OUT_RING(chan, 1); + BEGIN_RING(chan, NvSub2D, 0x0214, 5); + OUT_RING(chan, info->fix.line_length); + OUT_RING(chan, info->var.xres_virtual); + OUT_RING(chan, info->var.yres_virtual); + OUT_RING(chan, 0); + OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys + + dev_priv->vm_vram_base); + BEGIN_RING(chan, NvSub2D, 0x0230, 2); + OUT_RING(chan, format); + OUT_RING(chan, 1); + BEGIN_RING(chan, NvSub2D, 0x0244, 5); + OUT_RING(chan, info->fix.line_length); + OUT_RING(chan, info->var.xres_virtual); + OUT_RING(chan, info->var.yres_virtual); + OUT_RING(chan, 0); + OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys + + dev_priv->vm_vram_base); + + info->fbops->fb_fillrect = nv50_fbcon_fillrect; + info->fbops->fb_copyarea = nv50_fbcon_copyarea; + info->fbops->fb_imageblit = nv50_fbcon_imageblit; + return 0; +} + diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c new file mode 100644 index 000000000000..77ae1aaa0bce --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_fifo.c @@ -0,0 +1,494 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" + +struct nv50_fifo_priv { + struct nouveau_gpuobj_ref *thingo[2]; + int cur_thingo; +}; + +#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50) + +static void +nv50_fifo_init_thingo(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv; + struct nouveau_gpuobj_ref *cur; + int i, nr; + + NV_DEBUG(dev, "\n"); + + cur = priv->thingo[priv->cur_thingo]; + priv->cur_thingo = !priv->cur_thingo; + + /* We never schedule channel 0 or 127 */ + dev_priv->engine.instmem.prepare_access(dev, true); + for (i = 1, nr = 0; i < 127; i++) { + if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc) + nv_wo32(dev, cur->gpuobj, nr++, i); + } + dev_priv->engine.instmem.finish_access(dev); + + nv_wr32(dev, 0x32f4, cur->instance >> 12); + nv_wr32(dev, 0x32ec, nr); + nv_wr32(dev, 0x2500, 0x101); +} + +static int +nv50_fifo_channel_enable(struct drm_device *dev, int channel, bool nt) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = dev_priv->fifos[channel]; + uint32_t inst; + + NV_DEBUG(dev, "ch%d\n", channel); + + if (!chan->ramfc) + return -EINVAL; + + if (IS_G80) + inst = chan->ramfc->instance >> 12; + else + inst = chan->ramfc->instance >> 8; + nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), + inst | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED); + + if (!nt) + nv50_fifo_init_thingo(dev); + return 0; +} + +static void +nv50_fifo_channel_disable(struct drm_device *dev, int channel, bool nt) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t inst; + + NV_DEBUG(dev, "ch%d, nt=%d\n", channel, nt); + + if (IS_G80) + inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80; + else + inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84; + nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst); + + if (!nt) + nv50_fifo_init_thingo(dev); +} + +static void +nv50_fifo_init_reset(struct drm_device *dev) +{ + uint32_t pmc_e = NV_PMC_ENABLE_PFIFO; + + NV_DEBUG(dev, "\n"); + + nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e); + nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | pmc_e); +} + +static void +nv50_fifo_init_intr(struct drm_device *dev) +{ + NV_DEBUG(dev, "\n"); + + nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF); + nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF); +} + +static void +nv50_fifo_init_context_table(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int i; + + NV_DEBUG(dev, "\n"); + + for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) { + if (dev_priv->fifos[i]) + nv50_fifo_channel_enable(dev, i, true); + else + nv50_fifo_channel_disable(dev, i, true); + } + + nv50_fifo_init_thingo(dev); +} + +static void +nv50_fifo_init_regs__nv(struct drm_device *dev) +{ + NV_DEBUG(dev, "\n"); + + nv_wr32(dev, 0x250c, 0x6f3cfc34); +} + +static void +nv50_fifo_init_regs(struct drm_device *dev) +{ + NV_DEBUG(dev, "\n"); + + nv_wr32(dev, 0x2500, 0); + nv_wr32(dev, 0x3250, 0); 
+ nv_wr32(dev, 0x3220, 0); + nv_wr32(dev, 0x3204, 0); + nv_wr32(dev, 0x3210, 0); + nv_wr32(dev, 0x3270, 0); + + /* Enable dummy channels setup by nv50_instmem.c */ + nv50_fifo_channel_enable(dev, 0, true); + nv50_fifo_channel_enable(dev, 127, true); +} + +int +nv50_fifo_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nv50_fifo_priv *priv; + int ret; + + NV_DEBUG(dev, "\n"); + + priv = dev_priv->engine.fifo.priv; + if (priv) { + priv->cur_thingo = !priv->cur_thingo; + goto just_reset; + } + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + dev_priv->engine.fifo.priv = priv; + + ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000, + NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[0]); + if (ret) { + NV_ERROR(dev, "error creating thingo0: %d\n", ret); + return ret; + } + + ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000, + NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[1]); + if (ret) { + NV_ERROR(dev, "error creating thingo1: %d\n", ret); + return ret; + } + +just_reset: + nv50_fifo_init_reset(dev); + nv50_fifo_init_intr(dev); + nv50_fifo_init_context_table(dev); + nv50_fifo_init_regs__nv(dev); + nv50_fifo_init_regs(dev); + dev_priv->engine.fifo.enable(dev); + dev_priv->engine.fifo.reassign(dev, true); + + return 0; +} + +void +nv50_fifo_takedown(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv; + + NV_DEBUG(dev, "\n"); + + if (!priv) + return; + + nouveau_gpuobj_ref_del(dev, &priv->thingo[0]); + nouveau_gpuobj_ref_del(dev, &priv->thingo[1]); + + dev_priv->engine.fifo.priv = NULL; + kfree(priv); +} + +int +nv50_fifo_channel_id(struct drm_device *dev) +{ + return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & + NV50_PFIFO_CACHE1_PUSH1_CHID_MASK; +} + +int +nv50_fifo_create_context(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *ramfc = NULL; + int ret; + + NV_DEBUG(dev, "ch%d\n", chan->id); + + if (IS_G80) { + uint32_t ramin_poffset = chan->ramin->gpuobj->im_pramin->start; + uint32_t ramin_voffset = chan->ramin->gpuobj->im_backing_start; + + ret = nouveau_gpuobj_new_fake(dev, ramin_poffset, ramin_voffset, + 0x100, NVOBJ_FLAG_ZERO_ALLOC | + NVOBJ_FLAG_ZERO_FREE, &ramfc, + &chan->ramfc); + if (ret) + return ret; + + ret = nouveau_gpuobj_new_fake(dev, ramin_poffset + 0x0400, + ramin_voffset + 0x0400, 4096, + 0, NULL, &chan->cache); + if (ret) + return ret; + } else { + ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 0x100, 256, + NVOBJ_FLAG_ZERO_ALLOC | + NVOBJ_FLAG_ZERO_FREE, + &chan->ramfc); + if (ret) + return ret; + ramfc = chan->ramfc->gpuobj; + + ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 256, + 0, &chan->cache); + if (ret) + return ret; + } + + dev_priv->engine.instmem.prepare_access(dev, true); + + nv_wo32(dev, ramfc, 0x08/4, chan->pushbuf_base); + nv_wo32(dev, ramfc, 0x10/4, chan->pushbuf_base); + nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4); + nv_wo32(dev, ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4)); + nv_wo32(dev, ramfc, 0x3c/4, 0x00086078); + nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff); + nv_wo32(dev, ramfc, 0x60/4, 0x7fffffff); + nv_wo32(dev, ramfc, 0x40/4, 0x00000000); + nv_wo32(dev, ramfc, 0x7c/4, 0x30000001); + nv_wo32(dev, ramfc, 0x78/4, 0x00000000); + nv_wo32(dev, ramfc, 0x4c/4, 0xffffffff); + + if (!IS_G80) { + nv_wo32(dev, chan->ramin->gpuobj, 0, chan->id); + 
nv_wo32(dev, chan->ramin->gpuobj, 1, + chan->ramfc->instance >> 8); + + nv_wo32(dev, ramfc, 0x88/4, chan->cache->instance >> 10); + nv_wo32(dev, ramfc, 0x98/4, chan->ramin->instance >> 12); + } + + dev_priv->engine.instmem.finish_access(dev); + + ret = nv50_fifo_channel_enable(dev, chan->id, false); + if (ret) { + NV_ERROR(dev, "error enabling ch%d: %d\n", chan->id, ret); + nouveau_gpuobj_ref_del(dev, &chan->ramfc); + return ret; + } + + return 0; +} + +void +nv50_fifo_destroy_context(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + + NV_DEBUG(dev, "ch%d\n", chan->id); + + nouveau_gpuobj_ref_del(dev, &chan->ramfc); + nouveau_gpuobj_ref_del(dev, &chan->cache); + + nv50_fifo_channel_disable(dev, chan->id, false); + + /* Dummy channel, also used on ch 127 */ + if (chan->id == 0) + nv50_fifo_channel_disable(dev, 127, false); +} + +int +nv50_fifo_load_context(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj; + struct nouveau_gpuobj *cache = chan->cache->gpuobj; + int ptr, cnt; + + NV_DEBUG(dev, "ch%d\n", chan->id); + + dev_priv->engine.instmem.prepare_access(dev, false); + + nv_wr32(dev, 0x3330, nv_ro32(dev, ramfc, 0x00/4)); + nv_wr32(dev, 0x3334, nv_ro32(dev, ramfc, 0x04/4)); + nv_wr32(dev, 0x3240, nv_ro32(dev, ramfc, 0x08/4)); + nv_wr32(dev, 0x3320, nv_ro32(dev, ramfc, 0x0c/4)); + nv_wr32(dev, 0x3244, nv_ro32(dev, ramfc, 0x10/4)); + nv_wr32(dev, 0x3328, nv_ro32(dev, ramfc, 0x14/4)); + nv_wr32(dev, 0x3368, nv_ro32(dev, ramfc, 0x18/4)); + nv_wr32(dev, 0x336c, nv_ro32(dev, ramfc, 0x1c/4)); + nv_wr32(dev, 0x3370, nv_ro32(dev, ramfc, 0x20/4)); + nv_wr32(dev, 0x3374, nv_ro32(dev, ramfc, 0x24/4)); + nv_wr32(dev, 0x3378, nv_ro32(dev, ramfc, 0x28/4)); + nv_wr32(dev, 0x337c, nv_ro32(dev, ramfc, 0x2c/4)); + nv_wr32(dev, 0x3228, nv_ro32(dev, ramfc, 0x30/4)); + nv_wr32(dev, 0x3364, nv_ro32(dev, ramfc, 0x34/4)); + nv_wr32(dev, 0x32a0, nv_ro32(dev, ramfc, 0x38/4)); + nv_wr32(dev, 0x3224, nv_ro32(dev, ramfc, 0x3c/4)); + nv_wr32(dev, 0x324c, nv_ro32(dev, ramfc, 0x40/4)); + nv_wr32(dev, 0x2044, nv_ro32(dev, ramfc, 0x44/4)); + nv_wr32(dev, 0x322c, nv_ro32(dev, ramfc, 0x48/4)); + nv_wr32(dev, 0x3234, nv_ro32(dev, ramfc, 0x4c/4)); + nv_wr32(dev, 0x3340, nv_ro32(dev, ramfc, 0x50/4)); + nv_wr32(dev, 0x3344, nv_ro32(dev, ramfc, 0x54/4)); + nv_wr32(dev, 0x3280, nv_ro32(dev, ramfc, 0x58/4)); + nv_wr32(dev, 0x3254, nv_ro32(dev, ramfc, 0x5c/4)); + nv_wr32(dev, 0x3260, nv_ro32(dev, ramfc, 0x60/4)); + nv_wr32(dev, 0x3264, nv_ro32(dev, ramfc, 0x64/4)); + nv_wr32(dev, 0x3268, nv_ro32(dev, ramfc, 0x68/4)); + nv_wr32(dev, 0x326c, nv_ro32(dev, ramfc, 0x6c/4)); + nv_wr32(dev, 0x32e4, nv_ro32(dev, ramfc, 0x70/4)); + nv_wr32(dev, 0x3248, nv_ro32(dev, ramfc, 0x74/4)); + nv_wr32(dev, 0x2088, nv_ro32(dev, ramfc, 0x78/4)); + nv_wr32(dev, 0x2058, nv_ro32(dev, ramfc, 0x7c/4)); + nv_wr32(dev, 0x2210, nv_ro32(dev, ramfc, 0x80/4)); + + cnt = nv_ro32(dev, ramfc, 0x84/4); + for (ptr = 0; ptr < cnt; ptr++) { + nv_wr32(dev, NV40_PFIFO_CACHE1_METHOD(ptr), + nv_ro32(dev, cache, (ptr * 2) + 0)); + nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr), + nv_ro32(dev, cache, (ptr * 2) + 1)); + } + nv_wr32(dev, 0x3210, cnt << 2); + nv_wr32(dev, 0x3270, 0); + + /* guessing that all the 0x34xx regs aren't on NV50 */ + if (!IS_G80) { + nv_wr32(dev, 0x340c, nv_ro32(dev, ramfc, 0x88/4)); + nv_wr32(dev, 0x3400, nv_ro32(dev, ramfc, 0x8c/4)); + nv_wr32(dev, 0x3404, nv_ro32(dev, ramfc, 0x90/4)); + nv_wr32(dev, 
0x3408, nv_ro32(dev, ramfc, 0x94/4)); + nv_wr32(dev, 0x3410, nv_ro32(dev, ramfc, 0x98/4)); + } + + dev_priv->engine.instmem.finish_access(dev); + + nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0); + nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0); + nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16)); + return 0; +} + +int +nv50_fifo_unload_context(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; + struct nouveau_gpuobj *ramfc, *cache; + struct nouveau_channel *chan = NULL; + int chid, get, put, ptr; + + NV_DEBUG(dev, "\n"); + + chid = pfifo->channel_id(dev); + if (chid < 0 || chid >= dev_priv->engine.fifo.channels) + return 0; + + chan = dev_priv->fifos[chid]; + if (!chan) { + NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid); + return -EINVAL; + } + NV_DEBUG(dev, "ch%d\n", chan->id); + ramfc = chan->ramfc->gpuobj; + cache = chan->cache->gpuobj; + + dev_priv->engine.instmem.prepare_access(dev, true); + + nv_wo32(dev, ramfc, 0x00/4, nv_rd32(dev, 0x3330)); + nv_wo32(dev, ramfc, 0x04/4, nv_rd32(dev, 0x3334)); + nv_wo32(dev, ramfc, 0x08/4, nv_rd32(dev, 0x3240)); + nv_wo32(dev, ramfc, 0x0c/4, nv_rd32(dev, 0x3320)); + nv_wo32(dev, ramfc, 0x10/4, nv_rd32(dev, 0x3244)); + nv_wo32(dev, ramfc, 0x14/4, nv_rd32(dev, 0x3328)); + nv_wo32(dev, ramfc, 0x18/4, nv_rd32(dev, 0x3368)); + nv_wo32(dev, ramfc, 0x1c/4, nv_rd32(dev, 0x336c)); + nv_wo32(dev, ramfc, 0x20/4, nv_rd32(dev, 0x3370)); + nv_wo32(dev, ramfc, 0x24/4, nv_rd32(dev, 0x3374)); + nv_wo32(dev, ramfc, 0x28/4, nv_rd32(dev, 0x3378)); + nv_wo32(dev, ramfc, 0x2c/4, nv_rd32(dev, 0x337c)); + nv_wo32(dev, ramfc, 0x30/4, nv_rd32(dev, 0x3228)); + nv_wo32(dev, ramfc, 0x34/4, nv_rd32(dev, 0x3364)); + nv_wo32(dev, ramfc, 0x38/4, nv_rd32(dev, 0x32a0)); + nv_wo32(dev, ramfc, 0x3c/4, nv_rd32(dev, 0x3224)); + nv_wo32(dev, ramfc, 0x40/4, nv_rd32(dev, 0x324c)); + nv_wo32(dev, ramfc, 0x44/4, nv_rd32(dev, 0x2044)); + nv_wo32(dev, ramfc, 0x48/4, nv_rd32(dev, 0x322c)); + nv_wo32(dev, ramfc, 0x4c/4, nv_rd32(dev, 0x3234)); + nv_wo32(dev, ramfc, 0x50/4, nv_rd32(dev, 0x3340)); + nv_wo32(dev, ramfc, 0x54/4, nv_rd32(dev, 0x3344)); + nv_wo32(dev, ramfc, 0x58/4, nv_rd32(dev, 0x3280)); + nv_wo32(dev, ramfc, 0x5c/4, nv_rd32(dev, 0x3254)); + nv_wo32(dev, ramfc, 0x60/4, nv_rd32(dev, 0x3260)); + nv_wo32(dev, ramfc, 0x64/4, nv_rd32(dev, 0x3264)); + nv_wo32(dev, ramfc, 0x68/4, nv_rd32(dev, 0x3268)); + nv_wo32(dev, ramfc, 0x6c/4, nv_rd32(dev, 0x326c)); + nv_wo32(dev, ramfc, 0x70/4, nv_rd32(dev, 0x32e4)); + nv_wo32(dev, ramfc, 0x74/4, nv_rd32(dev, 0x3248)); + nv_wo32(dev, ramfc, 0x78/4, nv_rd32(dev, 0x2088)); + nv_wo32(dev, ramfc, 0x7c/4, nv_rd32(dev, 0x2058)); + nv_wo32(dev, ramfc, 0x80/4, nv_rd32(dev, 0x2210)); + + put = (nv_rd32(dev, NV03_PFIFO_CACHE1_PUT) & 0x7ff) >> 2; + get = (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) & 0x7ff) >> 2; + ptr = 0; + while (put != get) { + nv_wo32(dev, cache, ptr++, + nv_rd32(dev, NV40_PFIFO_CACHE1_METHOD(get))); + nv_wo32(dev, cache, ptr++, + nv_rd32(dev, NV40_PFIFO_CACHE1_DATA(get))); + get = (get + 1) & 0x1ff; + } + + /* guessing that all the 0x34xx regs aren't on NV50 */ + if (!IS_G80) { + nv_wo32(dev, ramfc, 0x84/4, ptr >> 1); + nv_wo32(dev, ramfc, 0x88/4, nv_rd32(dev, 0x340c)); + nv_wo32(dev, ramfc, 0x8c/4, nv_rd32(dev, 0x3400)); + nv_wo32(dev, ramfc, 0x90/4, nv_rd32(dev, 0x3404)); + nv_wo32(dev, ramfc, 0x94/4, nv_rd32(dev, 0x3408)); + nv_wo32(dev, ramfc, 0x98/4, nv_rd32(dev, 0x3410)); + } + + dev_priv->engine.instmem.finish_access(dev); + + /*XXX: probably 
reload ch127 (NULL) state back too */ + nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, 127); + return 0; +} + diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c new file mode 100644 index 000000000000..177d8229336f --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_graph.c @@ -0,0 +1,385 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" + +MODULE_FIRMWARE("nouveau/nv50.ctxprog"); +MODULE_FIRMWARE("nouveau/nv50.ctxvals"); +MODULE_FIRMWARE("nouveau/nv84.ctxprog"); +MODULE_FIRMWARE("nouveau/nv84.ctxvals"); +MODULE_FIRMWARE("nouveau/nv86.ctxprog"); +MODULE_FIRMWARE("nouveau/nv86.ctxvals"); +MODULE_FIRMWARE("nouveau/nv92.ctxprog"); +MODULE_FIRMWARE("nouveau/nv92.ctxvals"); +MODULE_FIRMWARE("nouveau/nv94.ctxprog"); +MODULE_FIRMWARE("nouveau/nv94.ctxvals"); +MODULE_FIRMWARE("nouveau/nv96.ctxprog"); +MODULE_FIRMWARE("nouveau/nv96.ctxvals"); +MODULE_FIRMWARE("nouveau/nv98.ctxprog"); +MODULE_FIRMWARE("nouveau/nv98.ctxvals"); +MODULE_FIRMWARE("nouveau/nva0.ctxprog"); +MODULE_FIRMWARE("nouveau/nva0.ctxvals"); +MODULE_FIRMWARE("nouveau/nva5.ctxprog"); +MODULE_FIRMWARE("nouveau/nva5.ctxvals"); +MODULE_FIRMWARE("nouveau/nva8.ctxprog"); +MODULE_FIRMWARE("nouveau/nva8.ctxvals"); +MODULE_FIRMWARE("nouveau/nvaa.ctxprog"); +MODULE_FIRMWARE("nouveau/nvaa.ctxvals"); +MODULE_FIRMWARE("nouveau/nvac.ctxprog"); +MODULE_FIRMWARE("nouveau/nvac.ctxvals"); + +#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50) + +static void +nv50_graph_init_reset(struct drm_device *dev) +{ + uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21); + + NV_DEBUG(dev, "\n"); + + nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e); + nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | pmc_e); +} + +static void +nv50_graph_init_intr(struct drm_device *dev) +{ + NV_DEBUG(dev, "\n"); + + nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff); + nv_wr32(dev, 0x400138, 0xffffffff); + nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff); +} + +static void +nv50_graph_init_regs__nv(struct drm_device *dev) +{ + NV_DEBUG(dev, "\n"); + + nv_wr32(dev, 0x400804, 0xc0000000); + nv_wr32(dev, 0x406800, 0xc0000000); + nv_wr32(dev, 0x400c04, 0xc0000000); + nv_wr32(dev, 0x401804, 0xc0000000); + nv_wr32(dev, 0x405018, 0xc0000000); + nv_wr32(dev, 0x402000, 0xc0000000); + + nv_wr32(dev, 0x400108, 0xffffffff); + + 
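+ /* 0x400500 below appears to be the PGRAPH FIFO access toggle (nv50_graph_fifo_access() uses the same 0x00010001 mask); 0x400824 is also prodded during context load/unload */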
nv_wr32(dev, 0x400824, 0x00004000); + nv_wr32(dev, 0x400500, 0x00010001); +} + +static void +nv50_graph_init_regs(struct drm_device *dev) +{ + NV_DEBUG(dev, "\n"); + + nv_wr32(dev, NV04_PGRAPH_DEBUG_3, + (1 << 2) /* HW_CONTEXT_SWITCH_ENABLED */); + nv_wr32(dev, 0x402ca8, 0x800); +} + +static int +nv50_graph_init_ctxctl(struct drm_device *dev) +{ + NV_DEBUG(dev, "\n"); + + nv40_grctx_init(dev); + + nv_wr32(dev, 0x400320, 4); + nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0); + nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, 0); + return 0; +} + +int +nv50_graph_init(struct drm_device *dev) +{ + int ret; + + NV_DEBUG(dev, "\n"); + + nv50_graph_init_reset(dev); + nv50_graph_init_regs__nv(dev); + nv50_graph_init_regs(dev); + nv50_graph_init_intr(dev); + + ret = nv50_graph_init_ctxctl(dev); + if (ret) + return ret; + + return 0; +} + +void +nv50_graph_takedown(struct drm_device *dev) +{ + NV_DEBUG(dev, "\n"); + nv40_grctx_fini(dev); +} + +void +nv50_graph_fifo_access(struct drm_device *dev, bool enabled) +{ + const uint32_t mask = 0x00010001; + + if (enabled) + nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | mask); + else + nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) & ~mask); +} + +struct nouveau_channel * +nv50_graph_channel(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t inst; + int i; + + inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); + if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) + return NULL; + inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12; + + for (i = 0; i < dev_priv->engine.fifo.channels; i++) { + struct nouveau_channel *chan = dev_priv->fifos[i]; + + if (chan && chan->ramin && chan->ramin->instance == inst) + return chan; + } + + return NULL; +} + +int +nv50_graph_create_context(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *ramin = chan->ramin->gpuobj; + struct nouveau_gpuobj *ctx; + uint32_t grctx_size = 0x70000; + int hdr, ret; + + NV_DEBUG(dev, "ch%d\n", chan->id); + + ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, grctx_size, 0x1000, + NVOBJ_FLAG_ZERO_ALLOC | + NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx); + if (ret) + return ret; + ctx = chan->ramin_grctx->gpuobj; + + hdr = IS_G80 ? 0x200 : 0x20; + dev_priv->engine.instmem.prepare_access(dev, true); + nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002); + nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance + + grctx_size - 1); + nv_wo32(dev, ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance); + nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0); + nv_wo32(dev, ramin, (hdr + 0x10)/4, 0); + nv_wo32(dev, ramin, (hdr + 0x14)/4, 0x00010000); + dev_priv->engine.instmem.finish_access(dev); + + dev_priv->engine.instmem.prepare_access(dev, true); + nv40_grctx_vals_load(dev, ctx); + nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12); + if ((dev_priv->chipset & 0xf0) == 0xa0) + nv_wo32(dev, ctx, 0x00004/4, 0x00000000); + else + nv_wo32(dev, ctx, 0x0011c/4, 0x00000000); + dev_priv->engine.instmem.finish_access(dev); + + return 0; +} + +void +nv50_graph_destroy_context(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + int i, hdr = IS_G80 ? 
0x200 : 0x20; + + NV_DEBUG(dev, "ch%d\n", chan->id); + + if (!chan->ramin || !chan->ramin->gpuobj) + return; + + dev_priv->engine.instmem.prepare_access(dev, true); + for (i = hdr; i < hdr + 24; i += 4) + nv_wo32(dev, chan->ramin->gpuobj, i/4, 0); + dev_priv->engine.instmem.finish_access(dev); + + nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); +} + +static int +nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst) +{ + uint32_t fifo = nv_rd32(dev, 0x400500); + + nv_wr32(dev, 0x400500, fifo & ~1); + nv_wr32(dev, 0x400784, inst); + nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x40); + nv_wr32(dev, 0x400320, nv_rd32(dev, 0x400320) | 0x11); + nv_wr32(dev, 0x400040, 0xffffffff); + (void)nv_rd32(dev, 0x400040); + nv_wr32(dev, 0x400040, 0x00000000); + nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 1); + + if (nouveau_wait_for_idle(dev)) + nv_wr32(dev, 0x40032c, inst | (1<<31)); + nv_wr32(dev, 0x400500, fifo); + + return 0; +} + +int +nv50_graph_load_context(struct nouveau_channel *chan) +{ + uint32_t inst = chan->ramin->instance >> 12; + + NV_DEBUG(chan->dev, "ch%d\n", chan->id); + return nv50_graph_do_load_context(chan->dev, inst); +} + +int +nv50_graph_unload_context(struct drm_device *dev) +{ + uint32_t inst, fifo = nv_rd32(dev, 0x400500); + + inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); + if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) + return 0; + inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE; + + nv_wr32(dev, 0x400500, fifo & ~1); + nv_wr32(dev, 0x400784, inst); + nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20); + nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01); + nouveau_wait_for_idle(dev); + nv_wr32(dev, 0x400500, fifo); + + nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst); + return 0; +} + +void +nv50_graph_context_switch(struct drm_device *dev) +{ + uint32_t inst; + + nv50_graph_unload_context(dev); + + inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_NEXT); + inst &= NV50_PGRAPH_CTXCTL_NEXT_INSTANCE; + nv50_graph_do_load_context(dev, inst); + + nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev, + NV40_PGRAPH_INTR_EN) | NV_PGRAPH_INTR_CONTEXT_SWITCH); +} + +static int +nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass, + int mthd, uint32_t data) +{ + struct nouveau_gpuobj_ref *ref = NULL; + + if (nouveau_gpuobj_ref_find(chan, data, &ref)) + return -ENOENT; + + if (nouveau_notifier_offset(ref->gpuobj, NULL)) + return -EINVAL; + + chan->nvsw.vblsem = ref->gpuobj; + chan->nvsw.vblsem_offset = ~0; + return 0; +} + +static int +nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, int grclass, + int mthd, uint32_t data) +{ + if (nouveau_notifier_offset(chan->nvsw.vblsem, &data)) + return -ERANGE; + + chan->nvsw.vblsem_offset = data >> 2; + return 0; +} + +static int +nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan, int grclass, + int mthd, uint32_t data) +{ + chan->nvsw.vblsem_rval = data; + return 0; +} + +static int +nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, int grclass, + int mthd, uint32_t data) +{ + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1) + return -EINVAL; + + if (!(nv_rd32(dev, NV50_PDISPLAY_INTR_EN) & + NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data))) { + nv_wr32(dev, NV50_PDISPLAY_INTR_1, + NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(data)); + nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev, + NV50_PDISPLAY_INTR_EN) | + NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data)); + } + + list_add(&chan->nvsw.vbl_wait, 
&dev_priv->vbl_waiting); + return 0; +} + +static struct nouveau_pgraph_object_method nv50_graph_nvsw_methods[] = { + { 0x018c, nv50_graph_nvsw_dma_vblsem }, + { 0x0400, nv50_graph_nvsw_vblsem_offset }, + { 0x0404, nv50_graph_nvsw_vblsem_release_val }, + { 0x0408, nv50_graph_nvsw_vblsem_release }, + {} +}; + +struct nouveau_pgraph_object_class nv50_graph_grclass[] = { + { 0x506e, true, nv50_graph_nvsw_methods }, /* nvsw */ + { 0x0030, false, NULL }, /* null */ + { 0x5039, false, NULL }, /* m2mf */ + { 0x502d, false, NULL }, /* 2d */ + { 0x50c0, false, NULL }, /* compute */ + { 0x5097, false, NULL }, /* tesla (nv50) */ + { 0x8297, false, NULL }, /* tesla (nv80/nv90) */ + { 0x8397, false, NULL }, /* tesla (nva0) */ + { 0x8597, false, NULL }, /* tesla (nva8) */ + {} +}; diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c new file mode 100644 index 000000000000..94400f777e7f --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -0,0 +1,509 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" + +struct nv50_instmem_priv { + uint32_t save1700[5]; /* 0x1700->0x1710 */ + + struct nouveau_gpuobj_ref *pramin_pt; + struct nouveau_gpuobj_ref *pramin_bar; + struct nouveau_gpuobj_ref *fb_bar; + + bool last_access_wr; +}; + +#define NV50_INSTMEM_PAGE_SHIFT 12 +#define NV50_INSTMEM_PAGE_SIZE (1 << NV50_INSTMEM_PAGE_SHIFT) +#define NV50_INSTMEM_PT_SIZE(a) (((a) >> 12) << 3) + +/*NOTE: - Assumes 0x1700 already covers the correct MiB of PRAMIN + */ +#define BAR0_WI32(g, o, v) do { \ + uint32_t offset; \ + if ((g)->im_backing) { \ + offset = (g)->im_backing_start; \ + } else { \ + offset = chan->ramin->gpuobj->im_backing_start; \ + offset += (g)->im_pramin->start; \ + } \ + offset += (o); \ + nv_wr32(dev, NV_RAMIN + (offset & 0xfffff), (v)); \ +} while (0) + +int +nv50_instmem_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan; + uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size; + struct nv50_instmem_priv *priv; + int ret, i; + uint32_t v, save_nv001700; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + dev_priv->engine.instmem.priv = priv; + + /* Save state, will restore at takedown. 
*/ + for (i = 0x1700; i <= 0x1710; i += 4) + priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i); + + /* Reserve the last MiB of VRAM, we should probably try to avoid + * setting up the below tables over the top of the VBIOS image at + * some point. + */ + dev_priv->ramin_rsvd_vram = 1 << 20; + c_offset = nouveau_mem_fb_amount(dev) - dev_priv->ramin_rsvd_vram; + c_size = 128 << 10; + c_vmpd = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200; + c_ramfc = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x0 : 0x20; + c_base = c_vmpd + 0x4000; + pt_size = NV50_INSTMEM_PT_SIZE(dev_priv->ramin_size); + + NV_DEBUG(dev, " Rsvd VRAM base: 0x%08x\n", c_offset); + NV_DEBUG(dev, " VBIOS image: 0x%08x\n", + (nv_rd32(dev, 0x619f04) & ~0xff) << 8); + NV_DEBUG(dev, " Aperture size: %d MiB\n", dev_priv->ramin_size >> 20); + NV_DEBUG(dev, " PT size: %d KiB\n", pt_size >> 10); + + /* Determine VM layout, we need to do this first to make sure + * we allocate enough memory for all the page tables. + */ + dev_priv->vm_gart_base = roundup(NV50_VM_BLOCK, NV50_VM_BLOCK); + dev_priv->vm_gart_size = NV50_VM_BLOCK; + + dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size; + dev_priv->vm_vram_size = nouveau_mem_fb_amount(dev); + if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM) + dev_priv->vm_vram_size = NV50_VM_MAX_VRAM; + dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK); + dev_priv->vm_vram_pt_nr = dev_priv->vm_vram_size / NV50_VM_BLOCK; + + dev_priv->vm_end = dev_priv->vm_vram_base + dev_priv->vm_vram_size; + + NV_DEBUG(dev, "NV50VM: GART 0x%016llx-0x%016llx\n", + dev_priv->vm_gart_base, + dev_priv->vm_gart_base + dev_priv->vm_gart_size - 1); + NV_DEBUG(dev, "NV50VM: VRAM 0x%016llx-0x%016llx\n", + dev_priv->vm_vram_base, + dev_priv->vm_vram_base + dev_priv->vm_vram_size - 1); + + c_size += dev_priv->vm_vram_pt_nr * (NV50_VM_BLOCK / 65536 * 8); + + /* Map BAR0 PRAMIN aperture over the memory we want to use */ + save_nv001700 = nv_rd32(dev, NV50_PUNK_BAR0_PRAMIN); + nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (c_offset >> 16)); + + /* Create a fake channel, and use it as our "dummy" channels 0/127. + * The main reason for creating a channel is so we can use the gpuobj + * code. However, it's probably worth noting that NVIDIA also setup + * their channels 0/127 with the same values they configure here. + * So, there may be some other reason for doing this. + * + * Have to create the entire channel manually, as the real channel + * creation code assumes we have PRAMIN access, and we don't until + * we're done here. 
+ */ + chan = kzalloc(sizeof(*chan), GFP_KERNEL); + if (!chan) + return -ENOMEM; + chan->id = 0; + chan->dev = dev; + chan->file_priv = (struct drm_file *)-2; + dev_priv->fifos[0] = dev_priv->fifos[127] = chan; + + /* Channel's PRAMIN object + heap */ + ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0, + NULL, &chan->ramin); + if (ret) + return ret; + + if (nouveau_mem_init_heap(&chan->ramin_heap, c_base, c_size - c_base)) + return -ENOMEM; + + /* RAMFC + zero channel's PRAMIN up to start of VM pagedir */ + ret = nouveau_gpuobj_new_fake(dev, c_ramfc, c_offset + c_ramfc, + 0x4000, 0, NULL, &chan->ramfc); + if (ret) + return ret; + + for (i = 0; i < c_vmpd; i += 4) + BAR0_WI32(chan->ramin->gpuobj, i, 0); + + /* VM page directory */ + ret = nouveau_gpuobj_new_fake(dev, c_vmpd, c_offset + c_vmpd, + 0x4000, 0, &chan->vm_pd, NULL); + if (ret) + return ret; + for (i = 0; i < 0x4000; i += 8) { + BAR0_WI32(chan->vm_pd, i + 0x00, 0x00000000); + BAR0_WI32(chan->vm_pd, i + 0x04, 0x00000000); + } + + /* PRAMIN page table, cheat and map into VM at 0x0000000000. + * We map the entire fake channel into the start of the PRAMIN BAR + */ + ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000, + 0, &priv->pramin_pt); + if (ret) + return ret; + + for (i = 0, v = c_offset; i < pt_size; i += 8, v += 0x1000) { + if (v < (c_offset + c_size)) + BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1); + else + BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009); + BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000); + } + + BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63); + BAR0_WI32(chan->vm_pd, 0x04, 0x00000000); + + /* VRAM page table(s), mapped into VM at +1GiB */ + for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) { + ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, + NV50_VM_BLOCK/65536*8, 0, 0, + &chan->vm_vram_pt[i]); + if (ret) { + NV_ERROR(dev, "Error creating VRAM page tables: %d\n", + ret); + dev_priv->vm_vram_pt_nr = i; + return ret; + } + dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i]->gpuobj; + + for (v = 0; v < dev_priv->vm_vram_pt[i]->im_pramin->size; + v += 4) + BAR0_WI32(dev_priv->vm_vram_pt[i], v, 0); + + BAR0_WI32(chan->vm_pd, 0x10 + (i*8), + chan->vm_vram_pt[i]->instance | 0x61); + BAR0_WI32(chan->vm_pd, 0x14 + (i*8), 0); + } + + /* DMA object for PRAMIN BAR */ + ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0, + &priv->pramin_bar); + if (ret) + return ret; + BAR0_WI32(priv->pramin_bar->gpuobj, 0x00, 0x7fc00000); + BAR0_WI32(priv->pramin_bar->gpuobj, 0x04, dev_priv->ramin_size - 1); + BAR0_WI32(priv->pramin_bar->gpuobj, 0x08, 0x00000000); + BAR0_WI32(priv->pramin_bar->gpuobj, 0x0c, 0x00000000); + BAR0_WI32(priv->pramin_bar->gpuobj, 0x10, 0x00000000); + BAR0_WI32(priv->pramin_bar->gpuobj, 0x14, 0x00000000); + + /* DMA object for FB BAR */ + ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0, + &priv->fb_bar); + if (ret) + return ret; + BAR0_WI32(priv->fb_bar->gpuobj, 0x00, 0x7fc00000); + BAR0_WI32(priv->fb_bar->gpuobj, 0x04, 0x40000000 + + drm_get_resource_len(dev, 1) - 1); + BAR0_WI32(priv->fb_bar->gpuobj, 0x08, 0x40000000); + BAR0_WI32(priv->fb_bar->gpuobj, 0x0c, 0x00000000); + BAR0_WI32(priv->fb_bar->gpuobj, 0x10, 0x00000000); + BAR0_WI32(priv->fb_bar->gpuobj, 0x14, 0x00000000); + + /* Poke the relevant regs, and pray it works :) */ + nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12)); + nv_wr32(dev, NV50_PUNK_UNK1710, 0); + nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) | + NV50_PUNK_BAR_CFG_BASE_VALID); + 
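+ /* hook the FB and PRAMIN DMA objects created above up to BAR1 and BAR3 */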
nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) | + NV50_PUNK_BAR1_CTXDMA_VALID); + nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) | + NV50_PUNK_BAR3_CTXDMA_VALID); + + for (i = 0; i < 8; i++) + nv_wr32(dev, 0x1900 + (i*4), 0); + + /* Assume that praying isn't enough, check that we can re-read the + * entire fake channel back from the PRAMIN BAR */ + dev_priv->engine.instmem.prepare_access(dev, false); + for (i = 0; i < c_size; i += 4) { + if (nv_rd32(dev, NV_RAMIN + i) != nv_ri32(dev, i)) { + NV_ERROR(dev, "Error reading back PRAMIN at 0x%08x\n", + i); + dev_priv->engine.instmem.finish_access(dev); + return -EINVAL; + } + } + dev_priv->engine.instmem.finish_access(dev); + + nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, save_nv001700); + + /* Global PRAMIN heap */ + if (nouveau_mem_init_heap(&dev_priv->ramin_heap, + c_size, dev_priv->ramin_size - c_size)) { + dev_priv->ramin_heap = NULL; + NV_ERROR(dev, "Failed to init RAMIN heap\n"); + } + + /*XXX: incorrect, but needed to make hash func "work" */ + dev_priv->ramht_offset = 0x10000; + dev_priv->ramht_bits = 9; + dev_priv->ramht_size = (1 << dev_priv->ramht_bits); + return 0; +} + +void +nv50_instmem_takedown(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; + struct nouveau_channel *chan = dev_priv->fifos[0]; + int i; + + NV_DEBUG(dev, "\n"); + + if (!priv) + return; + + /* Restore state from before init */ + for (i = 0x1700; i <= 0x1710; i += 4) + nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]); + + nouveau_gpuobj_ref_del(dev, &priv->fb_bar); + nouveau_gpuobj_ref_del(dev, &priv->pramin_bar); + nouveau_gpuobj_ref_del(dev, &priv->pramin_pt); + + /* Destroy dummy channel */ + if (chan) { + for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) { + nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]); + dev_priv->vm_vram_pt[i] = NULL; + } + dev_priv->vm_vram_pt_nr = 0; + + nouveau_gpuobj_del(dev, &chan->vm_pd); + nouveau_gpuobj_ref_del(dev, &chan->ramfc); + nouveau_gpuobj_ref_del(dev, &chan->ramin); + nouveau_mem_takedown(&chan->ramin_heap); + + dev_priv->fifos[0] = dev_priv->fifos[127] = NULL; + kfree(chan); + } + + dev_priv->engine.instmem.priv = NULL; + kfree(priv); +} + +int +nv50_instmem_suspend(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = dev_priv->fifos[0]; + struct nouveau_gpuobj *ramin = chan->ramin->gpuobj; + int i; + + ramin->im_backing_suspend = vmalloc(ramin->im_pramin->size); + if (!ramin->im_backing_suspend) + return -ENOMEM; + + for (i = 0; i < ramin->im_pramin->size; i += 4) + ramin->im_backing_suspend[i/4] = nv_ri32(dev, i); + return 0; +} + +void +nv50_instmem_resume(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; + struct nouveau_channel *chan = dev_priv->fifos[0]; + struct nouveau_gpuobj *ramin = chan->ramin->gpuobj; + int i; + + nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (ramin->im_backing_start >> 16)); + for (i = 0; i < ramin->im_pramin->size; i += 4) + BAR0_WI32(ramin, i, ramin->im_backing_suspend[i/4]); + vfree(ramin->im_backing_suspend); + ramin->im_backing_suspend = NULL; + + /* Poke the relevant regs, and pray it works :) */ + nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12)); + nv_wr32(dev, NV50_PUNK_UNK1710, 0); + nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) | + 
NV50_PUNK_BAR_CFG_BASE_VALID); + nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) | + NV50_PUNK_BAR1_CTXDMA_VALID); + nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) | + NV50_PUNK_BAR3_CTXDMA_VALID); + + for (i = 0; i < 8; i++) + nv_wr32(dev, 0x1900 + (i*4), 0); +} + +int +nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, + uint32_t *sz) +{ + int ret; + + if (gpuobj->im_backing) + return -EINVAL; + + *sz = (*sz + (NV50_INSTMEM_PAGE_SIZE-1)) & ~(NV50_INSTMEM_PAGE_SIZE-1); + if (*sz == 0) + return -EINVAL; + + ret = nouveau_bo_new(dev, NULL, *sz, 0, TTM_PL_FLAG_VRAM, 0, 0x0000, + true, false, &gpuobj->im_backing); + if (ret) { + NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret); + return ret; + } + + ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM); + if (ret) { + NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret); + nouveau_bo_ref(NULL, &gpuobj->im_backing); + return ret; + } + + gpuobj->im_backing_start = gpuobj->im_backing->bo.mem.mm_node->start; + gpuobj->im_backing_start <<= PAGE_SHIFT; + + return 0; +} + +void +nv50_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (gpuobj && gpuobj->im_backing) { + if (gpuobj->im_bound) + dev_priv->engine.instmem.unbind(dev, gpuobj); + nouveau_bo_unpin(gpuobj->im_backing); + nouveau_bo_ref(NULL, &gpuobj->im_backing); + gpuobj->im_backing = NULL; + } +} + +int +nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; + uint32_t pte, pte_end, vram; + + if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound) + return -EINVAL; + + NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n", + gpuobj->im_pramin->start, gpuobj->im_pramin->size); + + pte = (gpuobj->im_pramin->start >> 12) << 3; + pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte; + vram = gpuobj->im_backing_start; + + NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n", + gpuobj->im_pramin->start, pte, pte_end); + NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start); + + dev_priv->engine.instmem.prepare_access(dev, true); + while (pte < pte_end) { + nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1); + nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000); + + pte += 8; + vram += NV50_INSTMEM_PAGE_SIZE; + } + dev_priv->engine.instmem.finish_access(dev); + + nv_wr32(dev, 0x100c80, 0x00040001); + if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { + NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (1)\n"); + NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); + return -EBUSY; + } + + nv_wr32(dev, 0x100c80, 0x00060001); + if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { + NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); + NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); + return -EBUSY; + } + + gpuobj->im_bound = 1; + return 0; +} + +int +nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; + uint32_t pte, pte_end; + + if (gpuobj->im_bound == 0) + return -EINVAL; + + pte = (gpuobj->im_pramin->start >> 12) << 3; + pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte; + + dev_priv->engine.instmem.prepare_access(dev, true); + while (pte < pte_end) { + nv_wo32(dev, 
priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009); + nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000); + pte += 8; + } + dev_priv->engine.instmem.finish_access(dev); + + gpuobj->im_bound = 0; + return 0; +} + +void +nv50_instmem_prepare_access(struct drm_device *dev, bool write) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; + + priv->last_access_wr = write; +} + +void +nv50_instmem_finish_access(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; + + if (priv->last_access_wr) { + nv_wr32(dev, 0x070000, 0x00000001); + if (!nv_wait(0x070000, 0x00000001, 0x00000000)) + NV_ERROR(dev, "PRAMIN flush timeout\n"); + } +} + diff --git a/drivers/gpu/drm/nouveau/nv50_mc.c b/drivers/gpu/drm/nouveau/nv50_mc.c new file mode 100644 index 000000000000..e0a9c3faa202 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_mc.c @@ -0,0 +1,40 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" + +int +nv50_mc_init(struct drm_device *dev) +{ + nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF); + return 0; +} + +void nv50_mc_takedown(struct drm_device *dev) +{ +} diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c new file mode 100644 index 000000000000..8c280463a664 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_sor.c @@ -0,0 +1,309 @@ +/* + * Copyright (C) 2008 Maarten Maathuis. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "drmP.h" +#include "drm_crtc_helper.h" + +#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) +#include "nouveau_reg.h" +#include "nouveau_drv.h" +#include "nouveau_dma.h" +#include "nouveau_encoder.h" +#include "nouveau_connector.h" +#include "nouveau_crtc.h" +#include "nv50_display.h" + +static void +nv50_sor_disconnect(struct nouveau_encoder *nv_encoder) +{ + struct drm_device *dev = to_drm_encoder(nv_encoder)->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *evo = dev_priv->evo; + int ret; + + NV_DEBUG(dev, "Disconnecting SOR %d\n", nv_encoder->or); + + ret = RING_SPACE(evo, 2); + if (ret) { + NV_ERROR(dev, "no space while disconnecting SOR\n"); + return; + } + BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1); + OUT_RING(evo, 0); +} + +static void +nv50_sor_dp_link_train(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct bit_displayport_encoder_table *dpe; + int dpe_headerlen; + + dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen); + if (!dpe) { + NV_ERROR(dev, "SOR-%d: no DP encoder table!\n", nv_encoder->or); + return; + } + + if (dpe->script0) { + NV_DEBUG(dev, "SOR-%d: running DP script 0\n", nv_encoder->or); + nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script0), + nv_encoder->dcb); + } + + if (!nouveau_dp_link_train(encoder)) + NV_ERROR(dev, "SOR-%d: link training failed\n", nv_encoder->or); + + if (dpe->script1) { + NV_DEBUG(dev, "SOR-%d: running DP script 1\n", nv_encoder->or); + nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script1), + nv_encoder->dcb); + } +} + +static void +nv50_sor_dpms(struct drm_encoder *encoder, int mode) +{ + struct drm_device *dev = encoder->dev; + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + uint32_t val; + int or = nv_encoder->or; + + NV_DEBUG(dev, "or %d mode %d\n", or, mode); + + /* wait for it to be done */ + if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or), + NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) { + NV_ERROR(dev, "timeout: SOR_DPMS_CTRL_PENDING(%d) == 0\n", or); + NV_ERROR(dev, "SOR_DPMS_CTRL(%d) = 0x%08x\n", or, + nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or))); + } + + val = nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or)); + + if (mode == DRM_MODE_DPMS_ON) + val |= NV50_PDISPLAY_SOR_DPMS_CTRL_ON; + else + val &= ~NV50_PDISPLAY_SOR_DPMS_CTRL_ON; + + nv_wr32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or), val | + NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING); + if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_STATE(or), + NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) { + NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", or); + NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", or, + nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_STATE(or))); + } + + if (nv_encoder->dcb->type == OUTPUT_DP && mode == DRM_MODE_DPMS_ON) + nv50_sor_dp_link_train(encoder); +} + +static void +nv50_sor_save(struct drm_encoder *encoder) +{ + NV_ERROR(encoder->dev, "!!\n"); +} + +static void 
+nv50_sor_restore(struct drm_encoder *encoder) +{ + NV_ERROR(encoder->dev, "!!\n"); +} + +static bool +nv50_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_connector *connector; + + NV_DEBUG(encoder->dev, "or %d\n", nv_encoder->or); + + connector = nouveau_encoder_connector_get(nv_encoder); + if (!connector) { + NV_ERROR(encoder->dev, "Encoder has no connector\n"); + return false; + } + + if (connector->scaling_mode != DRM_MODE_SCALE_NONE && + connector->native_mode) { + int id = adjusted_mode->base.id; + *adjusted_mode = *connector->native_mode; + adjusted_mode->base.id = id; + } + + return true; +} + +static void +nv50_sor_prepare(struct drm_encoder *encoder) +{ +} + +static void +nv50_sor_commit(struct drm_encoder *encoder) +{ +} + +static void +nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_nouveau_private *dev_priv = encoder->dev->dev_private; + struct nouveau_channel *evo = dev_priv->evo; + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc); + uint32_t mode_ctl = 0; + int ret; + + NV_DEBUG(dev, "or %d\n", nv_encoder->or); + + nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON); + + switch (nv_encoder->dcb->type) { + case OUTPUT_TMDS: + if (nv_encoder->dcb->sorconf.link & 1) { + if (adjusted_mode->clock < 165000) + mode_ctl = 0x0100; + else + mode_ctl = 0x0500; + } else + mode_ctl = 0x0200; + break; + case OUTPUT_DP: + mode_ctl |= 0x00050000; + if (nv_encoder->dcb->sorconf.link & 1) + mode_ctl |= 0x00000800; + else + mode_ctl |= 0x00000900; + break; + default: + break; + } + + if (crtc->index == 1) + mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC1; + else + mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC0; + + if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) + mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NHSYNC; + + if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) + mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC; + + ret = RING_SPACE(evo, 2); + if (ret) { + NV_ERROR(dev, "no space while connecting SOR\n"); + return; + } + BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1); + OUT_RING(evo, mode_ctl); +} + +static const struct drm_encoder_helper_funcs nv50_sor_helper_funcs = { + .dpms = nv50_sor_dpms, + .save = nv50_sor_save, + .restore = nv50_sor_restore, + .mode_fixup = nv50_sor_mode_fixup, + .prepare = nv50_sor_prepare, + .commit = nv50_sor_commit, + .mode_set = nv50_sor_mode_set, + .detect = NULL +}; + +static void +nv50_sor_destroy(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + + if (!encoder) + return; + + NV_DEBUG(encoder->dev, "\n"); + + drm_encoder_cleanup(encoder); + + kfree(nv_encoder); +} + +static const struct drm_encoder_funcs nv50_sor_encoder_funcs = { + .destroy = nv50_sor_destroy, +}; + +int +nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry) +{ + struct nouveau_encoder *nv_encoder = NULL; + struct drm_encoder *encoder; + bool dum; + int type; + + NV_DEBUG(dev, "\n"); + + switch (entry->type) { + case OUTPUT_TMDS: + NV_INFO(dev, "Detected a TMDS output\n"); + type = DRM_MODE_ENCODER_TMDS; + break; + case OUTPUT_LVDS: + NV_INFO(dev, "Detected a LVDS output\n"); + type = DRM_MODE_ENCODER_LVDS; + + if (nouveau_bios_parse_lvds_table(dev, 0, &dum, &dum)) { + NV_ERROR(dev, "Failed parsing 
LVDS table\n"); + return -EINVAL; + } + break; + case OUTPUT_DP: + NV_INFO(dev, "Detected a DP output\n"); + type = DRM_MODE_ENCODER_TMDS; + break; + default: + return -EINVAL; + } + + nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); + if (!nv_encoder) + return -ENOMEM; + encoder = to_drm_encoder(nv_encoder); + + nv_encoder->dcb = entry; + nv_encoder->or = ffs(entry->or) - 1; + + nv_encoder->disconnect = nv50_sor_disconnect; + + drm_encoder_init(dev, encoder, &nv50_sor_encoder_funcs, type); + drm_encoder_helper_add(encoder, &nv50_sor_helper_funcs); + + encoder->possible_crtcs = entry->heads; + encoder->possible_clones = 0; + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/nvreg.h new file mode 100644 index 000000000000..5998c35237b0 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvreg.h @@ -0,0 +1,535 @@ +/* $XConsortium: nvreg.h /main/2 1996/10/28 05:13:41 kaleb $ */ +/* + * Copyright 1996-1997 David J. McKay + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * DAVID J. MCKAY BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF + * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +/* $XFree86: xc/programs/Xserver/hw/xfree86/drivers/nv/nvreg.h,v 1.6 2002/01/25 21:56:06 tsi Exp $ */ + +#ifndef __NVREG_H_ +#define __NVREG_H_ + +#define NV_PMC_OFFSET 0x00000000 +#define NV_PMC_SIZE 0x00001000 + +#define NV_PBUS_OFFSET 0x00001000 +#define NV_PBUS_SIZE 0x00001000 + +#define NV_PFIFO_OFFSET 0x00002000 +#define NV_PFIFO_SIZE 0x00002000 + +#define NV_HDIAG_OFFSET 0x00005000 +#define NV_HDIAG_SIZE 0x00001000 + +#define NV_PRAM_OFFSET 0x00006000 +#define NV_PRAM_SIZE 0x00001000 + +#define NV_PVIDEO_OFFSET 0x00008000 +#define NV_PVIDEO_SIZE 0x00001000 + +#define NV_PTIMER_OFFSET 0x00009000 +#define NV_PTIMER_SIZE 0x00001000 + +#define NV_PPM_OFFSET 0x0000A000 +#define NV_PPM_SIZE 0x00001000 + +#define NV_PTV_OFFSET 0x0000D000 +#define NV_PTV_SIZE 0x00001000 + +#define NV_PRMVGA_OFFSET 0x000A0000 +#define NV_PRMVGA_SIZE 0x00020000 + +#define NV_PRMVIO0_OFFSET 0x000C0000 +#define NV_PRMVIO_SIZE 0x00002000 +#define NV_PRMVIO1_OFFSET 0x000C2000 + +#define NV_PFB_OFFSET 0x00100000 +#define NV_PFB_SIZE 0x00001000 + +#define NV_PEXTDEV_OFFSET 0x00101000 +#define NV_PEXTDEV_SIZE 0x00001000 + +#define NV_PME_OFFSET 0x00200000 +#define NV_PME_SIZE 0x00001000 + +#define NV_PROM_OFFSET 0x00300000 +#define NV_PROM_SIZE 0x00010000 + +#define NV_PGRAPH_OFFSET 0x00400000 +#define NV_PGRAPH_SIZE 0x00010000 + +#define NV_PCRTC0_OFFSET 0x00600000 +#define NV_PCRTC0_SIZE 0x00002000 /* empirical */ + +#define NV_PRMCIO0_OFFSET 0x00601000 +#define NV_PRMCIO_SIZE 0x00002000 +#define NV_PRMCIO1_OFFSET 0x00603000 + +#define NV50_DISPLAY_OFFSET 0x00610000 +#define NV50_DISPLAY_SIZE 0x0000FFFF + +#define NV_PRAMDAC0_OFFSET 0x00680000 +#define NV_PRAMDAC0_SIZE 0x00002000 + +#define NV_PRMDIO0_OFFSET 0x00681000 +#define NV_PRMDIO_SIZE 0x00002000 +#define NV_PRMDIO1_OFFSET 0x00683000 + +#define NV_PRAMIN_OFFSET 0x00700000 +#define NV_PRAMIN_SIZE 0x00100000 + +#define NV_FIFO_OFFSET 0x00800000 +#define NV_FIFO_SIZE 0x00800000 + +#define NV_PMC_BOOT_0 0x00000000 +#define NV_PMC_ENABLE 0x00000200 + +#define NV_VIO_VSE2 0x000003c3 +#define NV_VIO_SRX 0x000003c4 + +#define NV_CIO_CRX__COLOR 0x000003d4 +#define NV_CIO_CR__COLOR 0x000003d5 + +#define NV_PBUS_DEBUG_1 0x00001084 +#define NV_PBUS_DEBUG_4 0x00001098 +#define NV_PBUS_DEBUG_DUALHEAD_CTL 0x000010f0 +#define NV_PBUS_POWERCTRL_1 0x00001584 +#define NV_PBUS_POWERCTRL_2 0x00001588 +#define NV_PBUS_POWERCTRL_4 0x00001590 +#define NV_PBUS_PCI_NV_19 0x0000184C +#define NV_PBUS_PCI_NV_20 0x00001850 +# define NV_PBUS_PCI_NV_20_ROM_SHADOW_DISABLED (0 << 0) +# define NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED (1 << 0) + +#define NV_PFIFO_RAMHT 0x00002210 + +#define NV_PTV_TV_INDEX 0x0000d220 +#define NV_PTV_TV_DATA 0x0000d224 +#define NV_PTV_HFILTER 0x0000d310 +#define NV_PTV_HFILTER2 0x0000d390 +#define NV_PTV_VFILTER 0x0000d510 + +#define NV_PRMVIO_MISC__WRITE 0x000c03c2 +#define NV_PRMVIO_SRX 0x000c03c4 +#define NV_PRMVIO_SR 0x000c03c5 +# define NV_VIO_SR_RESET_INDEX 0x00 +# define NV_VIO_SR_CLOCK_INDEX 0x01 +# define NV_VIO_SR_PLANE_MASK_INDEX 0x02 +# define NV_VIO_SR_CHAR_MAP_INDEX 0x03 +# define NV_VIO_SR_MEM_MODE_INDEX 0x04 +#define NV_PRMVIO_MISC__READ 0x000c03cc +#define NV_PRMVIO_GRX 0x000c03ce +#define NV_PRMVIO_GX 0x000c03cf +# define NV_VIO_GX_SR_INDEX 0x00 +# define NV_VIO_GX_SREN_INDEX 0x01 +# define NV_VIO_GX_CCOMP_INDEX 0x02 +# define NV_VIO_GX_ROP_INDEX 0x03 +# define NV_VIO_GX_READ_MAP_INDEX 0x04 +# define NV_VIO_GX_MODE_INDEX 0x05 +# define NV_VIO_GX_MISC_INDEX 0x06 +# define NV_VIO_GX_DONT_CARE_INDEX 0x07 +# define NV_VIO_GX_BIT_MASK_INDEX 
0x08 + +#define NV_PFB_BOOT_0 0x00100000 +#define NV_PFB_CFG0 0x00100200 +#define NV_PFB_CFG1 0x00100204 +#define NV_PFB_CSTATUS 0x0010020C +#define NV_PFB_REFCTRL 0x00100210 +# define NV_PFB_REFCTRL_VALID_1 (1 << 31) +#define NV_PFB_PAD 0x0010021C +# define NV_PFB_PAD_CKE_NORMAL (1 << 0) +#define NV_PFB_TILE_NV10 0x00100240 +#define NV_PFB_TILE_SIZE_NV10 0x00100244 +#define NV_PFB_REF 0x001002D0 +# define NV_PFB_REF_CMD_REFRESH (1 << 0) +#define NV_PFB_PRE 0x001002D4 +# define NV_PFB_PRE_CMD_PRECHARGE (1 << 0) +#define NV_PFB_CLOSE_PAGE2 0x0010033C +#define NV_PFB_TILE_NV40 0x00100600 +#define NV_PFB_TILE_SIZE_NV40 0x00100604 + +#define NV_PEXTDEV_BOOT_0 0x00101000 +# define NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT (8 << 12) +#define NV_PEXTDEV_BOOT_3 0x0010100c + +#define NV_PCRTC_INTR_0 0x00600100 +# define NV_PCRTC_INTR_0_VBLANK (1 << 0) +#define NV_PCRTC_INTR_EN_0 0x00600140 +#define NV_PCRTC_START 0x00600800 +#define NV_PCRTC_CONFIG 0x00600804 +# define NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA (1 << 0) +# define NV_PCRTC_CONFIG_START_ADDRESS_HSYNC (2 << 0) +#define NV_PCRTC_CURSOR_CONFIG 0x00600810 +# define NV_PCRTC_CURSOR_CONFIG_ENABLE_ENABLE (1 << 0) +# define NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE (1 << 4) +# define NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM (1 << 8) +# define NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32 (1 << 12) +# define NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 (1 << 16) +# define NV_PCRTC_CURSOR_CONFIG_CUR_LINES_32 (2 << 24) +# define NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 (4 << 24) +# define NV_PCRTC_CURSOR_CONFIG_CUR_BLEND_ALPHA (1 << 28) + +/* note: PCRTC_GPIO is not available on nv10, and in fact aliases 0x600810 */ +#define NV_PCRTC_GPIO 0x00600818 +#define NV_PCRTC_GPIO_EXT 0x0060081c +#define NV_PCRTC_830 0x00600830 +#define NV_PCRTC_834 0x00600834 +#define NV_PCRTC_850 0x00600850 +#define NV_PCRTC_ENGINE_CTRL 0x00600860 +# define NV_CRTC_FSEL_I2C (1 << 4) +# define NV_CRTC_FSEL_OVERLAY (1 << 12) + +#define NV_PRMCIO_ARX 0x006013c0 +#define NV_PRMCIO_AR__WRITE 0x006013c0 +#define NV_PRMCIO_AR__READ 0x006013c1 +# define NV_CIO_AR_MODE_INDEX 0x10 +# define NV_CIO_AR_OSCAN_INDEX 0x11 +# define NV_CIO_AR_PLANE_INDEX 0x12 +# define NV_CIO_AR_HPP_INDEX 0x13 +# define NV_CIO_AR_CSEL_INDEX 0x14 +#define NV_PRMCIO_INP0 0x006013c2 +#define NV_PRMCIO_CRX__COLOR 0x006013d4 +#define NV_PRMCIO_CR__COLOR 0x006013d5 + /* Standard VGA CRTC registers */ +# define NV_CIO_CR_HDT_INDEX 0x00 /* horizontal display total */ +# define NV_CIO_CR_HDE_INDEX 0x01 /* horizontal display end */ +# define NV_CIO_CR_HBS_INDEX 0x02 /* horizontal blanking start */ +# define NV_CIO_CR_HBE_INDEX 0x03 /* horizontal blanking end */ +# define NV_CIO_CR_HBE_4_0 4:0 +# define NV_CIO_CR_HRS_INDEX 0x04 /* horizontal retrace start */ +# define NV_CIO_CR_HRE_INDEX 0x05 /* horizontal retrace end */ +# define NV_CIO_CR_HRE_4_0 4:0 +# define NV_CIO_CR_HRE_HBE_5 7:7 +# define NV_CIO_CR_VDT_INDEX 0x06 /* vertical display total */ +# define NV_CIO_CR_OVL_INDEX 0x07 /* overflow bits */ +# define NV_CIO_CR_OVL_VDT_8 0:0 +# define NV_CIO_CR_OVL_VDE_8 1:1 +# define NV_CIO_CR_OVL_VRS_8 2:2 +# define NV_CIO_CR_OVL_VBS_8 3:3 +# define NV_CIO_CR_OVL_VDT_9 5:5 +# define NV_CIO_CR_OVL_VDE_9 6:6 +# define NV_CIO_CR_OVL_VRS_9 7:7 +# define NV_CIO_CR_RSAL_INDEX 0x08 /* normally "preset row scan" */ +# define NV_CIO_CR_CELL_HT_INDEX 0x09 /* cell height?! 
normally "max scan line" */ +# define NV_CIO_CR_CELL_HT_VBS_9 5:5 +# define NV_CIO_CR_CELL_HT_SCANDBL 7:7 +# define NV_CIO_CR_CURS_ST_INDEX 0x0a /* cursor start */ +# define NV_CIO_CR_CURS_END_INDEX 0x0b /* cursor end */ +# define NV_CIO_CR_SA_HI_INDEX 0x0c /* screen start address high */ +# define NV_CIO_CR_SA_LO_INDEX 0x0d /* screen start address low */ +# define NV_CIO_CR_TCOFF_HI_INDEX 0x0e /* cursor offset high */ +# define NV_CIO_CR_TCOFF_LO_INDEX 0x0f /* cursor offset low */ +# define NV_CIO_CR_VRS_INDEX 0x10 /* vertical retrace start */ +# define NV_CIO_CR_VRE_INDEX 0x11 /* vertical retrace end */ +# define NV_CIO_CR_VRE_3_0 3:0 +# define NV_CIO_CR_VDE_INDEX 0x12 /* vertical display end */ +# define NV_CIO_CR_OFFSET_INDEX 0x13 /* sets screen pitch */ +# define NV_CIO_CR_ULINE_INDEX 0x14 /* underline location */ +# define NV_CIO_CR_VBS_INDEX 0x15 /* vertical blank start */ +# define NV_CIO_CR_VBE_INDEX 0x16 /* vertical blank end */ +# define NV_CIO_CR_MODE_INDEX 0x17 /* crtc mode control */ +# define NV_CIO_CR_LCOMP_INDEX 0x18 /* line compare */ + /* Extended VGA CRTC registers */ +# define NV_CIO_CRE_RPC0_INDEX 0x19 /* repaint control 0 */ +# define NV_CIO_CRE_RPC0_OFFSET_10_8 7:5 +# define NV_CIO_CRE_RPC1_INDEX 0x1a /* repaint control 1 */ +# define NV_CIO_CRE_RPC1_LARGE 2:2 +# define NV_CIO_CRE_FF_INDEX 0x1b /* fifo control */ +# define NV_CIO_CRE_ENH_INDEX 0x1c /* enhanced? */ +# define NV_CIO_SR_LOCK_INDEX 0x1f /* crtc lock */ +# define NV_CIO_SR_UNLOCK_RW_VALUE 0x57 +# define NV_CIO_SR_LOCK_VALUE 0x99 +# define NV_CIO_CRE_FFLWM__INDEX 0x20 /* fifo low water mark */ +# define NV_CIO_CRE_21 0x21 /* vga shadow crtc lock */ +# define NV_CIO_CRE_LSR_INDEX 0x25 /* ? */ +# define NV_CIO_CRE_LSR_VDT_10 0:0 +# define NV_CIO_CRE_LSR_VDE_10 1:1 +# define NV_CIO_CRE_LSR_VRS_10 2:2 +# define NV_CIO_CRE_LSR_VBS_10 3:3 +# define NV_CIO_CRE_LSR_HBE_6 4:4 +# define NV_CIO_CR_ARX_INDEX 0x26 /* attribute index -- ro copy of 0x60.3c0 */ +# define NV_CIO_CRE_CHIP_ID_INDEX 0x27 /* chip revision */ +# define NV_CIO_CRE_PIXEL_INDEX 0x28 +# define NV_CIO_CRE_PIXEL_FORMAT 1:0 +# define NV_CIO_CRE_HEB__INDEX 0x2d /* horizontal extra bits? */ +# define NV_CIO_CRE_HEB_HDT_8 0:0 +# define NV_CIO_CRE_HEB_HDE_8 1:1 +# define NV_CIO_CRE_HEB_HBS_8 2:2 +# define NV_CIO_CRE_HEB_HRS_8 3:3 +# define NV_CIO_CRE_HEB_ILC_8 4:4 +# define NV_CIO_CRE_2E 0x2e /* some scratch or dummy reg to force writes to sink in */ +# define NV_CIO_CRE_HCUR_ADDR2_INDEX 0x2f /* cursor */ +# define NV_CIO_CRE_HCUR_ADDR0_INDEX 0x30 /* pixmap */ +# define NV_CIO_CRE_HCUR_ADDR0_ADR 6:0 +# define NV_CIO_CRE_HCUR_ASI 7:7 +# define NV_CIO_CRE_HCUR_ADDR1_INDEX 0x31 /* address */ +# define NV_CIO_CRE_HCUR_ADDR1_ENABLE 0:0 +# define NV_CIO_CRE_HCUR_ADDR1_CUR_DBL 1:1 +# define NV_CIO_CRE_HCUR_ADDR1_ADR 7:2 +# define NV_CIO_CRE_LCD__INDEX 0x33 +# define NV_CIO_CRE_LCD_LCD_SELECT 0:0 +# define NV_CIO_CRE_DDC0_STATUS__INDEX 0x36 +# define NV_CIO_CRE_DDC0_WR__INDEX 0x37 +# define NV_CIO_CRE_ILACE__INDEX 0x39 /* interlace */ +# define NV_CIO_CRE_SCRATCH3__INDEX 0x3b +# define NV_CIO_CRE_SCRATCH4__INDEX 0x3c +# define NV_CIO_CRE_DDC_STATUS__INDEX 0x3e +# define NV_CIO_CRE_DDC_WR__INDEX 0x3f +# define NV_CIO_CRE_EBR_INDEX 0x41 /* extra bits ? 
(vertical) */ +# define NV_CIO_CRE_EBR_VDT_11 0:0 +# define NV_CIO_CRE_EBR_VDE_11 2:2 +# define NV_CIO_CRE_EBR_VRS_11 4:4 +# define NV_CIO_CRE_EBR_VBS_11 6:6 +# define NV_CIO_CRE_43 0x43 +# define NV_CIO_CRE_44 0x44 /* head control */ +# define NV_CIO_CRE_CSB 0x45 /* colour saturation boost */ +# define NV_CIO_CRE_RCR 0x46 +# define NV_CIO_CRE_RCR_ENDIAN_BIG 7:7 +# define NV_CIO_CRE_47 0x47 /* extended fifo lwm, used on nv30+ */ +# define NV_CIO_CRE_49 0x49 +# define NV_CIO_CRE_4B 0x4b /* given patterns in 0x[2-3][a-c] regs, probably scratch 6 */ +# define NV_CIO_CRE_TVOUT_LATENCY 0x52 +# define NV_CIO_CRE_53 0x53 /* `fp_htiming' according to Haiku */ +# define NV_CIO_CRE_54 0x54 /* `fp_vtiming' according to Haiku */ +# define NV_CIO_CRE_57 0x57 /* index reg for cr58 */ +# define NV_CIO_CRE_58 0x58 /* data reg for cr57 */ +# define NV_CIO_CRE_59 0x59 /* related to on/off-chip-ness of digital outputs */ +# define NV_CIO_CRE_5B 0x5B /* newer colour saturation reg */ +# define NV_CIO_CRE_85 0x85 +# define NV_CIO_CRE_86 0x86 +#define NV_PRMCIO_INP0__COLOR 0x006013da + +#define NV_PRAMDAC_CU_START_POS 0x00680300 +# define NV_PRAMDAC_CU_START_POS_X 15:0 +# define NV_PRAMDAC_CU_START_POS_Y 31:16 +#define NV_RAMDAC_NV10_CURSYNC 0x00680404 + +#define NV_PRAMDAC_NVPLL_COEFF 0x00680500 +#define NV_PRAMDAC_MPLL_COEFF 0x00680504 +#define NV_PRAMDAC_VPLL_COEFF 0x00680508 +# define NV30_RAMDAC_ENABLE_VCO2 (8 << 4) + +#define NV_PRAMDAC_PLL_COEFF_SELECT 0x0068050c +# define NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE (4 << 0) +# define NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL (1 << 8) +# define NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_VPLL (2 << 8) +# define NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL (4 << 8) +# define NV_PRAMDAC_PLL_COEFF_SELECT_PLL_SOURCE_VPLL2 (8 << 8) +# define NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1 (1 << 16) +# define NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1 (2 << 16) +# define NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK2 (4 << 16) +# define NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK2 (8 << 16) +# define NV_PRAMDAC_PLL_COEFF_SELECT_TV_CLK_SOURCE_VIP (1 << 20) +# define NV_PRAMDAC_PLL_COEFF_SELECT_VCLK_RATIO_DB2 (1 << 28) +# define NV_PRAMDAC_PLL_COEFF_SELECT_VCLK2_RATIO_DB2 (2 << 28) + +#define NV_PRAMDAC_PLL_SETUP_CONTROL 0x00680510 +#define NV_RAMDAC_VPLL2 0x00680520 +#define NV_PRAMDAC_SEL_CLK 0x00680524 +#define NV_RAMDAC_DITHER_NV11 0x00680528 +#define NV_PRAMDAC_DACCLK 0x0068052c +# define NV_PRAMDAC_DACCLK_SEL_DACCLK (1 << 0) + +#define NV_RAMDAC_NVPLL_B 0x00680570 +#define NV_RAMDAC_MPLL_B 0x00680574 +#define NV_RAMDAC_VPLL_B 0x00680578 +#define NV_RAMDAC_VPLL2_B 0x0068057c +# define NV31_RAMDAC_ENABLE_VCO2 (8 << 28) +#define NV_PRAMDAC_580 0x00680580 +# define NV_RAMDAC_580_VPLL1_ACTIVE (1 << 8) +# define NV_RAMDAC_580_VPLL2_ACTIVE (1 << 28) + +#define NV_PRAMDAC_GENERAL_CONTROL 0x00680600 +# define NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON (3 << 4) +# define NV_PRAMDAC_GENERAL_CONTROL_VGA_STATE_SEL (1 << 8) +# define NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL (1 << 12) +# define NV_PRAMDAC_GENERAL_CONTROL_TERMINATION_75OHM (2 << 16) +# define NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS (1 << 20) +# define NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG (2 << 28) +#define NV_PRAMDAC_TEST_CONTROL 0x00680608 +# define NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED (1 << 12) +# define NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF (1 << 16) +# define NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI (1 << 28) +#define NV_PRAMDAC_TESTPOINT_DATA 0x00680610 +# define NV_PRAMDAC_TESTPOINT_DATA_NOTBLANK (8 << 28) +#define NV_PRAMDAC_630 
0x00680630 +#define NV_PRAMDAC_634 0x00680634 + +#define NV_PRAMDAC_TV_SETUP 0x00680700 +#define NV_PRAMDAC_TV_VTOTAL 0x00680720 +#define NV_PRAMDAC_TV_VSKEW 0x00680724 +#define NV_PRAMDAC_TV_VSYNC_DELAY 0x00680728 +#define NV_PRAMDAC_TV_HTOTAL 0x0068072c +#define NV_PRAMDAC_TV_HSKEW 0x00680730 +#define NV_PRAMDAC_TV_HSYNC_DELAY 0x00680734 +#define NV_PRAMDAC_TV_HSYNC_DELAY2 0x00680738 + +#define NV_PRAMDAC_TV_SETUP 0x00680700 + +#define NV_PRAMDAC_FP_VDISPLAY_END 0x00680800 +#define NV_PRAMDAC_FP_VTOTAL 0x00680804 +#define NV_PRAMDAC_FP_VCRTC 0x00680808 +#define NV_PRAMDAC_FP_VSYNC_START 0x0068080c +#define NV_PRAMDAC_FP_VSYNC_END 0x00680810 +#define NV_PRAMDAC_FP_VVALID_START 0x00680814 +#define NV_PRAMDAC_FP_VVALID_END 0x00680818 +#define NV_PRAMDAC_FP_HDISPLAY_END 0x00680820 +#define NV_PRAMDAC_FP_HTOTAL 0x00680824 +#define NV_PRAMDAC_FP_HCRTC 0x00680828 +#define NV_PRAMDAC_FP_HSYNC_START 0x0068082c +#define NV_PRAMDAC_FP_HSYNC_END 0x00680830 +#define NV_PRAMDAC_FP_HVALID_START 0x00680834 +#define NV_PRAMDAC_FP_HVALID_END 0x00680838 + +#define NV_RAMDAC_FP_DITHER 0x0068083c +#define NV_PRAMDAC_FP_TG_CONTROL 0x00680848 +# define NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS (1 << 0) +# define NV_PRAMDAC_FP_TG_CONTROL_VSYNC_DISABLE (2 << 0) +# define NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS (1 << 4) +# define NV_PRAMDAC_FP_TG_CONTROL_HSYNC_DISABLE (2 << 4) +# define NV_PRAMDAC_FP_TG_CONTROL_MODE_SCALE (0 << 8) +# define NV_PRAMDAC_FP_TG_CONTROL_MODE_CENTER (1 << 8) +# define NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE (2 << 8) +# define NV_PRAMDAC_FP_TG_CONTROL_READ_PROG (1 << 20) +# define NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12 (1 << 24) +# define NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS (1 << 28) +# define NV_PRAMDAC_FP_TG_CONTROL_DISPEN_DISABLE (2 << 28) +#define NV_PRAMDAC_FP_MARGIN_COLOR 0x0068084c +#define NV_PRAMDAC_850 0x00680850 +#define NV_PRAMDAC_85C 0x0068085c +#define NV_PRAMDAC_FP_DEBUG_0 0x00680880 +# define NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE (1 << 0) +# define NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE (1 << 4) +/* This doesn't seem to be essential for tmds, but still often set */ +# define NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED (8 << 4) +# define NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR (1 << 8) +# define NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR (1 << 12) +# define NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND (1 << 20) +# define NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND (1 << 24) +# define NV_PRAMDAC_FP_DEBUG_0_PWRDOWN_FPCLK (1 << 28) +#define NV_PRAMDAC_FP_DEBUG_1 0x00680884 +# define NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE 11:0 +# define NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE (1 << 12) +# define NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE 27:16 +# define NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE (1 << 28) +#define NV_PRAMDAC_FP_DEBUG_2 0x00680888 +#define NV_PRAMDAC_FP_DEBUG_3 0x0068088C + +/* see NV_PRAMDAC_INDIR_TMDS in rules.xml */ +#define NV_PRAMDAC_FP_TMDS_CONTROL 0x006808b0 +# define NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE (1 << 16) +#define NV_PRAMDAC_FP_TMDS_DATA 0x006808b4 + +#define NV_PRAMDAC_8C0 0x006808c0 + +/* Some kind of switch */ +#define NV_PRAMDAC_900 0x00680900 +#define NV_PRAMDAC_A20 0x00680A20 +#define NV_PRAMDAC_A24 0x00680A24 +#define NV_PRAMDAC_A34 0x00680A34 + +#define NV_PRAMDAC_CTV 0x00680c00 + +/* names fabricated from NV_USER_DAC info */ +#define NV_PRMDIO_PIXEL_MASK 0x006813c6 +# define NV_PRMDIO_PIXEL_MASK_MASK 0xff +#define NV_PRMDIO_READ_MODE_ADDRESS 0x006813c7 +#define NV_PRMDIO_WRITE_MODE_ADDRESS 0x006813c8 +#define NV_PRMDIO_PALETTE_DATA 0x006813c9 + +#define NV_PGRAPH_DEBUG_0 0x00400080 +#define 
NV_PGRAPH_DEBUG_1 0x00400084 +#define NV_PGRAPH_DEBUG_2_NV04 0x00400088 +#define NV_PGRAPH_DEBUG_2 0x00400620 +#define NV_PGRAPH_DEBUG_3 0x0040008c +#define NV_PGRAPH_DEBUG_4 0x00400090 +#define NV_PGRAPH_INTR 0x00400100 +#define NV_PGRAPH_INTR_EN 0x00400140 +#define NV_PGRAPH_CTX_CONTROL 0x00400144 +#define NV_PGRAPH_CTX_CONTROL_NV04 0x00400170 +#define NV_PGRAPH_ABS_UCLIP_XMIN 0x0040053C +#define NV_PGRAPH_ABS_UCLIP_YMIN 0x00400540 +#define NV_PGRAPH_ABS_UCLIP_XMAX 0x00400544 +#define NV_PGRAPH_ABS_UCLIP_YMAX 0x00400548 +#define NV_PGRAPH_BETA_AND 0x00400608 +#define NV_PGRAPH_LIMIT_VIOL_PIX 0x00400610 +#define NV_PGRAPH_BOFFSET0 0x00400640 +#define NV_PGRAPH_BOFFSET1 0x00400644 +#define NV_PGRAPH_BOFFSET2 0x00400648 +#define NV_PGRAPH_BLIMIT0 0x00400684 +#define NV_PGRAPH_BLIMIT1 0x00400688 +#define NV_PGRAPH_BLIMIT2 0x0040068c +#define NV_PGRAPH_STATUS 0x00400700 +#define NV_PGRAPH_SURFACE 0x00400710 +#define NV_PGRAPH_STATE 0x00400714 +#define NV_PGRAPH_FIFO 0x00400720 +#define NV_PGRAPH_PATTERN_SHAPE 0x00400810 +#define NV_PGRAPH_TILE 0x00400b00 + +#define NV_PVIDEO_INTR_EN 0x00008140 +#define NV_PVIDEO_BUFFER 0x00008700 +#define NV_PVIDEO_STOP 0x00008704 +#define NV_PVIDEO_UVPLANE_BASE(buff) (0x00008800+(buff)*4) +#define NV_PVIDEO_UVPLANE_LIMIT(buff) (0x00008808+(buff)*4) +#define NV_PVIDEO_UVPLANE_OFFSET_BUFF(buff) (0x00008820+(buff)*4) +#define NV_PVIDEO_BASE(buff) (0x00008900+(buff)*4) +#define NV_PVIDEO_LIMIT(buff) (0x00008908+(buff)*4) +#define NV_PVIDEO_LUMINANCE(buff) (0x00008910+(buff)*4) +#define NV_PVIDEO_CHROMINANCE(buff) (0x00008918+(buff)*4) +#define NV_PVIDEO_OFFSET_BUFF(buff) (0x00008920+(buff)*4) +#define NV_PVIDEO_SIZE_IN(buff) (0x00008928+(buff)*4) +#define NV_PVIDEO_POINT_IN(buff) (0x00008930+(buff)*4) +#define NV_PVIDEO_DS_DX(buff) (0x00008938+(buff)*4) +#define NV_PVIDEO_DT_DY(buff) (0x00008940+(buff)*4) +#define NV_PVIDEO_POINT_OUT(buff) (0x00008948+(buff)*4) +#define NV_PVIDEO_SIZE_OUT(buff) (0x00008950+(buff)*4) +#define NV_PVIDEO_FORMAT(buff) (0x00008958+(buff)*4) +# define NV_PVIDEO_FORMAT_PLANAR (1 << 0) +# define NV_PVIDEO_FORMAT_COLOR_LE_CR8YB8CB8YA8 (1 << 16) +# define NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY (1 << 20) +# define NV_PVIDEO_FORMAT_MATRIX_ITURBT709 (1 << 24) +#define NV_PVIDEO_COLOR_KEY 0x00008B00 + +/* NV04 overlay defines from VIDIX & Haiku */ +#define NV_PVIDEO_INTR_EN_0 0x00680140 +#define NV_PVIDEO_STEP_SIZE 0x00680200 +#define NV_PVIDEO_CONTROL_Y 0x00680204 +#define NV_PVIDEO_CONTROL_X 0x00680208 +#define NV_PVIDEO_BUFF0_START_ADDRESS 0x0068020c +#define NV_PVIDEO_BUFF0_PITCH_LENGTH 0x00680214 +#define NV_PVIDEO_BUFF0_OFFSET 0x0068021c +#define NV_PVIDEO_BUFF1_START_ADDRESS 0x00680210 +#define NV_PVIDEO_BUFF1_PITCH_LENGTH 0x00680218 +#define NV_PVIDEO_BUFF1_OFFSET 0x00680220 +#define NV_PVIDEO_OE_STATE 0x00680224 +#define NV_PVIDEO_SU_STATE 0x00680228 +#define NV_PVIDEO_RM_STATE 0x0068022c +#define NV_PVIDEO_WINDOW_START 0x00680230 +#define NV_PVIDEO_WINDOW_SIZE 0x00680234 +#define NV_PVIDEO_FIFO_THRES_SIZE 0x00680238 +#define NV_PVIDEO_FIFO_BURST_LENGTH 0x0068023c +#define NV_PVIDEO_KEY 0x00680240 +#define NV_PVIDEO_OVERLAY 0x00680244 +#define NV_PVIDEO_RED_CSC_OFFSET 0x00680280 +#define NV_PVIDEO_GREEN_CSC_OFFSET 0x00680284 +#define NV_PVIDEO_BLUE_CSC_OFFSET 0x00680288 +#define NV_PVIDEO_CSC_ADJUST 0x0068028c + +#endif diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index d21b3469f6d7..da74e216b71d 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -103,6 +103,8 @@ source "drivers/staging/line6/Kconfig" 
source "drivers/gpu/drm/radeon/Kconfig" +source "drivers/gpu/drm/nouveau/Kconfig" + source "drivers/staging/octeon/Kconfig" source "drivers/staging/serqt_usb2/Kconfig" diff --git a/include/drm/Kbuild b/include/drm/Kbuild index b940fdfa3b25..cfa6af43c9ea 100644 --- a/include/drm/Kbuild +++ b/include/drm/Kbuild @@ -8,3 +8,4 @@ unifdef-y += radeon_drm.h unifdef-y += sis_drm.h unifdef-y += savage_drm.h unifdef-y += via_drm.h +unifdef-y += nouveau_drm.h diff --git a/include/drm/i2c/ch7006.h b/include/drm/i2c/ch7006.h new file mode 100644 index 000000000000..8390b437a1f8 --- /dev/null +++ b/include/drm/i2c/ch7006.h @@ -0,0 +1,86 @@ +/* + * Copyright (C) 2009 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __DRM_I2C_CH7006_H__ +#define __DRM_I2C_CH7006_H__ + +/** + * struct ch7006_encoder_params + * + * Describes how the ch7006 is wired up with the GPU. It should be + * used as the @params parameter of its @set_config method. + * + * See "http://www.chrontel.com/pdf/7006.pdf" for their precise + * meaning. + */ +struct ch7006_encoder_params { + enum { + CH7006_FORMAT_RGB16 = 0, + CH7006_FORMAT_YCrCb24m16, + CH7006_FORMAT_RGB24m16, + CH7006_FORMAT_RGB15, + CH7006_FORMAT_RGB24m12C, + CH7006_FORMAT_RGB24m12I, + CH7006_FORMAT_RGB24m8, + CH7006_FORMAT_RGB16m8, + CH7006_FORMAT_RGB15m8, + CH7006_FORMAT_YCrCb24m8, + } input_format; + + enum { + CH7006_CLOCK_SLAVE = 0, + CH7006_CLOCK_MASTER, + } clock_mode; + + enum { + CH7006_CLOCK_EDGE_NEG = 0, + CH7006_CLOCK_EDGE_POS, + } clock_edge; + + int xcm, pcm; + + enum { + CH7006_SYNC_SLAVE = 0, + CH7006_SYNC_MASTER, + } sync_direction; + + enum { + CH7006_SYNC_SEPARATED = 0, + CH7006_SYNC_EMBEDDED, + } sync_encoding; + + enum { + CH7006_POUT_1_8V = 0, + CH7006_POUT_3_3V, + } pout_level; + + enum { + CH7006_ACTIVE_HSYNC = 0, + CH7006_ACTIVE_DSTART, + } active_detect; +}; + +#endif diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h new file mode 100644 index 000000000000..1e67c441ea82 --- /dev/null +++ b/include/drm/nouveau_drm.h @@ -0,0 +1,220 @@ +/* + * Copyright 2005 Stephane Marchesin. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NOUVEAU_DRM_H__ +#define __NOUVEAU_DRM_H__ + +#define NOUVEAU_DRM_HEADER_PATCHLEVEL 15 + +struct drm_nouveau_channel_alloc { + uint32_t fb_ctxdma_handle; + uint32_t tt_ctxdma_handle; + + int channel; + + /* Notifier memory */ + uint32_t notifier_handle; + + /* DRM-enforced subchannel assignments */ + struct { + uint32_t handle; + uint32_t grclass; + } subchan[8]; + uint32_t nr_subchan; +}; + +struct drm_nouveau_channel_free { + int channel; +}; + +struct drm_nouveau_grobj_alloc { + int channel; + uint32_t handle; + int class; +}; + +struct drm_nouveau_notifierobj_alloc { + uint32_t channel; + uint32_t handle; + uint32_t size; + uint32_t offset; +}; + +struct drm_nouveau_gpuobj_free { + int channel; + uint32_t handle; +}; + +/* FIXME : maybe unify {GET,SET}PARAMs */ +#define NOUVEAU_GETPARAM_PCI_VENDOR 3 +#define NOUVEAU_GETPARAM_PCI_DEVICE 4 +#define NOUVEAU_GETPARAM_BUS_TYPE 5 +#define NOUVEAU_GETPARAM_FB_PHYSICAL 6 +#define NOUVEAU_GETPARAM_AGP_PHYSICAL 7 +#define NOUVEAU_GETPARAM_FB_SIZE 8 +#define NOUVEAU_GETPARAM_AGP_SIZE 9 +#define NOUVEAU_GETPARAM_PCI_PHYSICAL 10 +#define NOUVEAU_GETPARAM_CHIPSET_ID 11 +#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12 +struct drm_nouveau_getparam { + uint64_t param; + uint64_t value; +}; + +struct drm_nouveau_setparam { + uint64_t param; + uint64_t value; +}; + +#define NOUVEAU_GEM_DOMAIN_CPU (1 << 0) +#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1) +#define NOUVEAU_GEM_DOMAIN_GART (1 << 2) +#define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3) + +struct drm_nouveau_gem_info { + uint32_t handle; + uint32_t domain; + uint64_t size; + uint64_t offset; + uint64_t map_handle; + uint32_t tile_mode; + uint32_t tile_flags; +}; + +struct drm_nouveau_gem_new { + struct drm_nouveau_gem_info info; + uint32_t channel_hint; + uint32_t align; +}; + +struct drm_nouveau_gem_pushbuf_bo { + uint64_t user_priv; + uint32_t handle; + uint32_t read_domains; + uint32_t write_domains; + uint32_t valid_domains; + uint32_t presumed_ok; + uint32_t presumed_domain; + uint64_t presumed_offset; +}; + +#define NOUVEAU_GEM_RELOC_LOW (1 << 0) +#define NOUVEAU_GEM_RELOC_HIGH (1 << 1) +#define NOUVEAU_GEM_RELOC_OR (1 << 2) +struct drm_nouveau_gem_pushbuf_reloc { + uint32_t bo_index; + uint32_t reloc_index; + uint32_t flags; + uint32_t data; + uint32_t vor; + uint32_t tor; +}; + +#define NOUVEAU_GEM_MAX_BUFFERS 1024 +#define NOUVEAU_GEM_MAX_RELOCS 1024 + +struct 
drm_nouveau_gem_pushbuf { + uint32_t channel; + uint32_t nr_dwords; + uint32_t nr_buffers; + uint32_t nr_relocs; + uint64_t dwords; + uint64_t buffers; + uint64_t relocs; +}; + +struct drm_nouveau_gem_pushbuf_call { + uint32_t channel; + uint32_t handle; + uint32_t offset; + uint32_t nr_buffers; + uint32_t nr_relocs; + uint32_t nr_dwords; + uint64_t buffers; + uint64_t relocs; + uint32_t suffix0; + uint32_t suffix1; + /* below only accessed for CALL2 */ + uint64_t vram_available; + uint64_t gart_available; +}; + +struct drm_nouveau_gem_pin { + uint32_t handle; + uint32_t domain; + uint64_t offset; +}; + +struct drm_nouveau_gem_unpin { + uint32_t handle; +}; + +#define NOUVEAU_GEM_CPU_PREP_NOWAIT 0x00000001 +#define NOUVEAU_GEM_CPU_PREP_NOBLOCK 0x00000002 +#define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004 +struct drm_nouveau_gem_cpu_prep { + uint32_t handle; + uint32_t flags; +}; + +struct drm_nouveau_gem_cpu_fini { + uint32_t handle; +}; + +struct drm_nouveau_gem_tile { + uint32_t handle; + uint32_t offset; + uint32_t size; + uint32_t tile_mode; + uint32_t tile_flags; +}; + +enum nouveau_bus_type { + NV_AGP = 0, + NV_PCI = 1, + NV_PCIE = 2, +}; + +struct drm_nouveau_sarea { +}; + +#define DRM_NOUVEAU_CARD_INIT 0x00 +#define DRM_NOUVEAU_GETPARAM 0x01 +#define DRM_NOUVEAU_SETPARAM 0x02 +#define DRM_NOUVEAU_CHANNEL_ALLOC 0x03 +#define DRM_NOUVEAU_CHANNEL_FREE 0x04 +#define DRM_NOUVEAU_GROBJ_ALLOC 0x05 +#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x06 +#define DRM_NOUVEAU_GPUOBJ_FREE 0x07 +#define DRM_NOUVEAU_GEM_NEW 0x40 +#define DRM_NOUVEAU_GEM_PUSHBUF 0x41 +#define DRM_NOUVEAU_GEM_PUSHBUF_CALL 0x42 +#define DRM_NOUVEAU_GEM_PIN 0x43 /* !KMS only */ +#define DRM_NOUVEAU_GEM_UNPIN 0x44 /* !KMS only */ +#define DRM_NOUVEAU_GEM_CPU_PREP 0x45 +#define DRM_NOUVEAU_GEM_CPU_FINI 0x46 +#define DRM_NOUVEAU_GEM_INFO 0x47 +#define DRM_NOUVEAU_GEM_PUSHBUF_CALL2 0x48 + +#endif /* __NOUVEAU_DRM_H__ */ -- cgit v1.2.3 From f8b7256096a20436f6d0926747e3ac3d64c81d24 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 30 Nov 2009 17:37:04 -0500 Subject: Unify sys_mmap* New helper - sys_mmap_pgoff(); switch syscalls to using it. Acked-by: David S. 
Miller Signed-off-by: Al Viro --- arch/alpha/kernel/osf_sys.c | 19 +++---- arch/arm/kernel/entry-common.S | 4 +- arch/arm/kernel/sys_arm.c | 30 +---------- arch/avr32/include/asm/syscalls.h | 4 -- arch/avr32/kernel/sys_avr32.c | 31 ------------ arch/avr32/kernel/syscall-stubs.S | 2 +- arch/blackfin/kernel/sys_bfin.c | 33 ------------ arch/blackfin/mach-common/entry.S | 2 +- arch/cris/kernel/sys_cris.c | 30 ++--------- arch/frv/kernel/sys_frv.c | 66 +----------------------- arch/h8300/kernel/sys_h8300.c | 83 +------------------------------ arch/h8300/kernel/syscalls.S | 2 +- arch/ia64/kernel/sys_ia64.c | 37 +------------- arch/m32r/kernel/sys_m32r.c | 24 --------- arch/m32r/kernel/syscall_table.S | 2 +- arch/m68k/kernel/sys_m68k.c | 83 +++---------------------------- arch/m68knommu/kernel/sys_m68k.c | 38 +------------- arch/m68knommu/kernel/syscalltable.S | 2 +- arch/microblaze/kernel/sys_microblaze.c | 38 ++------------ arch/microblaze/kernel/syscall_table.S | 2 +- arch/mips/kernel/linux32.c | 19 +------ arch/mips/kernel/syscall.c | 29 +---------- arch/mn10300/kernel/entry.S | 2 +- arch/mn10300/kernel/sys_mn10300.c | 31 +----------- arch/parisc/kernel/sys_parisc.c | 30 ++--------- arch/powerpc/kernel/syscalls.c | 15 +----- arch/s390/kernel/compat_linux.c | 32 ++---------- arch/s390/kernel/sys_s390.c | 30 +---------- arch/score/kernel/sys_score.c | 25 ++-------- arch/sh/kernel/sys_sh.c | 28 +---------- arch/sparc/kernel/sys_sparc_32.c | 31 ++---------- arch/sparc/kernel/sys_sparc_64.c | 22 +++----- arch/um/kernel/syscall.c | 28 +---------- arch/um/sys-i386/shared/sysdep/syscalls.h | 4 -- arch/x86/ia32/ia32entry.S | 2 +- arch/x86/ia32/sys_ia32.c | 43 +--------------- arch/x86/include/asm/sys_ia32.h | 3 -- arch/x86/include/asm/syscalls.h | 2 - arch/x86/kernel/sys_i386_32.c | 27 +--------- arch/x86/kernel/sys_x86_64.c | 17 +------ arch/x86/kernel/syscall_table_32.S | 2 +- arch/xtensa/include/asm/syscall.h | 2 - arch/xtensa/include/asm/unistd.h | 2 +- arch/xtensa/kernel/syscall.c | 25 ---------- include/linux/syscalls.h | 4 ++ mm/util.c | 29 +++++++++++ 46 files changed, 109 insertions(+), 907 deletions(-) (limited to 'include') diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index 9a3334ae282e..62619f25132f 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -178,25 +178,18 @@ SYSCALL_DEFINE6(osf_mmap, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, unsigned long, off) { - struct file *file = NULL; - unsigned long ret = -EBADF; + unsigned long ret = -EINVAL; #if 0 if (flags & (_MAP_HASSEMAPHORE | _MAP_INHERIT | _MAP_UNALIGNED)) printk("%s: unimplemented OSF mmap flags %04lx\n", current->comm, flags); #endif - if (!(flags & MAP_ANONYMOUS)) { - file = fget(fd); - if (!file) - goto out; - } - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - down_write(¤t->mm->mmap_sem); - ret = do_mmap(file, addr, len, prot, flags, off); - up_write(¤t->mm->mmap_sem); - if (file) - fput(file); + if ((off + PAGE_ALIGN(len)) < off) + goto out; + if (off & ~PAGE_MASK) + goto out; + ret = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); out: return ret; } diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index f0fe95b7085d..2c1db77d7848 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -416,12 +416,12 @@ sys_mmap2: tst r5, #PGOFF_MASK moveq r5, r5, lsr #PAGE_SHIFT - 12 streq r5, [sp, #4] - beq do_mmap2 + beq sys_mmap_pgoff mov r0, #-EINVAL mov 
pc, lr #else str r5, [sp, #4] - b do_mmap2 + b sys_mmap_pgoff #endif ENDPROC(sys_mmap2) diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c index 3b897444a9bd..ae4027bd01bd 100644 --- a/arch/arm/kernel/sys_arm.c +++ b/arch/arm/kernel/sys_arm.c @@ -28,34 +28,6 @@ #include #include -/* common code for old and new mmaps */ -inline long do_mmap2( - unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long pgoff) -{ - int error = -EINVAL; - struct file * file = NULL; - - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - - error = -EBADF; - if (!(flags & MAP_ANONYMOUS)) { - file = fget(fd); - if (!file) - goto out; - } - - down_write(¤t->mm->mmap_sem); - error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); - up_write(¤t->mm->mmap_sem); - - if (file) - fput(file); -out: - return error; -} - struct mmap_arg_struct { unsigned long addr; unsigned long len; @@ -77,7 +49,7 @@ asmlinkage int old_mmap(struct mmap_arg_struct __user *arg) if (a.offset & ~PAGE_MASK) goto out; - error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); + error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); out: return error; } diff --git a/arch/avr32/include/asm/syscalls.h b/arch/avr32/include/asm/syscalls.h index 483d666c27c0..66a197266637 100644 --- a/arch/avr32/include/asm/syscalls.h +++ b/arch/avr32/include/asm/syscalls.h @@ -29,10 +29,6 @@ asmlinkage int sys_sigaltstack(const stack_t __user *, stack_t __user *, struct pt_regs *); asmlinkage int sys_rt_sigreturn(struct pt_regs *); -/* kernel/sys_avr32.c */ -asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long, - unsigned long, unsigned long, off_t); - /* mm/cache.c */ asmlinkage int sys_cacheflush(int, void __user *, size_t); diff --git a/arch/avr32/kernel/sys_avr32.c b/arch/avr32/kernel/sys_avr32.c index 5d2daeaf356f..459349b5ed5a 100644 --- a/arch/avr32/kernel/sys_avr32.c +++ b/arch/avr32/kernel/sys_avr32.c @@ -5,39 +5,8 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ -#include -#include -#include -#include #include -#include -#include -#include - -asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, - unsigned long fd, off_t offset) -{ - int error = -EBADF; - struct file *file = NULL; - - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - if (!(flags & MAP_ANONYMOUS)) { - file = fget(fd); - if (!file) - return error; - } - - down_write(¤t->mm->mmap_sem); - error = do_mmap_pgoff(file, addr, len, prot, flags, offset); - up_write(¤t->mm->mmap_sem); - - if (file) - fput(file); - return error; -} - int kernel_execve(const char *file, char **argv, char **envp) { register long scno asm("r8") = __NR_execve; diff --git a/arch/avr32/kernel/syscall-stubs.S b/arch/avr32/kernel/syscall-stubs.S index f7244cd02fbb..0447a3e2ba64 100644 --- a/arch/avr32/kernel/syscall-stubs.S +++ b/arch/avr32/kernel/syscall-stubs.S @@ -61,7 +61,7 @@ __sys_execve: __sys_mmap2: pushm lr st.w --sp, ARG6 - call sys_mmap2 + call sys_mmap_pgoff sub sp, -4 popm pc diff --git a/arch/blackfin/kernel/sys_bfin.c b/arch/blackfin/kernel/sys_bfin.c index afcef129d4e8..2e7f8e10bf87 100644 --- a/arch/blackfin/kernel/sys_bfin.c +++ b/arch/blackfin/kernel/sys_bfin.c @@ -22,39 +22,6 @@ #include #include -/* common code for old and new mmaps */ -static inline long -do_mmap2(unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long pgoff) -{ - int error = -EBADF; - struct file *file = NULL; - - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - if (!(flags & MAP_ANONYMOUS)) { - file = fget(fd); - if (!file) - goto out; - } - - down_write(¤t->mm->mmap_sem); - error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); - up_write(¤t->mm->mmap_sem); - - if (file) - fput(file); - out: - return error; -} - -asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long pgoff) -{ - return do_mmap2(addr, len, prot, flags, fd, pgoff); -} - asmlinkage void *sys_sram_alloc(size_t size, unsigned long flags) { return sram_alloc_with_lsl(size, flags); diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S index a50637a8b9bd..f3f8bb46b517 100644 --- a/arch/blackfin/mach-common/entry.S +++ b/arch/blackfin/mach-common/entry.S @@ -1422,7 +1422,7 @@ ENTRY(_sys_call_table) .long _sys_ni_syscall /* streams2 */ .long _sys_vfork /* 190 */ .long _sys_getrlimit - .long _sys_mmap2 + .long _sys_mmap_pgoff .long _sys_truncate64 .long _sys_ftruncate64 .long _sys_stat64 /* 195 */ diff --git a/arch/cris/kernel/sys_cris.c b/arch/cris/kernel/sys_cris.c index 2ad962c7e88e..c2bbb1ac98a9 100644 --- a/arch/cris/kernel/sys_cris.c +++ b/arch/cris/kernel/sys_cris.c @@ -26,31 +26,6 @@ #include #include -/* common code for old and new mmaps */ -static inline long -do_mmap2(unsigned long addr, unsigned long len, unsigned long prot, - unsigned long flags, unsigned long fd, unsigned long pgoff) -{ - int error = -EBADF; - struct file * file = NULL; - - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - if (!(flags & MAP_ANONYMOUS)) { - file = fget(fd); - if (!file) - goto out; - } - - down_write(¤t->mm->mmap_sem); - error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); - up_write(¤t->mm->mmap_sem); - - if (file) - fput(file); -out: - return error; -} - asmlinkage unsigned long old_mmap(unsigned long __user *args) { unsigned long buffer[6]; @@ -63,7 +38,7 @@ asmlinkage unsigned long old_mmap(unsigned long __user *args) if (buffer[5] & 
~PAGE_MASK) /* verify that offset is on page boundary */ goto out; - err = do_mmap2(buffer[0], buffer[1], buffer[2], buffer[3], + err = sys_mmap_pgoff(buffer[0], buffer[1], buffer[2], buffer[3], buffer[4], buffer[5] >> PAGE_SHIFT); out: return err; @@ -73,7 +48,8 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff) { - return do_mmap2(addr, len, prot, flags, fd, pgoff); + /* bug(?): 8Kb pages here */ + return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); } /* diff --git a/arch/frv/kernel/sys_frv.c b/arch/frv/kernel/sys_frv.c index 2b6b5289cdcc..1d3d4c9e2521 100644 --- a/arch/frv/kernel/sys_frv.c +++ b/arch/frv/kernel/sys_frv.c @@ -31,9 +31,6 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff) { - int error = -EBADF; - struct file * file = NULL; - /* As with sparc32, make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE we have.... */ @@ -41,69 +38,10 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, trying to map something we can't */ if (pgoff & ((1 << (PAGE_SHIFT - 12)) - 1)) return -EINVAL; - pgoff >>= PAGE_SHIFT - 12; - - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - if (!(flags & MAP_ANONYMOUS)) { - file = fget(fd); - if (!file) - goto out; - } - - down_write(¤t->mm->mmap_sem); - error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); - up_write(¤t->mm->mmap_sem); - - if (file) - fput(file); -out: - return error; -} - -#if 0 /* DAVIDM - do we want this */ -struct mmap_arg_struct64 { - __u32 addr; - __u32 len; - __u32 prot; - __u32 flags; - __u64 offset; /* 64 bits */ - __u32 fd; -}; - -asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg) -{ - int error = -EFAULT; - struct file * file = NULL; - struct mmap_arg_struct64 a; - unsigned long pgoff; - - if (copy_from_user(&a, arg, sizeof(a))) - return -EFAULT; - - if ((long)a.offset & ~PAGE_MASK) - return -EINVAL; - - pgoff = a.offset >> PAGE_SHIFT; - if ((a.offset >> PAGE_SHIFT) != pgoff) - return -EINVAL; - - if (!(a.flags & MAP_ANONYMOUS)) { - error = -EBADF; - file = fget(a.fd); - if (!file) - goto out; - } - a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - down_write(¤t->mm->mmap_sem); - error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff); - up_write(¤t->mm->mmap_sem); - if (file) - fput(file); -out: - return error; + return sys_mmap_pgoff(addr, len, prot, flags, fd, + pgoff >> (PAGE_SHIFT - 12)); } -#endif /* * sys_ipc() is the de-multiplexer for the SysV IPC calls.. 
diff --git a/arch/h8300/kernel/sys_h8300.c b/arch/h8300/kernel/sys_h8300.c index 8cb5d73a0e35..b5969db0ca10 100644 --- a/arch/h8300/kernel/sys_h8300.c +++ b/arch/h8300/kernel/sys_h8300.c @@ -26,39 +26,6 @@ #include #include -/* common code for old and new mmaps */ -static inline long do_mmap2( - unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long pgoff) -{ - int error = -EBADF; - struct file * file = NULL; - - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - if (!(flags & MAP_ANONYMOUS)) { - file = fget(fd); - if (!file) - goto out; - } - - down_write(¤t->mm->mmap_sem); - error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); - up_write(¤t->mm->mmap_sem); - - if (file) - fput(file); -out: - return error; -} - -asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long pgoff) -{ - return do_mmap2(addr, len, prot, flags, fd, pgoff); -} - /* * Perform the select(nd, in, out, ex, tv) and mmap() system * calls. Linux/m68k cloned Linux/i386, which didn't use to be able to @@ -87,57 +54,11 @@ asmlinkage int old_mmap(struct mmap_arg_struct *arg) if (a.offset & ~PAGE_MASK) goto out; - a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - - error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); -out: - return error; -} - -#if 0 /* DAVIDM - do we want this */ -struct mmap_arg_struct64 { - __u32 addr; - __u32 len; - __u32 prot; - __u32 flags; - __u64 offset; /* 64 bits */ - __u32 fd; -}; - -asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg) -{ - int error = -EFAULT; - struct file * file = NULL; - struct mmap_arg_struct64 a; - unsigned long pgoff; - - if (copy_from_user(&a, arg, sizeof(a))) - return -EFAULT; - - if ((long)a.offset & ~PAGE_MASK) - return -EINVAL; - - pgoff = a.offset >> PAGE_SHIFT; - if ((a.offset >> PAGE_SHIFT) != pgoff) - return -EINVAL; - - if (!(a.flags & MAP_ANONYMOUS)) { - error = -EBADF; - file = fget(a.fd); - if (!file) - goto out; - } - a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - - down_write(¤t->mm->mmap_sem); - error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff); - up_write(¤t->mm->mmap_sem); - if (file) - fput(file); + error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, + a.offset >> PAGE_SHIFT); out: return error; } -#endif struct sel_arg_struct { unsigned long n; diff --git a/arch/h8300/kernel/syscalls.S b/arch/h8300/kernel/syscalls.S index 4eb67faac633..2d69881eda6a 100644 --- a/arch/h8300/kernel/syscalls.S +++ b/arch/h8300/kernel/syscalls.S @@ -206,7 +206,7 @@ SYMBOL_NAME_LABEL(sys_call_table) .long SYMBOL_NAME(sys_ni_syscall) /* streams2 */ .long SYMBOL_NAME(sys_vfork) /* 190 */ .long SYMBOL_NAME(sys_getrlimit) - .long SYMBOL_NAME(sys_mmap2) + .long SYMBOL_NAME(sys_mmap_pgoff) .long SYMBOL_NAME(sys_truncate64) .long SYMBOL_NAME(sys_ftruncate64) .long SYMBOL_NAME(sys_stat64) /* 195 */ diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c index 92ed83f34036..ae384a2974c2 100644 --- a/arch/ia64/kernel/sys_ia64.c +++ b/arch/ia64/kernel/sys_ia64.c @@ -185,39 +185,6 @@ int ia64_mmap_check(unsigned long addr, unsigned long len, return 0; } -static inline unsigned long -do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, unsigned long pgoff) -{ - struct file *file = NULL; - - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - if (!(flags & MAP_ANONYMOUS)) { - file = fget(fd); - if (!file) - return -EBADF; - - if (!file->f_op || 
!file->f_op->mmap) { - addr = -ENODEV; - goto out; - } - } - - /* Careful about overflows.. */ - len = PAGE_ALIGN(len); - if (!len || len > TASK_SIZE) { - addr = -EINVAL; - goto out; - } - - down_write(¤t->mm->mmap_sem); - addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); - up_write(¤t->mm->mmap_sem); - -out: if (file) - fput(file); - return addr; -} - /* * mmap2() is like mmap() except that the offset is expressed in units * of PAGE_SIZE (instead of bytes). This allows to mmap2() (pieces @@ -226,7 +193,7 @@ out: if (file) asmlinkage unsigned long sys_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, long pgoff) { - addr = do_mmap2(addr, len, prot, flags, fd, pgoff); + addr = sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); if (!IS_ERR((void *) addr)) force_successful_syscall_return(); return addr; @@ -238,7 +205,7 @@ sys_mmap (unsigned long addr, unsigned long len, int prot, int flags, int fd, lo if (offset_in_page(off) != 0) return -EINVAL; - addr = do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT); + addr = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); if (!IS_ERR((void *) addr)) force_successful_syscall_return(); return addr; diff --git a/arch/m32r/kernel/sys_m32r.c b/arch/m32r/kernel/sys_m32r.c index 305ac852bbed..d3c865c5a6ba 100644 --- a/arch/m32r/kernel/sys_m32r.c +++ b/arch/m32r/kernel/sys_m32r.c @@ -76,30 +76,6 @@ asmlinkage int sys_tas(int __user *addr) return oldval; } -asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long pgoff) -{ - int error = -EBADF; - struct file *file = NULL; - - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - if (!(flags & MAP_ANONYMOUS)) { - file = fget(fd); - if (!file) - goto out; - } - - down_write(¤t->mm->mmap_sem); - error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); - up_write(¤t->mm->mmap_sem); - - if (file) - fput(file); -out: - return error; -} - /* * sys_ipc() is the de-multiplexer for the SysV IPC calls.. 
* diff --git a/arch/m32r/kernel/syscall_table.S b/arch/m32r/kernel/syscall_table.S index aa3bf4cfab37..60536e271233 100644 --- a/arch/m32r/kernel/syscall_table.S +++ b/arch/m32r/kernel/syscall_table.S @@ -191,7 +191,7 @@ ENTRY(sys_call_table) .long sys_ni_syscall /* streams2 */ .long sys_vfork /* 190 */ .long sys_getrlimit - .long sys_mmap2 + .long sys_mmap_pgoff .long sys_truncate64 .long sys_ftruncate64 .long sys_stat64 /* 195 */ diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c index 7deb402bfc75..218f441de667 100644 --- a/arch/m68k/kernel/sys_m68k.c +++ b/arch/m68k/kernel/sys_m68k.c @@ -29,37 +29,16 @@ #include #include -/* common code for old and new mmaps */ -static inline long do_mmap2( - unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long pgoff) -{ - int error = -EBADF; - struct file * file = NULL; - - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - if (!(flags & MAP_ANONYMOUS)) { - file = fget(fd); - if (!file) - goto out; - } - - down_write(¤t->mm->mmap_sem); - error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); - up_write(¤t->mm->mmap_sem); - - if (file) - fput(file); -out: - return error; -} - asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff) { - return do_mmap2(addr, len, prot, flags, fd, pgoff); + /* + * This is wrong for sun3 - there PAGE_SIZE is 8Kb, + * so we need to shift the argument down by 1; m68k mmap64(3) + * (in libc) expects the last argument of mmap2 in 4Kb units. + */ + return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); } /* @@ -90,57 +69,11 @@ asmlinkage int old_mmap(struct mmap_arg_struct __user *arg) if (a.offset & ~PAGE_MASK) goto out; - a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - - error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); -out: - return error; -} - -#if 0 -struct mmap_arg_struct64 { - __u32 addr; - __u32 len; - __u32 prot; - __u32 flags; - __u64 offset; /* 64 bits */ - __u32 fd; -}; - -asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg) -{ - int error = -EFAULT; - struct file * file = NULL; - struct mmap_arg_struct64 a; - unsigned long pgoff; - - if (copy_from_user(&a, arg, sizeof(a))) - return -EFAULT; - - if ((long)a.offset & ~PAGE_MASK) - return -EINVAL; - - pgoff = a.offset >> PAGE_SHIFT; - if ((a.offset >> PAGE_SHIFT) != pgoff) - return -EINVAL; - - if (!(a.flags & MAP_ANONYMOUS)) { - error = -EBADF; - file = fget(a.fd); - if (!file) - goto out; - } - a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - - down_write(¤t->mm->mmap_sem); - error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff); - up_write(¤t->mm->mmap_sem); - if (file) - fput(file); + error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, + a.offset >> PAGE_SHIFT); out: return error; } -#endif struct sel_arg_struct { unsigned long n; diff --git a/arch/m68knommu/kernel/sys_m68k.c b/arch/m68knommu/kernel/sys_m68k.c index efdd090778a3..b67cbc735a9b 100644 --- a/arch/m68knommu/kernel/sys_m68k.c +++ b/arch/m68knommu/kernel/sys_m68k.c @@ -27,39 +27,6 @@ #include #include -/* common code for old and new mmaps */ -static inline long do_mmap2( - unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long pgoff) -{ - int error = -EBADF; - struct file * file = NULL; - - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - if (!(flags & MAP_ANONYMOUS)) { - file = fget(fd); - if (!file) - goto out; - } - 
- down_write(¤t->mm->mmap_sem); - error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); - up_write(¤t->mm->mmap_sem); - - if (file) - fput(file); -out: - return error; -} - -asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long pgoff) -{ - return do_mmap2(addr, len, prot, flags, fd, pgoff); -} - /* * Perform the select(nd, in, out, ex, tv) and mmap() system * calls. Linux/m68k cloned Linux/i386, which didn't use to be able to @@ -88,9 +55,8 @@ asmlinkage int old_mmap(struct mmap_arg_struct *arg) if (a.offset & ~PAGE_MASK) goto out; - a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - - error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); + error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, + a.offset >> PAGE_SHIFT); out: return error; } diff --git a/arch/m68knommu/kernel/syscalltable.S b/arch/m68knommu/kernel/syscalltable.S index 23535cc415ae..486837efa3d7 100644 --- a/arch/m68knommu/kernel/syscalltable.S +++ b/arch/m68knommu/kernel/syscalltable.S @@ -210,7 +210,7 @@ ENTRY(sys_call_table) .long sys_ni_syscall /* streams2 */ .long sys_vfork /* 190 */ .long sys_getrlimit - .long sys_mmap2 + .long sys_mmap_pgoff .long sys_truncate64 .long sys_ftruncate64 .long sys_stat64 /* 195 */ diff --git a/arch/microblaze/kernel/sys_microblaze.c b/arch/microblaze/kernel/sys_microblaze.c index 07cabed4b947..9f3c205fb75b 100644 --- a/arch/microblaze/kernel/sys_microblaze.c +++ b/arch/microblaze/kernel/sys_microblaze.c @@ -62,46 +62,14 @@ out: return error; } -asmlinkage long -sys_mmap2(unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long pgoff) -{ - struct file *file = NULL; - int ret = -EBADF; - - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - if (!(flags & MAP_ANONYMOUS)) { - file = fget(fd); - if (!file) { - printk(KERN_INFO "no fd in mmap\r\n"); - goto out; - } - } - - down_write(¤t->mm->mmap_sem); - ret = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); - up_write(¤t->mm->mmap_sem); - if (file) - fput(file); -out: - return ret; -} - asmlinkage long sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, off_t pgoff) { - int err = -EINVAL; - - if (pgoff & ~PAGE_MASK) { - printk(KERN_INFO "no pagemask in mmap\r\n"); - goto out; - } + if (pgoff & ~PAGE_MASK) + return -EINVAL; - err = sys_mmap2(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT); -out: - return err; + return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT); } /* diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S index c1ab1dc10898..b96f365ea6b1 100644 --- a/arch/microblaze/kernel/syscall_table.S +++ b/arch/microblaze/kernel/syscall_table.S @@ -196,7 +196,7 @@ ENTRY(sys_call_table) .long sys_ni_syscall /* reserved for streams2 */ .long sys_vfork /* 190 */ .long sys_getrlimit - .long sys_mmap2 /* mmap2 */ + .long sys_mmap_pgoff /* mmap2 */ .long sys_truncate64 .long sys_ftruncate64 .long sys_stat64 /* 195 */ diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c index 1a2793efdc4e..f042563c924f 100644 --- a/arch/mips/kernel/linux32.c +++ b/arch/mips/kernel/linux32.c @@ -67,28 +67,13 @@ SYSCALL_DEFINE6(32_mmap2, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, unsigned long, pgoff) { - struct file * file = NULL; unsigned long error; error = -EINVAL; if (pgoff & (~PAGE_MASK >> 12)) 
goto out; - pgoff >>= PAGE_SHIFT-12; - - if (!(flags & MAP_ANONYMOUS)) { - error = -EBADF; - file = fget(fd); - if (!file) - goto out; - } - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - - down_write(¤t->mm->mmap_sem); - error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); - up_write(¤t->mm->mmap_sem); - if (file) - fput(file); - + error = sys_mmap_pgoff(addr, len, prot, flags, fd, + pgoff >> (PAGE_SHIFT-12)); out: return error; } diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index fe0d79805603..c25b2e7dcb7b 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -129,31 +129,6 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, } } -/* common code for old and new mmaps */ -static inline unsigned long -do_mmap2(unsigned long addr, unsigned long len, unsigned long prot, - unsigned long flags, unsigned long fd, unsigned long pgoff) -{ - unsigned long error = -EBADF; - struct file * file = NULL; - - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - if (!(flags & MAP_ANONYMOUS)) { - file = fget(fd); - if (!file) - goto out; - } - - down_write(¤t->mm->mmap_sem); - error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); - up_write(¤t->mm->mmap_sem); - - if (file) - fput(file); -out: - return error; -} - SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, off_t, offset) @@ -164,7 +139,7 @@ SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len, if (offset & ~PAGE_MASK) goto out; - result = do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); + result = sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); out: return result; @@ -177,7 +152,7 @@ SYSCALL_DEFINE6(mips_mmap2, unsigned long, addr, unsigned long, len, if (pgoff & (~PAGE_MASK >> 12)) return -EINVAL; - return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT-12)); + return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT-12)); } save_static_function(sys_fork); diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S index a94e7ea3faa6..c9ee6c009d79 100644 --- a/arch/mn10300/kernel/entry.S +++ b/arch/mn10300/kernel/entry.S @@ -578,7 +578,7 @@ ENTRY(sys_call_table) .long sys_ni_syscall /* reserved for streams2 */ .long sys_vfork /* 190 */ .long sys_getrlimit - .long sys_mmap2 + .long sys_mmap_pgoff .long sys_truncate64 .long sys_ftruncate64 .long sys_stat64 /* 195 */ diff --git a/arch/mn10300/kernel/sys_mn10300.c b/arch/mn10300/kernel/sys_mn10300.c index ec4100dfcb7d..17cc6ce04e84 100644 --- a/arch/mn10300/kernel/sys_mn10300.c +++ b/arch/mn10300/kernel/sys_mn10300.c @@ -23,42 +23,13 @@ #include -/* - * memory mapping syscall - */ -asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long pgoff) -{ - struct file *file = NULL; - long error = -EINVAL; - - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - - error = -EBADF; - if (!(flags & MAP_ANONYMOUS)) { - file = fget(fd); - if (!file) - goto out; - } - - down_write(¤t->mm->mmap_sem); - error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); - up_write(¤t->mm->mmap_sem); - - if (file) - fput(file); -out: - return error; -} - asmlinkage long old_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long offset) { if (offset & ~PAGE_MASK) return -EINVAL; - return sys_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); + return 
sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); } struct sel_arg_struct { diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index 71b31957c8f1..9147391afb03 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -110,37 +110,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, return addr; } -static unsigned long do_mmap2(unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, unsigned long fd, - unsigned long pgoff) -{ - struct file * file = NULL; - unsigned long error = -EBADF; - if (!(flags & MAP_ANONYMOUS)) { - file = fget(fd); - if (!file) - goto out; - } - - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - - down_write(¤t->mm->mmap_sem); - error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); - up_write(¤t->mm->mmap_sem); - - if (file != NULL) - fput(file); -out: - return error; -} - asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff) { /* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE we have. */ - return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT - 12)); + return sys_mmap_pgoff(addr, len, prot, flags, fd, + pgoff >> (PAGE_SHIFT - 12)); } asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len, @@ -148,7 +125,8 @@ asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len, unsigned long offset) { if (!(offset & ~PAGE_MASK)) { - return do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); + return sys_mmap_pgoff(addr, len, prot, flags, fd, + offset >> PAGE_SHIFT); } else { return -EINVAL; } diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c index c04832c4a02e..3370e62e43d4 100644 --- a/arch/powerpc/kernel/syscalls.c +++ b/arch/powerpc/kernel/syscalls.c @@ -140,7 +140,6 @@ static inline unsigned long do_mmap2(unsigned long addr, size_t len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long off, int shift) { - struct file * file = NULL; unsigned long ret = -EINVAL; if (!arch_validate_prot(prot)) @@ -151,20 +150,8 @@ static inline unsigned long do_mmap2(unsigned long addr, size_t len, goto out; off >>= shift; } - - ret = -EBADF; - if (!(flags & MAP_ANONYMOUS)) { - if (!(file = fget(fd))) - goto out; - } - - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - down_write(¤t->mm->mmap_sem); - ret = do_mmap_pgoff(file, addr, len, prot, flags, off); - up_write(¤t->mm->mmap_sem); - if (file) - fput(file); + ret = sys_mmap_pgoff(addr, len, prot, flags, fd, off); out: return ret; } diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index 11556aa6bf17..22c9e557bb22 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c @@ -624,33 +624,6 @@ struct mmap_arg_struct_emu31 { u32 offset; }; -/* common code for old and new mmaps */ -static inline long do_mmap2( - unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long pgoff) -{ - struct file * file = NULL; - unsigned long error = -EBADF; - - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - if (!(flags & MAP_ANONYMOUS)) { - file = fget(fd); - if (!file) - goto out; - } - - down_write(¤t->mm->mmap_sem); - error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); - up_write(¤t->mm->mmap_sem); - - if (file) - fput(file); -out: - return error; -} - - asmlinkage unsigned long old32_mmap(struct 
mmap_arg_struct_emu31 __user *arg) { @@ -664,7 +637,8 @@ old32_mmap(struct mmap_arg_struct_emu31 __user *arg) if (a.offset & ~PAGE_MASK) goto out; - error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); + error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, + a.offset >> PAGE_SHIFT); out: return error; } @@ -677,7 +651,7 @@ sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg) if (copy_from_user(&a, arg, sizeof(a))) goto out; - error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset); + error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset); out: return error; } diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c index e9d94f61d500..86a74c9c9e63 100644 --- a/arch/s390/kernel/sys_s390.c +++ b/arch/s390/kernel/sys_s390.c @@ -32,32 +32,6 @@ #include #include "entry.h" -/* common code for old and new mmaps */ -static inline long do_mmap2( - unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long pgoff) -{ - long error = -EBADF; - struct file * file = NULL; - - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - if (!(flags & MAP_ANONYMOUS)) { - file = fget(fd); - if (!file) - goto out; - } - - down_write(¤t->mm->mmap_sem); - error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); - up_write(¤t->mm->mmap_sem); - - if (file) - fput(file); -out: - return error; -} - /* * Perform the select(nd, in, out, ex, tv) and mmap() system * calls. Linux for S/390 isn't able to handle more than 5 @@ -81,7 +55,7 @@ SYSCALL_DEFINE1(mmap2, struct mmap_arg_struct __user *, arg) if (copy_from_user(&a, arg, sizeof(a))) goto out; - error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset); + error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset); out: return error; } @@ -98,7 +72,7 @@ SYSCALL_DEFINE1(s390_old_mmap, struct mmap_arg_struct __user *, arg) if (a.offset & ~PAGE_MASK) goto out; - error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); + error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); out: return error; } diff --git a/arch/score/kernel/sys_score.c b/arch/score/kernel/sys_score.c index 001249469866..3d6a67dd628c 100644 --- a/arch/score/kernel/sys_score.c +++ b/arch/score/kernel/sys_score.c @@ -36,34 +36,15 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff) { - int error = -EBADF; - struct file *file = NULL; - - if (pgoff & (~PAGE_MASK >> 12)) - return -EINVAL; - - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - if (!(flags & MAP_ANONYMOUS)) { - file = fget(fd); - if (!file) - return error; - } - - down_write(¤t->mm->mmap_sem); - error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); - up_write(¤t->mm->mmap_sem); - - if (file) - fput(file); - - return error; + return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); } asmlinkage long sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, off_t pgoff) { - return sys_mmap2(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT); + /* where's the alignment check? 
*/ + return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT); } asmlinkage long diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c index 8aa5d1ceaf14..71399cde03b5 100644 --- a/arch/sh/kernel/sys_sh.c +++ b/arch/sh/kernel/sys_sh.c @@ -28,37 +28,13 @@ #include #include -static inline long -do_mmap2(unsigned long addr, unsigned long len, unsigned long prot, - unsigned long flags, int fd, unsigned long pgoff) -{ - int error = -EBADF; - struct file *file = NULL; - - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - if (!(flags & MAP_ANONYMOUS)) { - file = fget(fd); - if (!file) - goto out; - } - - down_write(¤t->mm->mmap_sem); - error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); - up_write(¤t->mm->mmap_sem); - - if (file) - fput(file); -out: - return error; -} - asmlinkage int old_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, int fd, unsigned long off) { if (off & ~PAGE_MASK) return -EINVAL; - return do_mmap2(addr, len, prot, flags, fd, off>>PAGE_SHIFT); + return sys_mmap_pgoff(addr, len, prot, flags, fd, off>>PAGE_SHIFT); } asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, @@ -74,7 +50,7 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, pgoff >>= PAGE_SHIFT - 12; - return do_mmap2(addr, len, prot, flags, fd, pgoff); + return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); } /* diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c index 10c43bea32c7..36f6f26d9cec 100644 --- a/arch/sparc/kernel/sys_sparc_32.c +++ b/arch/sparc/kernel/sys_sparc_32.c @@ -234,31 +234,6 @@ int sparc_mmap_check(unsigned long addr, unsigned long len) } /* Linux version of mmap */ -static unsigned long do_mmap2(unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, unsigned long fd, - unsigned long pgoff) -{ - struct file * file = NULL; - unsigned long retval = -EBADF; - - if (!(flags & MAP_ANONYMOUS)) { - file = fget(fd); - if (!file) - goto out; - } - - len = PAGE_ALIGN(len); - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - - down_write(¤t->mm->mmap_sem); - retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); - up_write(¤t->mm->mmap_sem); - - if (file) - fput(file); -out: - return retval; -} asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, @@ -266,14 +241,16 @@ asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len, { /* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE we have. */ - return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT - 12)); + return sys_mmap_pgoff(addr, len, prot, flags, fd, + pgoff >> (PAGE_SHIFT - 12)); } asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long off) { - return do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT); + /* no alignment check? 
*/ + return sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); } long sparc_remap_file_pages(unsigned long start, unsigned long size, diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index d498b32c75f6..8f9cd58497de 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -572,23 +572,13 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, unsigned long, off) { - struct file * file = NULL; - unsigned long retval = -EBADF; + unsigned long retval = -EINVAL; - if (!(flags & MAP_ANONYMOUS)) { - file = fget(fd); - if (!file) - goto out; - } - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - len = PAGE_ALIGN(len); - - down_write(¤t->mm->mmap_sem); - retval = do_mmap(file, addr, len, prot, flags, off); - up_write(¤t->mm->mmap_sem); - - if (file) - fput(file); + if ((off + PAGE_ALIGN(len)) < off) + goto out; + if (off & ~PAGE_MASK) + goto out; + retval = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); out: return retval; } diff --git a/arch/um/kernel/syscall.c b/arch/um/kernel/syscall.c index a4625c7b2bf9..cccab850c27e 100644 --- a/arch/um/kernel/syscall.c +++ b/arch/um/kernel/syscall.c @@ -8,6 +8,7 @@ #include "linux/mm.h" #include "linux/sched.h" #include "linux/utsname.h" +#include "linux/syscalls.h" #include "asm/current.h" #include "asm/mman.h" #include "asm/uaccess.h" @@ -37,31 +38,6 @@ long sys_vfork(void) return ret; } -/* common code for old and new mmaps */ -long sys_mmap2(unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long pgoff) -{ - long error = -EBADF; - struct file * file = NULL; - - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - if (!(flags & MAP_ANONYMOUS)) { - file = fget(fd); - if (!file) - goto out; - } - - down_write(¤t->mm->mmap_sem); - error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); - up_write(¤t->mm->mmap_sem); - - if (file) - fput(file); - out: - return error; -} - long old_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long offset) @@ -70,7 +46,7 @@ long old_mmap(unsigned long addr, unsigned long len, if (offset & ~PAGE_MASK) goto out; - err = sys_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); + err = sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); out: return err; } diff --git a/arch/um/sys-i386/shared/sysdep/syscalls.h b/arch/um/sys-i386/shared/sysdep/syscalls.h index 905698197e35..e7787679e317 100644 --- a/arch/um/sys-i386/shared/sysdep/syscalls.h +++ b/arch/um/sys-i386/shared/sysdep/syscalls.h @@ -20,7 +20,3 @@ extern syscall_handler_t *sys_call_table[]; #define EXECUTE_SYSCALL(syscall, regs) \ ((long (*)(struct syscall_args)) \ (*sys_call_table[syscall]))(SYSCALL_ARGS(®s->regs)) - -extern long sys_mmap2(unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long pgoff); diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 4eefdca9832b..53147ad85b96 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -696,7 +696,7 @@ ia32_sys_call_table: .quad quiet_ni_syscall /* streams2 */ .quad stub32_vfork /* 190 */ .quad compat_sys_getrlimit - .quad sys32_mmap2 + .quad sys_mmap_pgoff .quad sys32_truncate64 .quad sys32_ftruncate64 .quad sys32_stat64 /* 195 */ diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c index df82c0e48ded..422572c77923 
100644 --- a/arch/x86/ia32/sys_ia32.c +++ b/arch/x86/ia32/sys_ia32.c @@ -155,9 +155,6 @@ struct mmap_arg_struct { asmlinkage long sys32_mmap(struct mmap_arg_struct __user *arg) { struct mmap_arg_struct a; - struct file *file = NULL; - unsigned long retval; - struct mm_struct *mm ; if (copy_from_user(&a, arg, sizeof(a))) return -EFAULT; @@ -165,22 +162,8 @@ asmlinkage long sys32_mmap(struct mmap_arg_struct __user *arg) if (a.offset & ~PAGE_MASK) return -EINVAL; - if (!(a.flags & MAP_ANONYMOUS)) { - file = fget(a.fd); - if (!file) - return -EBADF; - } - - mm = current->mm; - down_write(&mm->mmap_sem); - retval = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, + return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset>>PAGE_SHIFT); - if (file) - fput(file); - - up_write(&mm->mmap_sem); - - return retval; } asmlinkage long sys32_mprotect(unsigned long start, size_t len, @@ -483,30 +466,6 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd, return ret; } -asmlinkage long sys32_mmap2(unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long pgoff) -{ - struct mm_struct *mm = current->mm; - unsigned long error; - struct file *file = NULL; - - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - if (!(flags & MAP_ANONYMOUS)) { - file = fget(fd); - if (!file) - return -EBADF; - } - - down_write(&mm->mmap_sem); - error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); - up_write(&mm->mmap_sem); - - if (file) - fput(file); - return error; -} - asmlinkage long sys32_olduname(struct oldold_utsname __user *name) { char *arch = "x86_64"; diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h index 9af9decb38c3..4a5a089e1c62 100644 --- a/arch/x86/include/asm/sys_ia32.h +++ b/arch/x86/include/asm/sys_ia32.h @@ -57,9 +57,6 @@ asmlinkage long sys32_pwrite(unsigned int, char __user *, u32, u32, u32); asmlinkage long sys32_personality(unsigned long); asmlinkage long sys32_sendfile(int, int, compat_off_t __user *, s32); -asmlinkage long sys32_mmap2(unsigned long, unsigned long, unsigned long, - unsigned long, unsigned long, unsigned long); - struct oldold_utsname; struct old_utsname; asmlinkage long sys32_olduname(struct oldold_utsname __user *); diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h index 372b76edd63f..1bb6e395881c 100644 --- a/arch/x86/include/asm/syscalls.h +++ b/arch/x86/include/asm/syscalls.h @@ -55,8 +55,6 @@ struct sel_arg_struct; struct oldold_utsname; struct old_utsname; -asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long, - unsigned long, unsigned long, unsigned long); asmlinkage int old_mmap(struct mmap_arg_struct __user *); asmlinkage int old_select(struct sel_arg_struct __user *); asmlinkage int sys_ipc(uint, int, int, int, void __user *, long); diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c index 1884a8d12bfa..dee1ff7cba58 100644 --- a/arch/x86/kernel/sys_i386_32.c +++ b/arch/x86/kernel/sys_i386_32.c @@ -24,31 +24,6 @@ #include -asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long pgoff) -{ - int error = -EBADF; - struct file *file = NULL; - struct mm_struct *mm = current->mm; - - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - if (!(flags & MAP_ANONYMOUS)) { - file = fget(fd); - if (!file) - goto out; - } - - down_write(&mm->mmap_sem); - error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); - up_write(&mm->mmap_sem); - - 
if (file) - fput(file); -out: - return error; -} - /* * Perform the select(nd, in, out, ex, tv) and mmap() system * calls. Linux/i386 didn't use to be able to handle more than @@ -77,7 +52,7 @@ asmlinkage int old_mmap(struct mmap_arg_struct __user *arg) if (a.offset & ~PAGE_MASK) goto out; - err = sys_mmap2(a.addr, a.len, a.prot, a.flags, + err = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); out: return err; diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index 45e00eb09c3a..8aa2057efd12 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c @@ -23,26 +23,11 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, unsigned long, fd, unsigned long, off) { long error; - struct file *file; - error = -EINVAL; if (off & ~PAGE_MASK) goto out; - error = -EBADF; - file = NULL; - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - if (!(flags & MAP_ANONYMOUS)) { - file = fget(fd); - if (!file) - goto out; - } - down_write(¤t->mm->mmap_sem); - error = do_mmap_pgoff(file, addr, len, prot, flags, off >> PAGE_SHIFT); - up_write(¤t->mm->mmap_sem); - - if (file) - fput(file); + error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); out: return error; } diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S index 70c2125d55b9..15228b5d3eb7 100644 --- a/arch/x86/kernel/syscall_table_32.S +++ b/arch/x86/kernel/syscall_table_32.S @@ -191,7 +191,7 @@ ENTRY(sys_call_table) .long sys_ni_syscall /* reserved for streams2 */ .long ptregs_vfork /* 190 */ .long sys_getrlimit - .long sys_mmap2 + .long sys_mmap_pgoff .long sys_truncate64 .long sys_ftruncate64 .long sys_stat64 /* 195 */ diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h index 05cebf8f62b1..4352dbe1186a 100644 --- a/arch/xtensa/include/asm/syscall.h +++ b/arch/xtensa/include/asm/syscall.h @@ -13,8 +13,6 @@ struct sigaction; asmlinkage long xtensa_execve(char*, char**, char**, struct pt_regs*); asmlinkage long xtensa_clone(unsigned long, unsigned long, struct pt_regs*); asmlinkage long xtensa_pipe(int __user *); -asmlinkage long xtensa_mmap2(unsigned long, unsigned long, unsigned long, - unsigned long, unsigned long, unsigned long); asmlinkage long xtensa_ptrace(long, long, long, long); asmlinkage long xtensa_sigreturn(struct pt_regs*); asmlinkage long xtensa_rt_sigreturn(struct pt_regs*); diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h index 4e55dc763021..fbf318b3af3e 100644 --- a/arch/xtensa/include/asm/unistd.h +++ b/arch/xtensa/include/asm/unistd.h @@ -189,7 +189,7 @@ __SYSCALL( 79, sys_fremovexattr, 2) /* File Map / Shared Memory Operations */ #define __NR_mmap2 80 -__SYSCALL( 80, xtensa_mmap2, 6) +__SYSCALL( 80, sys_mmap_pgoff, 6) #define __NR_munmap 81 __SYSCALL( 81, sys_munmap, 2) #define __NR_mprotect 82 diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c index ac15ecbdf919..1e67bab775c1 100644 --- a/arch/xtensa/kernel/syscall.c +++ b/arch/xtensa/kernel/syscall.c @@ -57,31 +57,6 @@ asmlinkage long xtensa_pipe(int __user *userfds) return error; } - -asmlinkage long xtensa_mmap2(unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long pgoff) -{ - int error = -EBADF; - struct file * file = NULL; - - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - if (!(flags & MAP_ANONYMOUS)) { - file = fget(fd); - if (!file) - goto out; - } - - down_write(¤t->mm->mmap_sem); - error = 
do_mmap_pgoff(file, addr, len, prot, flags, pgoff); - up_write(&current->mm->mmap_sem); - - if (file) - fput(file); -out: - return error; -} - asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg) { unsigned long ret; diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index bc70c5810fec..939a61507ac5 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -834,4 +834,8 @@ int kernel_execve(const char *filename, char *const argv[], char *const envp[]); asmlinkage long sys_perf_event_open( struct perf_event_attr __user *attr_uptr, pid_t pid, int cpu, int group_fd, unsigned long flags); + +asmlinkage long sys_mmap_pgoff(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long pgoff); #endif diff --git a/mm/util.c b/mm/util.c index 7c35ad95f927..3bf81b294ae8 100644 --- a/mm/util.c +++ b/mm/util.c @@ -4,6 +4,10 @@ #include #include #include +#include +#include +#include +#include #include #define CREATE_TRACE_POINTS @@ -268,6 +272,31 @@ int __attribute__((weak)) get_user_pages_fast(unsigned long start, } EXPORT_SYMBOL_GPL(get_user_pages_fast); +SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, + unsigned long, prot, unsigned long, flags, + unsigned long, fd, unsigned long, pgoff) +{ + struct file * file = NULL; + unsigned long retval = -EBADF; + + if (!(flags & MAP_ANONYMOUS)) { + file = fget(fd); + if (!file) + goto out; + } + + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); + + down_write(&current->mm->mmap_sem); + retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); + up_write(&current->mm->mmap_sem); + + if (file) + fput(file); +out: + return retval; +} + /* Tracepoints definitions. */ EXPORT_TRACEPOINT_SYMBOL(kmalloc); EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc); -- cgit v1.2.3 From 03889384cee7a198a79447c1ea6aca2c8e54d155 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 11 Dec 2009 09:48:22 -0500 Subject: tracing: Add trace_dump_stack() I've been asked a few times about how to find out what is calling some location in the kernel. One way is to use dynamic function tracing and implement the func_stack_trace. But this only finds out who is calling a particular function. It does not tell you who is calling that function and entering a specific if conditional. I have myself implemented a quick version of trace_dump_stack() for this purpose a few times, and just needed it now. This is when I realized that this would be a good tool to have in the kernel like trace_printk(). Using trace_dump_stack() is similar to dump_stack() except that it writes to the trace buffer instead and can be used in critical locations.
For example: @@ -5485,8 +5485,12 @@ need_resched_nonpreemptible: if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { if (unlikely(signal_pending_state(prev->state, prev))) prev->state = TASK_RUNNING; - else + else { deactivate_task(rq, prev, 1); + trace_printk("Deactivating task %s:%d\n", + prev->comm, prev->pid); + trace_dump_stack(); + } switch_count = &prev->nvcsw; } Produces: <...>-3249 [001] 296.105269: schedule: Deactivating task ntpd:3249 <...>-3249 [001] 296.105270: => schedule => schedule_hrtimeout_range => poll_schedule_timeout => do_select => core_sys_select => sys_select => system_call_fastpath Signed-off-by: Steven Rostedt --- include/linux/kernel.h | 2 ++ kernel/trace/trace.c | 16 ++++++++++++++++ 2 files changed, 18 insertions(+) (limited to 'include') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 3fa4c590cf12..5ad4199fb073 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -492,6 +492,8 @@ extern int __trace_printk(unsigned long ip, const char *fmt, ...) __attribute__ ((format (printf, 2, 3))); +extern void trace_dump_stack(void); + /* * The double __builtin_constant_p is because gcc will give us an error * if we try to allocate the static variable to fmt if it is not a diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 88bd9ae2a9ed..f531301b7a3b 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1151,6 +1151,22 @@ void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, __ftrace_trace_stack(tr->buffer, flags, skip, pc); } +/** + * trace_dump_stack - record a stack back trace in the trace buffer + */ +void trace_dump_stack(void) +{ + unsigned long flags; + + if (tracing_disabled || tracing_selftest_running) + return; + + local_save_flags(flags); + + /* skipping 3 traces, seems to get us at the caller of this function */ + __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count()); +} + void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) { -- cgit v1.2.3 From 073120cc28ad9f6003452c8bb9d15a87b1820201 Mon Sep 17 00:00:00 2001 From: Kay Sievers Date: Wed, 28 Oct 2009 19:51:17 +0100 Subject: Driver Core: devtmpfs: use sys_mount() Signed-off-by: Kay Sievers Signed-off-by: Greg Kroah-Hartman --- drivers/base/devtmpfs.c | 9 ++------- include/linux/device.h | 2 +- 2 files changed, 3 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index 1cf498fd2b52..880a203b6688 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -332,9 +332,8 @@ out: * If configured, or requested by the commandline, devtmpfs will be * auto-mounted after the kernel mounted the root filesystem.
*/ -int devtmpfs_mount(const char *mountpoint) +int devtmpfs_mount(const char *mntdir) { - struct path path; int err; if (!dev_mount) @@ -343,15 +342,11 @@ int devtmpfs_mount(const char *mountpoint) if (!dev_mnt) return 0; - err = kern_path(mountpoint, LOOKUP_FOLLOW, &path); - if (err) - return err; - err = do_add_mount(dev_mnt, &path, 0, NULL); + err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL); if (err) printk(KERN_INFO "devtmpfs: error mounting %i\n", err); else printk(KERN_INFO "devtmpfs: mounted\n"); - path_put(&path); return err; } diff --git a/include/linux/device.h b/include/linux/device.h index 2ea3e4921812..2a73d9bcbc9c 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -558,7 +558,7 @@ extern void wait_for_device_probe(void); #ifdef CONFIG_DEVTMPFS extern int devtmpfs_create_node(struct device *dev); extern int devtmpfs_delete_node(struct device *dev); -extern int devtmpfs_mount(const char *mountpoint); +extern int devtmpfs_mount(const char *mntdir); #else static inline int devtmpfs_create_node(struct device *dev) { return 0; } static inline int devtmpfs_delete_node(struct device *dev) { return 0; } -- cgit v1.2.3 From 9ebfbd45f9d4ee9cd72529cf99e5f300eb398e67 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 29 Oct 2009 12:36:02 +0100 Subject: firmware_class: make request_firmware_nowait more useful Unfortunately, one cannot hold on to the struct firmware that request_firmware_nowait() hands off, which is needed in some cases. Allow this by requiring the callback to free it (via release_firmware). Additionally, give it a gfp_t parameter -- all the current users call it from a GFP_KERNEL context so the GFP_ATOMIC isn't necessary. This also marks an API break which is useful in a sense, although that is obviously not the primary purpose of this change. Signed-off-by: Johannes Berg Acked-by: Marcel Holtmann Cc: Ming Lei Cc: Catalin Marinas Cc: David Woodhouse Cc: Pavel Roskin Cc: Abhay Salunke Signed-off-by: Greg Kroah-Hartman --- drivers/base/firmware_class.c | 14 ++++++-------- drivers/firmware/dell_rbu.c | 9 +++++++-- drivers/serial/ucc_uart.c | 8 +++++--- drivers/staging/comedi/drivers/usbdux.c | 5 ++++- drivers/staging/comedi/drivers/usbduxfast.c | 5 ++++- drivers/usb/atm/ueagle-atm.c | 7 ++++--- include/linux/firmware.h | 5 +++-- 7 files changed, 33 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index 7376367bcb80..a95024166b66 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -601,12 +601,9 @@ request_firmware_work_func(void *arg) } ret = _request_firmware(&fw, fw_work->name, fw_work->device, fw_work->uevent); - if (ret < 0) - fw_work->cont(NULL, fw_work->context); - else { - fw_work->cont(fw, fw_work->context); - release_firmware(fw); - } + + fw_work->cont(fw, fw_work->context); + module_put(fw_work->module); kfree(fw_work); return ret; @@ -619,6 +616,7 @@ request_firmware_work_func(void *arg) * is non-zero else the firmware copy must be done manually. * @name: name of firmware file * @device: device for which firmware is being loaded + * @gfp: allocation flags * @context: will be passed over to @cont, and * @fw may be %NULL if firmware request fails. 
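To make the new contract concrete, here is a hedged sketch of a caller of the reworked request_firmware_nowait(); the device, firmware name and callback body are hypothetical, while the signature (including the new gfp argument) and the duty to call release_firmware() in the callback come from the patch itself:

/* Hypothetical caller -- the names are illustrative, the API usage follows this patch. */
#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/module.h>

static void example_fw_cont(const struct firmware *fw, void *context)
{
        struct device *dev = context;

        if (!fw) {
                /* the request failed, so there is nothing to release */
                dev_err(dev, "example firmware not available\n");
                return;
        }

        /* ... consume fw->data / fw->size, possibly keeping a copy ... */

        /* the callback now owns the firmware and must free it itself */
        release_firmware(fw);
}

static int example_load(struct device *dev)
{
        /* process context, so plain GFP_KERNEL instead of the old GFP_ATOMIC */
        return request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
                                       "example_fw.bin", dev, GFP_KERNEL,
                                       dev, example_fw_cont);
}

The dell_rbu, ucc_uart and comedi hunks that follow gain exactly this release_firmware() call in their completion handlers.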
* @cont: function will be called asynchronously when the firmware @@ -631,12 +629,12 @@ request_firmware_work_func(void *arg) int request_firmware_nowait( struct module *module, int uevent, - const char *name, struct device *device, void *context, + const char *name, struct device *device, gfp_t gfp, void *context, void (*cont)(const struct firmware *fw, void *context)) { struct task_struct *task; struct firmware_work *fw_work = kmalloc(sizeof (struct firmware_work), - GFP_ATOMIC); + gfp); if (!fw_work) return -ENOMEM; diff --git a/drivers/firmware/dell_rbu.c b/drivers/firmware/dell_rbu.c index b4704e150b28..b3a0cf57442e 100644 --- a/drivers/firmware/dell_rbu.c +++ b/drivers/firmware/dell_rbu.c @@ -544,9 +544,12 @@ static void callbackfn_rbu(const struct firmware *fw, void *context) { rbu_data.entry_created = 0; - if (!fw || !fw->size) + if (!fw) return; + if (!fw->size) + goto out; + spin_lock(&rbu_data.lock); if (!strcmp(image_type, "mono")) { if (!img_update_realloc(fw->size)) @@ -568,6 +571,8 @@ static void callbackfn_rbu(const struct firmware *fw, void *context) } else pr_debug("invalid image type specified.\n"); spin_unlock(&rbu_data.lock); + out: + release_firmware(fw); } static ssize_t read_rbu_image_type(struct kobject *kobj, @@ -615,7 +620,7 @@ static ssize_t write_rbu_image_type(struct kobject *kobj, spin_unlock(&rbu_data.lock); req_firm_rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG, "dell_rbu", - &rbu_device->dev, &context, + &rbu_device->dev, GFP_KERNEL, &context, callbackfn_rbu); if (req_firm_rc) { printk(KERN_ERR diff --git a/drivers/serial/ucc_uart.c b/drivers/serial/ucc_uart.c index 46de564aaea0..465f2fae1025 100644 --- a/drivers/serial/ucc_uart.c +++ b/drivers/serial/ucc_uart.c @@ -1179,16 +1179,18 @@ static void uart_firmware_cont(const struct firmware *fw, void *context) if (firmware->header.length != fw->size) { dev_err(dev, "invalid firmware\n"); - return; + goto out; } ret = qe_upload_firmware(firmware); if (ret) { dev_err(dev, "could not load firmware\n"); - return; + goto out; } firmware_loaded = 1; + out: + release_firmware(fw); } static int ucc_uart_probe(struct of_device *ofdev, @@ -1247,7 +1249,7 @@ static int ucc_uart_probe(struct of_device *ofdev, */ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, filename, &ofdev->dev, - &ofdev->dev, uart_firmware_cont); + GFP_KERNEL, &ofdev->dev, uart_firmware_cont); if (ret) { dev_err(&ofdev->dev, "could not load firmware %s\n", diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c index cca4e869f0ec..dfcd12bec86b 100644 --- a/drivers/staging/comedi/drivers/usbdux.c +++ b/drivers/staging/comedi/drivers/usbdux.c @@ -2327,9 +2327,11 @@ static void usbdux_firmware_request_complete_handler(const struct firmware *fw, if (ret) { dev_err(&usbdev->dev, "Could not upload firmware (err=%d)\n", ret); - return; + goto out; } comedi_usb_auto_config(usbdev, BOARDNAME); + out: + release_firmware(fw); } /* allocate memory for the urbs and initialise them */ @@ -2580,6 +2582,7 @@ static int usbduxsub_probe(struct usb_interface *uinterf, FW_ACTION_HOTPLUG, "usbdux_firmware.bin", &udev->dev, + GFP_KERNEL, usbduxsub + index, usbdux_firmware_request_complete_handler); diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c index d143222579c2..2e675cce7dbf 100644 --- a/drivers/staging/comedi/drivers/usbduxfast.c +++ b/drivers/staging/comedi/drivers/usbduxfast.c @@ -1451,10 +1451,12 @@ static void 
usbduxfast_firmware_request_complete_handler(const struct firmware if (ret) { dev_err(&usbdev->dev, "Could not upload firmware (err=%d)\n", ret); - return; + goto out; } comedi_usb_auto_config(usbdev, BOARDNAME); + out: + release_firmware(fw); } /* @@ -1569,6 +1571,7 @@ static int usbduxfastsub_probe(struct usb_interface *uinterf, FW_ACTION_HOTPLUG, "usbduxfast_firmware.bin", &udev->dev, + GFP_KERNEL, usbduxfastsub + index, usbduxfast_firmware_request_complete_handler); diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c index bba4d3eabe0f..c5395246886d 100644 --- a/drivers/usb/atm/ueagle-atm.c +++ b/drivers/usb/atm/ueagle-atm.c @@ -667,12 +667,12 @@ static void uea_upload_pre_firmware(const struct firmware *fw_entry, void *conte else uea_info(usb, "firmware uploaded\n"); - uea_leaves(usb); - return; + goto err; err_fw_corrupted: uea_err(usb, "firmware is corrupted\n"); err: + release_firmware(fw_entry); uea_leaves(usb); } @@ -705,7 +705,8 @@ static int uea_load_firmware(struct usb_device *usb, unsigned int ver) break; } - ret = request_firmware_nowait(THIS_MODULE, 1, fw_name, &usb->dev, usb, uea_upload_pre_firmware); + ret = request_firmware_nowait(THIS_MODULE, 1, fw_name, &usb->dev, + GFP_KERNEL, usb, uea_upload_pre_firmware); if (ret) uea_err(usb, "firmware %s is not available\n", fw_name); else diff --git a/include/linux/firmware.h b/include/linux/firmware.h index d31544628436..043811f0d277 100644 --- a/include/linux/firmware.h +++ b/include/linux/firmware.h @@ -4,6 +4,7 @@ #include #include #include +#include #define FW_ACTION_NOHOTPLUG 0 #define FW_ACTION_HOTPLUG 1 @@ -38,7 +39,7 @@ int request_firmware(const struct firmware **fw, const char *name, struct device *device); int request_firmware_nowait( struct module *module, int uevent, - const char *name, struct device *device, void *context, + const char *name, struct device *device, gfp_t gfp, void *context, void (*cont)(const struct firmware *fw, void *context)); void release_firmware(const struct firmware *fw); @@ -51,7 +52,7 @@ static inline int request_firmware(const struct firmware **fw, } static inline int request_firmware_nowait( struct module *module, int uevent, - const char *name, struct device *device, void *context, + const char *name, struct device *device, gfp_t gfp, void *context, void (*cont)(const struct firmware *fw, void *context)) { return -EINVAL; -- cgit v1.2.3 From 832b6af198aefe6034310e124594cc8b833c0ef9 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Fri, 20 Nov 2009 16:08:56 -0800 Subject: sysfs: Propagate renames to the vfs on demand By teaching sysfs_revalidate to hide a dentry for a sysfs_dirent if the sysfs_dirent has been renamed, and by teaching sysfs_lookup to return the original dentry if the sysfs dirent has been renamed. I can show the results of renames correctly without having to update the dcache during the directory rename. This massively simplifies the rename logic allowing a lot of weird sysfs special cases to be removed along with a lot of now unnecesary helper code. Acked-by: Tejun Heo Signed-off-by: Eric W. 
Biederman Signed-off-by: Greg Kroah-Hartman --- fs/namei.c | 22 ------- fs/sysfs/dir.c | 158 ++++++++++---------------------------------------- fs/sysfs/inode.c | 12 ---- fs/sysfs/sysfs.h | 1 - include/linux/namei.h | 1 - 5 files changed, 32 insertions(+), 162 deletions(-) (limited to 'include') diff --git a/fs/namei.c b/fs/namei.c index d11f404667e9..d3c190c35fcc 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1279,28 +1279,6 @@ struct dentry *lookup_one_len(const char *name, struct dentry *base, int len) return __lookup_hash(&this, base, NULL); } -/** - * lookup_one_noperm - bad hack for sysfs - * @name: pathname component to lookup - * @base: base directory to lookup from - * - * This is a variant of lookup_one_len that doesn't perform any permission - * checks. It's a horrible hack to work around the braindead sysfs - * architecture and should not be used anywhere else. - * - * DON'T USE THIS FUNCTION EVER, thanks. - */ -struct dentry *lookup_one_noperm(const char *name, struct dentry *base) -{ - int err; - struct qstr this; - - err = __lookup_one_len(name, &this, base, strlen(name)); - if (err) - return ERR_PTR(err); - return __lookup_hash(&this, base, NULL); -} - int user_path_at(int dfd, const char __user *name, unsigned flags, struct path *path) { diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c index f3af45e47eaa..97954c69ff0e 100644 --- a/fs/sysfs/dir.c +++ b/fs/sysfs/dir.c @@ -25,7 +25,6 @@ #include "sysfs.h" DEFINE_MUTEX(sysfs_mutex); -DEFINE_MUTEX(sysfs_rename_mutex); DEFINE_SPINLOCK(sysfs_assoc_lock); static DEFINE_SPINLOCK(sysfs_ino_lock); @@ -84,46 +83,6 @@ static void sysfs_unlink_sibling(struct sysfs_dirent *sd) } } -/** - * sysfs_get_dentry - get dentry for the given sysfs_dirent - * @sd: sysfs_dirent of interest - * - * Get dentry for @sd. Dentry is looked up if currently not - * present. This function descends from the root looking up - * dentry for each step. - * - * LOCKING: - * mutex_lock(sysfs_rename_mutex) - * - * RETURNS: - * Pointer to found dentry on success, ERR_PTR() value on error. - */ -struct dentry *sysfs_get_dentry(struct sysfs_dirent *sd) -{ - struct dentry *dentry = dget(sysfs_sb->s_root); - - while (dentry->d_fsdata != sd) { - struct sysfs_dirent *cur; - struct dentry *parent; - - /* find the first ancestor which hasn't been looked up */ - cur = sd; - while (cur->s_parent != dentry->d_fsdata) - cur = cur->s_parent; - - /* look it up */ - parent = dentry; - mutex_lock(&parent->d_inode->i_mutex); - dentry = lookup_one_noperm(cur->s_name, parent); - mutex_unlock(&parent->d_inode->i_mutex); - dput(parent); - - if (IS_ERR(dentry)) - break; - } - return dentry; -} - /** * sysfs_get_active - get an active reference to sysfs_dirent * @sd: sysfs_dirent to get an active reference to @@ -315,6 +274,14 @@ static int sysfs_dentry_revalidate(struct dentry *dentry, struct nameidata *nd) if (sd->s_flags & SYSFS_FLAG_REMOVED) goto out_bad; + /* The sysfs dirent has been moved? */ + if (dentry->d_parent->d_fsdata != sd->s_parent) + goto out_bad; + + /* The sysfs dirent has been renamed */ + if (strcmp(dentry->d_name.name, sd->s_name) != 0) + goto out_bad; + mutex_unlock(&sysfs_mutex); out_valid: return 1; @@ -322,6 +289,12 @@ out_bad: /* Remove the dentry from the dcache hashes. * If this is a deleted dentry we use d_drop instead of d_delete * so sysfs doesn't need to cope with negative dentries. + * + * If this is a dentry that has simply been renamed we + * use d_drop to remove it from the dcache lookup on its + * old parent. 
If this dentry persists later when a lookup + * is performed at its new name the dentry will be readded + * to the dcache hashes. */ is_dir = (sysfs_type(sd) == SYSFS_DIR); mutex_unlock(&sysfs_mutex); @@ -705,10 +678,15 @@ static struct dentry * sysfs_lookup(struct inode *dir, struct dentry *dentry, } /* instantiate and hash dentry */ - dentry->d_op = &sysfs_dentry_ops; - dentry->d_fsdata = sysfs_get(sd); - d_instantiate(dentry, inode); - d_rehash(dentry); + ret = d_find_alias(inode); + if (!ret) { + dentry->d_op = &sysfs_dentry_ops; + dentry->d_fsdata = sysfs_get(sd); + d_add(dentry, inode); + } else { + d_move(ret, dentry); + iput(inode); + } out_unlock: mutex_unlock(&sysfs_mutex); @@ -785,62 +763,32 @@ void sysfs_remove_dir(struct kobject * kobj) int sysfs_rename_dir(struct kobject * kobj, const char *new_name) { struct sysfs_dirent *sd = kobj->sd; - struct dentry *parent = NULL; - struct dentry *old_dentry = NULL, *new_dentry = NULL; const char *dup_name = NULL; int error; - mutex_lock(&sysfs_rename_mutex); + mutex_lock(&sysfs_mutex); error = 0; if (strcmp(sd->s_name, new_name) == 0) goto out; /* nothing to rename */ - /* get the original dentry */ - old_dentry = sysfs_get_dentry(sd); - if (IS_ERR(old_dentry)) { - error = PTR_ERR(old_dentry); - old_dentry = NULL; - goto out; - } - - parent = old_dentry->d_parent; - - /* lock parent and get dentry for new name */ - mutex_lock(&parent->d_inode->i_mutex); - mutex_lock(&sysfs_mutex); - error = -EEXIST; if (sysfs_find_dirent(sd->s_parent, new_name)) - goto out_unlock; - - error = -ENOMEM; - new_dentry = d_alloc_name(parent, new_name); - if (!new_dentry) - goto out_unlock; + goto out; /* rename sysfs_dirent */ error = -ENOMEM; new_name = dup_name = kstrdup(new_name, GFP_KERNEL); if (!new_name) - goto out_unlock; + goto out; dup_name = sd->s_name; sd->s_name = new_name; - /* rename */ - d_add(new_dentry, NULL); - d_move(old_dentry, new_dentry); - error = 0; - out_unlock: + out: mutex_unlock(&sysfs_mutex); - mutex_unlock(&parent->d_inode->i_mutex); kfree(dup_name); - dput(old_dentry); - dput(new_dentry); - out: - mutex_unlock(&sysfs_rename_mutex); return error; } @@ -848,12 +796,11 @@ int sysfs_move_dir(struct kobject *kobj, struct kobject *new_parent_kobj) { struct sysfs_dirent *sd = kobj->sd; struct sysfs_dirent *new_parent_sd; - struct dentry *old_parent, *new_parent = NULL; - struct dentry *old_dentry = NULL, *new_dentry = NULL; int error; - mutex_lock(&sysfs_rename_mutex); BUG_ON(!sd->s_parent); + + mutex_lock(&sysfs_mutex); new_parent_sd = (new_parent_kobj && new_parent_kobj->sd) ? 
new_parent_kobj->sd : &sysfs_root; @@ -861,61 +808,20 @@ int sysfs_move_dir(struct kobject *kobj, struct kobject *new_parent_kobj) if (sd->s_parent == new_parent_sd) goto out; /* nothing to move */ - /* get dentries */ - old_dentry = sysfs_get_dentry(sd); - if (IS_ERR(old_dentry)) { - error = PTR_ERR(old_dentry); - old_dentry = NULL; - goto out; - } - old_parent = old_dentry->d_parent; - - new_parent = sysfs_get_dentry(new_parent_sd); - if (IS_ERR(new_parent)) { - error = PTR_ERR(new_parent); - new_parent = NULL; - goto out; - } - -again: - mutex_lock(&old_parent->d_inode->i_mutex); - if (!mutex_trylock(&new_parent->d_inode->i_mutex)) { - mutex_unlock(&old_parent->d_inode->i_mutex); - goto again; - } - mutex_lock(&sysfs_mutex); - error = -EEXIST; if (sysfs_find_dirent(new_parent_sd, sd->s_name)) - goto out_unlock; - - error = -ENOMEM; - new_dentry = d_alloc_name(new_parent, sd->s_name); - if (!new_dentry) - goto out_unlock; - - error = 0; - d_add(new_dentry, NULL); - d_move(old_dentry, new_dentry); + goto out; /* Remove from old parent's list and insert into new parent's list. */ sysfs_unlink_sibling(sd); sysfs_get(new_parent_sd); - drop_nlink(old_parent->d_inode); sysfs_put(sd->s_parent); sd->s_parent = new_parent_sd; - inc_nlink(new_parent->d_inode); sysfs_link_sibling(sd); - out_unlock: + error = 0; +out: mutex_unlock(&sysfs_mutex); - mutex_unlock(&new_parent->d_inode->i_mutex); - mutex_unlock(&old_parent->d_inode->i_mutex); - out: - dput(new_parent); - dput(old_dentry); - dput(new_dentry); - mutex_unlock(&sysfs_rename_mutex); return error; } diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c index 1ffd5559926f..9f783d4e4b51 100644 --- a/fs/sysfs/inode.c +++ b/fs/sysfs/inode.c @@ -205,17 +205,6 @@ static inline void set_inode_attr(struct inode * inode, struct iattr * iattr) inode->i_ctime = iattr->ia_ctime; } - -/* - * sysfs has a different i_mutex lock order behavior for i_mutex than other - * filesystems; sysfs i_mutex is called in many places with subsystem locks - * held. At the same time, many of the VFS locking rules do not apply to - * sysfs at all (cross directory rename for example). To untangle this mess - * (which gives false positives in lockdep), we're giving sysfs inodes their - * own class for i_mutex. 
- */ -static struct lock_class_key sysfs_inode_imutex_key; - static int sysfs_count_nlink(struct sysfs_dirent *sd) { struct sysfs_dirent *child; @@ -268,7 +257,6 @@ static void sysfs_init_inode(struct sysfs_dirent *sd, struct inode *inode) inode->i_mapping->a_ops = &sysfs_aops; inode->i_mapping->backing_dev_info = &sysfs_backing_dev_info; inode->i_op = &sysfs_inode_operations; - lockdep_set_class(&inode->i_mutex, &sysfs_inode_imutex_key); set_default_inode_attr(inode, sd->s_mode); sysfs_refresh_inode(sd, inode); diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h index 90b35012abb2..98a15bf1efe1 100644 --- a/fs/sysfs/sysfs.h +++ b/fs/sysfs/sysfs.h @@ -103,7 +103,6 @@ extern struct kmem_cache *sysfs_dir_cachep; * dir.c */ extern struct mutex sysfs_mutex; -extern struct mutex sysfs_rename_mutex; extern spinlock_t sysfs_assoc_lock; extern const struct file_operations sysfs_dir_operations; diff --git a/include/linux/namei.h b/include/linux/namei.h index ec0f607b364a..028946750289 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -76,7 +76,6 @@ extern struct file *nameidata_to_filp(struct nameidata *nd, int flags); extern void release_open_intent(struct nameidata *); extern struct dentry *lookup_one_len(const char *, struct dentry *, int); -extern struct dentry *lookup_one_noperm(const char *, struct dentry *); extern int follow_down(struct path *); extern int follow_up(struct path *); -- cgit v1.2.3 From c60e0504c8e4fa14179d0687d80ef25148dd6dd4 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 27 Nov 2009 17:38:51 +0900 Subject: Driver Core: Early platform driver buffer Add early_platform_init_buffer() support and update the early platform driver code to allow passing parameters to the driver on the kernel command line. early_platform_init_buffer() simply allows early platform drivers to provide a pointer and length to a memory area where the remaining part of the kernel command line option will be stored. Needed to pass baud rate and other serial port options to the reworked early serial console code on SuperH. Signed-off-by: Magnus Damm Signed-off-by: Greg Kroah-Hartman --- drivers/base/platform.c | 29 ++++++++++++++++++++++------- include/linux/platform_device.h | 20 +++++++++++++++----- 2 files changed, 37 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 4fa954b07ac4..9d2ee25deaf5 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -1000,7 +1000,7 @@ static __initdata LIST_HEAD(early_platform_device_list); int __init early_platform_driver_register(struct early_platform_driver *epdrv, char *buf) { - unsigned long index; + char *tmp; int n; /* Simply add the driver to the end of the global list. @@ -1019,13 +1019,28 @@ int __init early_platform_driver_register(struct early_platform_driver *epdrv, if (buf && !strncmp(buf, epdrv->pdrv->driver.name, n)) { list_move(&epdrv->list, &early_platform_driver_list); - if (!strcmp(buf, epdrv->pdrv->driver.name)) + /* Allow passing parameters after device name */ + if (buf[n] == '\0' || buf[n] == ',') epdrv->requested_id = -1; - else if (buf[n] == '.' && strict_strtoul(&buf[n + 1], 10, - &index) == 0) - epdrv->requested_id = index; - else - epdrv->requested_id = EARLY_PLATFORM_ID_ERROR; + else { + epdrv->requested_id = simple_strtoul(&buf[n + 1], + &tmp, 10); + + if (buf[n] != '.' 
|| (tmp == &buf[n + 1])) { + epdrv->requested_id = EARLY_PLATFORM_ID_ERROR; + n = 0; + } else + n += strcspn(&buf[n + 1], ",") + 1; + } + + if (buf[n] == ',') + n++; + + if (epdrv->bufsize) { + memcpy(epdrv->buffer, &buf[n], + min_t(int, epdrv->bufsize, strlen(&buf[n]) + 1)); + epdrv->buffer[epdrv->bufsize - 1] = '\0'; + } } return 0; diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 3c6675c2444b..71ff887ca44e 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -83,6 +83,8 @@ struct early_platform_driver { struct platform_driver *pdrv; struct list_head list; int requested_id; + char *buffer; + int bufsize; }; #define EARLY_PLATFORM_ID_UNSET -2 @@ -102,21 +104,29 @@ extern int early_platform_driver_probe(char *class_str, int nr_probe, int user_only); extern void early_platform_cleanup(void); +#define early_platform_init(class_string, platdrv) \ + early_platform_init_buffer(class_string, platdrv, NULL, 0) #ifndef MODULE -#define early_platform_init(class_string, platform_driver) \ +#define early_platform_init_buffer(class_string, platdrv, buf, bufsiz) \ static __initdata struct early_platform_driver early_driver = { \ .class_str = class_string, \ - .pdrv = platform_driver, \ + .buffer = buf, \ + .bufsize = bufsiz, \ + .pdrv = platdrv, \ .requested_id = EARLY_PLATFORM_ID_UNSET, \ }; \ -static int __init early_platform_driver_setup_func(char *buf) \ +static int __init early_platform_driver_setup_func(char *buffer) \ { \ - return early_platform_driver_register(&early_driver, buf); \ + return early_platform_driver_register(&early_driver, buffer); \ } \ early_param(class_string, early_platform_driver_setup_func) #else /* MODULE */ -#define early_platform_init(class_string, platform_driver) +#define early_platform_init_buffer(class_string, platdrv, buf, bufsiz) \ +static inline char *early_platform_driver_setup_func(void) \ +{ \ + return bufsiz ? buf : NULL; \ +} #endif /* MODULE */ #endif /* _PLATFORM_DEVICE_H_ */ -- cgit v1.2.3 From 4c1bd3d7a7d114dabd58f62f386ac4bfd268be1f Mon Sep 17 00:00:00 2001 From: David Vrabel Date: Mon, 24 Aug 2009 14:44:30 +0100 Subject: USB: make urb scatter-gather support more generic The WHCI HCD will also support urbs with scatter-gather lists. Add a usb_bus field to indicated how many sg list elements are supported by the HCD. Use this to decide whether to pass the scatter-list to the HCD or not. Make the usb-storage driver use this new field. Signed-off-by: David Vrabel Cc: Alan Stern Cc: Sarah Sharp Cc: Matthew Dharm Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/message.c | 8 +------- drivers/usb/host/xhci-pci.c | 2 ++ drivers/usb/storage/usb.c | 10 ++++++++++ include/linux/usb.h | 1 + 4 files changed, 14 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index e80f1af438c8..8d874cad6581 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -393,13 +393,7 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev, if (io->entries <= 0) return io->entries; - /* If we're running on an xHCI host controller, queue the whole scatter - * gather list with one call to urb_enqueue(). This is only for bulk, - * as that endpoint type does not care how the data gets broken up - * across frames. 
- */ - if (usb_pipebulk(pipe) && - bus_to_hcd(dev->bus)->driver->flags & HCD_USB3) { + if (dev->bus->sg_tablesize > 0) { io->urbs = kmalloc(sizeof *io->urbs, mem_flags); use_sg = true; } else { diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 06595ec27bb7..e097008d6fb1 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -54,6 +54,8 @@ static int xhci_pci_setup(struct usb_hcd *hcd) struct pci_dev *pdev = to_pci_dev(hcd->self.controller); int retval; + hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 1; + xhci->cap_regs = hcd->regs; xhci->op_regs = hcd->regs + HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase)); diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index 01e43a13a6bf..1599d86154c4 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c @@ -843,6 +843,15 @@ static int usb_stor_scan_thread(void * __us) complete_and_exit(&us->scanning_done, 0); } +static unsigned int usb_stor_sg_tablesize(struct usb_interface *intf) +{ + struct usb_device *usb_dev = interface_to_usbdev(intf); + + if (usb_dev->bus->sg_tablesize) { + return usb_dev->bus->sg_tablesize; + } + return SG_ALL; +} /* First part of general USB mass-storage probing */ int usb_stor_probe1(struct us_data **pus, @@ -871,6 +880,7 @@ int usb_stor_probe1(struct us_data **pus, * Allow 16-byte CDBs and thus > 2TB */ host->max_cmd_len = 16; + host->sg_tablesize = usb_stor_sg_tablesize(intf); *pus = us = host_to_us(host); memset(us, 0, sizeof(struct us_data)); mutex_init(&(us->dev_mutex)); diff --git a/include/linux/usb.h b/include/linux/usb.h index a34fa89f1474..6e91ee4f5b81 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -331,6 +331,7 @@ struct usb_bus { u8 otg_port; /* 0, or number of OTG/HNP port */ unsigned is_b_host:1; /* true during some HNP roleswitches */ unsigned b_hnp_enable:1; /* OTG: did A-Host enable HNP? */ + unsigned sg_tablesize; /* 0 or largest number of sg list entries */ int devnum_next; /* Next open device number in * round-robin allocation */ -- cgit v1.2.3 From 5242658d1b97771d658991cf29be32bcf81d5859 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 21 Oct 2009 00:03:38 +0200 Subject: USB gadget: Handle endpoint requests at the function level Control requests targeted at an endpoint (that is sent to EP0 but specifying the target endpoint address in wIndex) are dispatched to the current configuration's setup callback, requiring all gadget drivers to dispatch the requests to the correct function driver. To avoid this, record which endpoints are used by each function in the composite driver SET CONFIGURATION handler and dispatch requests targeted at endpoints to the correct function. Signed-off-by: Laurent Pinchart Cc: David Brownell Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/composite.c | 54 +++++++++++++++++++++++++++++++++++------- include/linux/usb/composite.h | 1 + 2 files changed, 47 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index d05397ec8a18..8498f1a114d5 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -373,6 +373,8 @@ static void reset_config(struct usb_composite_dev *cdev) list_for_each_entry(f, &cdev->config->functions, list) { if (f->disable) f->disable(f); + + bitmap_zero(f->endpoints, 32); } cdev->config = NULL; } @@ -418,10 +420,35 @@ static int set_config(struct usb_composite_dev *cdev, /* Initialize all interfaces by setting them to altsetting zero. 
*/ for (tmp = 0; tmp < MAX_CONFIG_INTERFACES; tmp++) { struct usb_function *f = c->interface[tmp]; + struct usb_descriptor_header **descriptors; if (!f) break; + /* + * Record which endpoints are used by the function. This is used + * to dispatch control requests targeted at that endpoint to the + * function's setup callback instead of the current + * configuration's setup callback. + */ + if (gadget->speed == USB_SPEED_HIGH) + descriptors = f->hs_descriptors; + else + descriptors = f->descriptors; + + for (; *descriptors; ++descriptors) { + struct usb_endpoint_descriptor *ep; + int addr; + + if ((*descriptors)->bDescriptorType != USB_DT_ENDPOINT) + continue; + + ep = (struct usb_endpoint_descriptor *)*descriptors; + addr = ((ep->bEndpointAddress & 0x80) >> 3) + | (ep->bEndpointAddress & 0x0f); + set_bit(addr, f->endpoints); + } + result = f->set_alt(f, tmp, 0); if (result < 0) { DBG(cdev, "interface %d (%s/%p) alt 0 --> %d\n", @@ -688,6 +715,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) u16 w_value = le16_to_cpu(ctrl->wValue); u16 w_length = le16_to_cpu(ctrl->wLength); struct usb_function *f = NULL; + u8 endp; /* partial re-init of the response message; the function or the * gadget might need to intercept e.g. a control-OUT completion @@ -800,23 +828,33 @@ unknown: ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); - /* functions always handle their interfaces ... punt other - * recipients (endpoint, other, WUSB, ...) to the current + /* functions always handle their interfaces and endpoints... + * punt other recipients (other, WUSB, ...) to the current * configuration code. * * REVISIT it could make sense to let the composite device * take such requests too, if that's ever needed: to work * in config 0, etc. */ - if ((ctrl->bRequestType & USB_RECIP_MASK) - == USB_RECIP_INTERFACE) { + switch (ctrl->bRequestType & USB_RECIP_MASK) { + case USB_RECIP_INTERFACE: f = cdev->config->interface[intf]; - if (f && f->setup) - value = f->setup(f, ctrl); - else + break; + + case USB_RECIP_ENDPOINT: + endp = ((w_index & 0x80) >> 3) | (w_index & 0x0f); + list_for_each_entry(f, &cdev->config->functions, list) { + if (test_bit(endp, f->endpoints)) + break; + } + if (&f->list == &cdev->config->functions) f = NULL; + break; } - if (value < 0 && !f) { + + if (f && f->setup) + value = f->setup(f, ctrl); + else { struct usb_configuration *c; c = cdev->config; diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index 4f6bb3d2160e..738ea1a691cb 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -127,6 +127,7 @@ struct usb_function { /* private: */ /* internals */ struct list_head list; + DECLARE_BITMAP(endpoints, 32); }; int usb_add_function(struct usb_configuration *, struct usb_function *); -- cgit v1.2.3 From 91c8a5a9985d5bf9c55f6f82f183f57b050b2a3a Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Thu, 15 Oct 2009 17:09:34 +0300 Subject: USB OTG: add support for ulpi connected external transceivers This adds support for OTG transceivers directly connected to the ULPI interface. 
In particular, the following details are added - a struct for low level io functions (read/write) - a priv field to be used as 'viewport' by low level access functions - an (*init) and (*shutdown) callbacks, along with static inline helpers - a (*set_vbus) callback to switch the port power on and off - a flags field for per-transceiver settings - some defines for the flags bitmask to configure platform specific details Signed-off-by: Daniel Mack Cc: Heikki Krogerus Cc: David Brownell Signed-off-by: Greg Kroah-Hartman --- include/linux/usb/otg.h | 68 ++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 67 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h index 2443c0e7a80c..52bb917641f0 100644 --- a/include/linux/usb/otg.h +++ b/include/linux/usb/otg.h @@ -33,6 +33,23 @@ enum usb_otg_state { OTG_STATE_A_VBUS_ERR, }; +#define USB_OTG_PULLUP_ID (1 << 0) +#define USB_OTG_PULLDOWN_DP (1 << 1) +#define USB_OTG_PULLDOWN_DM (1 << 2) +#define USB_OTG_EXT_VBUS_INDICATOR (1 << 3) +#define USB_OTG_DRV_VBUS (1 << 4) +#define USB_OTG_DRV_VBUS_EXT (1 << 5) + +struct otg_transceiver; + +/* for transceivers connected thru an ULPI interface, the user must + * provide access ops + */ +struct otg_io_access_ops { + int (*read)(struct otg_transceiver *otg, u32 reg); + int (*write)(struct otg_transceiver *otg, u32 val, u32 reg); +}; + /* * the otg driver needs to interact with both device side and host side * usb controllers. it decides which controller is active at a given @@ -42,6 +59,7 @@ enum usb_otg_state { struct otg_transceiver { struct device *dev; const char *label; + unsigned int flags; u8 default_a; enum usb_otg_state state; @@ -49,10 +67,17 @@ struct otg_transceiver { struct usb_bus *host; struct usb_gadget *gadget; + struct otg_io_access_ops *io_ops; + void __iomem *io_priv; + /* to pass extra port status to the root hub */ u16 port_status; u16 port_change; + /* initialize/shutdown the OTG controller */ + int (*init)(struct otg_transceiver *otg); + void (*shutdown)(struct otg_transceiver *otg); + /* bind/unbind the host controller */ int (*set_host)(struct otg_transceiver *otg, struct usb_bus *host); @@ -65,6 +90,10 @@ struct otg_transceiver { int (*set_power)(struct otg_transceiver *otg, unsigned mA); + /* effective for A-peripheral, ignored for B devices */ + int (*set_vbus)(struct otg_transceiver *otg, + bool enabled); + /* for non-OTG B devices: set transceiver into suspend mode */ int (*set_suspend)(struct otg_transceiver *otg, int suspend); @@ -85,6 +114,38 @@ extern int otg_set_transceiver(struct otg_transceiver *); extern void usb_nop_xceiv_register(void); extern void usb_nop_xceiv_unregister(void); +/* helpers for direct access thru low-level io interface */ +static inline int otg_io_read(struct otg_transceiver *otg, u32 reg) +{ + if (otg->io_ops && otg->io_ops->read) + return otg->io_ops->read(otg, reg); + + return -EINVAL; +} + +static inline int otg_io_write(struct otg_transceiver *otg, u32 reg, u32 val) +{ + if (otg->io_ops && otg->io_ops->write) + return otg->io_ops->write(otg, reg, val); + + return -EINVAL; +} + +static inline int +otg_init(struct otg_transceiver *otg) +{ + if (otg->init) + return otg->init(otg); + + return 0; +} + +static inline void +otg_shutdown(struct otg_transceiver *otg) +{ + if (otg->shutdown) + otg->shutdown(otg); +} /* for usb host and peripheral controller drivers */ extern struct otg_transceiver *otg_get_transceiver(void); @@ -97,6 +158,12 @@ otg_start_hnp(struct otg_transceiver 
*otg) return otg->start_hnp(otg); } +/* Context: can sleep */ +static inline int +otg_set_vbus(struct otg_transceiver *otg, bool enabled) +{ + return otg->set_vbus(otg, enabled); +} /* for HCDs */ static inline int @@ -105,7 +172,6 @@ otg_set_host(struct otg_transceiver *otg, struct usb_bus *host) return otg->set_host(otg, host); } - /* for usb peripheral controller drivers */ /* Context: can sleep */ -- cgit v1.2.3 From 2d57a95f09cf71c4c642e5be15f8b700d17ee90c Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Thu, 15 Oct 2009 17:09:35 +0300 Subject: USB OTG: Add generic driver for ULPI OTG transceiver This adds a minimal generic driver for ULPI connected transceivers, using the OTG framework functions recently introduced. The driver got a table to match the ULPI chips, which currently only has one entry for NXP's ISP 1504 transceiver. Signed-off-by: Daniel Mack Cc: Heikki Krogerus Cc: David Brownell Cc: Sascha Hauer Signed-off-by: Greg Kroah-Hartman --- drivers/usb/Makefile | 2 + drivers/usb/otg/Kconfig | 9 ++++ drivers/usb/otg/Makefile | 1 + drivers/usb/otg/ulpi.c | 136 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/usb/ulpi.h | 7 +++ 5 files changed, 155 insertions(+) create mode 100644 drivers/usb/otg/ulpi.c create mode 100644 include/linux/usb/ulpi.h (limited to 'include') diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile index be3c9b80bc9f..473aa1a20de9 100644 --- a/drivers/usb/Makefile +++ b/drivers/usb/Makefile @@ -44,3 +44,5 @@ obj-y += early/ obj-$(CONFIG_USB_ATM) += atm/ obj-$(CONFIG_USB_SPEEDTOUCH) += atm/ + +obj-$(CONFIG_USB_ULPI) += otg/ diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig index aa884d072f0b..de56b3d743d7 100644 --- a/drivers/usb/otg/Kconfig +++ b/drivers/usb/otg/Kconfig @@ -41,6 +41,15 @@ config ISP1301_OMAP This driver can also be built as a module. If so, the module will be called isp1301_omap. +config USB_ULPI + bool "Generic ULPI Transceiver Driver" + depends on ARM + help + Enable this to support ULPI connected USB OTG transceivers which + are likely found on embedded boards. + + The only chip currently supported is NXP's ISP1504 + config TWL4030_USB tristate "TWL4030 USB Transceiver Driver" depends on TWL4030_CORE && REGULATOR_TWL4030 diff --git a/drivers/usb/otg/Makefile b/drivers/usb/otg/Makefile index 208167856529..aeb49a8ec412 100644 --- a/drivers/usb/otg/Makefile +++ b/drivers/usb/otg/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_USB_GPIO_VBUS) += gpio_vbus.o obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o obj-$(CONFIG_TWL4030_USB) += twl4030-usb.o obj-$(CONFIG_NOP_USB_XCEIV) += nop-usb-xceiv.o +obj-$(CONFIG_USB_ULPI) += ulpi.o ccflags-$(CONFIG_USB_DEBUG) += -DDEBUG ccflags-$(CONFIG_USB_GADGET_DEBUG) += -DDEBUG diff --git a/drivers/usb/otg/ulpi.c b/drivers/usb/otg/ulpi.c new file mode 100644 index 000000000000..896527456b7e --- /dev/null +++ b/drivers/usb/otg/ulpi.c @@ -0,0 +1,136 @@ +/* + * Generic ULPI USB transceiver support + * + * Copyright (C) 2009 Daniel Mack + * + * Based on sources from + * + * Sascha Hauer + * Freescale Semiconductors + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include + +/* ULPI register addresses */ +#define ULPI_VID_LOW 0x00 /* Vendor ID low */ +#define ULPI_VID_HIGH 0x01 /* Vendor ID high */ +#define ULPI_PID_LOW 0x02 /* Product ID low */ +#define ULPI_PID_HIGH 0x03 /* Product ID high */ +#define ULPI_ITFCTL 0x07 /* Interface Control */ +#define ULPI_OTGCTL 0x0A /* OTG Control */ + +/* add to above register address to access Set/Clear functions */ +#define ULPI_REG_SET 0x01 +#define ULPI_REG_CLEAR 0x02 + +/* ULPI OTG Control Register bits */ +#define ID_PULL_UP (1 << 0) /* enable ID Pull Up */ +#define DP_PULL_DOWN (1 << 1) /* enable DP Pull Down */ +#define DM_PULL_DOWN (1 << 2) /* enable DM Pull Down */ +#define DISCHRG_VBUS (1 << 3) /* Discharge Vbus */ +#define CHRG_VBUS (1 << 4) /* Charge Vbus */ +#define DRV_VBUS (1 << 5) /* Drive Vbus */ +#define DRV_VBUS_EXT (1 << 6) /* Drive Vbus external */ +#define USE_EXT_VBUS_IND (1 << 7) /* Use ext. Vbus indicator */ + +#define ULPI_ID(vendor, product) (((vendor) << 16) | (product)) + +#define TR_FLAG(flags, a, b) (((flags) & a) ? b : 0) + +/* ULPI hardcoded IDs, used for probing */ +static unsigned int ulpi_ids[] = { + ULPI_ID(0x04cc, 0x1504), /* NXP ISP1504 */ +}; + +static int ulpi_set_flags(struct otg_transceiver *otg) +{ + unsigned int flags = 0; + + if (otg->flags & USB_OTG_PULLUP_ID) + flags |= ID_PULL_UP; + + if (otg->flags & USB_OTG_PULLDOWN_DM) + flags |= DM_PULL_DOWN; + + if (otg->flags & USB_OTG_PULLDOWN_DP) + flags |= DP_PULL_DOWN; + + if (otg->flags & USB_OTG_EXT_VBUS_INDICATOR) + flags |= USE_EXT_VBUS_IND; + + return otg_io_write(otg, flags, ULPI_OTGCTL + ULPI_REG_SET); +} + +static int ulpi_init(struct otg_transceiver *otg) +{ + int i, vid, pid; + + vid = (otg_io_read(otg, ULPI_VID_HIGH) << 8) | + otg_io_read(otg, ULPI_VID_LOW); + pid = (otg_io_read(otg, ULPI_PID_HIGH) << 8) | + otg_io_read(otg, ULPI_PID_LOW); + + pr_info("ULPI transceiver vendor/product ID 0x%04x/0x%04x\n", vid, pid); + + for (i = 0; i < ARRAY_SIZE(ulpi_ids); i++) + if (ulpi_ids[i] == ULPI_ID(vid, pid)) + return ulpi_set_flags(otg); + + pr_err("ULPI ID does not match any known transceiver.\n"); + return -ENODEV; +} + +static int ulpi_set_vbus(struct otg_transceiver *otg, bool on) +{ + unsigned int flags = otg_io_read(otg, ULPI_OTGCTL); + + flags &= ~(DRV_VBUS | DRV_VBUS_EXT); + + if (on) { + if (otg->flags & USB_OTG_DRV_VBUS) + flags |= DRV_VBUS; + + if (otg->flags & USB_OTG_DRV_VBUS_EXT) + flags |= DRV_VBUS_EXT; + } + + return otg_io_write(otg, flags, ULPI_OTGCTL + ULPI_REG_SET); +} + +struct otg_transceiver * +otg_ulpi_create(struct otg_io_access_ops *ops, + unsigned int flags) +{ + struct otg_transceiver *otg; + + otg = kzalloc(sizeof(*otg), GFP_KERNEL); + if (!otg) + return NULL; + + otg->label = "ULPI"; + otg->flags = flags; + otg->io_ops = ops; + otg->init = ulpi_init; + otg->set_vbus = ulpi_set_vbus; + + return otg; +} +EXPORT_SYMBOL_GPL(otg_ulpi_create); + diff --git a/include/linux/usb/ulpi.h b/include/linux/usb/ulpi.h new file mode 100644 index 000000000000..20675c6ebc4d --- /dev/null +++ b/include/linux/usb/ulpi.h @@ -0,0 +1,7 @@ +#ifndef __LINUX_USB_ULPI_H +#define __LINUX_USB_ULPI_H + +struct otg_transceiver *otg_ulpi_create(struct otg_io_access_ops *ops, + unsigned int flags); + +#endif /* __LINUX_USB_ULPI_H 
*/ -- cgit v1.2.3 From fb34d53752d5bec5acc73422e462a9c68aeeaa2a Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Fri, 13 Nov 2009 11:53:59 -0500 Subject: USB: remove the auto_pm flag This patch (as1302) removes the auto_pm flag from struct usb_device. The flag's only purpose was to distinguish between autosuspends and external suspends, but that information is now available in the pm_message_t argument passed to suspend methods. Signed-off-by: Alan Stern Signed-off-by: Greg Kroah-Hartman --- Documentation/usb/power-management.txt | 9 +++++---- drivers/bluetooth/btusb.c | 2 +- drivers/hid/usbhid/hid-core.c | 8 ++++---- drivers/net/wimax/i2400m/usb.c | 7 ++----- drivers/usb/core/driver.c | 4 ---- drivers/usb/serial/option.c | 2 +- drivers/usb/serial/sierra.c | 2 +- include/linux/usb.h | 2 -- 8 files changed, 14 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/Documentation/usb/power-management.txt b/Documentation/usb/power-management.txt index ad642615ad4c..8817368203d6 100644 --- a/Documentation/usb/power-management.txt +++ b/Documentation/usb/power-management.txt @@ -423,15 +423,16 @@ an URB had completed too recently. External suspend calls should never be allowed to fail in this way, only autosuspend calls. The driver can tell them apart by checking -udev->auto_pm; this flag will be set to 1 for internal PM events -(autosuspend or autoresume) and 0 for external PM events. +the PM_EVENT_AUTO bit in the message.event argument to the suspend +method; this bit will be set for internal PM events (autosuspend) and +clear for external PM events. Many of the ingredients in the autosuspend framework are oriented towards interfaces: The usb_interface structure contains the pm_usage_cnt field, and the usb_autopm_* routines take an interface pointer as their argument. But somewhat confusingly, a few of the -pieces (usb_mark_last_busy() and udev->auto_pm) use the usb_device -structure instead. Drivers need to keep this straight; they can call +pieces (i.e., usb_mark_last_busy()) use the usb_device structure +instead. Drivers need to keep this straight; they can call interface_to_usbdev() to find the device structure for a given interface. 
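For illustration only, a driver suspend routine written against the convention described above might look roughly like the sketch below. This is not code from the patch: the example_* names and the private structure are hypothetical, while pm_message_t, PM_EVENT_AUTO and usb_get_intfdata() are the kernel interfaces the patch relies on.

#include <linux/usb.h>

struct example_priv {
	int urbs_in_flight;	/* hypothetical per-device bookkeeping */
};

/* Hypothetical suspend callback: may refuse an autosuspend while I/O
 * is pending, but never fails an external (system) suspend.
 */
static int example_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct example_priv *priv = usb_get_intfdata(intf);

	if (message.event & PM_EVENT_AUTO) {
		/* Autosuspend request: allowed to fail if we are busy */
		if (priv->urbs_in_flight)
			return -EBUSY;
	}

	/* External suspend: must not fail; quiesce outstanding I/O here */
	return 0;
}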
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 44bc8bbabf54..4d2905996751 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -1066,7 +1066,7 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message) return 0; spin_lock_irq(&data->txlock); - if (!(interface_to_usbdev(intf)->auto_pm && data->tx_in_flight)) { + if (!((message.event & PM_EVENT_AUTO) && data->tx_in_flight)) { set_bit(BTUSB_SUSPENDING, &data->flags); spin_unlock_irq(&data->txlock); } else { diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index 0258289f3b3e..e2997a8d5e1b 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c @@ -1253,10 +1253,9 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message) { struct hid_device *hid = usb_get_intfdata(intf); struct usbhid_device *usbhid = hid->driver_data; - struct usb_device *udev = interface_to_usbdev(intf); int status; - if (udev->auto_pm) { + if (message.event & PM_EVENT_AUTO) { spin_lock_irq(&usbhid->lock); /* Sync with error handler */ if (!test_bit(HID_RESET_PENDING, &usbhid->iofl) && !test_bit(HID_CLEAR_HALT, &usbhid->iofl) @@ -1281,7 +1280,7 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message) return -EIO; } - if (!ignoreled && udev->auto_pm) { + if (!ignoreled && (message.event & PM_EVENT_AUTO)) { spin_lock_irq(&usbhid->lock); if (test_bit(HID_LED_ON, &usbhid->iofl)) { spin_unlock_irq(&usbhid->lock); @@ -1294,7 +1293,8 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message) hid_cancel_delayed_stuff(usbhid); hid_cease_io(usbhid); - if (udev->auto_pm && test_bit(HID_KEYS_PRESSED, &usbhid->iofl)) { + if ((message.event & PM_EVENT_AUTO) && + test_bit(HID_KEYS_PRESSED, &usbhid->iofl)) { /* lost race against keypresses */ status = hid_start_in(hid); if (status < 0) diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c index 47e84ef355c5..3b48681f8a0d 100644 --- a/drivers/net/wimax/i2400m/usb.c +++ b/drivers/net/wimax/i2400m/usb.c @@ -579,7 +579,7 @@ void i2400mu_disconnect(struct usb_interface *iface) * * As well, the device might refuse going to sleep for whichever * reason. In this case we just fail. For system suspend/hibernate, - * we *can't* fail. We look at usb_dev->auto_pm to see if the + * we *can't* fail. We check PM_EVENT_AUTO to see if the * suspend call comes from the USB stack or from the system and act * in consequence. 
* @@ -591,14 +591,11 @@ int i2400mu_suspend(struct usb_interface *iface, pm_message_t pm_msg) int result = 0; struct device *dev = &iface->dev; struct i2400mu *i2400mu = usb_get_intfdata(iface); -#ifdef CONFIG_PM - struct usb_device *usb_dev = i2400mu->usb_dev; -#endif unsigned is_autosuspend = 0; struct i2400m *i2400m = &i2400mu->i2400m; #ifdef CONFIG_PM - if (usb_dev->auto_pm > 0) + if (pm_msg.event & PM_EVENT_AUTO) is_autosuspend = 1; #endif diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index 4f864472c5c4..8016a296010e 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c @@ -1341,7 +1341,6 @@ static int usb_autopm_do_device(struct usb_device *udev, int inc_usage_cnt) int status = 0; usb_pm_lock(udev); - udev->auto_pm = 1; udev->pm_usage_cnt += inc_usage_cnt; WARN_ON(udev->pm_usage_cnt < 0); if (inc_usage_cnt) @@ -1473,7 +1472,6 @@ static int usb_autopm_do_interface(struct usb_interface *intf, if (intf->condition == USB_INTERFACE_UNBOUND) status = -ENODEV; else { - udev->auto_pm = 1; atomic_add(inc_usage_cnt, &intf->pm_usage_cnt); udev->last_busy = jiffies; if (inc_usage_cnt >= 0 && @@ -1707,7 +1705,6 @@ int usb_external_suspend_device(struct usb_device *udev, pm_message_t msg) do_unbind_rebind(udev, DO_UNBIND); usb_pm_lock(udev); - udev->auto_pm = 0; status = usb_suspend_both(udev, msg); usb_pm_unlock(udev); return status; @@ -1730,7 +1727,6 @@ int usb_external_resume_device(struct usb_device *udev, pm_message_t msg) int status; usb_pm_lock(udev); - udev->auto_pm = 0; status = usb_resume_both(udev, msg); udev->last_busy = jiffies; usb_pm_unlock(udev); diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 0d46bbec44b7..8751ec79a159 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -1313,7 +1313,7 @@ static int option_suspend(struct usb_serial *serial, pm_message_t message) dbg("%s entered", __func__); - if (serial->dev->auto_pm) { + if (message.event & PM_EVENT_AUTO) { spin_lock_irq(&intfdata->susp_lock); b = intfdata->in_flight; spin_unlock_irq(&intfdata->susp_lock); diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index c5c41aed106d..ac1b6449fb6a 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c @@ -1005,7 +1005,7 @@ static int sierra_suspend(struct usb_serial *serial, pm_message_t message) struct sierra_intf_private *intfdata; int b; - if (serial->dev->auto_pm) { + if (message.event & PM_EVENT_AUTO) { intfdata = serial->private; spin_lock_irq(&intfdata->susp_lock); b = intfdata->in_flight; diff --git a/include/linux/usb.h b/include/linux/usb.h index 6e91ee4f5b81..4b6f6db544ee 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -429,7 +429,6 @@ struct usb_tt; * @last_busy: time of last use * @autosuspend_delay: in jiffies * @connect_time: time device was first connected - * @auto_pm: autosuspend/resume in progress * @do_remote_wakeup: remote wakeup should be enabled * @reset_resume: needs reset instead of resume * @autosuspend_disabled: autosuspend disabled by the user @@ -514,7 +513,6 @@ struct usb_device { int autosuspend_delay; unsigned long connect_time; - unsigned auto_pm:1; unsigned do_remote_wakeup:1; unsigned reset_resume:1; unsigned autosuspend_disabled:1; -- cgit v1.2.3 From 8e4ceb38eb5bbaef22fc00abe9bc11e26bea2ab5 Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Mon, 7 Dec 2009 13:01:37 -0500 Subject: USB: prepare for changover to Runtime PM framework This patch (as1303) revises the USB Power Management infrastructure to make it 
compatible with the new driver-model Runtime PM framework: Drivers are no longer allowed to access intf->pm_usage_cnt directly; the PM framework manages its own usage counters. usb_autopm_set_interface() is eliminated, because it directly sets intf->pm_usage_cnt. usb_autopm_enable() and usb_autopm_disable() are eliminated, because they call usb_autopm_set_interface(). usb_autopm_get_interface_no_resume() and usb_autopm_put_interface_no_suspend() are added. They correspond to pm_runtime_get_noresume() and pm_runtime_put_noidle() in the PM framework. The power/level attribute no longer accepts "suspend", only "on" and "auto". The PM framework doesn't allow devices to be forced into a suspended mode. The hub driver contains the only code that violates the new guidelines. It is updated to use the new interface routines instead. Signed-off-by: Alan Stern Signed-off-by: Greg Kroah-Hartman --- Documentation/usb/power-management.txt | 60 ++++++++++++++++------------------ drivers/usb/core/driver.c | 31 ------------------ drivers/usb/core/hub.c | 45 +++++++++++++++++-------- drivers/usb/core/sysfs.c | 25 +++----------- include/linux/usb.h | 26 ++++++--------- 5 files changed, 74 insertions(+), 113 deletions(-) (limited to 'include') diff --git a/Documentation/usb/power-management.txt b/Documentation/usb/power-management.txt index 8817368203d6..c7c1dc2f8017 100644 --- a/Documentation/usb/power-management.txt +++ b/Documentation/usb/power-management.txt @@ -2,7 +2,7 @@ Alan Stern - October 5, 2007 + November 10, 2009 @@ -123,9 +123,9 @@ relevant attribute files are: wakeup, level, and autosuspend. power/level - This file contains one of three words: "on", "auto", - or "suspend". You can write those words to the file - to change the device's setting. + This file contains one of two words: "on" or "auto". + You can write those words to the file to change the + device's setting. "on" means that the device should be resumed and autosuspend is not allowed. (Of course, system @@ -134,10 +134,10 @@ relevant attribute files are: wakeup, level, and autosuspend. "auto" is the normal state in which the kernel is allowed to autosuspend and autoresume the device. - "suspend" means that the device should remain - suspended, and autoresume is not allowed. (But remote - wakeup may still be allowed, since it is controlled - separately by the power/wakeup attribute.) + (In kernels up to 2.6.32, you could also specify + "suspend", meaning that the device should remain + suspended and autoresume was not allowed. This + setting is no longer supported.) power/autosuspend @@ -313,13 +313,14 @@ three of the methods listed above. In addition, a driver indicates that it supports autosuspend by setting the .supports_autosuspend flag in its usb_driver structure. It is then responsible for informing the USB core whenever one of its interfaces becomes busy or idle. The -driver does so by calling these five functions: +driver does so by calling these six functions: int usb_autopm_get_interface(struct usb_interface *intf); void usb_autopm_put_interface(struct usb_interface *intf); - int usb_autopm_set_interface(struct usb_interface *intf); int usb_autopm_get_interface_async(struct usb_interface *intf); void usb_autopm_put_interface_async(struct usb_interface *intf); + void usb_autopm_get_interface_no_resume(struct usb_interface *intf); + void usb_autopm_put_interface_no_suspend(struct usb_interface *intf); The functions work by maintaining a counter in the usb_interface structure. 
When intf->pm_usage_count is > 0 then the interface is @@ -331,11 +332,13 @@ considered to be idle, and the kernel may autosuspend the device. associated with the device itself rather than any of its interfaces. This field is used only by the USB core.) -The driver owns intf->pm_usage_count; it can modify the value however -and whenever it likes. A nice aspect of the non-async usb_autopm_* -routines is that the changes they make are protected by the usb_device -structure's PM mutex (udev->pm_mutex); however drivers may change -pm_usage_count without holding the mutex. Drivers using the async +Drivers must not modify intf->pm_usage_count directly; its value +should be changed only be using the functions listed above. Drivers +are responsible for insuring that the overall change to pm_usage_count +during their lifetime balances out to 0 (it may be necessary for the +disconnect method to call usb_autopm_put_interface() one or more times +to fulfill this requirement). The first two routines use the PM mutex +in struct usb_device for mutual exclusion; drivers using the async routines are responsible for their own synchronization and mutual exclusion. @@ -347,11 +350,6 @@ exclusion. attempts an autosuspend if the new value is <= 0 and the device isn't suspended. - usb_autopm_set_interface() leaves pm_usage_count alone. - It attempts an autoresume if the value is > 0 and the device - is suspended, and it attempts an autosuspend if the value is - <= 0 and the device isn't suspended. - usb_autopm_get_interface_async() and usb_autopm_put_interface_async() do almost the same things as their non-async counterparts. The differences are: they do @@ -360,13 +358,11 @@ exclusion. such as an URB's completion handler, but when they return the device will not generally not yet be in the desired state. -There also are a couple of utility routines drivers can use: - - usb_autopm_enable() sets pm_usage_cnt to 0 and then calls - usb_autopm_set_interface(), which will attempt an autosuspend. - - usb_autopm_disable() sets pm_usage_cnt to 1 and then calls - usb_autopm_set_interface(), which will attempt an autoresume. + usb_autopm_get_interface_no_resume() and + usb_autopm_put_interface_no_suspend() merely increment or + decrement the pm_usage_count value; they do not attempt to + carry out an autoresume or an autosuspend. Hence they can be + called in an atomic context. The conventional usage pattern is that a driver calls usb_autopm_get_interface() in its open routine and @@ -400,11 +396,11 @@ though, setting this flag won't cause the kernel to autoresume it. Normally a driver would set this flag in its probe method, at which time the device is guaranteed not to be autosuspended.) -The usb_autopm_* routines have to run in a sleepable process context; -they must not be called from an interrupt handler or while holding a -spinlock. In fact, the entire autosuspend mechanism is not well geared -toward interrupt-driven operation. However there is one thing a -driver can do in an interrupt handler: +The synchronous usb_autopm_* routines have to run in a sleepable +process context; they must not be called from an interrupt handler or +while holding a spinlock. In fact, the entire autosuspend mechanism +is not well geared toward interrupt-driven operation. 
However there +is one thing a driver can do in an interrupt handler: usb_mark_last_busy(struct usb_device *udev); diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index 8016a296010e..7a05bab73960 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c @@ -948,8 +948,6 @@ static int usb_resume_device(struct usb_device *udev, pm_message_t msg) done: dev_vdbg(&udev->dev, "%s: status %d\n", __func__, status); - if (status == 0) - udev->autoresume_disabled = 0; return status; } @@ -1280,11 +1278,6 @@ static int usb_resume_both(struct usb_device *udev, pm_message_t msg) /* Propagate the resume up the tree, if necessary */ if (udev->state == USB_STATE_SUSPENDED) { - if ((msg.event & PM_EVENT_AUTO) && - udev->autoresume_disabled) { - status = -EPERM; - goto done; - } if (parent) { status = usb_autoresume_device(parent); if (status == 0) { @@ -1638,8 +1631,6 @@ int usb_autopm_get_interface_async(struct usb_interface *intf) if (intf->condition == USB_INTERFACE_UNBOUND) status = -ENODEV; - else if (udev->autoresume_disabled) - status = -EPERM; else { atomic_inc(&intf->pm_usage_cnt); if (atomic_read(&intf->pm_usage_cnt) > 0 && @@ -1652,28 +1643,6 @@ int usb_autopm_get_interface_async(struct usb_interface *intf) } EXPORT_SYMBOL_GPL(usb_autopm_get_interface_async); -/** - * usb_autopm_set_interface - set a USB interface's autosuspend state - * @intf: the usb_interface whose state should be set - * - * This routine sets the autosuspend state of @intf's device according - * to @intf's usage counter, which the caller must have set previously. - * If the counter is <= 0, the device is autosuspended (if it isn't - * already suspended and if nothing else prevents the autosuspend). If - * the counter is > 0, the device is autoresumed (if it isn't already - * awake). 
- */ -int usb_autopm_set_interface(struct usb_interface *intf) -{ - int status; - - status = usb_autopm_do_interface(intf, 0); - dev_vdbg(&intf->dev, "%s: status %d cnt %d\n", - __func__, status, atomic_read(&intf->pm_usage_cnt)); - return status; -} -EXPORT_SYMBOL_GPL(usb_autopm_set_interface); - #else void usb_autosuspend_work(struct work_struct *work) diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 5413d712cae0..b38fd6730e2a 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -71,6 +71,7 @@ struct usb_hub { unsigned mA_per_port; /* current for each child */ + unsigned init_done:1; unsigned limited_power:1; unsigned quiescing:1; unsigned disconnected:1; @@ -375,12 +376,13 @@ static void kick_khubd(struct usb_hub *hub) { unsigned long flags; - /* Suppress autosuspend until khubd runs */ - atomic_set(&to_usb_interface(hub->intfdev)->pm_usage_cnt, 1); - spin_lock_irqsave(&hub_event_lock, flags); if (!hub->disconnected && list_empty(&hub->event_list)) { list_add_tail(&hub->event_list, &hub_event_list); + + /* Suppress autosuspend until khubd runs */ + usb_autopm_get_interface_no_resume( + to_usb_interface(hub->intfdev)); wake_up(&khubd_wait); } spin_unlock_irqrestore(&hub_event_lock, flags); @@ -665,7 +667,7 @@ int usb_remove_device(struct usb_device *udev) } enum hub_activation_type { - HUB_INIT, HUB_INIT2, HUB_INIT3, + HUB_INIT, HUB_INIT2, HUB_INIT3, /* INITs must come first */ HUB_POST_RESET, HUB_RESUME, HUB_RESET_RESUME, }; @@ -710,8 +712,8 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) msecs_to_jiffies(delay)); /* Suppress autosuspend until init is done */ - atomic_set(&to_usb_interface(hub->intfdev)-> - pm_usage_cnt, 1); + usb_autopm_get_interface_no_resume( + to_usb_interface(hub->intfdev)); return; /* Continues at init2: below */ } else { hub_power_on(hub, true); @@ -818,6 +820,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) } init3: hub->quiescing = 0; + hub->init_done = 1; status = usb_submit_urb(hub->urb, GFP_NOIO); if (status < 0) @@ -827,6 +830,10 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) /* Scan all ports that need attention */ kick_khubd(hub); + + /* Allow autosuspend if it was suppressed */ + if (type <= HUB_INIT3) + usb_autopm_put_interface_async(to_usb_interface(hub->intfdev)); } /* Implement the continuations for the delays above */ @@ -854,6 +861,11 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type) int i; cancel_delayed_work_sync(&hub->init_work); + if (!hub->init_done) { + hub->init_done = 1; + usb_autopm_put_interface_no_suspend( + to_usb_interface(hub->intfdev)); + } /* khubd and related activity won't re-trigger */ hub->quiescing = 1; @@ -1176,7 +1188,10 @@ static void hub_disconnect(struct usb_interface *intf) /* Take the hub off the event list and don't let it be added again */ spin_lock_irq(&hub_event_lock); - list_del_init(&hub->event_list); + if (!list_empty(&hub->event_list)) { + list_del_init(&hub->event_list); + usb_autopm_put_interface_no_suspend(intf); + } hub->disconnected = 1; spin_unlock_irq(&hub_event_lock); @@ -3235,7 +3250,7 @@ static void hub_events(void) * disconnected while waiting for the lock to succeed. 
*/ usb_lock_device(hdev); if (unlikely(hub->disconnected)) - goto loop; + goto loop2; /* If the hub has died, clean up after it */ if (hdev->state == USB_STATE_NOTATTACHED) { @@ -3384,11 +3399,15 @@ static void hub_events(void) } } -loop_autopm: - /* Allow autosuspend if we're not going to run again */ - if (list_empty(&hub->event_list)) - usb_autopm_enable(intf); -loop: + loop_autopm: + /* Balance the usb_autopm_get_interface() above */ + usb_autopm_put_interface_no_suspend(intf); + loop: + /* Balance the usb_autopm_get_interface_no_resume() in + * kick_khubd() and allow autosuspend. + */ + usb_autopm_put_interface(intf); + loop2: usb_unlock_device(hdev); kref_put(&hub->kref, hub_release); diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c index ae763974be25..15477008b631 100644 --- a/drivers/usb/core/sysfs.c +++ b/drivers/usb/core/sysfs.c @@ -327,7 +327,6 @@ static DEVICE_ATTR(autosuspend, S_IRUGO | S_IWUSR, static const char on_string[] = "on"; static const char auto_string[] = "auto"; -static const char suspend_string[] = "suspend"; static ssize_t show_level(struct device *dev, struct device_attribute *attr, char *buf) @@ -335,13 +334,8 @@ show_level(struct device *dev, struct device_attribute *attr, char *buf) struct usb_device *udev = to_usb_device(dev); const char *p = auto_string; - if (udev->state == USB_STATE_SUSPENDED) { - if (udev->autoresume_disabled) - p = suspend_string; - } else { - if (udev->autosuspend_disabled) - p = on_string; - } + if (udev->state != USB_STATE_SUSPENDED && udev->autosuspend_disabled) + p = on_string; return sprintf(buf, "%s\n", p); } @@ -353,7 +347,7 @@ set_level(struct device *dev, struct device_attribute *attr, int len = count; char *cp; int rc = 0; - int old_autosuspend_disabled, old_autoresume_disabled; + int old_autosuspend_disabled; cp = memchr(buf, '\n', count); if (cp) @@ -361,7 +355,6 @@ set_level(struct device *dev, struct device_attribute *attr, usb_lock_device(udev); old_autosuspend_disabled = udev->autosuspend_disabled; - old_autoresume_disabled = udev->autoresume_disabled; /* Setting the flags without calling usb_pm_lock is a subject to * races, but who cares... @@ -369,28 +362,18 @@ set_level(struct device *dev, struct device_attribute *attr, if (len == sizeof on_string - 1 && strncmp(buf, on_string, len) == 0) { udev->autosuspend_disabled = 1; - udev->autoresume_disabled = 0; rc = usb_external_resume_device(udev, PMSG_USER_RESUME); } else if (len == sizeof auto_string - 1 && strncmp(buf, auto_string, len) == 0) { udev->autosuspend_disabled = 0; - udev->autoresume_disabled = 0; rc = usb_external_resume_device(udev, PMSG_USER_RESUME); - } else if (len == sizeof suspend_string - 1 && - strncmp(buf, suspend_string, len) == 0) { - udev->autosuspend_disabled = 0; - udev->autoresume_disabled = 1; - rc = usb_external_suspend_device(udev, PMSG_USER_SUSPEND); - } else rc = -EINVAL; - if (rc) { + if (rc) udev->autosuspend_disabled = old_autosuspend_disabled; - udev->autoresume_disabled = old_autoresume_disabled; - } usb_unlock_device(udev); return (rc < 0 ? 
rc : count); } diff --git a/include/linux/usb.h b/include/linux/usb.h index 4b6f6db544ee..6af3581e1114 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -432,7 +432,6 @@ struct usb_tt; * @do_remote_wakeup: remote wakeup should be enabled * @reset_resume: needs reset instead of resume * @autosuspend_disabled: autosuspend disabled by the user - * @autoresume_disabled: autoresume disabled by the user * @skip_sys_resume: skip the next system resume * @wusb_dev: if this is a Wireless USB device, link to the WUSB * specific data for the device. @@ -516,7 +515,6 @@ struct usb_device { unsigned do_remote_wakeup:1; unsigned reset_resume:1; unsigned autosuspend_disabled:1; - unsigned autoresume_disabled:1; unsigned skip_sys_resume:1; #endif struct wusb_dev *wusb_dev; @@ -542,22 +540,20 @@ extern struct usb_device *usb_find_device(u16 vendor_id, u16 product_id); /* USB autosuspend and autoresume */ #ifdef CONFIG_USB_SUSPEND -extern int usb_autopm_set_interface(struct usb_interface *intf); extern int usb_autopm_get_interface(struct usb_interface *intf); extern void usb_autopm_put_interface(struct usb_interface *intf); extern int usb_autopm_get_interface_async(struct usb_interface *intf); extern void usb_autopm_put_interface_async(struct usb_interface *intf); -static inline void usb_autopm_enable(struct usb_interface *intf) +static inline void usb_autopm_get_interface_no_resume( + struct usb_interface *intf) { - atomic_set(&intf->pm_usage_cnt, 0); - usb_autopm_set_interface(intf); + atomic_inc(&intf->pm_usage_cnt); } - -static inline void usb_autopm_disable(struct usb_interface *intf) +static inline void usb_autopm_put_interface_no_suspend( + struct usb_interface *intf) { - atomic_set(&intf->pm_usage_cnt, 1); - usb_autopm_set_interface(intf); + atomic_dec(&intf->pm_usage_cnt); } static inline void usb_mark_last_busy(struct usb_device *udev) @@ -567,12 +563,8 @@ static inline void usb_mark_last_busy(struct usb_device *udev) #else -static inline int usb_autopm_set_interface(struct usb_interface *intf) -{ return 0; } - static inline int usb_autopm_get_interface(struct usb_interface *intf) { return 0; } - static inline int usb_autopm_get_interface_async(struct usb_interface *intf) { return 0; } @@ -580,9 +572,11 @@ static inline void usb_autopm_put_interface(struct usb_interface *intf) { } static inline void usb_autopm_put_interface_async(struct usb_interface *intf) { } -static inline void usb_autopm_enable(struct usb_interface *intf) +static inline void usb_autopm_get_interface_no_resume( + struct usb_interface *intf) { } -static inline void usb_autopm_disable(struct usb_interface *intf) +static inline void usb_autopm_put_interface_no_suspend( + struct usb_interface *intf) { } static inline void usb_mark_last_busy(struct usb_device *udev) { } -- cgit v1.2.3 From a0bb108112a872c0b0c4b3ef4974f95fb75b155d Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Mon, 7 Dec 2009 16:39:16 -0500 Subject: USB: usb-storage: add BAD_SENSE flag This patch (as1311) fixes a problem in usb-storage: Some devices are pretty broken when it comes to reporting sense data. The information they send back indicates that they have more than 18 bytes of sense data available, but when the system asks for more than 18 they fail or hang. The symptom is that probing fails with multiple resets. The patch adds a new BAD_SENSE flag to indicate that usb-storage should never ask for more than 18 bytes of sense data. 
The flag can be set in an unusual_devs entry or via the "quirks=" module parameter, and it is set automatically whenever a REQUEST SENSE command for more than 18 bytes fails or times out. An unusual_devs entry is added for the Agfa photo frame, which uses a Prolific chip having this bug. Signed-off-by: Alan Stern Tested-by: Daniel Kukula Cc: stable Signed-off-by: Greg Kroah-Hartman --- Documentation/kernel-parameters.txt | 2 ++ drivers/usb/storage/transport.c | 17 +++++++++++++---- drivers/usb/storage/unusual_devs.h | 7 +++++++ drivers/usb/storage/usb.c | 3 +++ include/linux/usb_usual.h | 4 +++- 5 files changed, 28 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 777dc8a32df8..3f886e298f62 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -2663,6 +2663,8 @@ and is between 256 and 4096 characters. It is defined in the file to a common usb-storage quirk flag as follows: a = SANE_SENSE (collect more than 18 bytes of sense data); + b = BAD_SENSE (don't collect more than 18 + bytes of sense data); c = FIX_CAPACITY (decrease the reported device capacity by one sector); h = CAPACITY_HEURISTICS (decrease the diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c index 589f6b4404f0..cc313d16d727 100644 --- a/drivers/usb/storage/transport.c +++ b/drivers/usb/storage/transport.c @@ -666,10 +666,11 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us) * to wait for at least one CHECK_CONDITION to determine * SANE_SENSE support */ - if ((srb->cmnd[0] == ATA_16 || srb->cmnd[0] == ATA_12) && + if (unlikely((srb->cmnd[0] == ATA_16 || srb->cmnd[0] == ATA_12) && result == USB_STOR_TRANSPORT_GOOD && !(us->fflags & US_FL_SANE_SENSE) && - !(srb->cmnd[2] & 0x20)) { + !(us->fflags & US_FL_BAD_SENSE) && + !(srb->cmnd[2] & 0x20))) { US_DEBUGP("-- SAT supported, increasing auto-sense\n"); us->fflags |= US_FL_SANE_SENSE; } @@ -718,6 +719,12 @@ Retry_Sense: if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) { US_DEBUGP("-- auto-sense aborted\n"); srb->result = DID_ABORT << 16; + + /* If SANE_SENSE caused this problem, disable it */ + if (sense_size != US_SENSE_SIZE) { + us->fflags &= ~US_FL_SANE_SENSE; + us->fflags |= US_FL_BAD_SENSE; + } goto Handle_Errors; } @@ -727,10 +734,11 @@ Retry_Sense: * (small) sense request. 
This fixes some USB GSM modems */ if (temp_result == USB_STOR_TRANSPORT_FAILED && - (us->fflags & US_FL_SANE_SENSE) && - sense_size != US_SENSE_SIZE) { + sense_size != US_SENSE_SIZE) { US_DEBUGP("-- auto-sense failure, retry small sense\n"); sense_size = US_SENSE_SIZE; + us->fflags &= ~US_FL_SANE_SENSE; + us->fflags |= US_FL_BAD_SENSE; goto Retry_Sense; } @@ -754,6 +762,7 @@ Retry_Sense: */ if (srb->sense_buffer[7] > (US_SENSE_SIZE - 8) && !(us->fflags & US_FL_SANE_SENSE) && + !(us->fflags & US_FL_BAD_SENSE) && (srb->sense_buffer[0] & 0x7C) == 0x70) { US_DEBUGP("-- SANE_SENSE support enabled\n"); us->fflags |= US_FL_SANE_SENSE; diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index d4f034ebaa8a..64a0a2c27e12 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -818,6 +818,13 @@ UNUSUAL_DEV( 0x066f, 0x8000, 0x0001, 0x0001, US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_FIX_CAPACITY ), +/* Reported by Daniel Kukula */ +UNUSUAL_DEV( 0x067b, 0x1063, 0x0100, 0x0100, + "Prolific Technology, Inc.", + "Prolific Storage Gadget", + US_SC_DEVICE, US_PR_DEVICE, NULL, + US_FL_BAD_SENSE ), + /* Reported by Rogerio Brito */ UNUSUAL_DEV( 0x067b, 0x2317, 0x0001, 0x001, "Prolific Technology, Inc.", diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index 1599d86154c4..f5c0264caa33 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c @@ -463,6 +463,9 @@ static void adjust_quirks(struct us_data *us) case 'a': f |= US_FL_SANE_SENSE; break; + case 'b': + f |= US_FL_BAD_SENSE; + break; case 'c': f |= US_FL_FIX_CAPACITY; break; diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h index 3d15fb9bc116..a4b947e470a5 100644 --- a/include/linux/usb_usual.h +++ b/include/linux/usb_usual.h @@ -56,7 +56,9 @@ US_FLAG(SANE_SENSE, 0x00008000) \ /* Sane Sense (> 18 bytes) */ \ US_FLAG(CAPACITY_OK, 0x00010000) \ - /* READ CAPACITY response is correct */ + /* READ CAPACITY response is correct */ \ + US_FLAG(BAD_SENSE, 0x00020000) \ + /* Bad Sense (never more than 18 bytes) */ #define US_FLAG(name, value) US_FL_##name = value , enum { US_DO_ALL_FLAGS }; -- cgit v1.2.3 From 91017f9cf5fcfb601b8d583c896ac7de7d200c57 Mon Sep 17 00:00:00 2001 From: Sarah Sharp Date: Thu, 3 Dec 2009 09:44:34 -0800 Subject: USB: Refactor code to find alternate interface settings. Refactor out the code to find alternate interface settings into usb_find_alt_setting(). Print a debugging message and return null if the alt setting is not found. While we're at it, correct a bug in the refactored code. The interfaces in the configuration's interface cache are not necessarily in numerical order, so we can't just use the interface number as an array index. Loop through the interface caches, looking for the correct interface. 
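As an illustration of the new helper (not part of the patch), a caller could look up alternate setting 0 of an interface and walk its endpoint descriptors as in the sketch below. The example_* function is made up; usb_find_alt_setting(), struct usb_host_interface and usb_endpoint_xfer_bulk() are the real interfaces involved.

#include <linux/errno.h>
#include <linux/usb.h>

/* Illustrative sketch: count the bulk endpoints in alternate setting 0
 * of a given interface, using usb_find_alt_setting().
 */
static int example_count_bulk_eps(struct usb_host_config *config,
				  unsigned int iface_num)
{
	struct usb_host_interface *alt;
	int i, count = 0;

	alt = usb_find_alt_setting(config, iface_num, 0);
	if (!alt)
		return -ENOENT;	/* helper already printed a debug message */

	for (i = 0; i < alt->desc.bNumEndpoints; i++)
		if (usb_endpoint_xfer_bulk(&alt->endpoint[i].desc))
			count++;

	return count;
}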
Signed-off-by: Sarah Sharp Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/hcd.c | 12 ++---------- drivers/usb/core/usb.c | 37 +++++++++++++++++++++++++++++++++++++ include/linux/usb.h | 4 ++++ 3 files changed, 43 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index daac0427bfd5..fc235b02ff27 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1606,7 +1606,6 @@ int usb_hcd_check_bandwidth(struct usb_device *udev, struct usb_interface *new_intf) { int num_intfs, i, j; - struct usb_interface_cache *intf_cache; struct usb_host_interface *alt = NULL; int ret = 0; struct usb_hcd *hcd; @@ -1654,15 +1653,8 @@ int usb_hcd_check_bandwidth(struct usb_device *udev, } } for (i = 0; i < num_intfs; ++i) { - - /* Dig the endpoints for alt setting 0 out of the - * interface cache for this interface - */ - intf_cache = new_config->intf_cache[i]; - for (j = 0; j < intf_cache->num_altsetting; j++) { - if (intf_cache->altsetting[j].desc.bAlternateSetting == 0) - alt = &intf_cache->altsetting[j]; - } + /* Set up endpoints for alternate interface setting 0 */ + alt = usb_find_alt_setting(new_config, i, 0); if (!alt) { printk(KERN_DEBUG "Did not find alt setting 0 for intf %d\n", i); continue; diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index d1e9440799de..99e54586a545 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c @@ -63,6 +63,43 @@ MODULE_PARM_DESC(autosuspend, "default autosuspend delay"); #endif +/** + * usb_find_alt_setting() - Given a configuration, find the alternate setting + * for the given interface. + * @config - the configuration to search (not necessarily the current config). + * @iface_num - interface number to search in + * @alt_num - alternate interface setting number to search for. + * + * Search the configuration's interface cache for the given alt setting. 
+ */ +struct usb_host_interface *usb_find_alt_setting( + struct usb_host_config *config, + unsigned int iface_num, + unsigned int alt_num) +{ + struct usb_interface_cache *intf_cache = NULL; + int i; + + for (i = 0; i < config->desc.bNumInterfaces; i++) { + if (config->intf_cache[i]->altsetting[0].desc.bInterfaceNumber + == iface_num) { + intf_cache = config->intf_cache[i]; + break; + } + } + if (!intf_cache) + return NULL; + for (i = 0; i < intf_cache->num_altsetting; i++) + if (intf_cache->altsetting[i].desc.bAlternateSetting == alt_num) + return &intf_cache->altsetting[i]; + + printk(KERN_DEBUG "Did not find alt setting %u for intf %u, " + "config %u\n", alt_num, iface_num, + config->desc.bConfigurationValue); + return NULL; +} +EXPORT_SYMBOL_GPL(usb_find_alt_setting); + /** * usb_ifnum_to_if - get the interface object with a given interface number * @dev: the device whose current configuration is considered diff --git a/include/linux/usb.h b/include/linux/usb.h index 6af3581e1114..e101a2d04d75 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -619,6 +619,10 @@ extern struct usb_interface *usb_ifnum_to_if(const struct usb_device *dev, unsigned ifnum); extern struct usb_host_interface *usb_altnum_to_altsetting( const struct usb_interface *intf, unsigned int altnum); +extern struct usb_host_interface *usb_find_alt_setting( + struct usb_host_config *config, + unsigned int iface_num, + unsigned int alt_num); /** -- cgit v1.2.3 From 60c2ffd3d2cf12008747d920ae118df119006003 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 9 Dec 2009 20:58:16 +0000 Subject: net: fix compat_sys_recvmmsg parameter type compat_sys_recvmmsg has a compat_timespec parameter and not a timespec parameter. This way we also get rid of an odd cast. Cc: Arnaldo Carvalho de Melo Signed-off-by: Heiko Carstens Acked-by: Arnaldo Carvalho de Melo Signed-off-by: David S. 
Miller --- include/net/compat.h | 2 +- net/compat.c | 12 +++++------- 2 files changed, 6 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/net/compat.h b/include/net/compat.h index 3c7d4e38fa1d..28d5428ec6a2 100644 --- a/include/net/compat.h +++ b/include/net/compat.h @@ -46,7 +46,7 @@ extern asmlinkage long compat_sys_sendmsg(int,struct compat_msghdr __user *,unsi extern asmlinkage long compat_sys_recvmsg(int,struct compat_msghdr __user *,unsigned); extern asmlinkage long compat_sys_recvmmsg(int, struct compat_mmsghdr __user *, unsigned, unsigned, - struct timespec __user *); + struct compat_timespec __user *); extern asmlinkage long compat_sys_getsockopt(int, int, int, char __user *, int __user *); extern int put_cmsg_compat(struct msghdr*, int, int, int, void *); diff --git a/net/compat.c b/net/compat.c index e1a56ade803b..c4d9131a5872 100644 --- a/net/compat.c +++ b/net/compat.c @@ -754,26 +754,24 @@ asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, size_t len, asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg, unsigned vlen, unsigned int flags, - struct timespec __user *timeout) + struct compat_timespec __user *timeout) { int datagrams; struct timespec ktspec; - struct compat_timespec __user *utspec; if (timeout == NULL) return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, flags | MSG_CMSG_COMPAT, NULL); - utspec = (struct compat_timespec __user *)timeout; - if (get_user(ktspec.tv_sec, &utspec->tv_sec) || - get_user(ktspec.tv_nsec, &utspec->tv_nsec)) + if (get_user(ktspec.tv_sec, &timeout->tv_sec) || + get_user(ktspec.tv_nsec, &timeout->tv_nsec)) return -EFAULT; datagrams = __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, flags | MSG_CMSG_COMPAT, &ktspec); if (datagrams > 0 && - (put_user(ktspec.tv_sec, &utspec->tv_sec) || - put_user(ktspec.tv_nsec, &utspec->tv_nsec))) + (put_user(ktspec.tv_sec, &timeout->tv_sec) || + put_user(ktspec.tv_nsec, &timeout->tv_nsec))) datagrams = -EFAULT; return datagrams; -- cgit v1.2.3 From f53a2ade0bb9f2a81f473e6469155172a96b7c38 Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Fri, 9 Oct 2009 12:56:41 +0100 Subject: tty: esp: remove broken driver The ESP driver has been marked broken for years. It's an old ISA device that clearly nobody cares about any more. Remove it Signed-off-by: Alan Cox Signed-off-by: Greg Kroah-Hartman --- Documentation/serial/hayes-esp.txt | 154 --- drivers/char/Kconfig | 13 - drivers/char/Makefile | 1 - drivers/char/esp.c | 2533 ------------------------------------ include/linux/Kbuild | 1 - include/linux/hayesesp.h | 114 -- 6 files changed, 2816 deletions(-) delete mode 100644 Documentation/serial/hayes-esp.txt delete mode 100644 drivers/char/esp.c delete mode 100644 include/linux/hayesesp.h (limited to 'include') diff --git a/Documentation/serial/hayes-esp.txt b/Documentation/serial/hayes-esp.txt deleted file mode 100644 index 09b5d5856758..000000000000 --- a/Documentation/serial/hayes-esp.txt +++ /dev/null @@ -1,154 +0,0 @@ -HAYES ESP DRIVER VERSION 2.1 - -A big thanks to the people at Hayes, especially Alan Adamson. Their support -has enabled me to provide enhancements to the driver. - -Please report your experiences with this driver to me (arobinso@nyx.net). I -am looking for both positive and negative feedback. - -*** IMPORTANT CHANGES FOR 2.1 *** -Support for PIO mode. Five situations will cause PIO mode to be used: -1) A multiport card is detected. PIO mode will always be used. (8 port cards -do not support DMA). 
-2) The DMA channel is set to an invalid value (anything other than 1 or 3). -3) The DMA buffer/channel could not be allocated. The port will revert to PIO -mode until it is reopened. -4) Less than a specified number of bytes need to be transferred to/from the -FIFOs. PIO mode will be used for that transfer only. -5) A port needs to do a DMA transfer and another port is already using the -DMA channel. PIO mode will be used for that transfer only. - -Since the Hayes ESP seems to conflict with other cards (notably sound cards) -when using DMA, DMA is turned off by default. To use DMA, it must be turned -on explicitly, either with the "dma=" option described below or with -setserial. A multiport card can be forced into DMA mode by using setserial; -however, most multiport cards don't support DMA. - -The latest version of setserial allows the enhanced configuration of the ESP -card to be viewed and modified. -*** - -This package contains the files needed to compile a module to support the Hayes -ESP card. The drivers are basically a modified version of the serial drivers. - -Features: - -- Uses the enhanced mode of the ESP card, allowing a wider range of - interrupts and features than compatibility mode -- Uses DMA and 16 bit PIO mode to transfer data to and from the ESP's FIFOs, - reducing CPU load -- Supports primary and secondary ports - - -If the driver is compiled as a module, the IRQs to use can be specified by -using the irq= option. The format is: - -irq=[0x100],[0x140],[0x180],[0x200],[0x240],[0x280],[0x300],[0x380] - -The address in brackets is the base address of the card. The IRQ of -nonexistent cards can be set to 0. If an IRQ of a card that does exist is set -to 0, the driver will attempt to guess at the correct IRQ. For example, to set -the IRQ of the card at address 0x300 to 12, the insmod command would be: - -insmod esp irq=0,0,0,0,0,0,12,0 - -The custom divisor can be set by using the divisor= option. The format is the -same as for the irq= option. Each divisor value is a series of hex digits, -with each digit representing the divisor to use for a corresponding port. The -divisor value is constructed RIGHT TO LEFT. Specifying a nonzero divisor value -will automatically set the spd_cust flag. To calculate the divisor to use for -a certain baud rate, divide the port's base baud (generally 921600) by the -desired rate. For example, to set the divisor of the primary port at 0x300 to -4 and the divisor of the secondary port at 0x308 to 8, the insmod command would -be: - -insmod esp divisor=0,0,0,0,0,0,0x84,0 - -The dma= option can be used to set the DMA channel. The channel can be either -1 or 3. Specifying any other value will force the driver to use PIO mode. -For example, to set the DMA channel to 3, the insmod command would be: - -insmod esp dma=3 - -The rx_trigger= and tx_trigger= options can be used to set the FIFO trigger -levels. They specify when the ESP card should send an interrupt. Larger -values will decrease the number of interrupts; however, a value too high may -result in data loss. Valid values are 1 through 1023, with 768 being the -default. For example, to set the receive trigger level to 512 bytes and the -transmit trigger level to 700 bytes, the insmod command would be: - -insmod esp rx_trigger=512 tx_trigger=700 - -The flow_off= and flow_on= options can be used to set the hardware flow off/ -flow on levels. The flow on level must be lower than the flow off level, and -the flow off level should be higher than rx_trigger. 
Valid values are 1 -through 1023, with 1016 being the default flow off level and 944 being the -default flow on level. For example, to set the flow off level to 1000 bytes -and the flow on level to 935 bytes, the insmod command would be: - -insmod esp flow_off=1000 flow_on=935 - -The rx_timeout= option can be used to set the receive timeout value. This -value indicates how long after receiving the last character that the ESP card -should wait before signalling an interrupt. Valid values are 0 though 255, -with 128 being the default. A value too high will increase latency, and a -value too low will cause unnecessary interrupts. For example, to set the -receive timeout to 255, the insmod command would be: - -insmod esp rx_timeout=255 - -The pio_threshold= option sets the threshold (in number of characters) for -using PIO mode instead of DMA mode. For example, if this value is 32, -transfers of 32 bytes or less will always use PIO mode. - -insmod esp pio_threshold=32 - -Multiple options can be listed on the insmod command line by separating each -option with a space. For example: - -insmod esp dma=3 trigger=512 - -The esp module can be automatically loaded when needed. To cause this to -happen, add the following lines to /etc/modprobe.conf (replacing the last line -with options for your configuration): - -alias char-major-57 esp -alias char-major-58 esp -options esp irq=0,0,0,0,0,0,3,0 divisor=0,0,0,0,0,0,0x4,0 - -You may also need to run 'depmod -a'. - -Devices must be created manually. To create the devices, note the output from -the module after it is inserted. The output will appear in the location where -kernel messages usually appear (usually /var/adm/messages). Create two devices -for each 'tty' mentioned, one with major of 57 and the other with major of 58. -The minor number should be the same as the tty number reported. The commands -would be (replace ? with the tty number): - -mknod /dev/ttyP? c 57 ? -mknod /dev/cup? c 58 ? - -For example, if the following line appears: - -Oct 24 18:17:23 techno kernel: ttyP8 at 0x0140 (irq = 3) is an ESP primary port - -...two devices should be created: - -mknod /dev/ttyP8 c 57 8 -mknod /dev/cup8 c 58 8 - -You may need to set the permissions on the devices: - -chmod 666 /dev/ttyP* -chmod 666 /dev/cup* - -The ESP module and the serial module should not conflict (they can be used at -the same time). After the ESP module has been loaded the ports on the ESP card -will no longer be accessible by the serial driver. - -If I/O errors are experienced when accessing the port, check for IRQ and DMA -conflicts ('cat /proc/interrupts' and 'cat /proc/dma' for a list of IRQs and -DMAs currently in use). - -Enjoy! -Andrew J. Robinson diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 6aad99ec4e0f..6f31c9472100 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -201,19 +201,6 @@ config DIGIEPCA To compile this driver as a module, choose M here: the module will be called epca. -config ESPSERIAL - tristate "Hayes ESP serial port support" - depends on SERIAL_NONSTANDARD && ISA && ISA_DMA_API && BROKEN - help - This is a driver which supports Hayes ESP serial ports. Both single - port cards and multiport cards are supported. Make sure to read - . - - To compile this driver as a module, choose M here: the - module will be called esp. - - If unsure, say N. 
- config MOXA_INTELLIO tristate "Moxa Intellio support" depends on SERIAL_NONSTANDARD && (ISA || EISA || PCI) diff --git a/drivers/char/Makefile b/drivers/char/Makefile index 19a79dd79eee..f957edf7e45d 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -18,7 +18,6 @@ obj-$(CONFIG_CONSOLE_TRANSLATIONS) += consolemap.o consolemap_deftbl.o obj-$(CONFIG_HW_CONSOLE) += vt.o defkeymap.o obj-$(CONFIG_AUDIT) += tty_audit.o obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o -obj-$(CONFIG_ESPSERIAL) += esp.o obj-$(CONFIG_MVME147_SCC) += generic_serial.o vme_scc.o obj-$(CONFIG_MVME162_SCC) += generic_serial.o vme_scc.o obj-$(CONFIG_BVME6000_SCC) += generic_serial.o vme_scc.o diff --git a/drivers/char/esp.c b/drivers/char/esp.c deleted file mode 100644 index b19d43cd9542..000000000000 --- a/drivers/char/esp.c +++ /dev/null @@ -1,2533 +0,0 @@ -/* - * esp.c - driver for Hayes ESP serial cards - * - * --- Notices from serial.c, upon which this driver is based --- - * - * Copyright (C) 1991, 1992 Linus Torvalds - * - * Extensively rewritten by Theodore Ts'o, 8/16/92 -- 9/14/92. Now - * much more extensible to support other serial cards based on the - * 16450/16550A UART's. Added support for the AST FourPort and the - * Accent Async board. - * - * set_serial_info fixed to set the flags, custom divisor, and uart - * type fields. Fix suggested by Michael K. Johnson 12/12/92. - * - * 11/95: TIOCMIWAIT, TIOCGICOUNT by Angelo Haritsis - * - * 03/96: Modularised by Angelo Haritsis - * - * rs_set_termios fixed to look also for changes of the input - * flags INPCK, BRKINT, PARMRK, IGNPAR and IGNBRK. - * Bernd Anhäupl 05/17/96. - * - * --- End of notices from serial.c --- - * - * Support for the ESP serial card by Andrew J. Robinson - * (Card detection routine taken from a patch - * by Dennis J. Boylan). Patches to allow use with 2.1.x contributed - * by Chris Faylor. - * - * Most recent changes: (Andrew J. Robinson) - * Support for PIO mode. This allows the driver to work properly with - * multiport cards. 
- * - * Arnaldo Carvalho de Melo - - * several cleanups, use module_init/module_exit, etc - * - * This module exports the following rs232 io functions: - * - * int espserial_init(void); - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include -#include -#include - -#include - -#define NR_PORTS 64 /* maximum number of ports */ -#define NR_PRIMARY 8 /* maximum number of primary ports */ -#define REGION_SIZE 8 /* size of io region to request */ - -/* The following variables can be set by giving module options */ -static int irq[NR_PRIMARY]; /* IRQ for each base port */ -static unsigned int divisor[NR_PRIMARY]; /* custom divisor for each port */ -static unsigned int dma = ESP_DMA_CHANNEL; /* DMA channel */ -static unsigned int rx_trigger = ESP_RX_TRIGGER; -static unsigned int tx_trigger = ESP_TX_TRIGGER; -static unsigned int flow_off = ESP_FLOW_OFF; -static unsigned int flow_on = ESP_FLOW_ON; -static unsigned int rx_timeout = ESP_RX_TMOUT; -static unsigned int pio_threshold = ESP_PIO_THRESHOLD; - -MODULE_LICENSE("GPL"); - -module_param_array(irq, int, NULL, 0); -module_param_array(divisor, uint, NULL, 0); -module_param(dma, uint, 0); -module_param(rx_trigger, uint, 0); -module_param(tx_trigger, uint, 0); -module_param(flow_off, uint, 0); -module_param(flow_on, uint, 0); -module_param(rx_timeout, uint, 0); -module_param(pio_threshold, uint, 0); - -/* END */ - -static char *dma_buffer; -static int dma_bytes; -static struct esp_pio_buffer *free_pio_buf; - -#define DMA_BUFFER_SZ 1024 - -#define WAKEUP_CHARS 1024 - -static char serial_name[] __initdata = "ESP serial driver"; -static char serial_version[] __initdata = "2.2"; - -static struct tty_driver *esp_driver; - -/* - * Serial driver configuration section. Here are the various options: - * - * SERIAL_PARANOIA_CHECK - * Check the magic number for the esp_structure where - * ever possible. - */ - -#undef SERIAL_PARANOIA_CHECK -#define SERIAL_DO_RESTART - -#undef SERIAL_DEBUG_INTR -#undef SERIAL_DEBUG_OPEN -#undef SERIAL_DEBUG_FLOW - -#if defined(MODULE) && defined(SERIAL_DEBUG_MCOUNT) -#define DBG_CNT(s) printk(KERN_DEBUG "(%s): [%x] refc=%d, serc=%d, ttyc=%d -> %s\n", \ - tty->name, info->port.flags, \ - serial_driver.refcount, \ - info->port.count, tty->count, s) -#else -#define DBG_CNT(s) -#endif - -static struct esp_struct *ports; - -static void change_speed(struct esp_struct *info); -static void rs_wait_until_sent(struct tty_struct *, int); - -/* - * The ESP card has a clock rate of 14.7456 MHz (that is, 2**ESPC_SCALE - * times the normal 1.8432 Mhz clock of most serial boards). 
- */ -#define BASE_BAUD ((1843200 / 16) * (1 << ESPC_SCALE)) - -/* Standard COM flags (except for COM4, because of the 8514 problem) */ -#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST) - -static inline int serial_paranoia_check(struct esp_struct *info, - char *name, const char *routine) -{ -#ifdef SERIAL_PARANOIA_CHECK - static const char badmagic[] = KERN_WARNING - "Warning: bad magic number for serial struct (%s) in %s\n"; - static const char badinfo[] = KERN_WARNING - "Warning: null esp_struct for (%s) in %s\n"; - - if (!info) { - printk(badinfo, name, routine); - return 1; - } - if (info->magic != ESP_MAGIC) { - printk(badmagic, name, routine); - return 1; - } -#endif - return 0; -} - -static inline unsigned int serial_in(struct esp_struct *info, int offset) -{ - return inb(info->io_port + offset); -} - -static inline void serial_out(struct esp_struct *info, int offset, - unsigned char value) -{ - outb(value, info->io_port+offset); -} - -/* - * ------------------------------------------------------------ - * rs_stop() and rs_start() - * - * This routines are called before setting or resetting tty->stopped. - * They enable or disable transmitter interrupts, as necessary. - * ------------------------------------------------------------ - */ -static void rs_stop(struct tty_struct *tty) -{ - struct esp_struct *info = tty->driver_data; - unsigned long flags; - - if (serial_paranoia_check(info, tty->name, "rs_stop")) - return; - - spin_lock_irqsave(&info->lock, flags); - if (info->IER & UART_IER_THRI) { - info->IER &= ~UART_IER_THRI; - serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK); - serial_out(info, UART_ESI_CMD2, info->IER); - } - spin_unlock_irqrestore(&info->lock, flags); -} - -static void rs_start(struct tty_struct *tty) -{ - struct esp_struct *info = tty->driver_data; - unsigned long flags; - - if (serial_paranoia_check(info, tty->name, "rs_start")) - return; - - spin_lock_irqsave(&info->lock, flags); - if (info->xmit_cnt && info->xmit_buf && !(info->IER & UART_IER_THRI)) { - info->IER |= UART_IER_THRI; - serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK); - serial_out(info, UART_ESI_CMD2, info->IER); - } - spin_unlock_irqrestore(&info->lock, flags); -} - -/* - * ---------------------------------------------------------------------- - * - * Here starts the interrupt handling routines. All of the following - * subroutines are declared as inline and are folded into - * rs_interrupt(). They were separated out for readability's sake. - * - * Note: rs_interrupt() is a "fast" interrupt, which means that it - * runs with interrupts turned off. People who may want to modify - * rs_interrupt() should try to keep the interrupt handler as fast as - * possible. After you are done making modifications, it is not a bad - * idea to do: - * - * gcc -S -DKERNEL -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer serial.c - * - * and look at the resulting assemble code in serial.s. 
- * - * - Ted Ts'o (tytso@mit.edu), 7-Mar-93 - * ----------------------------------------------------------------------- - */ - -static DEFINE_SPINLOCK(pio_lock); - -static inline struct esp_pio_buffer *get_pio_buffer(void) -{ - struct esp_pio_buffer *buf; - unsigned long flags; - - spin_lock_irqsave(&pio_lock, flags); - if (free_pio_buf) { - buf = free_pio_buf; - free_pio_buf = buf->next; - } else { - buf = kmalloc(sizeof(struct esp_pio_buffer), GFP_ATOMIC); - } - spin_unlock_irqrestore(&pio_lock, flags); - return buf; -} - -static inline void release_pio_buffer(struct esp_pio_buffer *buf) -{ - unsigned long flags; - spin_lock_irqsave(&pio_lock, flags); - buf->next = free_pio_buf; - free_pio_buf = buf; - spin_unlock_irqrestore(&pio_lock, flags); -} - -static inline void receive_chars_pio(struct esp_struct *info, int num_bytes) -{ - struct tty_struct *tty = info->port.tty; - int i; - struct esp_pio_buffer *pio_buf; - struct esp_pio_buffer *err_buf; - unsigned char status_mask; - - pio_buf = get_pio_buffer(); - - if (!pio_buf) - return; - - err_buf = get_pio_buffer(); - - if (!err_buf) { - release_pio_buffer(pio_buf); - return; - } - - status_mask = (info->read_status_mask >> 2) & 0x07; - - for (i = 0; i < num_bytes - 1; i += 2) { - *((unsigned short *)(pio_buf->data + i)) = - inw(info->io_port + UART_ESI_RX); - err_buf->data[i] = serial_in(info, UART_ESI_RWS); - err_buf->data[i + 1] = (err_buf->data[i] >> 3) & status_mask; - err_buf->data[i] &= status_mask; - } - - if (num_bytes & 0x0001) { - pio_buf->data[num_bytes - 1] = serial_in(info, UART_ESI_RX); - err_buf->data[num_bytes - 1] = - (serial_in(info, UART_ESI_RWS) >> 3) & status_mask; - } - - /* make sure everything is still ok since interrupts were enabled */ - tty = info->port.tty; - - if (!tty) { - release_pio_buffer(pio_buf); - release_pio_buffer(err_buf); - info->stat_flags &= ~ESP_STAT_RX_TIMEOUT; - return; - } - - status_mask = (info->ignore_status_mask >> 2) & 0x07; - - for (i = 0; i < num_bytes; i++) { - if (!(err_buf->data[i] & status_mask)) { - int flag = 0; - - if (err_buf->data[i] & 0x04) { - flag = TTY_BREAK; - if (info->port.flags & ASYNC_SAK) - do_SAK(tty); - } else if (err_buf->data[i] & 0x02) - flag = TTY_FRAME; - else if (err_buf->data[i] & 0x01) - flag = TTY_PARITY; - tty_insert_flip_char(tty, pio_buf->data[i], flag); - } - } - - tty_schedule_flip(tty); - - info->stat_flags &= ~ESP_STAT_RX_TIMEOUT; - release_pio_buffer(pio_buf); - release_pio_buffer(err_buf); -} - -static void program_isa_dma(int dma, int dir, unsigned long addr, int len) -{ - unsigned long flags; - - flags = claim_dma_lock(); - disable_dma(dma); - clear_dma_ff(dma); - set_dma_mode(dma, dir); - set_dma_addr(dma, addr); - set_dma_count(dma, len); - enable_dma(dma); - release_dma_lock(flags); -} - -static void receive_chars_dma(struct esp_struct *info, int num_bytes) -{ - info->stat_flags &= ~ESP_STAT_RX_TIMEOUT; - dma_bytes = num_bytes; - info->stat_flags |= ESP_STAT_DMA_RX; - - program_isa_dma(dma, DMA_MODE_READ, isa_virt_to_bus(dma_buffer), - dma_bytes); - serial_out(info, UART_ESI_CMD1, ESI_START_DMA_RX); -} - -static inline void receive_chars_dma_done(struct esp_struct *info, - int status) -{ - struct tty_struct *tty = info->port.tty; - int num_bytes; - unsigned long flags; - - flags = claim_dma_lock(); - disable_dma(dma); - clear_dma_ff(dma); - - info->stat_flags &= ~ESP_STAT_DMA_RX; - num_bytes = dma_bytes - get_dma_residue(dma); - release_dma_lock(flags); - - info->icount.rx += num_bytes; - - if (num_bytes > 0) { - tty_insert_flip_string(tty, 
dma_buffer, num_bytes - 1); - - status &= (0x1c & info->read_status_mask); - - /* Is the status significant or do we throw the last byte ? */ - if (!(status & info->ignore_status_mask)) { - int statflag = 0; - - if (status & 0x10) { - statflag = TTY_BREAK; - (info->icount.brk)++; - if (info->port.flags & ASYNC_SAK) - do_SAK(tty); - } else if (status & 0x08) { - statflag = TTY_FRAME; - info->icount.frame++; - } else if (status & 0x04) { - statflag = TTY_PARITY; - info->icount.parity++; - } - tty_insert_flip_char(tty, dma_buffer[num_bytes - 1], - statflag); - } - tty_schedule_flip(tty); - } - - if (dma_bytes != num_bytes) { - num_bytes = dma_bytes - num_bytes; - dma_bytes = 0; - receive_chars_dma(info, num_bytes); - } else - dma_bytes = 0; -} - -/* Caller must hold info->lock */ - -static inline void transmit_chars_pio(struct esp_struct *info, - int space_avail) -{ - int i; - struct esp_pio_buffer *pio_buf; - - pio_buf = get_pio_buffer(); - - if (!pio_buf) - return; - - while (space_avail && info->xmit_cnt) { - if (info->xmit_tail + space_avail <= ESP_XMIT_SIZE) { - memcpy(pio_buf->data, - &(info->xmit_buf[info->xmit_tail]), - space_avail); - } else { - i = ESP_XMIT_SIZE - info->xmit_tail; - memcpy(pio_buf->data, - &(info->xmit_buf[info->xmit_tail]), i); - memcpy(&(pio_buf->data[i]), info->xmit_buf, - space_avail - i); - } - - info->xmit_cnt -= space_avail; - info->xmit_tail = (info->xmit_tail + space_avail) & - (ESP_XMIT_SIZE - 1); - - for (i = 0; i < space_avail - 1; i += 2) { - outw(*((unsigned short *)(pio_buf->data + i)), - info->io_port + UART_ESI_TX); - } - - if (space_avail & 0x0001) - serial_out(info, UART_ESI_TX, - pio_buf->data[space_avail - 1]); - - if (info->xmit_cnt) { - serial_out(info, UART_ESI_CMD1, ESI_NO_COMMAND); - serial_out(info, UART_ESI_CMD1, ESI_GET_TX_AVAIL); - space_avail = serial_in(info, UART_ESI_STAT1) << 8; - space_avail |= serial_in(info, UART_ESI_STAT2); - - if (space_avail > info->xmit_cnt) - space_avail = info->xmit_cnt; - } - } - - if (info->xmit_cnt < WAKEUP_CHARS) { - if (info->port.tty) - tty_wakeup(info->port.tty); - -#ifdef SERIAL_DEBUG_INTR - printk("THRE..."); -#endif - - if (info->xmit_cnt <= 0) { - info->IER &= ~UART_IER_THRI; - serial_out(info, UART_ESI_CMD1, - ESI_SET_SRV_MASK); - serial_out(info, UART_ESI_CMD2, info->IER); - } - } - - release_pio_buffer(pio_buf); -} - -/* Caller must hold info->lock */ -static inline void transmit_chars_dma(struct esp_struct *info, int num_bytes) -{ - dma_bytes = num_bytes; - - if (info->xmit_tail + dma_bytes <= ESP_XMIT_SIZE) { - memcpy(dma_buffer, &(info->xmit_buf[info->xmit_tail]), - dma_bytes); - } else { - int i = ESP_XMIT_SIZE - info->xmit_tail; - memcpy(dma_buffer, &(info->xmit_buf[info->xmit_tail]), - i); - memcpy(&(dma_buffer[i]), info->xmit_buf, dma_bytes - i); - } - - info->xmit_cnt -= dma_bytes; - info->xmit_tail = (info->xmit_tail + dma_bytes) & (ESP_XMIT_SIZE - 1); - - if (info->xmit_cnt < WAKEUP_CHARS) { - if (info->port.tty) - tty_wakeup(info->port.tty); - -#ifdef SERIAL_DEBUG_INTR - printk("THRE..."); -#endif - - if (info->xmit_cnt <= 0) { - info->IER &= ~UART_IER_THRI; - serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK); - serial_out(info, UART_ESI_CMD2, info->IER); - } - } - - info->stat_flags |= ESP_STAT_DMA_TX; - - program_isa_dma(dma, DMA_MODE_WRITE, isa_virt_to_bus(dma_buffer), - dma_bytes); - serial_out(info, UART_ESI_CMD1, ESI_START_DMA_TX); -} - -static inline void transmit_chars_dma_done(struct esp_struct *info) -{ - int num_bytes; - unsigned long flags; - - flags = claim_dma_lock(); 
- disable_dma(dma); - clear_dma_ff(dma); - - num_bytes = dma_bytes - get_dma_residue(dma); - info->icount.tx += dma_bytes; - release_dma_lock(flags); - - if (dma_bytes != num_bytes) { - dma_bytes -= num_bytes; - memmove(dma_buffer, dma_buffer + num_bytes, dma_bytes); - - program_isa_dma(dma, DMA_MODE_WRITE, - isa_virt_to_bus(dma_buffer), dma_bytes); - - serial_out(info, UART_ESI_CMD1, ESI_START_DMA_TX); - } else { - dma_bytes = 0; - info->stat_flags &= ~ESP_STAT_DMA_TX; - } -} - -static void check_modem_status(struct esp_struct *info) -{ - int status; - - serial_out(info, UART_ESI_CMD1, ESI_GET_UART_STAT); - status = serial_in(info, UART_ESI_STAT2); - - if (status & UART_MSR_ANY_DELTA) { - /* update input line counters */ - if (status & UART_MSR_TERI) - info->icount.rng++; - if (status & UART_MSR_DDSR) - info->icount.dsr++; - if (status & UART_MSR_DDCD) - info->icount.dcd++; - if (status & UART_MSR_DCTS) - info->icount.cts++; - wake_up_interruptible(&info->port.delta_msr_wait); - } - - if ((info->port.flags & ASYNC_CHECK_CD) && (status & UART_MSR_DDCD)) { -#if (defined(SERIAL_DEBUG_OPEN) || defined(SERIAL_DEBUG_INTR)) - printk("ttys%d CD now %s...", info->line, - (status & UART_MSR_DCD) ? "on" : "off"); -#endif - if (status & UART_MSR_DCD) - wake_up_interruptible(&info->port.open_wait); - else { -#ifdef SERIAL_DEBUG_OPEN - printk("scheduling hangup..."); -#endif - tty_hangup(info->port.tty); - } - } -} - -/* - * This is the serial driver's interrupt routine - */ -static irqreturn_t rs_interrupt_single(int irq, void *dev_id) -{ - struct esp_struct *info; - unsigned err_status; - unsigned int scratch; - -#ifdef SERIAL_DEBUG_INTR - printk("rs_interrupt_single(%d)...", irq); -#endif - info = (struct esp_struct *)dev_id; - err_status = 0; - scratch = serial_in(info, UART_ESI_SID); - - spin_lock(&info->lock); - - if (!info->port.tty) { - spin_unlock(&info->lock); - return IRQ_NONE; - } - - if (scratch & 0x04) { /* error */ - serial_out(info, UART_ESI_CMD1, ESI_GET_ERR_STAT); - err_status = serial_in(info, UART_ESI_STAT1); - serial_in(info, UART_ESI_STAT2); - - if (err_status & 0x01) - info->stat_flags |= ESP_STAT_RX_TIMEOUT; - - if (err_status & 0x20) /* UART status */ - check_modem_status(info); - - if (err_status & 0x80) /* Start break */ - wake_up_interruptible(&info->break_wait); - } - - if ((scratch & 0x88) || /* DMA completed or timed out */ - (err_status & 0x1c) /* receive error */) { - if (info->stat_flags & ESP_STAT_DMA_RX) - receive_chars_dma_done(info, err_status); - else if (info->stat_flags & ESP_STAT_DMA_TX) - transmit_chars_dma_done(info); - } - - if (!(info->stat_flags & (ESP_STAT_DMA_RX | ESP_STAT_DMA_TX)) && - ((scratch & 0x01) || (info->stat_flags & ESP_STAT_RX_TIMEOUT)) && - (info->IER & UART_IER_RDI)) { - int num_bytes; - - serial_out(info, UART_ESI_CMD1, ESI_NO_COMMAND); - serial_out(info, UART_ESI_CMD1, ESI_GET_RX_AVAIL); - num_bytes = serial_in(info, UART_ESI_STAT1) << 8; - num_bytes |= serial_in(info, UART_ESI_STAT2); - - num_bytes = tty_buffer_request_room(info->port.tty, num_bytes); - - if (num_bytes) { - if (dma_bytes || - (info->stat_flags & ESP_STAT_USE_PIO) || - (num_bytes <= info->config.pio_threshold)) - receive_chars_pio(info, num_bytes); - else - receive_chars_dma(info, num_bytes); - } - } - - if (!(info->stat_flags & (ESP_STAT_DMA_RX | ESP_STAT_DMA_TX)) && - (scratch & 0x02) && (info->IER & UART_IER_THRI)) { - if ((info->xmit_cnt <= 0) || info->port.tty->stopped) { - info->IER &= ~UART_IER_THRI; - serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK); - 
serial_out(info, UART_ESI_CMD2, info->IER); - } else { - int num_bytes; - - serial_out(info, UART_ESI_CMD1, ESI_NO_COMMAND); - serial_out(info, UART_ESI_CMD1, ESI_GET_TX_AVAIL); - num_bytes = serial_in(info, UART_ESI_STAT1) << 8; - num_bytes |= serial_in(info, UART_ESI_STAT2); - - if (num_bytes > info->xmit_cnt) - num_bytes = info->xmit_cnt; - - if (num_bytes) { - if (dma_bytes || - (info->stat_flags & ESP_STAT_USE_PIO) || - (num_bytes <= info->config.pio_threshold)) - transmit_chars_pio(info, num_bytes); - else - transmit_chars_dma(info, num_bytes); - } - } - } - - info->last_active = jiffies; - -#ifdef SERIAL_DEBUG_INTR - printk("end.\n"); -#endif - spin_unlock(&info->lock); - return IRQ_HANDLED; -} - -/* - * ------------------------------------------------------------------- - * Here ends the serial interrupt routines. - * ------------------------------------------------------------------- - */ - -/* - * --------------------------------------------------------------- - * Low level utility subroutines for the serial driver: routines to - * figure out the appropriate timeout for an interrupt chain, routines - * to initialize and startup a serial port, and routines to shutdown a - * serial port. Useful stuff like that. - * - * Caller should hold lock - * --------------------------------------------------------------- - */ - -static void esp_basic_init(struct esp_struct *info) -{ - /* put ESPC in enhanced mode */ - serial_out(info, UART_ESI_CMD1, ESI_SET_MODE); - - if (info->stat_flags & ESP_STAT_NEVER_DMA) - serial_out(info, UART_ESI_CMD2, 0x01); - else - serial_out(info, UART_ESI_CMD2, 0x31); - - /* disable interrupts for now */ - serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK); - serial_out(info, UART_ESI_CMD2, 0x00); - - /* set interrupt and DMA channel */ - serial_out(info, UART_ESI_CMD1, ESI_SET_IRQ); - - if (info->stat_flags & ESP_STAT_NEVER_DMA) - serial_out(info, UART_ESI_CMD2, 0x01); - else - serial_out(info, UART_ESI_CMD2, (dma << 4) | 0x01); - - serial_out(info, UART_ESI_CMD1, ESI_SET_ENH_IRQ); - - if (info->line % 8) /* secondary port */ - serial_out(info, UART_ESI_CMD2, 0x0d); /* shared */ - else if (info->irq == 9) - serial_out(info, UART_ESI_CMD2, 0x02); - else - serial_out(info, UART_ESI_CMD2, info->irq); - - /* set error status mask (check this) */ - serial_out(info, UART_ESI_CMD1, ESI_SET_ERR_MASK); - - if (info->stat_flags & ESP_STAT_NEVER_DMA) - serial_out(info, UART_ESI_CMD2, 0xa1); - else - serial_out(info, UART_ESI_CMD2, 0xbd); - - serial_out(info, UART_ESI_CMD2, 0x00); - - /* set DMA timeout */ - serial_out(info, UART_ESI_CMD1, ESI_SET_DMA_TMOUT); - serial_out(info, UART_ESI_CMD2, 0xff); - - /* set FIFO trigger levels */ - serial_out(info, UART_ESI_CMD1, ESI_SET_TRIGGER); - serial_out(info, UART_ESI_CMD2, info->config.rx_trigger >> 8); - serial_out(info, UART_ESI_CMD2, info->config.rx_trigger); - serial_out(info, UART_ESI_CMD2, info->config.tx_trigger >> 8); - serial_out(info, UART_ESI_CMD2, info->config.tx_trigger); - - /* Set clock scaling and wait states */ - serial_out(info, UART_ESI_CMD1, ESI_SET_PRESCALAR); - serial_out(info, UART_ESI_CMD2, 0x04 | ESPC_SCALE); - - /* set reinterrupt pacing */ - serial_out(info, UART_ESI_CMD1, ESI_SET_REINTR); - serial_out(info, UART_ESI_CMD2, 0xff); -} - -static int startup(struct esp_struct *info) -{ - unsigned long flags; - int retval = 0; - unsigned int num_chars; - - spin_lock_irqsave(&info->lock, flags); - - if (info->port.flags & ASYNC_INITIALIZED) - goto out; - - if (!info->xmit_buf) { - info->xmit_buf = (unsigned 
char *)get_zeroed_page(GFP_ATOMIC); - retval = -ENOMEM; - if (!info->xmit_buf) - goto out; - } - -#ifdef SERIAL_DEBUG_OPEN - printk(KERN_DEBUG "starting up ttys%d (irq %d)...", - info->line, info->irq); -#endif - - /* Flush the RX buffer. Using the ESI flush command may cause */ - /* wild interrupts, so read all the data instead. */ - - serial_out(info, UART_ESI_CMD1, ESI_NO_COMMAND); - serial_out(info, UART_ESI_CMD1, ESI_GET_RX_AVAIL); - num_chars = serial_in(info, UART_ESI_STAT1) << 8; - num_chars |= serial_in(info, UART_ESI_STAT2); - - while (num_chars > 1) { - inw(info->io_port + UART_ESI_RX); - num_chars -= 2; - } - - if (num_chars) - serial_in(info, UART_ESI_RX); - - /* set receive character timeout */ - serial_out(info, UART_ESI_CMD1, ESI_SET_RX_TIMEOUT); - serial_out(info, UART_ESI_CMD2, info->config.rx_timeout); - - /* clear all flags except the "never DMA" flag */ - info->stat_flags &= ESP_STAT_NEVER_DMA; - - if (info->stat_flags & ESP_STAT_NEVER_DMA) - info->stat_flags |= ESP_STAT_USE_PIO; - - spin_unlock_irqrestore(&info->lock, flags); - - /* - * Allocate the IRQ - */ - - retval = request_irq(info->irq, rs_interrupt_single, IRQF_SHARED, - "esp serial", info); - - if (retval) { - if (capable(CAP_SYS_ADMIN)) { - if (info->port.tty) - set_bit(TTY_IO_ERROR, - &info->port.tty->flags); - retval = 0; - } - goto out_unlocked; - } - - if (!(info->stat_flags & ESP_STAT_USE_PIO) && !dma_buffer) { - dma_buffer = (char *)__get_dma_pages( - GFP_KERNEL, get_order(DMA_BUFFER_SZ)); - - /* use PIO mode if DMA buf/chan cannot be allocated */ - if (!dma_buffer) - info->stat_flags |= ESP_STAT_USE_PIO; - else if (request_dma(dma, "esp serial")) { - free_pages((unsigned long)dma_buffer, - get_order(DMA_BUFFER_SZ)); - dma_buffer = NULL; - info->stat_flags |= ESP_STAT_USE_PIO; - } - - } - - info->MCR = UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2; - - spin_lock_irqsave(&info->lock, flags); - serial_out(info, UART_ESI_CMD1, ESI_WRITE_UART); - serial_out(info, UART_ESI_CMD2, UART_MCR); - serial_out(info, UART_ESI_CMD2, info->MCR); - - /* - * Finally, enable interrupts - */ - /* info->IER = UART_IER_MSI | UART_IER_RLSI | UART_IER_RDI; */ - info->IER = UART_IER_RLSI | UART_IER_RDI | UART_IER_DMA_TMOUT | - UART_IER_DMA_TC; - serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK); - serial_out(info, UART_ESI_CMD2, info->IER); - - if (info->port.tty) - clear_bit(TTY_IO_ERROR, &info->port.tty->flags); - info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; - spin_unlock_irqrestore(&info->lock, flags); - - /* - * Set up the tty->alt_speed kludge - */ - if (info->port.tty) { - if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI) - info->port.tty->alt_speed = 57600; - if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI) - info->port.tty->alt_speed = 115200; - if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI) - info->port.tty->alt_speed = 230400; - if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP) - info->port.tty->alt_speed = 460800; - } - - /* - * set the speed of the serial port - */ - change_speed(info); - info->port.flags |= ASYNC_INITIALIZED; - return 0; - -out: - spin_unlock_irqrestore(&info->lock, flags); -out_unlocked: - return retval; -} - -/* - * This routine will shutdown a serial port; interrupts are disabled, and - * DTR is dropped if the hangup on close termio flag is on. 
- */ -static void shutdown(struct esp_struct *info) -{ - unsigned long flags, f; - - if (!(info->port.flags & ASYNC_INITIALIZED)) - return; - -#ifdef SERIAL_DEBUG_OPEN - printk("Shutting down serial port %d (irq %d)....", info->line, - info->irq); -#endif - - spin_lock_irqsave(&info->lock, flags); - /* - * clear delta_msr_wait queue to avoid mem leaks: we may free the irq - * here so the queue might never be waken up - */ - wake_up_interruptible(&info->port.delta_msr_wait); - wake_up_interruptible(&info->break_wait); - - /* stop a DMA transfer on the port being closed */ - /* DMA lock is higher priority always */ - if (info->stat_flags & (ESP_STAT_DMA_RX | ESP_STAT_DMA_TX)) { - f = claim_dma_lock(); - disable_dma(dma); - clear_dma_ff(dma); - release_dma_lock(f); - - dma_bytes = 0; - } - - /* - * Free the IRQ - */ - free_irq(info->irq, info); - - if (dma_buffer) { - struct esp_struct *current_port = ports; - - while (current_port) { - if ((current_port != info) && - (current_port->port.flags & ASYNC_INITIALIZED)) - break; - - current_port = current_port->next_port; - } - - if (!current_port) { - free_dma(dma); - free_pages((unsigned long)dma_buffer, - get_order(DMA_BUFFER_SZ)); - dma_buffer = NULL; - } - } - - if (info->xmit_buf) { - free_page((unsigned long) info->xmit_buf); - info->xmit_buf = NULL; - } - - info->IER = 0; - serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK); - serial_out(info, UART_ESI_CMD2, 0x00); - - if (!info->port.tty || (info->port.tty->termios->c_cflag & HUPCL)) - info->MCR &= ~(UART_MCR_DTR|UART_MCR_RTS); - - info->MCR &= ~UART_MCR_OUT2; - serial_out(info, UART_ESI_CMD1, ESI_WRITE_UART); - serial_out(info, UART_ESI_CMD2, UART_MCR); - serial_out(info, UART_ESI_CMD2, info->MCR); - - if (info->port.tty) - set_bit(TTY_IO_ERROR, &info->port.tty->flags); - - info->port.flags &= ~ASYNC_INITIALIZED; - spin_unlock_irqrestore(&info->lock, flags); -} - -/* - * This routine is called to set the UART divisor registers to match - * the specified baud rate for a serial port. 
- */ -static void change_speed(struct esp_struct *info) -{ - unsigned short port; - int quot = 0; - unsigned cflag, cval; - int baud, bits; - unsigned char flow1 = 0, flow2 = 0; - unsigned long flags; - - if (!info->port.tty || !info->port.tty->termios) - return; - cflag = info->port.tty->termios->c_cflag; - port = info->io_port; - - /* byte size and parity */ - switch (cflag & CSIZE) { - case CS5: cval = 0x00; bits = 7; break; - case CS6: cval = 0x01; bits = 8; break; - case CS7: cval = 0x02; bits = 9; break; - case CS8: cval = 0x03; bits = 10; break; - default: cval = 0x00; bits = 7; break; - } - if (cflag & CSTOPB) { - cval |= 0x04; - bits++; - } - if (cflag & PARENB) { - cval |= UART_LCR_PARITY; - bits++; - } - if (!(cflag & PARODD)) - cval |= UART_LCR_EPAR; -#ifdef CMSPAR - if (cflag & CMSPAR) - cval |= UART_LCR_SPAR; -#endif - baud = tty_get_baud_rate(info->port.tty); - if (baud == 38400 && - ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST)) - quot = info->custom_divisor; - else { - if (baud == 134) /* Special case since 134 is really 134.5 */ - quot = (2*BASE_BAUD / 269); - else if (baud) - quot = BASE_BAUD / baud; - } - /* If the quotient is ever zero, default to 9600 bps */ - if (!quot) - quot = BASE_BAUD / 9600; - - if (baud) { - /* Actual rate */ - baud = BASE_BAUD/quot; - tty_encode_baud_rate(info->port.tty, baud, baud); - } - info->timeout = ((1024 * HZ * bits * quot) / BASE_BAUD) + (HZ / 50); - - /* CTS flow control flag and modem status interrupts */ - /* info->IER &= ~UART_IER_MSI; */ - if (cflag & CRTSCTS) { - info->port.flags |= ASYNC_CTS_FLOW; - /* info->IER |= UART_IER_MSI; */ - flow1 = 0x04; - flow2 = 0x10; - } else - info->port.flags &= ~ASYNC_CTS_FLOW; - if (cflag & CLOCAL) - info->port.flags &= ~ASYNC_CHECK_CD; - else - info->port.flags |= ASYNC_CHECK_CD; - - /* - * Set up parity check flag - */ - info->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR; - if (I_INPCK(info->port.tty)) - info->read_status_mask |= UART_LSR_FE | UART_LSR_PE; - if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty)) - info->read_status_mask |= UART_LSR_BI; - - info->ignore_status_mask = 0; -#if 0 - /* This should be safe, but for some broken bits of hardware... */ - if (I_IGNPAR(info->port.tty)) { - info->ignore_status_mask |= UART_LSR_PE | UART_LSR_FE; - info->read_status_mask |= UART_LSR_PE | UART_LSR_FE; - } -#endif - if (I_IGNBRK(info->port.tty)) { - info->ignore_status_mask |= UART_LSR_BI; - info->read_status_mask |= UART_LSR_BI; - /* - * If we're ignore parity and break indicators, ignore - * overruns too. (For real raw support). - */ - if (I_IGNPAR(info->port.tty)) { - info->ignore_status_mask |= UART_LSR_OE | \ - UART_LSR_PE | UART_LSR_FE; - info->read_status_mask |= UART_LSR_OE | \ - UART_LSR_PE | UART_LSR_FE; - } - } - - if (I_IXOFF(info->port.tty)) - flow1 |= 0x81; - - spin_lock_irqsave(&info->lock, flags); - /* set baud */ - serial_out(info, UART_ESI_CMD1, ESI_SET_BAUD); - serial_out(info, UART_ESI_CMD2, quot >> 8); - serial_out(info, UART_ESI_CMD2, quot & 0xff); - - /* set data bits, parity, etc. 
*/ - serial_out(info, UART_ESI_CMD1, ESI_WRITE_UART); - serial_out(info, UART_ESI_CMD2, UART_LCR); - serial_out(info, UART_ESI_CMD2, cval); - - /* Enable flow control */ - serial_out(info, UART_ESI_CMD1, ESI_SET_FLOW_CNTL); - serial_out(info, UART_ESI_CMD2, flow1); - serial_out(info, UART_ESI_CMD2, flow2); - - /* set flow control characters (XON/XOFF only) */ - if (I_IXOFF(info->port.tty)) { - serial_out(info, UART_ESI_CMD1, ESI_SET_FLOW_CHARS); - serial_out(info, UART_ESI_CMD2, START_CHAR(info->port.tty)); - serial_out(info, UART_ESI_CMD2, STOP_CHAR(info->port.tty)); - serial_out(info, UART_ESI_CMD2, 0x10); - serial_out(info, UART_ESI_CMD2, 0x21); - switch (cflag & CSIZE) { - case CS5: - serial_out(info, UART_ESI_CMD2, 0x1f); - break; - case CS6: - serial_out(info, UART_ESI_CMD2, 0x3f); - break; - case CS7: - case CS8: - serial_out(info, UART_ESI_CMD2, 0x7f); - break; - default: - serial_out(info, UART_ESI_CMD2, 0xff); - break; - } - } - - /* Set high/low water */ - serial_out(info, UART_ESI_CMD1, ESI_SET_FLOW_LVL); - serial_out(info, UART_ESI_CMD2, info->config.flow_off >> 8); - serial_out(info, UART_ESI_CMD2, info->config.flow_off); - serial_out(info, UART_ESI_CMD2, info->config.flow_on >> 8); - serial_out(info, UART_ESI_CMD2, info->config.flow_on); - - spin_unlock_irqrestore(&info->lock, flags); -} - -static int rs_put_char(struct tty_struct *tty, unsigned char ch) -{ - struct esp_struct *info = tty->driver_data; - unsigned long flags; - int ret = 0; - - if (serial_paranoia_check(info, tty->name, "rs_put_char")) - return 0; - - if (!info->xmit_buf) - return 0; - - spin_lock_irqsave(&info->lock, flags); - if (info->xmit_cnt < ESP_XMIT_SIZE - 1) { - info->xmit_buf[info->xmit_head++] = ch; - info->xmit_head &= ESP_XMIT_SIZE-1; - info->xmit_cnt++; - ret = 1; - } - spin_unlock_irqrestore(&info->lock, flags); - return ret; -} - -static void rs_flush_chars(struct tty_struct *tty) -{ - struct esp_struct *info = tty->driver_data; - unsigned long flags; - - if (serial_paranoia_check(info, tty->name, "rs_flush_chars")) - return; - - spin_lock_irqsave(&info->lock, flags); - - if (info->xmit_cnt <= 0 || tty->stopped || !info->xmit_buf) - goto out; - - if (!(info->IER & UART_IER_THRI)) { - info->IER |= UART_IER_THRI; - serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK); - serial_out(info, UART_ESI_CMD2, info->IER); - } -out: - spin_unlock_irqrestore(&info->lock, flags); -} - -static int rs_write(struct tty_struct *tty, - const unsigned char *buf, int count) -{ - int c, t, ret = 0; - struct esp_struct *info = tty->driver_data; - unsigned long flags; - - if (serial_paranoia_check(info, tty->name, "rs_write")) - return 0; - - if (!info->xmit_buf) - return 0; - - while (1) { - /* Thanks to R. 
Wolff for suggesting how to do this with */ - /* interrupts enabled */ - - c = count; - t = ESP_XMIT_SIZE - info->xmit_cnt - 1; - - if (t < c) - c = t; - - t = ESP_XMIT_SIZE - info->xmit_head; - - if (t < c) - c = t; - - if (c <= 0) - break; - - memcpy(info->xmit_buf + info->xmit_head, buf, c); - - info->xmit_head = (info->xmit_head + c) & (ESP_XMIT_SIZE-1); - info->xmit_cnt += c; - buf += c; - count -= c; - ret += c; - } - - spin_lock_irqsave(&info->lock, flags); - - if (info->xmit_cnt && !tty->stopped && !(info->IER & UART_IER_THRI)) { - info->IER |= UART_IER_THRI; - serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK); - serial_out(info, UART_ESI_CMD2, info->IER); - } - - spin_unlock_irqrestore(&info->lock, flags); - return ret; -} - -static int rs_write_room(struct tty_struct *tty) -{ - struct esp_struct *info = tty->driver_data; - int ret; - unsigned long flags; - - if (serial_paranoia_check(info, tty->name, "rs_write_room")) - return 0; - - spin_lock_irqsave(&info->lock, flags); - - ret = ESP_XMIT_SIZE - info->xmit_cnt - 1; - if (ret < 0) - ret = 0; - spin_unlock_irqrestore(&info->lock, flags); - return ret; -} - -static int rs_chars_in_buffer(struct tty_struct *tty) -{ - struct esp_struct *info = tty->driver_data; - - if (serial_paranoia_check(info, tty->name, "rs_chars_in_buffer")) - return 0; - return info->xmit_cnt; -} - -static void rs_flush_buffer(struct tty_struct *tty) -{ - struct esp_struct *info = tty->driver_data; - unsigned long flags; - - if (serial_paranoia_check(info, tty->name, "rs_flush_buffer")) - return; - spin_lock_irqsave(&info->lock, flags); - info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; - spin_unlock_irqrestore(&info->lock, flags); - tty_wakeup(tty); -} - -/* - * ------------------------------------------------------------ - * rs_throttle() - * - * This routine is called by the upper-layer tty layer to signal that - * incoming characters should be throttled. 
- * ------------------------------------------------------------ - */ -static void rs_throttle(struct tty_struct *tty) -{ - struct esp_struct *info = tty->driver_data; - unsigned long flags; -#ifdef SERIAL_DEBUG_THROTTLE - char buf[64]; - - printk("throttle %s: %d....\n", tty_name(tty, buf), - tty_chars_in_buffer(tty)); -#endif - - if (serial_paranoia_check(info, tty->name, "rs_throttle")) - return; - - spin_lock_irqsave(&info->lock, flags); - info->IER &= ~UART_IER_RDI; - serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK); - serial_out(info, UART_ESI_CMD2, info->IER); - serial_out(info, UART_ESI_CMD1, ESI_SET_RX_TIMEOUT); - serial_out(info, UART_ESI_CMD2, 0x00); - spin_unlock_irqrestore(&info->lock, flags); -} - -static void rs_unthrottle(struct tty_struct *tty) -{ - struct esp_struct *info = tty->driver_data; - unsigned long flags; -#ifdef SERIAL_DEBUG_THROTTLE - char buf[64]; - - printk(KERN_DEBUG "unthrottle %s: %d....\n", tty_name(tty, buf), - tty_chars_in_buffer(tty)); -#endif - - if (serial_paranoia_check(info, tty->name, "rs_unthrottle")) - return; - - spin_lock_irqsave(&info->lock, flags); - info->IER |= UART_IER_RDI; - serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK); - serial_out(info, UART_ESI_CMD2, info->IER); - serial_out(info, UART_ESI_CMD1, ESI_SET_RX_TIMEOUT); - serial_out(info, UART_ESI_CMD2, info->config.rx_timeout); - spin_unlock_irqrestore(&info->lock, flags); -} - -/* - * ------------------------------------------------------------ - * rs_ioctl() and friends - * ------------------------------------------------------------ - */ - -static int get_serial_info(struct esp_struct *info, - struct serial_struct __user *retinfo) -{ - struct serial_struct tmp; - - lock_kernel(); - memset(&tmp, 0, sizeof(tmp)); - tmp.type = PORT_16550A; - tmp.line = info->line; - tmp.port = info->io_port; - tmp.irq = info->irq; - tmp.flags = info->port.flags; - tmp.xmit_fifo_size = 1024; - tmp.baud_base = BASE_BAUD; - tmp.close_delay = info->close_delay; - tmp.closing_wait = info->closing_wait; - tmp.custom_divisor = info->custom_divisor; - tmp.hub6 = 0; - unlock_kernel(); - if (copy_to_user(retinfo, &tmp, sizeof(*retinfo))) - return -EFAULT; - return 0; -} - -static int get_esp_config(struct esp_struct *info, - struct hayes_esp_config __user *retinfo) -{ - struct hayes_esp_config tmp; - - if (!retinfo) - return -EFAULT; - - memset(&tmp, 0, sizeof(tmp)); - lock_kernel(); - tmp.rx_timeout = info->config.rx_timeout; - tmp.rx_trigger = info->config.rx_trigger; - tmp.tx_trigger = info->config.tx_trigger; - tmp.flow_off = info->config.flow_off; - tmp.flow_on = info->config.flow_on; - tmp.pio_threshold = info->config.pio_threshold; - tmp.dma_channel = (info->stat_flags & ESP_STAT_NEVER_DMA ? 0 : dma); - unlock_kernel(); - - return copy_to_user(retinfo, &tmp, sizeof(*retinfo)) ? 
-EFAULT : 0; -} - -static int set_serial_info(struct esp_struct *info, - struct serial_struct __user *new_info) -{ - struct serial_struct new_serial; - struct esp_struct old_info; - unsigned int change_irq; - int retval = 0; - struct esp_struct *current_async; - - if (copy_from_user(&new_serial, new_info, sizeof(new_serial))) - return -EFAULT; - old_info = *info; - - if ((new_serial.type != PORT_16550A) || - (new_serial.hub6) || - (info->io_port != new_serial.port) || - (new_serial.baud_base != BASE_BAUD) || - (new_serial.irq > 15) || - (new_serial.irq < 2) || - (new_serial.irq == 6) || - (new_serial.irq == 8) || - (new_serial.irq == 13)) - return -EINVAL; - - change_irq = new_serial.irq != info->irq; - - if (change_irq && (info->line % 8)) - return -EINVAL; - - if (!capable(CAP_SYS_ADMIN)) { - if (change_irq || - (new_serial.close_delay != info->close_delay) || - ((new_serial.flags & ~ASYNC_USR_MASK) != - (info->port.flags & ~ASYNC_USR_MASK))) - return -EPERM; - info->port.flags = ((info->port.flags & ~ASYNC_USR_MASK) | - (new_serial.flags & ASYNC_USR_MASK)); - info->custom_divisor = new_serial.custom_divisor; - } else { - if (new_serial.irq == 2) - new_serial.irq = 9; - - if (change_irq) { - current_async = ports; - - while (current_async) { - if ((current_async->line >= info->line) && - (current_async->line < (info->line + 8))) { - if (current_async == info) { - if (current_async->port.count > 1) - return -EBUSY; - } else if (current_async->port.count) - return -EBUSY; - } - - current_async = current_async->next_port; - } - } - - /* - * OK, past this point, all the error checking has been done. - * At this point, we start making changes..... - */ - - info->port.flags = ((info->port.flags & ~ASYNC_FLAGS) | - (new_serial.flags & ASYNC_FLAGS)); - info->custom_divisor = new_serial.custom_divisor; - info->close_delay = new_serial.close_delay * HZ/100; - info->closing_wait = new_serial.closing_wait * HZ/100; - - if (change_irq) { - /* - * We need to shutdown the serial port at the old - * port/irq combination. - */ - shutdown(info); - - current_async = ports; - - while (current_async) { - if ((current_async->line >= info->line) && - (current_async->line < (info->line + 8))) - current_async->irq = new_serial.irq; - - current_async = current_async->next_port; - } - - serial_out(info, UART_ESI_CMD1, ESI_SET_ENH_IRQ); - if (info->irq == 9) - serial_out(info, UART_ESI_CMD2, 0x02); - else - serial_out(info, UART_ESI_CMD2, info->irq); - } - } - - if (info->port.flags & ASYNC_INITIALIZED) { - if (((old_info.port.flags & ASYNC_SPD_MASK) != - (info->port.flags & ASYNC_SPD_MASK)) || - (old_info.custom_divisor != info->custom_divisor)) { - if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI) - info->port.tty->alt_speed = 57600; - if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI) - info->port.tty->alt_speed = 115200; - if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI) - info->port.tty->alt_speed = 230400; - if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP) - info->port.tty->alt_speed = 460800; - change_speed(info); - } - } else - retval = startup(info); - - return retval; -} - -static int set_esp_config(struct esp_struct *info, - struct hayes_esp_config __user *new_info) -{ - struct hayes_esp_config new_config; - unsigned int change_dma; - int retval = 0; - struct esp_struct *current_async; - unsigned long flags; - - /* Perhaps a non-sysadmin user should be able to do some of these */ - /* operations. I haven't decided yet. 
*/ - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - if (copy_from_user(&new_config, new_info, sizeof(new_config))) - return -EFAULT; - - if ((new_config.flow_on >= new_config.flow_off) || - (new_config.rx_trigger < 1) || - (new_config.tx_trigger < 1) || - (new_config.flow_off < 1) || - (new_config.flow_on < 1) || - (new_config.rx_trigger > 1023) || - (new_config.tx_trigger > 1023) || - (new_config.flow_off > 1023) || - (new_config.flow_on > 1023) || - (new_config.pio_threshold < 0) || - (new_config.pio_threshold > 1024)) - return -EINVAL; - - if ((new_config.dma_channel != 1) && (new_config.dma_channel != 3)) - new_config.dma_channel = 0; - - if (info->stat_flags & ESP_STAT_NEVER_DMA) - change_dma = new_config.dma_channel; - else - change_dma = (new_config.dma_channel != dma); - - if (change_dma) { - if (new_config.dma_channel) { - /* PIO mode to DMA mode transition OR */ - /* change current DMA channel */ - current_async = ports; - - while (current_async) { - if (current_async == info) { - if (current_async->port.count > 1) - return -EBUSY; - } else if (current_async->port.count) - return -EBUSY; - - current_async = current_async->next_port; - } - - shutdown(info); - dma = new_config.dma_channel; - info->stat_flags &= ~ESP_STAT_NEVER_DMA; - - /* all ports must use the same DMA channel */ - - spin_lock_irqsave(&info->lock, flags); - current_async = ports; - - while (current_async) { - esp_basic_init(current_async); - current_async = current_async->next_port; - } - spin_unlock_irqrestore(&info->lock, flags); - } else { - /* DMA mode to PIO mode only */ - if (info->port.count > 1) - return -EBUSY; - - shutdown(info); - spin_lock_irqsave(&info->lock, flags); - info->stat_flags |= ESP_STAT_NEVER_DMA; - esp_basic_init(info); - spin_unlock_irqrestore(&info->lock, flags); - } - } - - info->config.pio_threshold = new_config.pio_threshold; - - if ((new_config.flow_off != info->config.flow_off) || - (new_config.flow_on != info->config.flow_on)) { - info->config.flow_off = new_config.flow_off; - info->config.flow_on = new_config.flow_on; - - spin_lock_irqsave(&info->lock, flags); - serial_out(info, UART_ESI_CMD1, ESI_SET_FLOW_LVL); - serial_out(info, UART_ESI_CMD2, new_config.flow_off >> 8); - serial_out(info, UART_ESI_CMD2, new_config.flow_off); - serial_out(info, UART_ESI_CMD2, new_config.flow_on >> 8); - serial_out(info, UART_ESI_CMD2, new_config.flow_on); - spin_unlock_irqrestore(&info->lock, flags); - } - - if ((new_config.rx_trigger != info->config.rx_trigger) || - (new_config.tx_trigger != info->config.tx_trigger)) { - info->config.rx_trigger = new_config.rx_trigger; - info->config.tx_trigger = new_config.tx_trigger; - spin_lock_irqsave(&info->lock, flags); - serial_out(info, UART_ESI_CMD1, ESI_SET_TRIGGER); - serial_out(info, UART_ESI_CMD2, - new_config.rx_trigger >> 8); - serial_out(info, UART_ESI_CMD2, new_config.rx_trigger); - serial_out(info, UART_ESI_CMD2, - new_config.tx_trigger >> 8); - serial_out(info, UART_ESI_CMD2, new_config.tx_trigger); - spin_unlock_irqrestore(&info->lock, flags); - } - - if (new_config.rx_timeout != info->config.rx_timeout) { - info->config.rx_timeout = new_config.rx_timeout; - spin_lock_irqsave(&info->lock, flags); - - if (info->IER & UART_IER_RDI) { - serial_out(info, UART_ESI_CMD1, - ESI_SET_RX_TIMEOUT); - serial_out(info, UART_ESI_CMD2, - new_config.rx_timeout); - } - - spin_unlock_irqrestore(&info->lock, flags); - } - - if (!(info->port.flags & ASYNC_INITIALIZED)) - retval = startup(info); - - return retval; -} - -/* - * get_lsr_info - get line status 
register info - * - * Purpose: Let user call ioctl() to get info when the UART physically - * is emptied. On bus types like RS485, the transmitter must - * release the bus after transmitting. This must be done when - * the transmit shift register is empty, not be done when the - * transmit holding register is empty. This functionality - * allows an RS485 driver to be written in user space. - */ -static int get_lsr_info(struct esp_struct *info, unsigned int __user *value) -{ - unsigned char status; - unsigned int result; - unsigned long flags; - - spin_lock_irqsave(&info->lock, flags); - serial_out(info, UART_ESI_CMD1, ESI_GET_UART_STAT); - status = serial_in(info, UART_ESI_STAT1); - spin_unlock_irqrestore(&info->lock, flags); - result = ((status & UART_LSR_TEMT) ? TIOCSER_TEMT : 0); - return put_user(result, value); -} - - -static int esp_tiocmget(struct tty_struct *tty, struct file *file) -{ - struct esp_struct *info = tty->driver_data; - unsigned char control, status; - unsigned long flags; - - if (serial_paranoia_check(info, tty->name, __func__)) - return -ENODEV; - if (tty->flags & (1 << TTY_IO_ERROR)) - return -EIO; - - control = info->MCR; - - spin_lock_irqsave(&info->lock, flags); - serial_out(info, UART_ESI_CMD1, ESI_GET_UART_STAT); - status = serial_in(info, UART_ESI_STAT2); - spin_unlock_irqrestore(&info->lock, flags); - - return ((control & UART_MCR_RTS) ? TIOCM_RTS : 0) - | ((control & UART_MCR_DTR) ? TIOCM_DTR : 0) - | ((status & UART_MSR_DCD) ? TIOCM_CAR : 0) - | ((status & UART_MSR_RI) ? TIOCM_RNG : 0) - | ((status & UART_MSR_DSR) ? TIOCM_DSR : 0) - | ((status & UART_MSR_CTS) ? TIOCM_CTS : 0); -} - -static int esp_tiocmset(struct tty_struct *tty, struct file *file, - unsigned int set, unsigned int clear) -{ - struct esp_struct *info = tty->driver_data; - unsigned long flags; - - if (serial_paranoia_check(info, tty->name, __func__)) - return -ENODEV; - if (tty->flags & (1 << TTY_IO_ERROR)) - return -EIO; - - spin_lock_irqsave(&info->lock, flags); - - if (set & TIOCM_RTS) - info->MCR |= UART_MCR_RTS; - if (set & TIOCM_DTR) - info->MCR |= UART_MCR_DTR; - - if (clear & TIOCM_RTS) - info->MCR &= ~UART_MCR_RTS; - if (clear & TIOCM_DTR) - info->MCR &= ~UART_MCR_DTR; - - serial_out(info, UART_ESI_CMD1, ESI_WRITE_UART); - serial_out(info, UART_ESI_CMD2, UART_MCR); - serial_out(info, UART_ESI_CMD2, info->MCR); - - spin_unlock_irqrestore(&info->lock, flags); - return 0; -} - -/* - * rs_break() --- routine which turns the break handling on or off - */ -static int esp_break(struct tty_struct *tty, int break_state) -{ - struct esp_struct *info = tty->driver_data; - unsigned long flags; - - if (serial_paranoia_check(info, tty->name, "esp_break")) - return -EINVAL; - - if (break_state == -1) { - spin_lock_irqsave(&info->lock, flags); - serial_out(info, UART_ESI_CMD1, ESI_ISSUE_BREAK); - serial_out(info, UART_ESI_CMD2, 0x01); - spin_unlock_irqrestore(&info->lock, flags); - - /* FIXME - new style wait needed here */ - interruptible_sleep_on(&info->break_wait); - } else { - spin_lock_irqsave(&info->lock, flags); - serial_out(info, UART_ESI_CMD1, ESI_ISSUE_BREAK); - serial_out(info, UART_ESI_CMD2, 0x00); - spin_unlock_irqrestore(&info->lock, flags); - } - return 0; -} - -static int rs_ioctl(struct tty_struct *tty, struct file *file, - unsigned int cmd, unsigned long arg) -{ - struct esp_struct *info = tty->driver_data; - struct async_icount cprev, cnow; /* kernel counter temps */ - struct serial_icounter_struct __user *p_cuser; /* user space */ - void __user *argp = (void __user *)arg; - 
unsigned long flags; - int ret; - - if (serial_paranoia_check(info, tty->name, "rs_ioctl")) - return -ENODEV; - - if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && - (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGWILD) && - (cmd != TIOCSERSWILD) && (cmd != TIOCSERGSTRUCT) && - (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT) && - (cmd != TIOCGHAYESESP) && (cmd != TIOCSHAYESESP)) { - if (tty->flags & (1 << TTY_IO_ERROR)) - return -EIO; - } - - switch (cmd) { - case TIOCGSERIAL: - return get_serial_info(info, argp); - case TIOCSSERIAL: - lock_kernel(); - ret = set_serial_info(info, argp); - unlock_kernel(); - return ret; - case TIOCSERGWILD: - return put_user(0L, (unsigned long __user *)argp); - case TIOCSERGETLSR: /* Get line status register */ - return get_lsr_info(info, argp); - case TIOCSERSWILD: - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - return 0; - /* - * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change - * - mask passed in arg for lines of interest - * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking) - * Caller should use TIOCGICOUNT to see which one it was - */ - case TIOCMIWAIT: - spin_lock_irqsave(&info->lock, flags); - cprev = info->icount; /* note the counters on entry */ - spin_unlock_irqrestore(&info->lock, flags); - while (1) { - /* FIXME: convert to new style wakeup */ - interruptible_sleep_on(&info->port.delta_msr_wait); - /* see if a signal did it */ - if (signal_pending(current)) - return -ERESTARTSYS; - spin_lock_irqsave(&info->lock, flags); - cnow = info->icount; /* atomic copy */ - spin_unlock_irqrestore(&info->lock, flags); - if (cnow.rng == cprev.rng && - cnow.dsr == cprev.dsr && - cnow.dcd == cprev.dcd && - cnow.cts == cprev.cts) - return -EIO; /* no change => error */ - if (((arg & TIOCM_RNG) && - (cnow.rng != cprev.rng)) || - ((arg & TIOCM_DSR) && - (cnow.dsr != cprev.dsr)) || - ((arg & TIOCM_CD) && - (cnow.dcd != cprev.dcd)) || - ((arg & TIOCM_CTS) && - (cnow.cts != cprev.cts))) { - return 0; - } - cprev = cnow; - } - /* NOTREACHED */ - /* - * Get counter of input serial line interrupts (DCD,RI,DSR,CTS) - * Return: write counters to the user passed counter struct - * NB: both 1->0 and 0->1 transitions are counted except for - * RI where only 0->1 is counted. 
- */ - case TIOCGICOUNT: - spin_lock_irqsave(&info->lock, flags); - cnow = info->icount; - spin_unlock_irqrestore(&info->lock, flags); - p_cuser = argp; - if (put_user(cnow.cts, &p_cuser->cts) || - put_user(cnow.dsr, &p_cuser->dsr) || - put_user(cnow.rng, &p_cuser->rng) || - put_user(cnow.dcd, &p_cuser->dcd)) - return -EFAULT; - return 0; - case TIOCGHAYESESP: - return get_esp_config(info, argp); - case TIOCSHAYESESP: - lock_kernel(); - ret = set_esp_config(info, argp); - unlock_kernel(); - return ret; - default: - return -ENOIOCTLCMD; - } - return 0; -} - -static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios) -{ - struct esp_struct *info = tty->driver_data; - unsigned long flags; - - change_speed(info); - - spin_lock_irqsave(&info->lock, flags); - - /* Handle transition to B0 status */ - if ((old_termios->c_cflag & CBAUD) && - !(tty->termios->c_cflag & CBAUD)) { - info->MCR &= ~(UART_MCR_DTR|UART_MCR_RTS); - serial_out(info, UART_ESI_CMD1, ESI_WRITE_UART); - serial_out(info, UART_ESI_CMD2, UART_MCR); - serial_out(info, UART_ESI_CMD2, info->MCR); - } - - /* Handle transition away from B0 status */ - if (!(old_termios->c_cflag & CBAUD) && - (tty->termios->c_cflag & CBAUD)) { - info->MCR |= (UART_MCR_DTR | UART_MCR_RTS); - serial_out(info, UART_ESI_CMD1, ESI_WRITE_UART); - serial_out(info, UART_ESI_CMD2, UART_MCR); - serial_out(info, UART_ESI_CMD2, info->MCR); - } - - spin_unlock_irqrestore(&info->lock, flags); - - /* Handle turning of CRTSCTS */ - if ((old_termios->c_cflag & CRTSCTS) && - !(tty->termios->c_cflag & CRTSCTS)) { - rs_start(tty); - } -} - -/* - * ------------------------------------------------------------ - * rs_close() - * - * This routine is called when the serial port gets closed. First, we - * wait for the last remaining data to be sent. Then, we unlink its - * async structure from the interrupt chain if necessary, and we free - * that IRQ if nothing is left in the chain. - * ------------------------------------------------------------ - */ -static void rs_close(struct tty_struct *tty, struct file *filp) -{ - struct esp_struct *info = tty->driver_data; - unsigned long flags; - - if (!info || serial_paranoia_check(info, tty->name, "rs_close")) - return; - - spin_lock_irqsave(&info->lock, flags); - - if (tty_hung_up_p(filp)) { - DBG_CNT("before DEC-hung"); - goto out; - } - -#ifdef SERIAL_DEBUG_OPEN - printk(KERN_DEBUG "rs_close ttys%d, count = %d\n", - info->line, info->port.count); -#endif - if (tty->count == 1 && info->port.count != 1) { - /* - * Uh, oh. tty->count is 1, which means that the tty - * structure will be freed. Info->count should always - * be one in these conditions. If it's greater than - * one, we've got real problems, since it means the - * serial port won't be shutdown. - */ - printk(KERN_DEBUG "rs_close: bad serial port count; tty->count is 1, info->port.count is %d\n", info->port.count); - info->port.count = 1; - } - if (--info->port.count < 0) { - printk(KERN_ERR "rs_close: bad serial port count for ttys%d: %d\n", - info->line, info->port.count); - info->port.count = 0; - } - if (info->port.count) { - DBG_CNT("before DEC-2"); - goto out; - } - info->port.flags |= ASYNC_CLOSING; - - spin_unlock_irqrestore(&info->lock, flags); - /* - * Now we wait for the transmit buffer to clear; and we notify - * the line discipline to only process XON/XOFF characters. - */ - tty->closing = 1; - if (info->closing_wait != ASYNC_CLOSING_WAIT_NONE) - tty_wait_until_sent(tty, info->closing_wait); - /* - * At this point we stop accepting input. 
To do this, we - * disable the receive line status interrupts, and tell the - * interrupt driver to stop checking the data ready bit in the - * line status register. - */ - /* info->IER &= ~UART_IER_RLSI; */ - info->IER &= ~UART_IER_RDI; - info->read_status_mask &= ~UART_LSR_DR; - if (info->port.flags & ASYNC_INITIALIZED) { - - spin_lock_irqsave(&info->lock, flags); - serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK); - serial_out(info, UART_ESI_CMD2, info->IER); - - /* disable receive timeout */ - serial_out(info, UART_ESI_CMD1, ESI_SET_RX_TIMEOUT); - serial_out(info, UART_ESI_CMD2, 0x00); - - spin_unlock_irqrestore(&info->lock, flags); - - /* - * Before we drop DTR, make sure the UART transmitter - * has completely drained; this is especially - * important if there is a transmit FIFO! - */ - rs_wait_until_sent(tty, info->timeout); - } - shutdown(info); - rs_flush_buffer(tty); - tty_ldisc_flush(tty); - tty->closing = 0; - info->port.tty = NULL; - - if (info->port.blocked_open) { - if (info->close_delay) - msleep_interruptible(jiffies_to_msecs(info->close_delay)); - wake_up_interruptible(&info->port.open_wait); - } - info->port.flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING); - wake_up_interruptible(&info->port.close_wait); - return; - -out: - spin_unlock_irqrestore(&info->lock, flags); -} - -static void rs_wait_until_sent(struct tty_struct *tty, int timeout) -{ - struct esp_struct *info = tty->driver_data; - unsigned long orig_jiffies, char_time; - unsigned long flags; - - if (serial_paranoia_check(info, tty->name, "rs_wait_until_sent")) - return; - - orig_jiffies = jiffies; - char_time = ((info->timeout - HZ / 50) / 1024) / 5; - - if (!char_time) - char_time = 1; - - spin_lock_irqsave(&info->lock, flags); - serial_out(info, UART_ESI_CMD1, ESI_NO_COMMAND); - serial_out(info, UART_ESI_CMD1, ESI_GET_TX_AVAIL); - - while ((serial_in(info, UART_ESI_STAT1) != 0x03) || - (serial_in(info, UART_ESI_STAT2) != 0xff)) { - - spin_unlock_irqrestore(&info->lock, flags); - msleep_interruptible(jiffies_to_msecs(char_time)); - - if (signal_pending(current)) - return; - - if (timeout && time_after(jiffies, orig_jiffies + timeout)) - return; - - spin_lock_irqsave(&info->lock, flags); - serial_out(info, UART_ESI_CMD1, ESI_NO_COMMAND); - serial_out(info, UART_ESI_CMD1, ESI_GET_TX_AVAIL); - } - spin_unlock_irqrestore(&info->lock, flags); - set_current_state(TASK_RUNNING); -} - -/* - * esp_hangup() --- called by tty_hangup() when a hangup is signaled. 
- */ -static void esp_hangup(struct tty_struct *tty) -{ - struct esp_struct *info = tty->driver_data; - - if (serial_paranoia_check(info, tty->name, "esp_hangup")) - return; - - rs_flush_buffer(tty); - shutdown(info); - info->port.count = 0; - info->port.flags &= ~ASYNC_NORMAL_ACTIVE; - info->port.tty = NULL; - wake_up_interruptible(&info->port.open_wait); -} - -static int esp_carrier_raised(struct tty_port *port) -{ - struct esp_struct *info = container_of(port, struct esp_struct, port); - serial_out(info, UART_ESI_CMD1, ESI_GET_UART_STAT); - if (serial_in(info, UART_ESI_STAT2) & UART_MSR_DCD) - return 1; - return 0; -} - -/* - * ------------------------------------------------------------ - * esp_open() and friends - * ------------------------------------------------------------ - */ -static int block_til_ready(struct tty_struct *tty, struct file *filp, - struct esp_struct *info) -{ - DECLARE_WAITQUEUE(wait, current); - int retval; - int do_clocal = 0; - unsigned long flags; - int cd; - struct tty_port *port = &info->port; - - /* - * If the device is in the middle of being closed, then block - * until it's done, and then try again. - */ - if (tty_hung_up_p(filp) || - (port->flags & ASYNC_CLOSING)) { - if (port->flags & ASYNC_CLOSING) - interruptible_sleep_on(&port->close_wait); -#ifdef SERIAL_DO_RESTART - if (port->flags & ASYNC_HUP_NOTIFY) - return -EAGAIN; - else - return -ERESTARTSYS; -#else - return -EAGAIN; -#endif - } - - /* - * If non-blocking mode is set, or the port is not enabled, - * then make the check up front and then exit. - */ - if ((filp->f_flags & O_NONBLOCK) || - (tty->flags & (1 << TTY_IO_ERROR))) { - port->flags |= ASYNC_NORMAL_ACTIVE; - return 0; - } - - if (tty->termios->c_cflag & CLOCAL) - do_clocal = 1; - - /* - * Block waiting for the carrier detect and the line to become - * free (i.e., not in use by the callout). While we are in - * this loop, port->count is dropped by one, so that - * rs_close() knows when to free things. We restore it upon - * exit, either normal or abnormal. 
- */ - retval = 0; - add_wait_queue(&port->open_wait, &wait); -#ifdef SERIAL_DEBUG_OPEN - printk(KERN_DEBUG "block_til_ready before block: ttys%d, count = %d\n", - info->line, port->count); -#endif - spin_lock_irqsave(&info->lock, flags); - if (!tty_hung_up_p(filp)) - port->count--; - port->blocked_open++; - while (1) { - if ((tty->termios->c_cflag & CBAUD)) { - unsigned int scratch; - - serial_out(info, UART_ESI_CMD1, ESI_READ_UART); - serial_out(info, UART_ESI_CMD2, UART_MCR); - scratch = serial_in(info, UART_ESI_STAT1); - serial_out(info, UART_ESI_CMD1, ESI_WRITE_UART); - serial_out(info, UART_ESI_CMD2, UART_MCR); - serial_out(info, UART_ESI_CMD2, - scratch | UART_MCR_DTR | UART_MCR_RTS); - } - set_current_state(TASK_INTERRUPTIBLE); - if (tty_hung_up_p(filp) || - !(port->flags & ASYNC_INITIALIZED)) { -#ifdef SERIAL_DO_RESTART - if (port->flags & ASYNC_HUP_NOTIFY) - retval = -EAGAIN; - else - retval = -ERESTARTSYS; -#else - retval = -EAGAIN; -#endif - break; - } - - cd = tty_port_carrier_raised(port); - - if (!(port->flags & ASYNC_CLOSING) && - (do_clocal)) - break; - if (signal_pending(current)) { - retval = -ERESTARTSYS; - break; - } -#ifdef SERIAL_DEBUG_OPEN - printk(KERN_DEBUG "block_til_ready blocking: ttys%d, count = %d\n", - info->line, port->count); -#endif - spin_unlock_irqrestore(&info->lock, flags); - schedule(); - spin_lock_irqsave(&info->lock, flags); - } - set_current_state(TASK_RUNNING); - remove_wait_queue(&port->open_wait, &wait); - if (!tty_hung_up_p(filp)) - port->count++; - port->blocked_open--; - spin_unlock_irqrestore(&info->lock, flags); -#ifdef SERIAL_DEBUG_OPEN - printk(KERN_DEBUG "block_til_ready after blocking: ttys%d, count = %d\n", - info->line, port->count); -#endif - if (retval) - return retval; - port->flags |= ASYNC_NORMAL_ACTIVE; - return 0; -} - -/* - * This routine is called whenever a serial port is opened. It - * enables interrupts for a serial port, linking in its async structure into - * the IRQ chain. It also performs the serial-specific - * initialization for the tty structure. - */ -static int esp_open(struct tty_struct *tty, struct file *filp) -{ - struct esp_struct *info; - int retval, line; - unsigned long flags; - - line = tty->index; - if ((line < 0) || (line >= NR_PORTS)) - return -ENODEV; - - /* find the port in the chain */ - - info = ports; - - while (info && (info->line != line)) - info = info->next_port; - - if (!info) { - serial_paranoia_check(info, tty->name, "esp_open"); - return -ENODEV; - } - -#ifdef SERIAL_DEBUG_OPEN - printk(KERN_DEBUG "esp_open %s, count = %d\n", tty->name, info->port.count); -#endif - spin_lock_irqsave(&info->lock, flags); - info->port.count++; - tty->driver_data = info; - info->port.tty = tty; - - spin_unlock_irqrestore(&info->lock, flags); - - /* - * Start up serial port - */ - retval = startup(info); - if (retval) - return retval; - - retval = block_til_ready(tty, filp, info); - if (retval) { -#ifdef SERIAL_DEBUG_OPEN - printk(KERN_DEBUG "esp_open returning after block_til_ready with %d\n", - retval); -#endif - return retval; - } -#ifdef SERIAL_DEBUG_OPEN - printk(KERN_DEBUG "esp_open %s successful...", tty->name); -#endif - return 0; -} - -/* - * --------------------------------------------------------------------- - * espserial_init() and friends - * - * espserial_init() is called at boot-time to initialize the serial driver. 
- * --------------------------------------------------------------------- - */ - -/* - * This routine prints out the appropriate serial driver version - * number, and identifies which options were configured into this - * driver. - */ - -static void __init show_serial_version(void) -{ - printk(KERN_INFO "%s version %s (DMA %u)\n", - serial_name, serial_version, dma); -} - -/* - * This routine is called by espserial_init() to initialize a specific serial - * port. - */ -static int autoconfig(struct esp_struct *info) -{ - int port_detected = 0; - unsigned long flags; - - if (!request_region(info->io_port, REGION_SIZE, "esp serial")) - return -EIO; - - spin_lock_irqsave(&info->lock, flags); - /* - * Check for ESP card - */ - - if (serial_in(info, UART_ESI_BASE) == 0xf3) { - serial_out(info, UART_ESI_CMD1, 0x00); - serial_out(info, UART_ESI_CMD1, 0x01); - - if ((serial_in(info, UART_ESI_STAT2) & 0x70) == 0x20) { - port_detected = 1; - - if (!(info->irq)) { - serial_out(info, UART_ESI_CMD1, 0x02); - - if (serial_in(info, UART_ESI_STAT1) & 0x01) - info->irq = 3; - else - info->irq = 4; - } - - - /* put card in enhanced mode */ - /* this prevents access through */ - /* the "old" IO ports */ - esp_basic_init(info); - - /* clear out MCR */ - serial_out(info, UART_ESI_CMD1, ESI_WRITE_UART); - serial_out(info, UART_ESI_CMD2, UART_MCR); - serial_out(info, UART_ESI_CMD2, 0x00); - } - } - if (!port_detected) - release_region(info->io_port, REGION_SIZE); - - spin_unlock_irqrestore(&info->lock, flags); - return (port_detected); -} - -static const struct tty_operations esp_ops = { - .open = esp_open, - .close = rs_close, - .write = rs_write, - .put_char = rs_put_char, - .flush_chars = rs_flush_chars, - .write_room = rs_write_room, - .chars_in_buffer = rs_chars_in_buffer, - .flush_buffer = rs_flush_buffer, - .ioctl = rs_ioctl, - .throttle = rs_throttle, - .unthrottle = rs_unthrottle, - .set_termios = rs_set_termios, - .stop = rs_stop, - .start = rs_start, - .hangup = esp_hangup, - .break_ctl = esp_break, - .wait_until_sent = rs_wait_until_sent, - .tiocmget = esp_tiocmget, - .tiocmset = esp_tiocmset, -}; - -static const struct tty_port_operations esp_port_ops = { - .esp_carrier_raised, -}; - -/* - * The serial driver boot-time initialization code! 
- */ -static int __init espserial_init(void) -{ - int i, offset; - struct esp_struct *info; - struct esp_struct *last_primary = NULL; - int esp[] = { 0x100, 0x140, 0x180, 0x200, 0x240, 0x280, 0x300, 0x380 }; - - esp_driver = alloc_tty_driver(NR_PORTS); - if (!esp_driver) - return -ENOMEM; - - for (i = 0; i < NR_PRIMARY; i++) { - if (irq[i] != 0) { - if ((irq[i] < 2) || (irq[i] > 15) || (irq[i] == 6) || - (irq[i] == 8) || (irq[i] == 13)) - irq[i] = 0; - else if (irq[i] == 2) - irq[i] = 9; - } - } - - if ((dma != 1) && (dma != 3)) - dma = 0; - - if ((rx_trigger < 1) || (rx_trigger > 1023)) - rx_trigger = 768; - - if ((tx_trigger < 1) || (tx_trigger > 1023)) - tx_trigger = 768; - - if ((flow_off < 1) || (flow_off > 1023)) - flow_off = 1016; - - if ((flow_on < 1) || (flow_on > 1023)) - flow_on = 944; - - if ((rx_timeout < 0) || (rx_timeout > 255)) - rx_timeout = 128; - - if (flow_on >= flow_off) - flow_on = flow_off - 1; - - show_serial_version(); - - /* Initialize the tty_driver structure */ - - esp_driver->owner = THIS_MODULE; - esp_driver->name = "ttyP"; - esp_driver->major = ESP_IN_MAJOR; - esp_driver->minor_start = 0; - esp_driver->type = TTY_DRIVER_TYPE_SERIAL; - esp_driver->subtype = SERIAL_TYPE_NORMAL; - esp_driver->init_termios = tty_std_termios; - esp_driver->init_termios.c_cflag = - B9600 | CS8 | CREAD | HUPCL | CLOCAL; - esp_driver->init_termios.c_ispeed = 9600; - esp_driver->init_termios.c_ospeed = 9600; - esp_driver->flags = TTY_DRIVER_REAL_RAW; - tty_set_operations(esp_driver, &esp_ops); - if (tty_register_driver(esp_driver)) { - printk(KERN_ERR "Couldn't register esp serial driver"); - put_tty_driver(esp_driver); - return 1; - } - - info = kzalloc(sizeof(struct esp_struct), GFP_KERNEL); - - if (!info) { - printk(KERN_ERR "Couldn't allocate memory for esp serial device information\n"); - tty_unregister_driver(esp_driver); - put_tty_driver(esp_driver); - return 1; - } - - spin_lock_init(&info->lock); - /* rx_trigger, tx_trigger are needed by autoconfig */ - info->config.rx_trigger = rx_trigger; - info->config.tx_trigger = tx_trigger; - - i = 0; - offset = 0; - - do { - tty_port_init(&info->port); - info->port.ops = &esp_port_ops; - info->io_port = esp[i] + offset; - info->irq = irq[i]; - info->line = (i * 8) + (offset / 8); - - if (!autoconfig(info)) { - i++; - offset = 0; - continue; - } - - info->custom_divisor = (divisor[i] >> (offset / 2)) & 0xf; - info->port.flags = STD_COM_FLAGS; - if (info->custom_divisor) - info->port.flags |= ASYNC_SPD_CUST; - info->magic = ESP_MAGIC; - info->close_delay = 5*HZ/10; - info->closing_wait = 30*HZ; - info->config.rx_timeout = rx_timeout; - info->config.flow_on = flow_on; - info->config.flow_off = flow_off; - info->config.pio_threshold = pio_threshold; - info->next_port = ports; - init_waitqueue_head(&info->break_wait); - ports = info; - printk(KERN_INFO "ttyP%d at 0x%04x (irq = %d) is an ESP ", - info->line, info->io_port, info->irq); - - if (info->line % 8) { - printk("secondary port\n"); - /* 8 port cards can't do DMA */ - info->stat_flags |= ESP_STAT_NEVER_DMA; - - if (last_primary) - last_primary->stat_flags |= ESP_STAT_NEVER_DMA; - } else { - printk("primary port\n"); - last_primary = info; - irq[i] = info->irq; - } - - if (!dma) - info->stat_flags |= ESP_STAT_NEVER_DMA; - - info = kzalloc(sizeof(struct esp_struct), GFP_KERNEL); - if (!info) { - printk(KERN_ERR "Couldn't allocate memory for esp serial device information\n"); - /* allow use of the already detected ports */ - return 0; - } - - spin_lock_init(&info->lock); - /* rx_trigger, 
tx_trigger are needed by autoconfig */ - info->config.rx_trigger = rx_trigger; - info->config.tx_trigger = tx_trigger; - - if (offset == 56) { - i++; - offset = 0; - } else { - offset += 8; - } - } while (i < NR_PRIMARY); - - /* free the last port memory allocation */ - kfree(info); - - return 0; -} - -static void __exit espserial_exit(void) -{ - int e1; - struct esp_struct *temp_async; - struct esp_pio_buffer *pio_buf; - - e1 = tty_unregister_driver(esp_driver); - if (e1) - printk(KERN_ERR "esp: failed to unregister driver (%d)\n", e1); - put_tty_driver(esp_driver); - - while (ports) { - if (ports->io_port) - release_region(ports->io_port, REGION_SIZE); - temp_async = ports->next_port; - kfree(ports); - ports = temp_async; - } - - if (dma_buffer) - free_pages((unsigned long)dma_buffer, - get_order(DMA_BUFFER_SZ)); - - while (free_pio_buf) { - pio_buf = free_pio_buf->next; - kfree(free_pio_buf); - free_pio_buf = pio_buf; - } -} - -module_init(espserial_init); -module_exit(espserial_exit); diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 5a5385749e16..f72914db2a11 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -214,7 +214,6 @@ unifdef-y += futex.h unifdef-y += fs.h unifdef-y += gameport.h unifdef-y += generic_serial.h -unifdef-y += hayesesp.h unifdef-y += hdlcdrv.h unifdef-y += hdlc.h unifdef-y += hdreg.h diff --git a/include/linux/hayesesp.h b/include/linux/hayesesp.h deleted file mode 100644 index 92b08cfe4a75..000000000000 --- a/include/linux/hayesesp.h +++ /dev/null @@ -1,114 +0,0 @@ -#ifndef HAYESESP_H -#define HAYESESP_H - -struct hayes_esp_config { - short flow_on; - short flow_off; - short rx_trigger; - short tx_trigger; - short pio_threshold; - unsigned char rx_timeout; - char dma_channel; -}; - -#ifdef __KERNEL__ - -#define ESP_DMA_CHANNEL 0 -#define ESP_RX_TRIGGER 768 -#define ESP_TX_TRIGGER 768 -#define ESP_FLOW_OFF 1016 -#define ESP_FLOW_ON 944 -#define ESP_RX_TMOUT 128 -#define ESP_PIO_THRESHOLD 32 - -#define ESP_IN_MAJOR 57 /* major dev # for dial in */ -#define ESP_OUT_MAJOR 58 /* major dev # for dial out */ -#define ESPC_SCALE 3 -#define UART_ESI_BASE 0x00 -#define UART_ESI_SID 0x01 -#define UART_ESI_RX 0x02 -#define UART_ESI_TX 0x02 -#define UART_ESI_CMD1 0x04 -#define UART_ESI_CMD2 0x05 -#define UART_ESI_STAT1 0x04 -#define UART_ESI_STAT2 0x05 -#define UART_ESI_RWS 0x07 - -#define UART_IER_DMA_TMOUT 0x80 -#define UART_IER_DMA_TC 0x08 - -#define ESI_SET_IRQ 0x04 -#define ESI_SET_DMA_TMOUT 0x05 -#define ESI_SET_SRV_MASK 0x06 -#define ESI_SET_ERR_MASK 0x07 -#define ESI_SET_FLOW_CNTL 0x08 -#define ESI_SET_FLOW_CHARS 0x09 -#define ESI_SET_FLOW_LVL 0x0a -#define ESI_SET_TRIGGER 0x0b -#define ESI_SET_RX_TIMEOUT 0x0c -#define ESI_SET_FLOW_TMOUT 0x0d -#define ESI_WRITE_UART 0x0e -#define ESI_READ_UART 0x0f -#define ESI_SET_MODE 0x10 -#define ESI_GET_ERR_STAT 0x12 -#define ESI_GET_UART_STAT 0x13 -#define ESI_GET_RX_AVAIL 0x14 -#define ESI_GET_TX_AVAIL 0x15 -#define ESI_START_DMA_RX 0x16 -#define ESI_START_DMA_TX 0x17 -#define ESI_ISSUE_BREAK 0x1a -#define ESI_FLUSH_RX 0x1b -#define ESI_FLUSH_TX 0x1c -#define ESI_SET_BAUD 0x1d -#define ESI_SET_ENH_IRQ 0x1f -#define ESI_SET_REINTR 0x20 -#define ESI_SET_PRESCALAR 0x23 -#define ESI_NO_COMMAND 0xff - -#define ESP_STAT_RX_TIMEOUT 0x01 -#define ESP_STAT_DMA_RX 0x02 -#define ESP_STAT_DMA_TX 0x04 -#define ESP_STAT_NEVER_DMA 0x08 -#define ESP_STAT_USE_PIO 0x10 - -#define ESP_MAGIC 0x53ee -#define ESP_XMIT_SIZE 4096 - -struct esp_struct { - int magic; - struct tty_port port; - spinlock_t lock; - int io_port; - 
int irq; - int read_status_mask; - int ignore_status_mask; - int timeout; - int stat_flags; - int custom_divisor; - int close_delay; - unsigned short closing_wait; - unsigned short closing_wait2; - int IER; /* Interrupt Enable Register */ - int MCR; /* Modem control register */ - unsigned long last_active; - int line; - unsigned char *xmit_buf; - int xmit_head; - int xmit_tail; - int xmit_cnt; - wait_queue_head_t break_wait; - struct async_icount icount; /* kernel counters for the 4 input interrupts */ - struct hayes_esp_config config; /* port configuration */ - struct esp_struct *next_port; /* For the linked list */ -}; - -struct esp_pio_buffer { - unsigned char data[1024]; - struct esp_pio_buffer *next; -}; - -#endif /* __KERNEL__ */ - - -#endif /* ESP_H */ - -- cgit v1.2.3 From 64bc397914265a9ead8d73b63bb31ab3bdd25f67 Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Tue, 6 Oct 2009 16:06:11 +0100 Subject: tty_port: add "tty_port_open" helper For the moment this just moves the USB logic over and fixes the 'what if we open and hangup at the same time' race noticed by Oliver Neukum. Signed-off-by: Alan Cox Cc: Alan Stern Cc: Oliver Neukum Signed-off-by: Greg Kroah-Hartman --- drivers/char/tty_port.c | 36 +++++++++++++++++++++++++++++- drivers/usb/serial/usb-serial.c | 49 +++++++++++++++++------------------------ include/linux/tty.h | 10 ++++++++- 3 files changed, 64 insertions(+), 31 deletions(-) (limited to 'include') diff --git a/drivers/char/tty_port.c b/drivers/char/tty_port.c index c63f3d33914a..b22a61a4fbe5 100644 --- a/drivers/char/tty_port.c +++ b/drivers/char/tty_port.c @@ -99,10 +99,11 @@ EXPORT_SYMBOL(tty_port_tty_set); static void tty_port_shutdown(struct tty_port *port) { + mutex_lock(&port->mutex); if (port->ops->shutdown && test_and_clear_bit(ASYNCB_INITIALIZED, &port->flags)) port->ops->shutdown(port); - + mutex_unlock(&port->mutex); } /** @@ -381,3 +382,36 @@ void tty_port_close(struct tty_port *port, struct tty_struct *tty, tty_port_tty_set(port, NULL); } EXPORT_SYMBOL(tty_port_close); + +int tty_port_open(struct tty_port *port, struct tty_struct *tty, + struct file *filp) +{ + spin_lock_irq(&port->lock); + if (!tty_hung_up_p(filp)) + ++port->count; + spin_unlock_irq(&port->lock); + tty_port_tty_set(port, tty); + + /* + * Do the device-specific open only if the hardware isn't + * already initialized. Serialize open and shutdown using the + * port mutex. 
+ */ + + mutex_lock(&port->mutex); + + if (!test_bit(ASYNCB_INITIALIZED, &port->flags)) { + if (port->ops->activate) { + int retval = port->ops->activate(port, tty); + if (retval) { + mutex_unlock(&port->mutex); + return retval; + } + } + set_bit(ASYNCB_INITIALIZED, &port->flags); + } + mutex_unlock(&port->mutex); + return tty_port_block_til_ready(port, tty, filp); +} + +EXPORT_SYMBOL(tty_port_open); diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index bd3fa7ff15b1..b0649d92251f 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -247,41 +247,31 @@ static int serial_install(struct tty_driver *driver, struct tty_struct *tty) return retval; } -static int serial_open(struct tty_struct *tty, struct file *filp) +static int serial_activate(struct tty_port *tport, struct tty_struct *tty) { - struct usb_serial_port *port = tty->driver_data; + struct usb_serial_port *port = + container_of(tport, struct usb_serial_port, port); struct usb_serial *serial = port->serial; int retval; - dbg("%s - port %d", __func__, port->number); - - spin_lock_irq(&port->port.lock); - if (!tty_hung_up_p(filp)) - ++port->port.count; - spin_unlock_irq(&port->port.lock); - tty_port_tty_set(&port->port, tty); + if (mutex_lock_interruptible(&port->mutex)) + return -ERESTARTSYS; + mutex_lock(&serial->disc_mutex); + if (serial->disconnected) + retval = -ENODEV; + else + retval = port->serial->type->open(tty, port); + mutex_unlock(&serial->disc_mutex); + mutex_unlock(&port->mutex); + return retval; +} - /* Do the device-specific open only if the hardware isn't - * already initialized. - */ - if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) { - if (mutex_lock_interruptible(&port->mutex)) - return -ERESTARTSYS; - mutex_lock(&serial->disc_mutex); - if (serial->disconnected) - retval = -ENODEV; - else - retval = port->serial->type->open(tty, port); - mutex_unlock(&serial->disc_mutex); - mutex_unlock(&port->mutex); - if (retval) - return retval; - set_bit(ASYNCB_INITIALIZED, &port->port.flags); - } +static int serial_open(struct tty_struct *tty, struct file *filp) +{ + struct usb_serial_port *port = tty->driver_data; - /* Now do the correct tty layer semantics */ - retval = tty_port_block_til_ready(&port->port, tty, filp); - return retval; + dbg("%s - port %d", __func__, port->number); + return tty_port_open(&port->port, tty, filp); } /** @@ -725,6 +715,7 @@ static void serial_dtr_rts(struct tty_port *port, int on) static const struct tty_port_operations serial_port_ops = { .carrier_raised = serial_carrier_raised, .dtr_rts = serial_dtr_rts, + .activate = serial_activate, }; int usb_serial_probe(struct usb_interface *interface, diff --git a/include/linux/tty.h b/include/linux/tty.h index f0f43d08d8b8..6352ac257fcb 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -190,9 +190,15 @@ struct tty_port_operations { /* Control the DTR line */ void (*dtr_rts)(struct tty_port *port, int raise); /* Called when the last close completes or a hangup finishes - IFF the port was initialized. Do not use to free resources */ + IFF the port was initialized. Do not use to free resources. 
Called + under the port mutex to serialize against activate/shutdowns */ void (*shutdown)(struct tty_port *port); void (*drop)(struct tty_port *port); + /* Called under the port mutex from tty_port_open, serialized using + the port mutex */ + /* FIXME: long term getting the tty argument *out* of this would be + good for consoles */ + int (*activate)(struct tty_port *port, struct tty_struct *tty); }; struct tty_port { @@ -467,6 +473,8 @@ extern int tty_port_close_start(struct tty_port *port, extern void tty_port_close_end(struct tty_port *port, struct tty_struct *tty); extern void tty_port_close(struct tty_port *port, struct tty_struct *tty, struct file *filp); +extern int tty_port_open(struct tty_port *port, + struct tty_struct *tty, struct file *filp); extern inline int tty_port_users(struct tty_port *port) { return port->count + port->blocked_open; -- cgit v1.2.3 From 82fc5943430e3cbf15033ed4186a73f90906345d Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Tue, 6 Oct 2009 16:06:46 +0100 Subject: usb_serial: Kill port mutex The tty port has a port mutex used for all the port related locking so we don't need the one in the USB serial layer any more. Signed-off-by: Alan Cox Cc: Alan Stern Cc: Oliver Neukum Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/opticon.c | 4 ++-- drivers/usb/serial/usb-serial.c | 1 - include/linux/usb/serial.h | 3 --- 3 files changed, 2 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c index 80f59b6350cb..c03fdc0242dd 100644 --- a/drivers/usb/serial/opticon.c +++ b/drivers/usb/serial/opticon.c @@ -501,12 +501,12 @@ static int opticon_resume(struct usb_interface *intf) struct usb_serial_port *port = serial->port[0]; int result; - mutex_lock(&port->mutex); + mutex_lock(&port->port.mutex); if (port->port.count) result = usb_submit_urb(priv->bulk_read_urb, GFP_NOIO); else result = 0; - mutex_unlock(&port->mutex); + mutex_unlock(&port->port.mutex); return result; } diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 829a46684e1d..4543f359be75 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -897,7 +897,6 @@ int usb_serial_probe(struct usb_interface *interface, spin_lock_init(&port->lock); /* Keep this for private driver use for the moment but should probably go away */ - mutex_init(&port->mutex); INIT_WORK(&port->work, usb_serial_port_work); serial->port[i] = port; port->dev.parent = &interface->dev; diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index ce911ebf91e8..acf6e457c04b 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h @@ -39,8 +39,6 @@ enum port_dev_state { * @serial: pointer back to the struct usb_serial owner of this port. * @port: pointer to the corresponding tty_port for this port. * @lock: spinlock to grab when updating portions of this structure. - * @mutex: mutex used to synchronize serial_open() and serial_close() - * access for this port. * @number: the number of the port (the minor number). * @interrupt_in_buffer: pointer to the interrupt in buffer for this port. * @interrupt_in_urb: pointer to the interrupt in struct urb for this port. 
@@ -77,7 +75,6 @@ struct usb_serial_port { struct usb_serial *serial; struct tty_port port; spinlock_t lock; - struct mutex mutex; unsigned char number; unsigned char *interrupt_in_buffer; -- cgit v1.2.3 From 44e4909e453eaa09c7de409fc9ee4ebefd986c1c Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Mon, 30 Nov 2009 13:16:41 +0000 Subject: tty: tty_port: Change the buffer allocator locking We want to be able to do this without regard for the activate/own open method being used which causes a problem using port->mutex. Add another mutex for now. Once everything uses port_open to do buffer allocs we can kill it back off Signed-off-by: Alan Cox Signed-off-by: Greg Kroah-Hartman --- drivers/char/tty_port.c | 9 +++++---- include/linux/tty.h | 1 + 2 files changed, 6 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/char/tty_port.c b/drivers/char/tty_port.c index dd471d63fae2..3ef644a2e517 100644 --- a/drivers/char/tty_port.c +++ b/drivers/char/tty_port.c @@ -25,6 +25,7 @@ void tty_port_init(struct tty_port *port) init_waitqueue_head(&port->close_wait); init_waitqueue_head(&port->delta_msr_wait); mutex_init(&port->mutex); + mutex_init(&port->buf_mutex); spin_lock_init(&port->lock); port->close_delay = (50 * HZ) / 100; port->closing_wait = (3000 * HZ) / 100; @@ -34,10 +35,10 @@ EXPORT_SYMBOL(tty_port_init); int tty_port_alloc_xmit_buf(struct tty_port *port) { /* We may sleep in get_zeroed_page() */ - mutex_lock(&port->mutex); + mutex_lock(&port->buf_mutex); if (port->xmit_buf == NULL) port->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL); - mutex_unlock(&port->mutex); + mutex_unlock(&port->buf_mutex); if (port->xmit_buf == NULL) return -ENOMEM; return 0; @@ -46,12 +47,12 @@ EXPORT_SYMBOL(tty_port_alloc_xmit_buf); void tty_port_free_xmit_buf(struct tty_port *port) { - mutex_lock(&port->mutex); + mutex_lock(&port->buf_mutex); if (port->xmit_buf != NULL) { free_page((unsigned long)port->xmit_buf); port->xmit_buf = NULL; } - mutex_unlock(&port->mutex); + mutex_unlock(&port->buf_mutex); } EXPORT_SYMBOL(tty_port_free_xmit_buf); diff --git a/include/linux/tty.h b/include/linux/tty.h index 6352ac257fcb..e9269ca1542a 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -212,6 +212,7 @@ struct tty_port { wait_queue_head_t delta_msr_wait; /* Modem status change */ unsigned long flags; /* TTY flags ASY_*/ struct mutex mutex; /* Locking */ + struct mutex buf_mutex; /* Buffer alloc lock */ unsigned char *xmit_buf; /* Optional buffer */ unsigned int close_delay; /* Close port delay */ unsigned int closing_wait; /* Delay for output */ -- cgit v1.2.3 From 568aafc627e2978509e8a80c640ba534d1e843cc Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Mon, 30 Nov 2009 13:17:14 +0000 Subject: tty: tty_port: Add a kref object to the tty port Users of tty port need a way to refcount ports when hotplugging is involved. 
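As a rough illustration of how a hotplug-aware driver might use the new refcount (the my_* names below are invented for this sketch and are not part of the patch): the port owner gets the initial reference from tty_port_init(), supplies a destruct() op that frees the containing structure, and any other code that keeps the port alive brackets its use with tty_port_get()/tty_port_put().

/* Hypothetical hotplug driver using the new tty_port kref; all my_*
 * names are illustrative only. */
#include <linux/slab.h>
#include <linux/tty.h>

struct my_dev {
	struct tty_port port;
	/* ... hardware state ... */
};

static void my_port_destruct(struct tty_port *port)
{
	/* Runs on the final tty_port_put(); safe place to free. */
	kfree(container_of(port, struct my_dev, port));
}

static const struct tty_port_operations my_port_ops = {
	.destruct = my_port_destruct,
};

static struct my_dev *my_probe(void)
{
	struct my_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return NULL;
	tty_port_init(&dev->port);	/* kref_init(): refcount starts at 1 */
	dev->port.ops = &my_port_ops;
	return dev;
}

static void my_remove(struct my_dev *dev)
{
	/* Drop only the probe-time reference; my_port_destruct() runs
	 * once every other holder has also called tty_port_put(). */
	tty_port_put(&dev->port);
}

With this scheme the disconnect path never frees the structure directly; it simply drops its own reference and the memory survives until the last user of the port is gone.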
Signed-off-by: Alan Cox Signed-off-by: Greg Kroah-Hartman --- drivers/char/tty_port.c | 18 ++++++++++++++++++ include/linux/tty.h | 12 ++++++++++++ 2 files changed, 30 insertions(+) (limited to 'include') diff --git a/drivers/char/tty_port.c b/drivers/char/tty_port.c index 84006de2900f..be492dd66437 100644 --- a/drivers/char/tty_port.c +++ b/drivers/char/tty_port.c @@ -29,6 +29,7 @@ void tty_port_init(struct tty_port *port) spin_lock_init(&port->lock); port->close_delay = (50 * HZ) / 100; port->closing_wait = (3000 * HZ) / 100; + kref_init(&port->kref); } EXPORT_SYMBOL(tty_port_init); @@ -56,6 +57,23 @@ void tty_port_free_xmit_buf(struct tty_port *port) } EXPORT_SYMBOL(tty_port_free_xmit_buf); +static void tty_port_destructor(struct kref *kref) +{ + struct tty_port *port = container_of(kref, struct tty_port, kref); + if (port->xmit_buf) + free_page((unsigned long)port->xmit_buf); + if (port->ops->destruct) + port->ops->destruct(port); + else + kfree(port); +} + +void tty_port_put(struct tty_port *port) +{ + if (port) + kref_put(&port->kref, tty_port_destructor); +} +EXPORT_SYMBOL(tty_port_put); /** * tty_port_tty_get - get a tty reference diff --git a/include/linux/tty.h b/include/linux/tty.h index e9269ca1542a..e6da667ba34d 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -199,6 +199,8 @@ struct tty_port_operations { /* FIXME: long term getting the tty argument *out* of this would be good for consoles */ int (*activate)(struct tty_port *port, struct tty_struct *tty); + /* Called on the final put of a port */ + void (*destruct)(struct tty_port *port); }; struct tty_port { @@ -219,6 +221,7 @@ struct tty_port { int drain_delay; /* Set to zero if no pure time based drain is needed else set to size of fifo */ + struct kref kref; /* Ref counter */ }; /* @@ -461,6 +464,15 @@ extern int tty_write_lock(struct tty_struct *tty, int ndelay); extern void tty_port_init(struct tty_port *port); extern int tty_port_alloc_xmit_buf(struct tty_port *port); extern void tty_port_free_xmit_buf(struct tty_port *port); +extern void tty_port_put(struct tty_port *port); + +extern inline struct tty_port *tty_port_get(struct tty_port *port) +{ + if (port) + kref_get(&port->kref); + return port; +} + extern struct tty_struct *tty_port_tty_get(struct tty_port *port); extern void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty); extern int tty_port_carrier_raised(struct tty_port *port); -- cgit v1.2.3 From 6ed847d8efd08658ece10c9129cd511c8d7452cd Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Mon, 30 Nov 2009 13:17:30 +0000 Subject: tty: isicom: sort out the board init logic Split this into two flags - INIT meaning the board is set up and ACTIVE meaning the board has ports open. Remove the broken HUPCL casing and push the counts somewhere sensible. 
Signed-off-by: Alan Cox Signed-off-by: Greg Kroah-Hartman --- drivers/char/isicom.c | 41 +++++++++++------------------------------ include/linux/isicom.h | 1 + 2 files changed, 12 insertions(+), 30 deletions(-) (limited to 'include') diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c index e7be3ec6d21c..1e91c302ee42 100644 --- a/drivers/char/isicom.c +++ b/drivers/char/isicom.c @@ -793,21 +793,19 @@ static inline void isicom_setup_board(struct isi_board *bp) { int channel; struct isi_port *port; - unsigned long flags; - spin_lock_irqsave(&bp->card_lock, flags); - if (bp->status & BOARD_ACTIVE) { - spin_unlock_irqrestore(&bp->card_lock, flags); - return; - } - port = bp->ports; - bp->status |= BOARD_ACTIVE; - for (channel = 0; channel < bp->port_count; channel++, port++) - drop_dtr_rts(port); bp->count++; - spin_unlock_irqrestore(&bp->card_lock, flags); + if (!(bp->status & BOARD_INIT)) { + port = bp->ports; + for (channel = 0; channel < bp->port_count; channel++, port++) + drop_dtr_rts(port); + } + bp->status |= BOARD_ACTIVE | BOARD_INIT; } +/* Activate and thus setup board are protected from races against shutdown + by the tty_port mutex */ + static int isicom_activate(struct tty_port *tport, struct tty_struct *tty) { struct isi_port *port = container_of(tport, struct isi_port, port); @@ -884,19 +882,10 @@ static int isicom_open(struct tty_struct *tty, struct file *filp) /* close et all */ -static inline void isicom_shutdown_board(struct isi_board *bp) -{ - if (bp->status & BOARD_ACTIVE) - bp->status &= ~BOARD_ACTIVE; -} - /* card->lock HAS to be held */ static void isicom_shutdown_port(struct isi_port *port) { struct isi_board *card = port->card; - struct tty_struct *tty; - - tty = tty_port_tty_get(&port->port); tty_port_free_xmit_buf(&port->port); if (--card->count < 0) { @@ -904,17 +893,9 @@ static void isicom_shutdown_port(struct isi_port *port) card->base, card->count); card->count = 0; } - /* last port was closed, shutdown that board too */ - if (tty && C_HUPCL(tty)) { - /* FIXME: this logic is bogus - it's the old logic that was - bogus before but it still wants fixing */ - if (!card->count) { - if (card->status & BOARD_ACTIVE) - card->status &= ~BOARD_ACTIVE; - } - } - tty_kref_put(tty); + if (!card->count) + card->status &= BOARD_ACTIVE; } static void isicom_flush_buffer(struct tty_struct *tty) diff --git a/include/linux/isicom.h b/include/linux/isicom.h index bbd42197298f..b92e05650639 100644 --- a/include/linux/isicom.h +++ b/include/linux/isicom.h @@ -67,6 +67,7 @@ #define FIRMWARE_LOADED 0x0001 #define BOARD_ACTIVE 0x0002 +#define BOARD_INIT 0x0004 /* isi_port status bitmap */ -- cgit v1.2.3 From eeb89d918c2fa2b809e464136bbafdaec2aacb30 Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Mon, 30 Nov 2009 13:18:29 +0000 Subject: tty: push the BKL down into the handlers a bit Start trying to untangle the remaining BKL mess Updated to fix missing unlock_kernel noted by Dan Carpenter Signed-off-by: Alan "I must be out of my tree" Cox Signed-off-by: Greg Kroah-Hartman --- drivers/char/pty.c | 2 +- drivers/char/tty_io.c | 141 +++++++++++++++++++++++++++-------------------- drivers/char/tty_ldisc.c | 13 +++++ include/linux/tty.h | 2 +- 4 files changed, 95 insertions(+), 63 deletions(-) (limited to 'include') diff --git a/drivers/char/pty.c b/drivers/char/pty.c index d86c0bc05c1c..385c44b3034f 100644 --- a/drivers/char/pty.c +++ b/drivers/char/pty.c @@ -659,7 +659,7 @@ static int __ptmx_open(struct inode *inode, struct file *filp) if (!retval) return 0; out1: - 
tty_release_dev(filp); + tty_release(inode, filp); return retval; out: devpts_kill_index(inode, index); diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index 59499ee0fe6a..1e2413093615 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c @@ -142,7 +142,6 @@ ssize_t redirected_tty_write(struct file *, const char __user *, size_t, loff_t *); static unsigned int tty_poll(struct file *, poll_table *); static int tty_open(struct inode *, struct file *); -static int tty_release(struct inode *, struct file *); long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg); #ifdef CONFIG_COMPAT static long tty_compat_ioctl(struct file *file, unsigned int cmd, @@ -1017,14 +1016,16 @@ out: void tty_write_message(struct tty_struct *tty, char *msg) { - lock_kernel(); if (tty) { mutex_lock(&tty->atomic_write_lock); - if (tty->ops->write && !test_bit(TTY_CLOSING, &tty->flags)) + lock_kernel(); + if (tty->ops->write && !test_bit(TTY_CLOSING, &tty->flags)) { + unlock_kernel(); tty->ops->write(tty, msg, strlen(msg)); + } else + unlock_kernel(); tty_write_unlock(tty); } - unlock_kernel(); return; } @@ -1202,14 +1203,21 @@ static int tty_driver_install_tty(struct tty_driver *driver, struct tty_struct *tty) { int idx = tty->index; + int ret; - if (driver->ops->install) - return driver->ops->install(driver, tty); + if (driver->ops->install) { + lock_kernel(); + ret = driver->ops->install(driver, tty); + unlock_kernel(); + return ret; + } if (tty_init_termios(tty) == 0) { + lock_kernel(); tty_driver_kref_get(driver); tty->count++; driver->ttys[idx] = tty; + unlock_kernel(); return 0; } return -ENOMEM; @@ -1302,10 +1310,14 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx, struct tty_struct *tty; int retval; + lock_kernel(); /* Check if pty master is being opened multiple times */ if (driver->subtype == PTY_TYPE_MASTER && - (driver->flags & TTY_DRIVER_DEVPTS_MEM) && !first_ok) + (driver->flags & TTY_DRIVER_DEVPTS_MEM) && !first_ok) { + unlock_kernel(); return ERR_PTR(-EIO); + } + unlock_kernel(); /* * First time open is complex, especially for PTY devices. @@ -1335,8 +1347,9 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx, * If we fail here just call release_tty to clean up. No need * to decrement the use counts, as release_tty doesn't care. */ - + lock_kernel(); retval = tty_ldisc_setup(tty, tty->link); + unlock_kernel(); if (retval) goto release_mem_out; return tty; @@ -1350,7 +1363,9 @@ release_mem_out: if (printk_ratelimit()) printk(KERN_INFO "tty_init_dev: ldisc open failed, " "clearing slot %d\n", idx); + lock_kernel(); release_tty(tty, idx); + unlock_kernel(); return ERR_PTR(retval); } @@ -1464,7 +1479,17 @@ static void release_tty(struct tty_struct *tty, int idx) tty_kref_put(tty); } -/* +/** + * tty_release - vfs callback for close + * @inode: inode of tty + * @filp: file pointer for handle to tty + * + * Called the last time each file handle is closed that references + * this tty. There may however be several such references. + * + * Locking: + * Takes bkl. See tty_release_dev + * * Even releasing the tty structures is a tricky business.. We have * to be very careful that the structures are all released at the * same time, as interrupts might otherwise get the wrong pointers. @@ -1472,20 +1497,20 @@ static void release_tty(struct tty_struct *tty, int idx) * WSH 09/09/97: rewritten to avoid some nasty race conditions that could * lead to double frees or releasing memory still in use. 
*/ -void tty_release_dev(struct file *filp) + +int tty_release(struct inode *inode, struct file *filp) { struct tty_struct *tty, *o_tty; int pty_master, tty_closing, o_tty_closing, do_sleep; int devpts; int idx; char buf[64]; - struct inode *inode; - inode = filp->f_path.dentry->d_inode; tty = (struct tty_struct *)filp->private_data; if (tty_paranoia_check(tty, inode, "tty_release_dev")) - return; + return 0; + lock_kernel(); check_tty_count(tty, "tty_release_dev"); tty_fasync(-1, filp, 0); @@ -1500,19 +1525,22 @@ void tty_release_dev(struct file *filp) if (idx < 0 || idx >= tty->driver->num) { printk(KERN_DEBUG "tty_release_dev: bad idx when trying to " "free (%s)\n", tty->name); - return; + unlock_kernel(); + return 0; } if (!devpts) { if (tty != tty->driver->ttys[idx]) { + unlock_kernel(); printk(KERN_DEBUG "tty_release_dev: driver.table[%d] not tty " "for (%s)\n", idx, tty->name); - return; + return 0; } if (tty->termios != tty->driver->termios[idx]) { + unlock_kernel(); printk(KERN_DEBUG "tty_release_dev: driver.termios[%d] not termios " "for (%s)\n", idx, tty->name); - return; + return 0; } } #endif @@ -1526,26 +1554,30 @@ void tty_release_dev(struct file *filp) if (tty->driver->other && !(tty->driver->flags & TTY_DRIVER_DEVPTS_MEM)) { if (o_tty != tty->driver->other->ttys[idx]) { + unlock_kernel(); printk(KERN_DEBUG "tty_release_dev: other->table[%d] " "not o_tty for (%s)\n", idx, tty->name); - return; + return 0 ; } if (o_tty->termios != tty->driver->other->termios[idx]) { + unlock_kernel(); printk(KERN_DEBUG "tty_release_dev: other->termios[%d] " "not o_termios for (%s)\n", idx, tty->name); - return; + return 0; } if (o_tty->link != tty) { + unlock_kernel(); printk(KERN_DEBUG "tty_release_dev: bad pty pointers\n"); - return; + return 0; } } #endif if (tty->ops->close) tty->ops->close(tty, filp); + unlock_kernel(); /* * Sanity check: if tty->count is going to zero, there shouldn't be * any waiters on tty->read_wait or tty->write_wait. We test the @@ -1568,6 +1600,7 @@ void tty_release_dev(struct file *filp) opens on /dev/tty */ mutex_lock(&tty_mutex); + lock_kernel(); tty_closing = tty->count <= 1; o_tty_closing = o_tty && (o_tty->count <= (pty_master ? 1 : 0)); @@ -1598,6 +1631,7 @@ void tty_release_dev(struct file *filp) printk(KERN_WARNING "tty_release_dev: %s: read/write wait queue " "active!\n", tty_name(tty, buf)); + unlock_kernel(); mutex_unlock(&tty_mutex); schedule(); } @@ -1661,8 +1695,10 @@ void tty_release_dev(struct file *filp) mutex_unlock(&tty_mutex); /* check whether both sides are closing ... 
*/ - if (!tty_closing || (o_tty && !o_tty_closing)) - return; + if (!tty_closing || (o_tty && !o_tty_closing)) { + unlock_kernel(); + return 0; + } #ifdef TTY_DEBUG_HANGUP printk(KERN_DEBUG "freeing tty structure..."); @@ -1680,10 +1716,12 @@ void tty_release_dev(struct file *filp) /* Make this pty number available for reallocation */ if (devpts) devpts_kill_index(inode, idx); + unlock_kernel(); + return 0; } /** - * __tty_open - open a tty device + * tty_open - open a tty device * @inode: inode of device file * @filp: file pointer to tty * @@ -1703,7 +1741,7 @@ void tty_release_dev(struct file *filp) * ->siglock protects ->signal/->sighand */ -static int __tty_open(struct inode *inode, struct file *filp) +static int tty_open(struct inode *inode, struct file *filp) { struct tty_struct *tty = NULL; int noctty, retval; @@ -1720,10 +1758,12 @@ retry_open: retval = 0; mutex_lock(&tty_mutex); + lock_kernel(); if (device == MKDEV(TTYAUX_MAJOR, 0)) { tty = get_current_tty(); if (!tty) { + unlock_kernel(); mutex_unlock(&tty_mutex); return -ENXIO; } @@ -1755,12 +1795,14 @@ retry_open: goto got_driver; } } + unlock_kernel(); mutex_unlock(&tty_mutex); return -ENODEV; } driver = get_tty_driver(device, &index); if (!driver) { + unlock_kernel(); mutex_unlock(&tty_mutex); return -ENODEV; } @@ -1770,6 +1812,7 @@ got_driver: tty = tty_driver_lookup_tty(driver, inode, index); if (IS_ERR(tty)) { + unlock_kernel(); mutex_unlock(&tty_mutex); return PTR_ERR(tty); } @@ -1784,8 +1827,10 @@ got_driver: mutex_unlock(&tty_mutex); tty_driver_kref_put(driver); - if (IS_ERR(tty)) + if (IS_ERR(tty)) { + unlock_kernel(); return PTR_ERR(tty); + } filp->private_data = tty; file_move(filp, &tty->tty_files); @@ -1813,11 +1858,15 @@ got_driver: printk(KERN_DEBUG "error %d in opening %s...", retval, tty->name); #endif - tty_release_dev(filp); - if (retval != -ERESTARTSYS) + tty_release(inode, filp); + if (retval != -ERESTARTSYS) { + unlock_kernel(); return retval; - if (signal_pending(current)) + } + if (signal_pending(current)) { + unlock_kernel(); return retval; + } schedule(); /* * Need to reset f_op in case a hangup happened. @@ -1826,8 +1875,11 @@ got_driver: filp->f_op = &tty_fops; goto retry_open; } + unlock_kernel(); + mutex_lock(&tty_mutex); + lock_kernel(); spin_lock_irq(¤t->sighand->siglock); if (!noctty && current->signal->leader && @@ -1835,44 +1887,13 @@ got_driver: tty->session == NULL) __proc_set_tty(current, tty); spin_unlock_irq(¤t->sighand->siglock); + unlock_kernel(); mutex_unlock(&tty_mutex); return 0; } -/* BKL pushdown: scary code avoidance wrapper */ -static int tty_open(struct inode *inode, struct file *filp) -{ - int ret; - - lock_kernel(); - ret = __tty_open(inode, filp); - unlock_kernel(); - return ret; -} - - -/** - * tty_release - vfs callback for close - * @inode: inode of tty - * @filp: file pointer for handle to tty - * - * Called the last time each file handle is closed that references - * this tty. There may however be several such references. - * - * Locking: - * Takes bkl. 
See tty_release_dev - */ - -static int tty_release(struct inode *inode, struct file *filp) -{ - lock_kernel(); - tty_release_dev(filp); - unlock_kernel(); - return 0; -} - /** * tty_poll - check tty status * @filp: file being polled @@ -2317,9 +2338,7 @@ static int tiocsetd(struct tty_struct *tty, int __user *p) if (get_user(ldisc, p)) return -EFAULT; - lock_kernel(); ret = tty_set_ldisc(tty, ldisc); - unlock_kernel(); return ret; } diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c index feb55075819b..d914e77f7f01 100644 --- a/drivers/char/tty_ldisc.c +++ b/drivers/char/tty_ldisc.c @@ -34,6 +34,8 @@ #include #include +#include /* For the moment */ + #include #include @@ -545,6 +547,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc) if (IS_ERR(new_ldisc)) return PTR_ERR(new_ldisc); + lock_kernel(); /* * We need to look at the tty locking here for pty/tty pairs * when both sides try to change in parallel. @@ -558,6 +561,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc) */ if (tty->ldisc->ops->num == ldisc) { + unlock_kernel(); tty_ldisc_put(new_ldisc); return 0; } @@ -569,6 +573,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc) tty_wait_until_sent(tty, 0); + unlock_kernel(); mutex_lock(&tty->ldisc_mutex); /* @@ -582,6 +587,9 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc) test_bit(TTY_LDISC_CHANGING, &tty->flags) == 0); mutex_lock(&tty->ldisc_mutex); } + + lock_kernel(); + set_bit(TTY_LDISC_CHANGING, &tty->flags); /* @@ -592,6 +600,8 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc) tty->receive_room = 0; o_ldisc = tty->ldisc; + + unlock_kernel(); /* * Make sure we don't change while someone holds a * reference to the line discipline. The TTY_LDISC bit @@ -617,12 +627,14 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc) flush_scheduled_work(); mutex_lock(&tty->ldisc_mutex); + lock_kernel(); if (test_bit(TTY_HUPPED, &tty->flags)) { /* We were raced by the hangup method. It will have stomped the ldisc data and closed the ldisc down */ clear_bit(TTY_LDISC_CHANGING, &tty->flags); mutex_unlock(&tty->ldisc_mutex); tty_ldisc_put(new_ldisc); + unlock_kernel(); return -EIO; } @@ -664,6 +676,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc) if (o_work) schedule_delayed_work(&o_tty->buf.work, 1); mutex_unlock(&tty->ldisc_mutex); + unlock_kernel(); return retval; } diff --git a/include/linux/tty.h b/include/linux/tty.h index e6da667ba34d..405a9035fe40 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -449,7 +449,7 @@ extern void initialize_tty_struct(struct tty_struct *tty, struct tty_driver *driver, int idx); extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx, int first_ok); -extern void tty_release_dev(struct file *filp); +extern int tty_release(struct inode *inode, struct file *filp); extern int tty_init_termios(struct tty_struct *tty); extern struct tty_struct *tty_pair_get_tty(struct tty_struct *tty); -- cgit v1.2.3 From 967c9ef9b8c3bdec1bd3a380edac19e0b9fbeadc Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Fri, 11 Dec 2009 22:00:57 -0800 Subject: Input: i8042 - allow installing platform filters for incoming data Some hardware (such as Dell laptops) signal a variety of events through the i8042 controller, even if these don't map to keyboard events. Add support for drivers to filter the i8042 event stream in order to respond to these events and (if appropriate) block them from entering the input stream. 
Signed-off-by: Matthew Garrett Signed-off-by: Dmitry Torokhov --- drivers/input/serio/i8042.c | 58 ++++++++++++++++++++++++++++++++++++++++++--- include/linux/i8042.h | 18 +++++++++++++- 2 files changed, 72 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index 634da68f7f35..d84a36e545f6 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -126,6 +126,8 @@ static unsigned char i8042_suppress_kbd_ack; static struct platform_device *i8042_platform_device; static irqreturn_t i8042_interrupt(int irq, void *dev_id); +static bool (*i8042_platform_filter)(unsigned char data, unsigned char str, + struct serio *serio); void i8042_lock_chip(void) { @@ -139,6 +141,48 @@ void i8042_unlock_chip(void) } EXPORT_SYMBOL(i8042_unlock_chip); +int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str, + struct serio *serio)) +{ + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&i8042_lock, flags); + + if (i8042_platform_filter) { + ret = -EBUSY; + goto out; + } + + i8042_platform_filter = filter; + +out: + spin_unlock_irqrestore(&i8042_lock, flags); + return ret; +} +EXPORT_SYMBOL(i8042_install_filter); + +int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str, + struct serio *port)) +{ + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&i8042_lock, flags); + + if (i8042_platform_filter != filter) { + ret = -EINVAL; + goto out; + } + + i8042_platform_filter = NULL; + +out: + spin_unlock_irqrestore(&i8042_lock, flags); + return ret; +} +EXPORT_SYMBOL(i8042_remove_filter); + /* * The i8042_wait_read() and i8042_wait_write functions wait for the i8042 to * be ready for reading values from it / writing values to it. @@ -373,7 +417,8 @@ static void i8042_stop(struct serio *serio) * It is called from i8042_interrupt and thus is running with interrupts * off and i8042_lock held. */ -static bool i8042_filter(unsigned char data, unsigned char str) +static bool i8042_filter(unsigned char data, unsigned char str, + struct serio *serio) { if (unlikely(i8042_suppress_kbd_ack)) { if ((~str & I8042_STR_AUXDATA) && @@ -384,6 +429,11 @@ static bool i8042_filter(unsigned char data, unsigned char str) } } + if (i8042_platform_filter && i8042_platform_filter(data, str, serio)) { + dbg("Filtered out by platfrom filter\n"); + return true; + } + return false; } @@ -396,6 +446,7 @@ static bool i8042_filter(unsigned char data, unsigned char str) static irqreturn_t i8042_interrupt(int irq, void *dev_id) { struct i8042_port *port; + struct serio *serio; unsigned long flags; unsigned char str, data; unsigned int dfl; @@ -462,18 +513,19 @@ static irqreturn_t i8042_interrupt(int irq, void *dev_id) } port = &i8042_ports[port_no]; + serio = port->exists ? port->serio : NULL; dbg("%02x <- i8042 (interrupt, %d, %d%s%s)", data, port_no, irq, dfl & SERIO_PARITY ? ", bad parity" : "", dfl & SERIO_TIMEOUT ? 
", timeout" : ""); - filtered = i8042_filter(data, str); + filtered = i8042_filter(data, str, serio); spin_unlock_irqrestore(&i8042_lock, flags); if (likely(port->exists && !filtered)) - serio_interrupt(port->serio, data, dfl); + serio_interrupt(serio, data, dfl); out: return IRQ_RETVAL(ret); diff --git a/include/linux/i8042.h b/include/linux/i8042.h index 60c3360ef6ad..9bf6870ee5f4 100644 --- a/include/linux/i8042.h +++ b/include/linux/i8042.h @@ -39,6 +39,10 @@ void i8042_lock_chip(void); void i8042_unlock_chip(void); int i8042_command(unsigned char *param, int command); bool i8042_check_port_owner(const struct serio *); +int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str, + struct serio *serio)); +int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str, + struct serio *serio)); #else @@ -52,7 +56,7 @@ void i8042_unlock_chip(void) int i8042_command(unsigned char *param, int command) { - return -ENOSYS; + return -ENODEV; } bool i8042_check_port_owner(const struct serio *serio) @@ -60,6 +64,18 @@ bool i8042_check_port_owner(const struct serio *serio) return false; } +int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str, + struct serio *serio)) +{ + return -ENODEV; +} + +int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str, + struct serio *serio)) +{ + return -ENODEV; +} + #endif #endif -- cgit v1.2.3 From 01fc0ac198eabcbf460e1ed058860a935b6c2c9a Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 19 Apr 2009 21:57:19 +0200 Subject: kbuild: move bounds.h to include/generated Signed-off-by: Sam Ravnborg Cc: Al Viro Signed-off-by: Michal Marek --- .gitignore | 1 - Kbuild | 2 +- Makefile | 2 +- include/linux/mmzone.h | 2 +- include/linux/page-flags.h | 2 +- kernel/bounds.c | 2 +- 6 files changed, 5 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/.gitignore b/.gitignore index 946c7ec5c922..36d9cd6d4281 100644 --- a/.gitignore +++ b/.gitignore @@ -52,7 +52,6 @@ include/linux/autoconf.h include/linux/compile.h include/linux/version.h include/linux/utsrelease.h -include/linux/bounds.h include/generated # stgit generated dirs diff --git a/Kbuild b/Kbuild index f056b4feee51..1165d7a5ca4a 100644 --- a/Kbuild +++ b/Kbuild @@ -8,7 +8,7 @@ ##### # 1) Generate bounds.h -bounds-file := include/linux/bounds.h +bounds-file := include/generated/bounds.h always := $(bounds-file) targets := $(bounds-file) kernel/bounds.s diff --git a/Makefile b/Makefile index 07711786dc95..b58e9312ce30 100644 --- a/Makefile +++ b/Makefile @@ -1197,7 +1197,7 @@ MRPROPER_DIRS += include/config include2 usr/include include/generated MRPROPER_FILES += .config .config.old include/asm .version .old_version \ include/linux/autoconf.h include/linux/version.h \ include/linux/utsrelease.h \ - include/linux/bounds.h include/asm*/asm-offsets.h \ + include/asm*/asm-offsets.h \ Module.symvers Module.markers tags TAGS cscope* # clean - Delete most, but leave enough to build external modules diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 6f7561730d88..30fe668c2542 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 6b202b173955..ef36725aa515 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -8,7 +8,7 @@ #include #ifndef __GENERATING_BOUNDS_H #include -#include +#include #endif /* !__GENERATING_BOUNDS_H */ /* diff 
--git a/kernel/bounds.c b/kernel/bounds.c index 3c5301381837..98a51f26c136 100644 --- a/kernel/bounds.c +++ b/kernel/bounds.c @@ -12,7 +12,7 @@ void foo(void) { - /* The enum constants to put into include/linux/bounds.h */ + /* The enum constants to put into include/generated/bounds.h */ DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS); DEFINE(MAX_NR_ZONES, __MAX_NR_ZONES); /* End of constants */ -- cgit v1.2.3 From 98b8788ae91694499d1995035625bea16a4db0c4 Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 18 Oct 2009 00:39:40 +0200 Subject: drop explicit include of autoconf.h kbuild.h forces include of autoconf.h on the commandline using -include - so we do not need to include the file explicit. Signed-off-by: Sam Ravnborg Signed-off-by: Michal Marek --- arch/cris/arch-v32/kernel/head.S | 1 - arch/cris/kernel/asm-offsets.c | 1 - arch/cris/kernel/vmlinux.lds.S | 1 - arch/ia64/kvm/asm-offsets.c | 1 - drivers/accessibility/braille/braille_console.c | 1 - drivers/hid/hid-lg.h | 2 -- drivers/platform/x86/compal-laptop.c | 1 - drivers/staging/iio/ring_sw.h | 1 - include/linux/mmdebug.h | 2 -- 9 files changed, 11 deletions(-) (limited to 'include') diff --git a/arch/cris/arch-v32/kernel/head.S b/arch/cris/arch-v32/kernel/head.S index 3db478eb5155..76266f80a5f1 100644 --- a/arch/cris/arch-v32/kernel/head.S +++ b/arch/cris/arch-v32/kernel/head.S @@ -10,7 +10,6 @@ * The macros found in mmu_defs_asm.h uses the ## concatenation operator, so * -traditional must not be used when assembling this file. */ -#include #include #include #include diff --git a/arch/cris/kernel/asm-offsets.c b/arch/cris/kernel/asm-offsets.c index ddd6fbbe75de..dd7b8e983221 100644 --- a/arch/cris/kernel/asm-offsets.c +++ b/arch/cris/kernel/asm-offsets.c @@ -1,6 +1,5 @@ #include #include -#include /* * Generate definitions needed by assembly language modules. diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S index bbfda67d2907..d49d17d2a14f 100644 --- a/arch/cris/kernel/vmlinux.lds.S +++ b/arch/cris/kernel/vmlinux.lds.S @@ -8,7 +8,6 @@ * the kernel has booted. */ -#include #include #include diff --git a/arch/ia64/kvm/asm-offsets.c b/arch/ia64/kvm/asm-offsets.c index 0c3564a7a033..9324c875caf5 100644 --- a/arch/ia64/kvm/asm-offsets.c +++ b/arch/ia64/kvm/asm-offsets.c @@ -22,7 +22,6 @@ * */ -#include #include #include diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c index d672cfe7ca59..cb423f5aef24 100644 --- a/drivers/accessibility/braille/braille_console.c +++ b/drivers/accessibility/braille/braille_console.c @@ -21,7 +21,6 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ -#include #include #include #include diff --git a/drivers/hid/hid-lg.h b/drivers/hid/hid-lg.h index 27ae750ca878..bf31592eaf79 100644 --- a/drivers/hid/hid-lg.h +++ b/drivers/hid/hid-lg.h @@ -1,8 +1,6 @@ #ifndef __HID_LG_H #define __HID_LG_H -#include - #ifdef CONFIG_LOGITECH_FF int lgff_init(struct hid_device *hdev); #else diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c index 11003bba10d3..1a387e79f719 100644 --- a/drivers/platform/x86/compal-laptop.c +++ b/drivers/platform/x86/compal-laptop.c @@ -51,7 +51,6 @@ #include #include #include -#include #define COMPAL_DRIVER_VERSION "0.2.6" diff --git a/drivers/staging/iio/ring_sw.h b/drivers/staging/iio/ring_sw.h index f0b86f02cd80..fd677f008365 100644 --- a/drivers/staging/iio/ring_sw.h +++ b/drivers/staging/iio/ring_sw.h @@ -29,7 +29,6 @@ * driver requests - some may support multiple options */ -#include #include "iio.h" #include "ring_generic.h" diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index 8a5509877192..ee24ef8ab616 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -1,8 +1,6 @@ #ifndef LINUX_MM_DEBUG_H #define LINUX_MM_DEBUG_H 1 -#include - #ifdef CONFIG_DEBUG_VM #define VM_BUG_ON(cond) BUG_ON(cond) #else -- cgit v1.2.3 From 273b281fa22c293963ee3e6eec418f5dda2dbc83 Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 18 Oct 2009 00:52:28 +0200 Subject: kbuild: move utsrelease.h to include/generated Fix up all users of utsrelease.h Signed-off-by: Sam Ravnborg Signed-off-by: Michal Marek --- .gitignore | 1 - Makefile | 5 ++--- arch/alpha/boot/bootp.c | 2 +- arch/alpha/boot/bootpz.c | 2 +- arch/alpha/boot/main.c | 2 +- arch/frv/kernel/setup.c | 2 +- arch/powerpc/platforms/52xx/efika.c | 2 +- arch/powerpc/platforms/amigaone/setup.c | 2 +- arch/powerpc/platforms/chrp/setup.c | 2 +- arch/powerpc/platforms/powermac/bootx_init.c | 2 +- arch/x86/boot/header.S | 2 +- arch/x86/boot/version.c | 2 +- drivers/staging/panel/panel.c | 2 +- include/linux/vermagic.h | 2 +- init/version.c | 2 +- kernel/kexec.c | 2 +- kernel/trace/trace.c | 2 +- 17 files changed, 17 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/.gitignore b/.gitignore index c6c19ea6ea96..002d5304968b 100644 --- a/.gitignore +++ b/.gitignore @@ -47,7 +47,6 @@ Module.symvers # include/config include/linux/version.h -include/linux/utsrelease.h include/generated # stgit generated dirs diff --git a/Makefile b/Makefile index 3bdd932e3d88..860224d7cbcf 100644 --- a/Makefile +++ b/Makefile @@ -968,7 +968,7 @@ endif # prepare2 creates a makefile if using a separate output directory prepare2: prepare3 outputmakefile -prepare1: prepare2 include/linux/version.h include/linux/utsrelease.h \ +prepare1: prepare2 include/linux/version.h include/generated/utsrelease.h \ include/config/auto.conf $(cmd_crmodverdir) @@ -1005,7 +1005,7 @@ endef include/linux/version.h: $(srctree)/Makefile FORCE $(call filechk,version.h) -include/linux/utsrelease.h: include/config/kernel.release FORCE +include/generated/utsrelease.h: include/config/kernel.release FORCE $(call filechk,utsrelease.h) PHONY += headerdep @@ -1151,7 +1151,6 @@ CLEAN_FILES += vmlinux System.map \ MRPROPER_DIRS += include/config usr/include include/generated MRPROPER_FILES += .config .config.old .version .old_version \ include/linux/version.h \ - include/linux/utsrelease.h \ Module.symvers Module.markers tags TAGS cscope* # clean - Delete most, but leave enough to build external modules diff --git a/arch/alpha/boot/bootp.c 
b/arch/alpha/boot/bootp.c index 3af21c789339..3c8d1b25c661 100644 --- a/arch/alpha/boot/bootp.c +++ b/arch/alpha/boot/bootp.c @@ -9,7 +9,7 @@ */ #include #include -#include +#include #include #include diff --git a/arch/alpha/boot/bootpz.c b/arch/alpha/boot/bootpz.c index 1036b515e20c..ade3f129dc27 100644 --- a/arch/alpha/boot/bootpz.c +++ b/arch/alpha/boot/bootpz.c @@ -11,7 +11,7 @@ */ #include #include -#include +#include #include #include diff --git a/arch/alpha/boot/main.c b/arch/alpha/boot/main.c index 89f3be071ae5..644b7db55438 100644 --- a/arch/alpha/boot/main.c +++ b/arch/alpha/boot/main.c @@ -7,7 +7,7 @@ */ #include #include -#include +#include #include #include diff --git a/arch/frv/kernel/setup.c b/arch/frv/kernel/setup.c index 55e4fab7c0bc..75cf7f4b2fa8 100644 --- a/arch/frv/kernel/setup.c +++ b/arch/frv/kernel/setup.c @@ -10,7 +10,7 @@ * 2 of the License, or (at your option) any later version. */ -#include +#include #include #include #include diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c index bcc69e1f77c1..45c0cb9b67e6 100644 --- a/arch/powerpc/platforms/52xx/efika.c +++ b/arch/powerpc/platforms/52xx/efika.c @@ -10,7 +10,7 @@ */ #include -#include +#include #include #include #include diff --git a/arch/powerpc/platforms/amigaone/setup.c b/arch/powerpc/platforms/amigaone/setup.c index 9290a7a442d0..fb4eb0df054c 100644 --- a/arch/powerpc/platforms/amigaone/setup.c +++ b/arch/powerpc/platforms/amigaone/setup.c @@ -14,7 +14,7 @@ #include #include -#include +#include #include #include diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c index cd4ad9aea760..0a6f5ab8aab3 100644 --- a/arch/powerpc/platforms/chrp/setup.c +++ b/arch/powerpc/platforms/chrp/setup.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c index cf660916ae0b..9dd789a7370d 100644 --- a/arch/powerpc/platforms/powermac/bootx_init.c +++ b/arch/powerpc/platforms/powermac/bootx_init.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S index b31cc54b4641..93e689f4bd86 100644 --- a/arch/x86/boot/header.S +++ b/arch/x86/boot/header.S @@ -16,7 +16,7 @@ */ #include -#include +#include #include #include #include diff --git a/arch/x86/boot/version.c b/arch/x86/boot/version.c index 4d88763e39cb..2b15aa488ffb 100644 --- a/arch/x86/boot/version.c +++ b/arch/x86/boot/version.c @@ -13,7 +13,7 @@ */ #include "boot.h" -#include +#include #include const char kernel_version[] = diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c index 4ce399b6d237..f98a52448eae 100644 --- a/drivers/staging/panel/panel.c +++ b/drivers/staging/panel/panel.c @@ -55,7 +55,7 @@ #include #include #include -#include +#include #include #include diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h index 79b9837d9ca0..cf97b5b9d1fe 100644 --- a/include/linux/vermagic.h +++ b/include/linux/vermagic.h @@ -1,4 +1,4 @@ -#include +#include #include /* Simply sanity version stamp for modules. 
*/ diff --git a/init/version.c b/init/version.c index 82328aaca1ef..adff586401a5 100644 --- a/init/version.c +++ b/init/version.c @@ -10,7 +10,7 @@ #include #include #include -#include +#include #include #ifndef CONFIG_KALLSYMS diff --git a/kernel/kexec.c b/kernel/kexec.c index f336e2107f98..83f54e2a6eed 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 88bd9ae2a9ed..bfb1b64bfa9d 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -12,7 +12,7 @@ * Copyright (C) 2004 William Lee Irwin III */ #include -#include +#include #include #include #include -- cgit v1.2.3 From 7a77080dbec28ab2bceb422398601dcc53c142ad Mon Sep 17 00:00:00 2001 From: Jie Zhang Date: Thu, 12 Nov 2009 17:59:56 +0800 Subject: net: add net_tstamp.h to headers_install include/linux/net_tstamp.h is userspace API for hardware time stamping of network packets. It should be exported to userspace. Signed-off-by: Jie Zhang Signed-off-by: Barry Song Signed-off-by: Patrick Ohly Signed-off-by: Michal Marek --- include/linux/Kbuild | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/Kbuild b/include/linux/Kbuild index f72914db2a11..756f831cbdd5 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -118,6 +118,7 @@ header-y += mtio.h header-y += ncp_no.h header-y += neighbour.h header-y += net_dropmon.h +header-y += net_tstamp.h header-y += netfilter_arp.h header-y += netrom.h header-y += nfs2.h -- cgit v1.2.3 From c5df7f775148723de39274537a886e9502eef336 Mon Sep 17 00:00:00 2001 From: Albert Herranz Date: Sat, 12 Dec 2009 06:31:54 +0000 Subject: powerpc: allow ioremap within reserved memory regions Add a flag to let a platform ioremap memory regions marked as reserved. This flag will be used later by the Nintendo Wii support code to allow ioremapping the I/O region sitting between MEM1 and MEM2 and marked as reserved RAM in the patch "wii: use both mem1 and mem2 as ram". This will no longer be needed when proper discontig memory support for 32-bit PowerPC is added to the kernel. Signed-off-by: Albert Herranz Acked-by: Benjamin Herrenschmidt Signed-off-by: Grant Likely --- arch/powerpc/mm/init_32.c | 5 +++++ arch/powerpc/mm/mmu_decl.h | 1 + arch/powerpc/mm/pgtable_32.c | 4 +++- include/linux/lmb.h | 1 + lib/lmb.c | 7 ++++++- 5 files changed, 16 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index 703c7c2e0d9f..4ec900af332f 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -82,6 +82,11 @@ extern struct task_struct *current_set[NR_CPUS]; int __map_without_bats; int __map_without_ltlbs; +/* + * This tells the system to allow ioremapping memory marked as reserved. 
+ */ +int __allow_ioremap_reserved; + /* max amount of low RAM to map in */ unsigned long __max_low_memory = MAX_LOW_MEM; diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h index 9aa39fe74f8a..34dacc32250d 100644 --- a/arch/powerpc/mm/mmu_decl.h +++ b/arch/powerpc/mm/mmu_decl.h @@ -115,6 +115,7 @@ extern void settlbcam(int index, unsigned long virt, phys_addr_t phys, extern void invalidate_tlbcam_entry(int index); extern int __map_without_bats; +extern int __allow_ioremap_reserved; extern unsigned long ioremap_base; extern unsigned int rtas_data, rtas_size; diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index b55bbe87acb8..177e4038b43c 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -191,7 +192,8 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags, * Don't allow anybody to remap normal RAM that we're using. * mem_init() sets high_memory so only do the check after that. */ - if (mem_init_done && (p < virt_to_phys(high_memory))) { + if (mem_init_done && (p < virt_to_phys(high_memory)) && + !(__allow_ioremap_reserved && lmb_is_region_reserved(p, size))) { printk("__ioremap(): phys addr 0x%llx is RAM lr %p\n", (unsigned long long)p, __builtin_return_address(0)); return NULL; diff --git a/include/linux/lmb.h b/include/linux/lmb.h index 2442e3f3d033..ef82b8fcbddb 100644 --- a/include/linux/lmb.h +++ b/include/linux/lmb.h @@ -54,6 +54,7 @@ extern u64 __init lmb_phys_mem_size(void); extern u64 lmb_end_of_DRAM(void); extern void __init lmb_enforce_memory_limit(u64 memory_limit); extern int __init lmb_is_reserved(u64 addr); +extern int lmb_is_region_reserved(u64 base, u64 size); extern int lmb_find(struct lmb_property *res); extern void lmb_dump_all(void); diff --git a/lib/lmb.c b/lib/lmb.c index 0343c05609f0..9cee17142b2c 100644 --- a/lib/lmb.c +++ b/lib/lmb.c @@ -263,7 +263,7 @@ long __init lmb_reserve(u64 base, u64 size) return lmb_add_region(_rgn, base, size); } -long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size) +long lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size) { unsigned long i; @@ -493,6 +493,11 @@ int __init lmb_is_reserved(u64 addr) return 0; } +int lmb_is_region_reserved(u64 base, u64 size) +{ + return lmb_overlaps_region(&lmb.reserved, base, size); +} + /* * Given a , find which memory regions belong to this range. * Adjust the request and return a contiguous chunk. -- cgit v1.2.3 From 4819568f23a8bef0ca99b740ca60fe2450ab0aac Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sat, 12 Dec 2009 13:06:13 -0800 Subject: ftrace.h: Use common pr_info fmt string Reduces fmt string space a bit. 
Signed-off-by: Joe Perches Cc: Frederic Weisbecker Cc: Steven Rostedt LKML-Reference: <1260651974.2637.4.camel@Joe-Laptop.home> Signed-off-by: Ingo Molnar --- include/trace/ftrace.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index d1b3de9c1a71..c4eca380204e 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -733,7 +733,7 @@ static int ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)\ ret = register_trace_##call(ftrace_raw_event_##call); \ if (ret) \ pr_info("event trace: Could not activate trace point " \ - "probe to " #call "\n"); \ + "probe to %s\n", #call); \ return ret; \ } \ \ -- cgit v1.2.3 From 8051effcbced8478119167b93b0e9554cb82d28e Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Thu, 26 Nov 2009 11:10:05 +0000 Subject: spi: SuperH MSIOF SPI Master driver V2 This patch is V2 of SPI Master support for the SuperH MSIOF. Full duplex, spi mode 0-3, active high cs, 3-wire and lsb first should all be supported, but the driver has so far only been tested with "mmc_spi". The MSIOF hardware comes with 32-bit FIFOs for receive and transmit, and this driver simply breaks the SPI messages into FIFO-sized chunks. The MSIOF hardware manages the pins for clock, receive and transmit (sck/miso/mosi), but the chip select pin is managed by software and must be configured as a regular GPIO pin by the board code. Performance wise there is still room for improvement, but on a Ecovec board with the built-in sh7724 MSIOF0 this driver gets Mini-sd read speeds of about half a megabyte per second. Future work include better clock setup and merging of 8-bit transfers into 32-bit words to reduce interrupt load and improve throughput. Signed-off-by: Magnus Damm Signed-off-by: Grant Likely --- drivers/spi/Kconfig | 7 + drivers/spi/Makefile | 1 + drivers/spi/spi_sh_msiof.c | 691 +++++++++++++++++++++++++++++++++++++++++++ include/linux/spi/sh_msiof.h | 10 + 4 files changed, 709 insertions(+) create mode 100644 drivers/spi/spi_sh_msiof.c create mode 100644 include/linux/spi/sh_msiof.h (limited to 'include') diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index c00f9e5b9df8..06e1c0c9d35e 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -223,6 +223,13 @@ config SPI_S3C24XX_GPIO the inbuilt hardware cannot provide the transfer mode, or where the board is using non hardware connected pins. +config SPI_SH_MSIOF + tristate "SuperH MSIOF SPI controller" + depends on SUPERH && HAVE_CLK + select SPI_BITBANG + help + SPI driver for SuperH MSIOF blocks. + config SPI_SH_SCI tristate "SuperH SCI SPI controller" depends on SUPERH diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 2f2bebc63b64..9c51e2526892 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -35,6 +35,7 @@ obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o obj-$(CONFIG_SPI_XILINX_OF) += xilinx_spi_of.o obj-$(CONFIG_SPI_XILINX_PLTFM) += xilinx_spi_pltfm.o obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.o +obj-$(CONFIG_SPI_SH_MSIOF) += spi_sh_msiof.o obj-$(CONFIG_SPI_STMP3XXX) += spi_stmp.o # ... add above this line ... 
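Since the chip select is left to board code as a plain GPIO passed through spi->controller_data, a board file would wire the controller and a slave up roughly as sketched below; the GPIO number, bus number, transfer speed and omitted register/IRQ resources are hypothetical, and only the "mmc_spi" slave reflects the setup the driver was actually tested with:

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/sh_msiof.h>

static struct sh_msiof_spi_info msiof0_info = {
	.num_chipselect = 1,	/* one software (GPIO) chip select */
	/* .tx_fifo_override / .rx_fifo_override only needed if not 64 words */
};

static struct platform_device msiof0_device = {
	.name	= "spi_sh_msiof",
	.id	= 0,		/* SPI bus number, also selects the "msiof0" clock */
	.dev	= {
		.platform_data = &msiof0_info,
	},
	/* MSIOF register window and IRQ resources omitted (board specific) */
};

static struct spi_board_info msiof0_board_info[] __initdata = {
	{
		.modalias	 = "mmc_spi",	/* SPI mode SD/MMC card */
		.max_speed_hz	 = 5000000,	/* hypothetical */
		.bus_num	 = 0,
		.chip_select	 = 0,
		.controller_data = (void *)100,	/* hypothetical GPIO used as CS */
	},
};

The board setup code would then register these with platform_device_register() and spi_register_board_info() as usual.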
diff --git a/drivers/spi/spi_sh_msiof.c b/drivers/spi/spi_sh_msiof.c new file mode 100644 index 000000000000..51e5e1dfa6e5 --- /dev/null +++ b/drivers/spi/spi_sh_msiof.c @@ -0,0 +1,691 @@ +/* + * SuperH MSIOF SPI Master Interface + * + * Copyright (c) 2009 Magnus Damm + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +struct sh_msiof_spi_priv { + struct spi_bitbang bitbang; /* must be first for spi_bitbang.c */ + void __iomem *mapbase; + struct clk *clk; + struct platform_device *pdev; + struct sh_msiof_spi_info *info; + struct completion done; + unsigned long flags; + int tx_fifo_size; + int rx_fifo_size; +}; + +#define TMDR1 0x00 +#define TMDR2 0x04 +#define TMDR3 0x08 +#define RMDR1 0x10 +#define RMDR2 0x14 +#define RMDR3 0x18 +#define TSCR 0x20 +#define RSCR 0x22 +#define CTR 0x28 +#define FCTR 0x30 +#define STR 0x40 +#define IER 0x44 +#define TDR1 0x48 +#define TDR2 0x4c +#define TFDR 0x50 +#define RDR1 0x58 +#define RDR2 0x5c +#define RFDR 0x60 + +#define CTR_TSCKE (1 << 15) +#define CTR_TFSE (1 << 14) +#define CTR_TXE (1 << 9) +#define CTR_RXE (1 << 8) + +#define STR_TEOF (1 << 23) +#define STR_REOF (1 << 7) + +static unsigned long sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs) +{ + switch (reg_offs) { + case TSCR: + case RSCR: + return ioread16(p->mapbase + reg_offs); + default: + return ioread32(p->mapbase + reg_offs); + } +} + +static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs, + unsigned long value) +{ + switch (reg_offs) { + case TSCR: + case RSCR: + iowrite16(value, p->mapbase + reg_offs); + break; + default: + iowrite32(value, p->mapbase + reg_offs); + break; + } +} + +static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p, + unsigned long clr, unsigned long set) +{ + unsigned long mask = clr | set; + unsigned long data; + int k; + + data = sh_msiof_read(p, CTR); + data &= ~clr; + data |= set; + sh_msiof_write(p, CTR, data); + + for (k = 100; k > 0; k--) { + if ((sh_msiof_read(p, CTR) & mask) == set) + break; + + udelay(10); + } + + return k > 0 ? 
0 : -ETIMEDOUT; +} + +static irqreturn_t sh_msiof_spi_irq(int irq, void *data) +{ + struct sh_msiof_spi_priv *p = data; + + /* just disable the interrupt and wake up */ + sh_msiof_write(p, IER, 0); + complete(&p->done); + + return IRQ_HANDLED; +} + +static struct { + unsigned short div; + unsigned short scr; +} const sh_msiof_spi_clk_table[] = { + { 1, 0x0007 }, + { 2, 0x0000 }, + { 4, 0x0001 }, + { 8, 0x0002 }, + { 16, 0x0003 }, + { 32, 0x0004 }, + { 64, 0x1f00 }, + { 128, 0x1f01 }, + { 256, 0x1f02 }, + { 512, 0x1f03 }, + { 1024, 0x1f04 }, +}; + +static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, + unsigned long parent_rate, + unsigned long spi_hz) +{ + unsigned long div = 1024; + size_t k; + + if (!WARN_ON(!spi_hz || !parent_rate)) + div = parent_rate / spi_hz; + + /* TODO: make more fine grained */ + + for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_clk_table); k++) { + if (sh_msiof_spi_clk_table[k].div >= div) + break; + } + + k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_clk_table) - 1); + + sh_msiof_write(p, TSCR, sh_msiof_spi_clk_table[k].scr); + sh_msiof_write(p, RSCR, sh_msiof_spi_clk_table[k].scr); +} + +static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, + int cpol, int cpha, + int tx_hi_z, int lsb_first) +{ + unsigned long tmp; + int edge; + + /* + * CPOL CPHA TSCKIZ RSCKIZ TEDG REDG(!) + * 0 0 10 10 1 0 + * 0 1 10 10 0 1 + * 1 0 11 11 0 1 + * 1 1 11 11 1 0 + * + * (!) Note: REDG is inverted recommended data sheet setting + */ + + sh_msiof_write(p, FCTR, 0); + sh_msiof_write(p, TMDR1, 0xe2000005 | (lsb_first << 24)); + sh_msiof_write(p, RMDR1, 0x22000005 | (lsb_first << 24)); + + tmp = 0xa0000000; + tmp |= cpol << 30; /* TSCKIZ */ + tmp |= cpol << 28; /* RSCKIZ */ + + edge = cpol ? cpha : !cpha; + + tmp |= edge << 27; /* TEDG */ + tmp |= !edge << 26; /* REDG */ + tmp |= (tx_hi_z ? 
2 : 0) << 22; /* TXDIZ */ + sh_msiof_write(p, CTR, tmp); +} + +static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p, + const void *tx_buf, void *rx_buf, + int bits, int words) +{ + unsigned long dr2; + + dr2 = ((bits - 1) << 24) | ((words - 1) << 16); + + if (tx_buf) + sh_msiof_write(p, TMDR2, dr2); + else + sh_msiof_write(p, TMDR2, dr2 | 1); + + if (rx_buf) + sh_msiof_write(p, RMDR2, dr2); + + sh_msiof_write(p, IER, STR_TEOF | STR_REOF); +} + +static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p) +{ + sh_msiof_write(p, STR, sh_msiof_read(p, STR)); +} + +static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p, + const void *tx_buf, int words, int fs) +{ + const unsigned char *buf_8 = tx_buf; + int k; + + for (k = 0; k < words; k++) + sh_msiof_write(p, TFDR, buf_8[k] << fs); +} + +static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p, + const void *tx_buf, int words, int fs) +{ + const unsigned short *buf_16 = tx_buf; + int k; + + for (k = 0; k < words; k++) + sh_msiof_write(p, TFDR, buf_16[k] << fs); +} + +static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p, + const void *tx_buf, int words, int fs) +{ + const unsigned short *buf_16 = tx_buf; + int k; + + for (k = 0; k < words; k++) + sh_msiof_write(p, TFDR, get_unaligned(&buf_16[k]) << fs); +} + +static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p, + const void *tx_buf, int words, int fs) +{ + const unsigned int *buf_32 = tx_buf; + int k; + + for (k = 0; k < words; k++) + sh_msiof_write(p, TFDR, buf_32[k] << fs); +} + +static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p, + const void *tx_buf, int words, int fs) +{ + const unsigned int *buf_32 = tx_buf; + int k; + + for (k = 0; k < words; k++) + sh_msiof_write(p, TFDR, get_unaligned(&buf_32[k]) << fs); +} + +static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p, + void *rx_buf, int words, int fs) +{ + unsigned char *buf_8 = rx_buf; + int k; + + for (k = 0; k < words; k++) + buf_8[k] = sh_msiof_read(p, RFDR) >> fs; +} + +static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p, + void *rx_buf, int words, int fs) +{ + unsigned short *buf_16 = rx_buf; + int k; + + for (k = 0; k < words; k++) + buf_16[k] = sh_msiof_read(p, RFDR) >> fs; +} + +static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p, + void *rx_buf, int words, int fs) +{ + unsigned short *buf_16 = rx_buf; + int k; + + for (k = 0; k < words; k++) + put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_16[k]); +} + +static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p, + void *rx_buf, int words, int fs) +{ + unsigned int *buf_32 = rx_buf; + int k; + + for (k = 0; k < words; k++) + buf_32[k] = sh_msiof_read(p, RFDR) >> fs; +} + +static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p, + void *rx_buf, int words, int fs) +{ + unsigned int *buf_32 = rx_buf; + int k; + + for (k = 0; k < words; k++) + put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_32[k]); +} + +static int sh_msiof_spi_bits(struct spi_device *spi, struct spi_transfer *t) +{ + int bits; + + bits = t ? t->bits_per_word : 0; + bits = bits ? bits : spi->bits_per_word; + return bits; +} + +static unsigned long sh_msiof_spi_hz(struct spi_device *spi, + struct spi_transfer *t) +{ + unsigned long hz; + + hz = t ? t->speed_hz : 0; + hz = hz ? 
hz : spi->max_speed_hz; + return hz; +} + +static int sh_msiof_spi_setup_transfer(struct spi_device *spi, + struct spi_transfer *t) +{ + int bits; + + /* noting to check hz values against since parent clock is disabled */ + + bits = sh_msiof_spi_bits(spi, t); + if (bits < 8) + return -EINVAL; + if (bits > 32) + return -EINVAL; + + return spi_bitbang_setup_transfer(spi, t); +} + +static void sh_msiof_spi_chipselect(struct spi_device *spi, int is_on) +{ + struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master); + int value; + + /* chip select is active low unless SPI_CS_HIGH is set */ + if (spi->mode & SPI_CS_HIGH) + value = (is_on == BITBANG_CS_ACTIVE) ? 1 : 0; + else + value = (is_on == BITBANG_CS_ACTIVE) ? 0 : 1; + + if (is_on == BITBANG_CS_ACTIVE) { + if (!test_and_set_bit(0, &p->flags)) { + pm_runtime_get_sync(&p->pdev->dev); + clk_enable(p->clk); + } + + /* Configure pins before asserting CS */ + sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL), + !!(spi->mode & SPI_CPHA), + !!(spi->mode & SPI_3WIRE), + !!(spi->mode & SPI_LSB_FIRST)); + } + + /* use spi->controller data for CS (same strategy as spi_gpio) */ + gpio_set_value((unsigned)spi->controller_data, value); + + if (is_on == BITBANG_CS_INACTIVE) { + if (test_and_clear_bit(0, &p->flags)) { + clk_disable(p->clk); + pm_runtime_put(&p->pdev->dev); + } + } +} + +static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p, + void (*tx_fifo)(struct sh_msiof_spi_priv *, + const void *, int, int), + void (*rx_fifo)(struct sh_msiof_spi_priv *, + void *, int, int), + const void *tx_buf, void *rx_buf, + int words, int bits) +{ + int fifo_shift; + int ret; + + /* limit maximum word transfer to rx/tx fifo size */ + if (tx_buf) + words = min_t(int, words, p->tx_fifo_size); + if (rx_buf) + words = min_t(int, words, p->rx_fifo_size); + + /* the fifo contents need shifting */ + fifo_shift = 32 - bits; + + /* setup msiof transfer mode registers */ + sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words); + + /* write tx fifo */ + if (tx_buf) + tx_fifo(p, tx_buf, words, fifo_shift); + + /* setup clock and rx/tx signals */ + ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TSCKE); + if (rx_buf) + ret = ret ? ret : sh_msiof_modify_ctr_wait(p, 0, CTR_RXE); + ret = ret ? ret : sh_msiof_modify_ctr_wait(p, 0, CTR_TXE); + + /* start by setting frame bit */ + INIT_COMPLETION(p->done); + ret = ret ? ret : sh_msiof_modify_ctr_wait(p, 0, CTR_TFSE); + if (ret) { + dev_err(&p->pdev->dev, "failed to start hardware\n"); + goto err; + } + + /* wait for tx fifo to be emptied / rx fifo to be filled */ + wait_for_completion(&p->done); + + /* read rx fifo */ + if (rx_buf) + rx_fifo(p, rx_buf, words, fifo_shift); + + /* clear status bits */ + sh_msiof_reset_str(p); + + /* shut down frame, tx/tx and clock signals */ + ret = sh_msiof_modify_ctr_wait(p, CTR_TFSE, 0); + ret = ret ? ret : sh_msiof_modify_ctr_wait(p, CTR_TXE, 0); + if (rx_buf) + ret = ret ? ret : sh_msiof_modify_ctr_wait(p, CTR_RXE, 0); + ret = ret ? 
ret : sh_msiof_modify_ctr_wait(p, CTR_TSCKE, 0); + if (ret) { + dev_err(&p->pdev->dev, "failed to shut down hardware\n"); + goto err; + } + + return words; + + err: + sh_msiof_write(p, IER, 0); + return ret; +} + +static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t) +{ + struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master); + void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, int, int); + void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, int, int); + int bits; + int bytes_per_word; + int bytes_done; + int words; + int n; + + bits = sh_msiof_spi_bits(spi, t); + + /* setup bytes per word and fifo read/write functions */ + if (bits <= 8) { + bytes_per_word = 1; + tx_fifo = sh_msiof_spi_write_fifo_8; + rx_fifo = sh_msiof_spi_read_fifo_8; + } else if (bits <= 16) { + bytes_per_word = 2; + if ((unsigned long)t->tx_buf & 0x01) + tx_fifo = sh_msiof_spi_write_fifo_16u; + else + tx_fifo = sh_msiof_spi_write_fifo_16; + + if ((unsigned long)t->rx_buf & 0x01) + rx_fifo = sh_msiof_spi_read_fifo_16u; + else + rx_fifo = sh_msiof_spi_read_fifo_16; + } else { + bytes_per_word = 4; + if ((unsigned long)t->tx_buf & 0x03) + tx_fifo = sh_msiof_spi_write_fifo_32u; + else + tx_fifo = sh_msiof_spi_write_fifo_32; + + if ((unsigned long)t->rx_buf & 0x03) + rx_fifo = sh_msiof_spi_read_fifo_32u; + else + rx_fifo = sh_msiof_spi_read_fifo_32; + } + + /* setup clocks (clock already enabled in chipselect()) */ + sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk), + sh_msiof_spi_hz(spi, t)); + + /* transfer in fifo sized chunks */ + words = t->len / bytes_per_word; + bytes_done = 0; + + while (bytes_done < t->len) { + n = sh_msiof_spi_txrx_once(p, tx_fifo, rx_fifo, + t->tx_buf + bytes_done, + t->rx_buf + bytes_done, + words, bits); + if (n < 0) + break; + + bytes_done += n * bytes_per_word; + words -= n; + } + + return bytes_done; +} + +static u32 sh_msiof_spi_txrx_word(struct spi_device *spi, unsigned nsecs, + u32 word, u8 bits) +{ + BUG(); /* unused but needed by bitbang code */ + return 0; +} + +static int sh_msiof_spi_probe(struct platform_device *pdev) +{ + struct resource *r; + struct spi_master *master; + struct sh_msiof_spi_priv *p; + char clk_name[16]; + int i; + int ret; + + master = spi_alloc_master(&pdev->dev, sizeof(struct sh_msiof_spi_priv)); + if (master == NULL) { + dev_err(&pdev->dev, "failed to allocate spi master\n"); + ret = -ENOMEM; + goto err0; + } + + p = spi_master_get_devdata(master); + + platform_set_drvdata(pdev, p); + p->info = pdev->dev.platform_data; + init_completion(&p->done); + + snprintf(clk_name, sizeof(clk_name), "msiof%d", pdev->id); + p->clk = clk_get(&pdev->dev, clk_name); + if (IS_ERR(p->clk)) { + dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); + ret = PTR_ERR(p->clk); + goto err1; + } + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + i = platform_get_irq(pdev, 0); + if (!r || i < 0) { + dev_err(&pdev->dev, "cannot get platform resources\n"); + ret = -ENOENT; + goto err2; + } + p->mapbase = ioremap_nocache(r->start, resource_size(r)); + if (!p->mapbase) { + dev_err(&pdev->dev, "unable to ioremap\n"); + ret = -ENXIO; + goto err2; + } + + ret = request_irq(i, sh_msiof_spi_irq, IRQF_DISABLED, + dev_name(&pdev->dev), p); + if (ret) { + dev_err(&pdev->dev, "unable to request irq\n"); + goto err3; + } + + p->pdev = pdev; + pm_runtime_enable(&pdev->dev); + + /* The standard version of MSIOF use 64 word FIFOs */ + p->tx_fifo_size = 64; + p->rx_fifo_size = 64; + + /* Platform data may override FIFO sizes */ + if 
(p->info->tx_fifo_override) + p->tx_fifo_size = p->info->tx_fifo_override; + if (p->info->rx_fifo_override) + p->rx_fifo_size = p->info->rx_fifo_override; + + /* init master and bitbang code */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + master->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE; + master->flags = 0; + master->bus_num = pdev->id; + master->num_chipselect = p->info->num_chipselect; + master->setup = spi_bitbang_setup; + master->cleanup = spi_bitbang_cleanup; + + p->bitbang.master = master; + p->bitbang.chipselect = sh_msiof_spi_chipselect; + p->bitbang.setup_transfer = sh_msiof_spi_setup_transfer; + p->bitbang.txrx_bufs = sh_msiof_spi_txrx; + p->bitbang.txrx_word[SPI_MODE_0] = sh_msiof_spi_txrx_word; + p->bitbang.txrx_word[SPI_MODE_1] = sh_msiof_spi_txrx_word; + p->bitbang.txrx_word[SPI_MODE_2] = sh_msiof_spi_txrx_word; + p->bitbang.txrx_word[SPI_MODE_3] = sh_msiof_spi_txrx_word; + + ret = spi_bitbang_start(&p->bitbang); + if (ret == 0) + return 0; + + pm_runtime_disable(&pdev->dev); + err3: + iounmap(p->mapbase); + err2: + clk_put(p->clk); + err1: + spi_master_put(master); + err0: + return ret; +} + +static int sh_msiof_spi_remove(struct platform_device *pdev) +{ + struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev); + int ret; + + ret = spi_bitbang_stop(&p->bitbang); + if (!ret) { + pm_runtime_disable(&pdev->dev); + free_irq(platform_get_irq(pdev, 0), sh_msiof_spi_irq); + iounmap(p->mapbase); + clk_put(p->clk); + spi_master_put(p->bitbang.master); + } + return ret; +} + +static int sh_msiof_spi_runtime_nop(struct device *dev) +{ + /* Runtime PM callback shared between ->runtime_suspend() + * and ->runtime_resume(). Simply returns success. + * + * This driver re-initializes all registers after + * pm_runtime_get_sync() anyway so there is no need + * to save and restore registers here. + */ + return 0; +} + +static struct dev_pm_ops sh_msiof_spi_dev_pm_ops = { + .runtime_suspend = sh_msiof_spi_runtime_nop, + .runtime_resume = sh_msiof_spi_runtime_nop, +}; + +static struct platform_driver sh_msiof_spi_drv = { + .probe = sh_msiof_spi_probe, + .remove = sh_msiof_spi_remove, + .driver = { + .name = "spi_sh_msiof", + .owner = THIS_MODULE, + .pm = &sh_msiof_spi_dev_pm_ops, + }, +}; + +static int __init sh_msiof_spi_init(void) +{ + return platform_driver_register(&sh_msiof_spi_drv); +} +module_init(sh_msiof_spi_init); + +static void __exit sh_msiof_spi_exit(void) +{ + platform_driver_unregister(&sh_msiof_spi_drv); +} +module_exit(sh_msiof_spi_exit); + +MODULE_DESCRIPTION("SuperH MSIOF SPI Master Interface Driver"); +MODULE_AUTHOR("Magnus Damm"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:spi_sh_msiof"); diff --git a/include/linux/spi/sh_msiof.h b/include/linux/spi/sh_msiof.h new file mode 100644 index 000000000000..2e8db3d2d2e5 --- /dev/null +++ b/include/linux/spi/sh_msiof.h @@ -0,0 +1,10 @@ +#ifndef __SPI_SH_MSIOF_H__ +#define __SPI_SH_MSIOF_H__ + +struct sh_msiof_spi_info { + int tx_fifo_override; + int rx_fifo_override; + u16 num_chipselect; +}; + +#endif /* __SPI_SH_MSIOF_H__ */ -- cgit v1.2.3 From 87d9b4e1c52867a45331a9a5495f6448e0c68b23 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 8 Dec 2009 11:14:20 +0800 Subject: tracing: Extract duplicate ftrace_raw_init_event_foo() Use a generic trace_event_raw_init() function for all event's raw_init callbacks (but kprobes) instead of defining the same version for each of these. 
This shrinks the kernel code: text data bss dec hex filename 5355293 1961928 7103260 14420481 dc0a01 vmlinux.o.old 5346802 1961864 7103260 14411926 dbe896 vmlinux.o raw_init can't be removed, because ftrace events and kprobe events use different raw_init callbacks. Though it's possible to totally remove raw_init, I choose to leave it as it is for now. Signed-off-by: Li Zefan Acked-by: Steven Rostedt Cc: Jason Baron Cc: Ingo Molnar LKML-Reference: <4B1DC48C.7080603@cn.fujitsu.com> Signed-off-by: Frederic Weisbecker --- include/linux/ftrace_event.h | 1 + include/linux/syscalls.h | 4 ++-- include/trace/ftrace.h | 35 ++++------------------------------- kernel/trace/trace_events.c | 14 ++++++++++++++ 4 files changed, 21 insertions(+), 33 deletions(-) (limited to 'include') diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 38f8d6553831..ea44b8911094 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -158,6 +158,7 @@ enum { FILTER_PTR_STRING, }; +extern int trace_event_raw_init(struct ftrace_event_call *call); extern int trace_define_common_fields(struct ftrace_event_call *call); extern int trace_define_field(struct ftrace_event_call *call, const char *type, const char *name, int offset, int size, diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index bc70c5810fec..94ac28437bef 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -145,7 +145,7 @@ struct perf_event_attr; .name = "sys_enter"#sname, \ .system = "syscalls", \ .event = &enter_syscall_print_##sname, \ - .raw_init = init_syscall_trace, \ + .raw_init = trace_event_raw_init, \ .show_format = syscall_enter_format, \ .define_fields = syscall_enter_define_fields, \ .regfunc = reg_event_syscall_enter, \ @@ -167,7 +167,7 @@ struct perf_event_attr; .name = "sys_exit"#sname, \ .system = "syscalls", \ .event = &exit_syscall_print_##sname, \ - .raw_init = init_syscall_trace, \ + .raw_init = trace_event_raw_init, \ .show_format = syscall_exit_format, \ .define_fields = syscall_exit_define_fields, \ .regfunc = reg_event_syscall_exit, \ diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index c4eca380204e..6055b0604c86 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -623,23 +623,12 @@ static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\ * .trace = ftrace_raw_output_, <-- stage 2 * }; * - * static int ftrace_raw_init_event_(struct ftrace_event_call *unused) - * { - * int id; - * - * id = register_ftrace_event(&ftrace_event_type_); - * if (!id) - * return -ENODEV; - * event_.id = id; - * return 0; - * } - * * static struct ftrace_event_call __used * __attribute__((__aligned__(4))) * __attribute__((section("_ftrace_events"))) event_ = { * .name = "", * .system = "", - * .raw_init = ftrace_raw_init_event_, + * .raw_init = trace_event_raw_init, * .regfunc = ftrace_reg_event_, * .unregfunc = ftrace_unreg_event_, * .show_format = ftrace_format_, @@ -647,9 +636,6 @@ static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\ * */ -#undef TP_FMT -#define TP_FMT(fmt, args...) 
fmt "\n", ##args - #ifdef CONFIG_EVENT_PROFILE #define _TRACE_PROFILE_INIT(call) \ @@ -744,19 +730,7 @@ static void ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused)\ \ static struct trace_event ftrace_event_type_##call = { \ .trace = ftrace_raw_output_##call, \ -}; \ - \ -static int ftrace_raw_init_event_##call(struct ftrace_event_call *unused)\ -{ \ - int id; \ - \ - id = register_ftrace_event(&ftrace_event_type_##call); \ - if (!id) \ - return -ENODEV; \ - event_##call.id = id; \ - INIT_LIST_HEAD(&event_##call.fields); \ - return 0; \ -} +}; #undef DEFINE_EVENT_PRINT #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ @@ -776,7 +750,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ .name = #call, \ .system = __stringify(TRACE_SYSTEM), \ .event = &ftrace_event_type_##call, \ - .raw_init = ftrace_raw_init_event_##call, \ + .raw_init = trace_event_raw_init, \ .regfunc = ftrace_raw_reg_event_##call, \ .unregfunc = ftrace_raw_unreg_event_##call, \ .show_format = ftrace_format_##template, \ @@ -793,7 +767,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ .name = #call, \ .system = __stringify(TRACE_SYSTEM), \ .event = &ftrace_event_type_##call, \ - .raw_init = ftrace_raw_init_event_##call, \ + .raw_init = trace_event_raw_init, \ .regfunc = ftrace_raw_reg_event_##call, \ .unregfunc = ftrace_raw_unreg_event_##call, \ .show_format = ftrace_format_##call, \ @@ -953,7 +927,6 @@ end: \ perf_swevent_put_recursion_context(rctx); \ end_recursion: \ local_irq_restore(irq_flags); \ - \ } #undef DEFINE_EVENT diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 1d18315dc836..8ed66e0d476b 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -105,6 +105,20 @@ void trace_destroy_fields(struct ftrace_event_call *call) } } +int trace_event_raw_init(struct ftrace_event_call *call) +{ + int id; + + id = register_ftrace_event(call->event); + if (!id) + return -ENODEV; + call->id = id; + INIT_LIST_HEAD(&call->fields); + + return 0; +} +EXPORT_SYMBOL_GPL(trace_event_raw_init); + static void ftrace_event_enable_disable(struct ftrace_event_call *call, int enable) { -- cgit v1.2.3 From 614a71a26ba3d97e9fa85649db69a682b78e407d Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 8 Dec 2009 11:14:36 +0800 Subject: tracing: Pull up calls to trace_define_common_fields() Call trace_define_common_fields() in event_create_dir() only. 
This avoids trace events to handle it from their define_fields callbacks and shrinks the kernel code size: text data bss dec hex filename 5346802 1961864 7103260 14411926 dbe896 vmlinux.o.old 5345151 1961864 7103260 14410275 dbe223 vmlinux.o Signed-off-by: Li Zefan Acked-by: Steven Rostedt Cc: Ingo Molnar Cc: Jason Baron Cc: Masami Hiramatsu LKML-Reference: <4B1DC49C.8000107@cn.fujitsu.com> Signed-off-by: Frederic Weisbecker --- include/linux/ftrace_event.h | 1 - include/trace/ftrace.h | 4 ---- kernel/trace/trace_events.c | 7 ++++--- kernel/trace/trace_export.c | 4 ---- kernel/trace/trace_kprobe.c | 8 -------- kernel/trace/trace_syscalls.c | 8 -------- 6 files changed, 4 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index ea44b8911094..db97c64ce0e9 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -159,7 +159,6 @@ enum { }; extern int trace_event_raw_init(struct ftrace_event_call *call); -extern int trace_define_common_fields(struct ftrace_event_call *call); extern int trace_define_field(struct ftrace_event_call *call, const char *type, const char *name, int offset, int size, int is_signed, int filter_type); diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 6055b0604c86..2af2f7a2c1bd 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -436,10 +436,6 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \ struct ftrace_raw_##call field; \ int ret; \ \ - ret = trace_define_common_fields(event_call); \ - if (ret) \ - return ret; \ - \ tstruct; \ \ return ret; \ diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 8ed66e0d476b..97b0b3aa166d 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -78,7 +78,7 @@ EXPORT_SYMBOL_GPL(trace_define_field); if (ret) \ return ret; -int trace_define_common_fields(struct ftrace_event_call *call) +static int trace_define_common_fields(struct ftrace_event_call *call) { int ret; struct trace_entry ent; @@ -91,7 +91,6 @@ int trace_define_common_fields(struct ftrace_event_call *call) return ret; } -EXPORT_SYMBOL_GPL(trace_define_common_fields); void trace_destroy_fields(struct ftrace_event_call *call) { @@ -927,7 +926,9 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, id); if (call->define_fields) { - ret = call->define_fields(call); + ret = trace_define_common_fields(call); + if (!ret) + ret = call->define_fields(call); if (ret < 0) { pr_warning("Could not initialize trace point" " events/%s\n", call->name); diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index dff8c84ddf17..458e5bfe26d0 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c @@ -184,10 +184,6 @@ ftrace_define_fields_##name(struct ftrace_event_call *event_call) \ struct struct_name field; \ int ret; \ \ - ret = trace_define_common_fields(event_call); \ - if (ret) \ - return ret; \ - \ tstruct; \ \ return ret; \ diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index aff5f80b59b8..e3c80e925896 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1113,10 +1113,6 @@ static int kprobe_event_define_fields(struct ftrace_event_call *event_call) struct kprobe_trace_entry field; struct trace_probe *tp = (struct trace_probe *)event_call->data; - ret = trace_define_common_fields(event_call); - if (!ret) - return ret; - DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0); 
DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1); /* Set argument names as fields */ @@ -1131,10 +1127,6 @@ static int kretprobe_event_define_fields(struct ftrace_event_call *event_call) struct kretprobe_trace_entry field; struct trace_probe *tp = (struct trace_probe *)event_call->data; - ret = trace_define_common_fields(event_call); - if (!ret) - return ret; - DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0); DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0); DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1); diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 57501d90096a..b957edd0ca3b 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -217,10 +217,6 @@ int syscall_enter_define_fields(struct ftrace_event_call *call) int i; int offset = offsetof(typeof(trace), args); - ret = trace_define_common_fields(call); - if (ret) - return ret; - ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER); if (ret) return ret; @@ -241,10 +237,6 @@ int syscall_exit_define_fields(struct ftrace_event_call *call) struct syscall_trace_exit trace; int ret; - ret = trace_define_common_fields(call); - if (ret) - return ret; - ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER); if (ret) return ret; -- cgit v1.2.3 From 3b8e4273814a7f9e9a74ece517d9206fea919aaa Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 8 Dec 2009 11:14:52 +0800 Subject: tracing: Move a printk out of ftrace_raw_reg_event_foo() Move the printk from each ftrace_raw_reg_event_foo() to its caller ftrace_event_enable_disable(). This avoids each regfunc trace event callbacks to handle a same error report that can be carried from the caller. See how much space this saves: text data bss dec hex filename 5345151 1961864 7103260 14410275 dbe223 vmlinux.o.old 5331487 1961864 7103260 14396611 dbacc3 vmlinux.o Signed-off-by: Li Zefan Acked-by: Steven Rostedt Cc: Jason Baron LKML-Reference: <4B1DC4AC.802@cn.fujitsu.com> [start cmdline record before calling regfunc to avoid lost window of pid to comm resolution] Signed-off-by: Frederic Weisbecker --- include/trace/ftrace.h | 16 ++-------------- kernel/trace/trace_events.c | 20 +++++++++++++++----- kernel/trace/trace_syscalls.c | 10 ++-------- 3 files changed, 19 insertions(+), 27 deletions(-) (limited to 'include') diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 2af2f7a2c1bd..0c21af85211c 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -555,13 +555,7 @@ static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\ * * static int ftrace_reg_event_(struct ftrace_event_call *unused) * { - * int ret; - * - * ret = register_trace_(ftrace_event_); - * if (!ret) - * pr_info("event trace: Could not activate trace point " - * "probe to "); - * return ret; + * return register_trace_(ftrace_event_); * } * * static void ftrace_unreg_event_(struct ftrace_event_call *unused) @@ -710,13 +704,7 @@ static void ftrace_raw_event_##call(proto) \ \ static int ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)\ { \ - int ret; \ - \ - ret = register_trace_##call(ftrace_raw_event_##call); \ - if (ret) \ - pr_info("event trace: Could not activate trace point " \ - "probe to %s\n", #call); \ - return ret; \ + return register_trace_##call(ftrace_raw_event_##call); \ } \ \ static void ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused)\ diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 97b0b3aa166d..189b09baf4fb 100644 --- 
a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -118,9 +118,11 @@ int trace_event_raw_init(struct ftrace_event_call *call) } EXPORT_SYMBOL_GPL(trace_event_raw_init); -static void ftrace_event_enable_disable(struct ftrace_event_call *call, +static int ftrace_event_enable_disable(struct ftrace_event_call *call, int enable) { + int ret = 0; + switch (enable) { case 0: if (call->enabled) { @@ -131,12 +133,20 @@ static void ftrace_event_enable_disable(struct ftrace_event_call *call, break; case 1: if (!call->enabled) { - call->enabled = 1; tracing_start_cmdline_record(); - call->regfunc(call); + ret = call->regfunc(call); + if (ret) { + tracing_stop_cmdline_record(); + pr_info("event trace: Could not enable event " + "%s\n", call->name); + break; + } + call->enabled = 1; } break; } + + return ret; } static void ftrace_clear_events(void) @@ -415,7 +425,7 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, case 0: case 1: mutex_lock(&event_mutex); - ftrace_event_enable_disable(call, val); + ret = ftrace_event_enable_disable(call, val); mutex_unlock(&event_mutex); break; @@ -425,7 +435,7 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, *ppos += cnt; - return cnt; + return ret ? ret : cnt; } static ssize_t diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index b957edd0ca3b..75289f372dd2 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -325,10 +325,7 @@ int reg_event_syscall_enter(struct ftrace_event_call *call) mutex_lock(&syscall_trace_lock); if (!sys_refcount_enter) ret = register_trace_sys_enter(ftrace_syscall_enter); - if (ret) { - pr_info("event trace: Could not activate" - "syscall entry trace point"); - } else { + if (!ret) { set_bit(num, enabled_enter_syscalls); sys_refcount_enter++; } @@ -362,10 +359,7 @@ int reg_event_syscall_exit(struct ftrace_event_call *call) mutex_lock(&syscall_trace_lock); if (!sys_refcount_exit) ret = register_trace_sys_exit(ftrace_syscall_exit); - if (ret) { - pr_info("event trace: Could not activate" - "syscall exit trace point"); - } else { + if (!ret) { set_bit(num, enabled_exit_syscalls); sys_refcount_exit++; } -- cgit v1.2.3 From e00bf2ec60605eb95687b7a0c3b83c87c48541dc Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 8 Dec 2009 11:17:29 +0800 Subject: tracing: Change event->profile_count to be int type Like total_profile_count, struct ftrace_event_call::profile_count is protected by event_mutex, so it doesn't need to be atomic_t. 
Signed-off-by: Li Zefan Acked-by: Steven Rostedt Cc: Jason Baron Cc: Masami Hiramatsu Cc: Peter Zijlstra LKML-Reference: <4B1DC549.5010705@cn.fujitsu.com> Signed-off-by: Frederic Weisbecker --- include/linux/ftrace_event.h | 2 +- include/linux/syscalls.h | 2 -- include/trace/ftrace.h | 1 - kernel/trace/trace_event_profile.c | 6 +++--- kernel/trace/trace_kprobe.c | 1 - 5 files changed, 4 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index db97c64ce0e9..2233c98d80df 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -131,7 +131,7 @@ struct ftrace_event_call { void *mod; void *data; - atomic_t profile_count; + int profile_count; int (*profile_enable)(struct ftrace_event_call *); void (*profile_disable)(struct ftrace_event_call *); }; diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 94ac28437bef..72d69860d901 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -102,12 +102,10 @@ struct perf_event_attr; #ifdef CONFIG_EVENT_PROFILE #define TRACE_SYS_ENTER_PROFILE_INIT(sname) \ - .profile_count = ATOMIC_INIT(-1), \ .profile_enable = prof_sysenter_enable, \ .profile_disable = prof_sysenter_disable, #define TRACE_SYS_EXIT_PROFILE_INIT(sname) \ - .profile_count = ATOMIC_INIT(-1), \ .profile_enable = prof_sysexit_enable, \ .profile_disable = prof_sysexit_disable, #else diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 0c21af85211c..73523151a731 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -629,7 +629,6 @@ static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\ #ifdef CONFIG_EVENT_PROFILE #define _TRACE_PROFILE_INIT(call) \ - .profile_count = ATOMIC_INIT(-1), \ .profile_enable = ftrace_profile_enable_##call, \ .profile_disable = ftrace_profile_disable_##call, diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c index d9c60f80aa0d..9e25573242cf 100644 --- a/kernel/trace/trace_event_profile.c +++ b/kernel/trace/trace_event_profile.c @@ -25,7 +25,7 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event) char *buf; int ret = -ENOMEM; - if (atomic_inc_return(&event->profile_count)) + if (event->profile_count++ > 0) return 0; if (!total_profile_count) { @@ -56,7 +56,7 @@ fail_buf_nmi: perf_trace_buf = NULL; } fail_buf: - atomic_dec(&event->profile_count); + event->profile_count--; return ret; } @@ -83,7 +83,7 @@ static void ftrace_profile_disable_event(struct ftrace_event_call *event) { char *buf, *nmi_buf; - if (!atomic_add_negative(-1, &event->profile_count)) + if (--event->profile_count > 0) return; event->profile_disable(event); diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index e3c80e925896..6ed223447a3f 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1426,7 +1426,6 @@ static int register_probe_event(struct trace_probe *tp) call->unregfunc = probe_event_disable; #ifdef CONFIG_EVENT_PROFILE - atomic_set(&call->profile_count, -1); call->profile_enable = probe_profile_enable; call->profile_disable = probe_profile_disable; #endif -- cgit v1.2.3 From 4107da2a2853c070fb3effa58a83f94dc067fc44 Mon Sep 17 00:00:00 2001 From: Haojian Zhuang Date: Thu, 17 Sep 2009 08:54:03 -0400 Subject: mfd: Add 88PM8607 driver This adds a core driver for 88PM8607 found in Marvell DKB development platform. 
This driver is a proxy for all accesses to 88PM8607 sub-drivers which will be merged on top of this one, RTC, regulators, battery and so on. This chip is manufactured by Marvell. Signed-off-by: Haojian Zhuang Reviewed-by: Mark Brown Signed-off-by: Samuel Ortiz --- drivers/mfd/88pm8607.c | 302 +++++++++++++++++++++++++++++++++++++++++++ drivers/mfd/Kconfig | 10 ++ drivers/mfd/Makefile | 1 + include/linux/mfd/88pm8607.h | 217 +++++++++++++++++++++++++++++++ 4 files changed, 530 insertions(+) create mode 100644 drivers/mfd/88pm8607.c create mode 100644 include/linux/mfd/88pm8607.h (limited to 'include') diff --git a/drivers/mfd/88pm8607.c b/drivers/mfd/88pm8607.c new file mode 100644 index 000000000000..7e3f65907993 --- /dev/null +++ b/drivers/mfd/88pm8607.c @@ -0,0 +1,302 @@ +/* + * Base driver for Marvell 88PM8607 + * + * Copyright (C) 2009 Marvell International Ltd. + * Haojian Zhuang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include + + +#define PM8607_REG_RESOURCE(_start, _end) \ +{ \ + .start = PM8607_##_start, \ + .end = PM8607_##_end, \ + .flags = IORESOURCE_IO, \ +} + +static struct resource pm8607_regulator_resources[] = { + PM8607_REG_RESOURCE(BUCK1, BUCK1), + PM8607_REG_RESOURCE(BUCK2, BUCK2), + PM8607_REG_RESOURCE(BUCK3, BUCK3), + PM8607_REG_RESOURCE(LDO1, LDO1), + PM8607_REG_RESOURCE(LDO2, LDO2), + PM8607_REG_RESOURCE(LDO3, LDO3), + PM8607_REG_RESOURCE(LDO4, LDO4), + PM8607_REG_RESOURCE(LDO5, LDO5), + PM8607_REG_RESOURCE(LDO6, LDO6), + PM8607_REG_RESOURCE(LDO7, LDO7), + PM8607_REG_RESOURCE(LDO8, LDO8), + PM8607_REG_RESOURCE(LDO9, LDO9), + PM8607_REG_RESOURCE(LDO10, LDO10), + PM8607_REG_RESOURCE(LDO12, LDO12), + PM8607_REG_RESOURCE(LDO14, LDO14), +}; + +#define PM8607_REG_DEVS(_name, _id) \ +{ \ + .name = "88pm8607-" #_name, \ + .num_resources = 1, \ + .resources = &pm8607_regulator_resources[PM8607_ID_##_id], \ +} + +static struct mfd_cell pm8607_devs[] = { + PM8607_REG_DEVS(buck1, BUCK1), + PM8607_REG_DEVS(buck2, BUCK2), + PM8607_REG_DEVS(buck3, BUCK3), + PM8607_REG_DEVS(ldo1, LDO1), + PM8607_REG_DEVS(ldo2, LDO2), + PM8607_REG_DEVS(ldo3, LDO3), + PM8607_REG_DEVS(ldo4, LDO4), + PM8607_REG_DEVS(ldo5, LDO5), + PM8607_REG_DEVS(ldo6, LDO6), + PM8607_REG_DEVS(ldo7, LDO7), + PM8607_REG_DEVS(ldo8, LDO8), + PM8607_REG_DEVS(ldo9, LDO9), + PM8607_REG_DEVS(ldo10, LDO10), + PM8607_REG_DEVS(ldo12, LDO12), + PM8607_REG_DEVS(ldo14, LDO14), +}; + +static inline int pm8607_read_device(struct pm8607_chip *chip, + int reg, int bytes, void *dest) +{ + struct i2c_client *i2c = chip->client; + unsigned char data; + int ret; + + data = (unsigned char)reg; + ret = i2c_master_send(i2c, &data, 1); + if (ret < 0) + return ret; + + ret = i2c_master_recv(i2c, dest, bytes); + if (ret < 0) + return ret; + return 0; +} + +static inline int pm8607_write_device(struct pm8607_chip *chip, + int reg, int bytes, void *src) +{ + struct i2c_client *i2c = chip->client; + unsigned char buf[bytes + 1]; + int ret; + + buf[0] = (unsigned char)reg; + memcpy(&buf[1], src, bytes); + + ret = i2c_master_send(i2c, buf, bytes + 1); + if (ret < 0) + return ret; + return 0; +} + +int pm8607_reg_read(struct pm8607_chip *chip, int reg) +{ + unsigned char data; + int ret; + + mutex_lock(&chip->io_lock); + ret = chip->read(chip, reg, 1, &data); + mutex_unlock(&chip->io_lock); + + if (ret < 0) + return ret; + else + 
return (int)data; +} +EXPORT_SYMBOL(pm8607_reg_read); + +int pm8607_reg_write(struct pm8607_chip *chip, int reg, + unsigned char data) +{ + int ret; + + mutex_lock(&chip->io_lock); + ret = chip->write(chip, reg, 1, &data); + mutex_unlock(&chip->io_lock); + + return ret; +} +EXPORT_SYMBOL(pm8607_reg_write); + +int pm8607_bulk_read(struct pm8607_chip *chip, int reg, + int count, unsigned char *buf) +{ + int ret; + + mutex_lock(&chip->io_lock); + ret = chip->read(chip, reg, count, buf); + mutex_unlock(&chip->io_lock); + + return ret; +} +EXPORT_SYMBOL(pm8607_bulk_read); + +int pm8607_bulk_write(struct pm8607_chip *chip, int reg, + int count, unsigned char *buf) +{ + int ret; + + mutex_lock(&chip->io_lock); + ret = chip->write(chip, reg, count, buf); + mutex_unlock(&chip->io_lock); + + return ret; +} +EXPORT_SYMBOL(pm8607_bulk_write); + +int pm8607_set_bits(struct pm8607_chip *chip, int reg, + unsigned char mask, unsigned char data) +{ + unsigned char value; + int ret; + + mutex_lock(&chip->io_lock); + ret = chip->read(chip, reg, 1, &value); + if (ret < 0) + goto out; + value &= ~mask; + value |= data; + ret = chip->write(chip, reg, 1, &value); +out: + mutex_unlock(&chip->io_lock); + return ret; +} +EXPORT_SYMBOL(pm8607_set_bits); + + +static const struct i2c_device_id pm8607_id_table[] = { + { "88PM8607", 0 }, + {} +}; +MODULE_DEVICE_TABLE(i2c, pm8607_id_table); + + +static int __devinit pm8607_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct pm8607_platform_data *pdata = client->dev.platform_data; + struct pm8607_chip *chip; + int i, count; + int ret; + + chip = kzalloc(sizeof(struct pm8607_chip), GFP_KERNEL); + if (chip == NULL) + return -ENOMEM; + + chip->client = client; + chip->dev = &client->dev; + chip->read = pm8607_read_device; + chip->write = pm8607_write_device; + i2c_set_clientdata(client, chip); + + mutex_init(&chip->io_lock); + dev_set_drvdata(chip->dev, chip); + + ret = pm8607_reg_read(chip, PM8607_CHIP_ID); + if (ret < 0) { + dev_err(chip->dev, "Failed to read CHIP ID: %d\n", ret); + goto out; + } + if ((ret & CHIP_ID_MASK) == CHIP_ID) + dev_info(chip->dev, "Marvell 88PM8607 (ID: %02x) detected\n", + ret); + else { + dev_err(chip->dev, "Failed to detect Marvell 88PM8607. 
" + "Chip ID: %02x\n", ret); + goto out; + } + chip->chip_id = ret; + + ret = pm8607_reg_read(chip, PM8607_BUCK3); + if (ret < 0) { + dev_err(chip->dev, "Failed to read BUCK3 register: %d\n", ret); + goto out; + } + if (ret & PM8607_BUCK3_DOUBLE) + chip->buck3_double = 1; + + ret = pm8607_reg_read(chip, PM8607_MISC1); + if (ret < 0) { + dev_err(chip->dev, "Failed to read MISC1 register: %d\n", ret); + goto out; + } + if (pdata->i2c_port == PI2C_PORT) + ret |= PM8607_MISC1_PI2C; + else + ret &= ~PM8607_MISC1_PI2C; + ret = pm8607_reg_write(chip, PM8607_MISC1, ret); + if (ret < 0) { + dev_err(chip->dev, "Failed to write MISC1 register: %d\n", ret); + goto out; + } + + + count = ARRAY_SIZE(pm8607_devs); + for (i = 0; i < count; i++) { + ret = mfd_add_devices(chip->dev, i, &pm8607_devs[i], + 1, NULL, 0); + if (ret != 0) { + dev_err(chip->dev, "Failed to add subdevs\n"); + goto out; + } + } + + return 0; + +out: + i2c_set_clientdata(client, NULL); + kfree(chip); + return ret; +} + +static int __devexit pm8607_remove(struct i2c_client *client) +{ + struct pm8607_chip *chip = i2c_get_clientdata(client); + + mfd_remove_devices(chip->dev); + kfree(chip); + return 0; +} + +static struct i2c_driver pm8607_driver = { + .driver = { + .name = "88PM8607", + .owner = THIS_MODULE, + }, + .probe = pm8607_probe, + .remove = __devexit_p(pm8607_remove), + .id_table = pm8607_id_table, +}; + +static int __init pm8607_init(void) +{ + int ret; + ret = i2c_add_driver(&pm8607_driver); + if (ret != 0) + pr_err("Failed to register 88PM8607 I2C driver: %d\n", ret); + return ret; +} +subsys_initcall(pm8607_init); + +static void __exit pm8607_exit(void) +{ + i2c_del_driver(&pm8607_driver); +} +module_exit(pm8607_exit); + +MODULE_DESCRIPTION("PMIC Driver for Marvell 88PM8607"); +MODULE_AUTHOR("Haojian Zhuang "); +MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index a296e717e86e..26a36f887357 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -319,6 +319,16 @@ config EZX_PCAP This enables the PCAP ASIC present on EZX Phones. This is needed for MMC, TouchScreen, Sound, USB, etc.. +config MFD_88PM8607 + bool "Support Marvell 88PM8607" + depends on I2C + select MFD_CORE + help + This supports for Marvell 88PM8607 Power Management IC. This includes + the I2C driver and the core APIs _only_, you have to select + individual components like voltage regulators, RTC and + battery-charger under the corresponding menus. + endmenu menu "Multimedia Capabilities Port drivers" diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 11350c1d9301..2596add427a2 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -52,3 +52,4 @@ obj-$(CONFIG_PCF50633_ADC) += pcf50633-adc.o obj-$(CONFIG_PCF50633_GPIO) += pcf50633-gpio.o obj-$(CONFIG_AB3100_CORE) += ab3100-core.o obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o +obj-$(CONFIG_MFD_88PM8607) += 88pm8607.o diff --git a/include/linux/mfd/88pm8607.h b/include/linux/mfd/88pm8607.h new file mode 100644 index 000000000000..f41b428d2cec --- /dev/null +++ b/include/linux/mfd/88pm8607.h @@ -0,0 +1,217 @@ +/* + * Marvell 88PM8607 Interface + * + * Copyright (C) 2009 Marvell International Ltd. + * Haojian Zhuang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __LINUX_MFD_88PM8607_H +#define __LINUX_MFD_88PM8607_H + +enum { + PM8607_ID_BUCK1 = 0, + PM8607_ID_BUCK2, + PM8607_ID_BUCK3, + + PM8607_ID_LDO1, + PM8607_ID_LDO2, + PM8607_ID_LDO3, + PM8607_ID_LDO4, + PM8607_ID_LDO5, + PM8607_ID_LDO6, + PM8607_ID_LDO7, + PM8607_ID_LDO8, + PM8607_ID_LDO9, + PM8607_ID_LDO10, + PM8607_ID_LDO12, + PM8607_ID_LDO14, + + PM8607_ID_RG_MAX, +}; + +#define CHIP_ID (0x40) +#define CHIP_ID_MASK (0xF8) + +/* Interrupt Registers */ +#define PM8607_STATUS_1 (0x01) +#define PM8607_STATUS_2 (0x02) +#define PM8607_INT_STATUS1 (0x03) +#define PM8607_INT_STATUS2 (0x04) +#define PM8607_INT_STATUS3 (0x05) +#define PM8607_INT_MASK_1 (0x06) +#define PM8607_INT_MASK_2 (0x07) +#define PM8607_INT_MASK_3 (0x08) + +/* Regulator Control Registers */ +#define PM8607_LDO1 (0x10) +#define PM8607_LDO2 (0x11) +#define PM8607_LDO3 (0x12) +#define PM8607_LDO4 (0x13) +#define PM8607_LDO5 (0x14) +#define PM8607_LDO6 (0x15) +#define PM8607_LDO7 (0x16) +#define PM8607_LDO8 (0x17) +#define PM8607_LDO9 (0x18) +#define PM8607_LDO10 (0x19) +#define PM8607_LDO12 (0x1A) +#define PM8607_LDO14 (0x1B) +#define PM8607_SLEEP_MODE1 (0x1C) +#define PM8607_SLEEP_MODE2 (0x1D) +#define PM8607_SLEEP_MODE3 (0x1E) +#define PM8607_SLEEP_MODE4 (0x1F) +#define PM8607_GO (0x20) +#define PM8607_SLEEP_BUCK1 (0x21) +#define PM8607_SLEEP_BUCK2 (0x22) +#define PM8607_SLEEP_BUCK3 (0x23) +#define PM8607_BUCK1 (0x24) +#define PM8607_BUCK2 (0x25) +#define PM8607_BUCK3 (0x26) +#define PM8607_BUCK_CONTROLS (0x27) +#define PM8607_SUPPLIES_EN11 (0x2B) +#define PM8607_SUPPLIES_EN12 (0x2C) +#define PM8607_GROUP1 (0x2D) +#define PM8607_GROUP2 (0x2E) +#define PM8607_GROUP3 (0x2F) +#define PM8607_GROUP4 (0x30) +#define PM8607_GROUP5 (0x31) +#define PM8607_GROUP6 (0x32) +#define PM8607_SUPPLIES_EN21 (0x33) +#define PM8607_SUPPLIES_EN22 (0x34) + +/* RTC Control Registers */ +#define PM8607_RTC1 (0xA0) +#define PM8607_RTC_COUNTER1 (0xA1) +#define PM8607_RTC_COUNTER2 (0xA2) +#define PM8607_RTC_COUNTER3 (0xA3) +#define PM8607_RTC_COUNTER4 (0xA4) +#define PM8607_RTC_EXPIRE1 (0xA5) +#define PM8607_RTC_EXPIRE2 (0xA6) +#define PM8607_RTC_EXPIRE3 (0xA7) +#define PM8607_RTC_EXPIRE4 (0xA8) +#define PM8607_RTC_TRIM1 (0xA9) +#define PM8607_RTC_TRIM2 (0xAA) +#define PM8607_RTC_TRIM3 (0xAB) +#define PM8607_RTC_TRIM4 (0xAC) +#define PM8607_RTC_MISC1 (0xAD) +#define PM8607_RTC_MISC2 (0xAE) +#define PM8607_RTC_MISC3 (0xAF) + +/* Misc Registers */ +#define PM8607_CHIP_ID (0x00) +#define PM8607_LDO1 (0x10) +#define PM8607_DVC3 (0x26) +#define PM8607_MISC1 (0x40) + +/* bit definitions for PM8607 events */ +#define PM8607_EVENT_ONKEY (1 << 0) +#define PM8607_EVENT_EXTON (1 << 1) +#define PM8607_EVENT_CHG (1 << 2) +#define PM8607_EVENT_BAT (1 << 3) +#define PM8607_EVENT_RTC (1 << 4) +#define PM8607_EVENT_CC (1 << 5) +#define PM8607_EVENT_VBAT (1 << 8) +#define PM8607_EVENT_VCHG (1 << 9) +#define PM8607_EVENT_VSYS (1 << 10) +#define PM8607_EVENT_TINT (1 << 11) +#define PM8607_EVENT_GPADC0 (1 << 12) +#define PM8607_EVENT_GPADC1 (1 << 13) +#define PM8607_EVENT_GPADC2 (1 << 14) +#define PM8607_EVENT_GPADC3 (1 << 15) +#define PM8607_EVENT_AUDIO_SHORT (1 << 16) +#define PM8607_EVENT_PEN (1 << 17) +#define PM8607_EVENT_HEADSET (1 << 18) +#define PM8607_EVENT_HOOK (1 << 19) +#define PM8607_EVENT_MICIN (1 << 20) +#define PM8607_EVENT_CHG_TIMEOUT (1 << 21) +#define PM8607_EVENT_CHG_DONE (1 << 22) +#define PM8607_EVENT_CHG_FAULT (1 << 23) + +/* bit definitions of Status Query Interface */ +#define PM8607_STATUS_CC (1 << 3) +#define PM8607_STATUS_PEN (1 
<< 4) +#define PM8607_STATUS_HEADSET (1 << 5) +#define PM8607_STATUS_HOOK (1 << 6) +#define PM8607_STATUS_MICIN (1 << 7) +#define PM8607_STATUS_ONKEY (1 << 8) +#define PM8607_STATUS_EXTON (1 << 9) +#define PM8607_STATUS_CHG (1 << 10) +#define PM8607_STATUS_BAT (1 << 11) +#define PM8607_STATUS_VBUS (1 << 12) +#define PM8607_STATUS_OV (1 << 13) + +/* bit definitions of BUCK3 */ +#define PM8607_BUCK3_DOUBLE (1 << 6) + +/* bit definitions of Misc1 */ +#define PM8607_MISC1_PI2C (1 << 0) + +/* Interrupt Number in 88PM8607 */ +enum { + PM8607_IRQ_ONKEY = 0, + PM8607_IRQ_EXTON, + PM8607_IRQ_CHG, + PM8607_IRQ_BAT, + PM8607_IRQ_RTC, + PM8607_IRQ_VBAT = 8, + PM8607_IRQ_VCHG, + PM8607_IRQ_VSYS, + PM8607_IRQ_TINT, + PM8607_IRQ_GPADC0, + PM8607_IRQ_GPADC1, + PM8607_IRQ_GPADC2, + PM8607_IRQ_GPADC3, + PM8607_IRQ_AUDIO_SHORT = 16, + PM8607_IRQ_PEN, + PM8607_IRQ_HEADSET, + PM8607_IRQ_HOOK, + PM8607_IRQ_MICIN, + PM8607_IRQ_CHG_FAIL, + PM8607_IRQ_CHG_DONE, + PM8607_IRQ_CHG_FAULT, +}; + +enum { + PM8607_CHIP_A0 = 0x40, + PM8607_CHIP_A1 = 0x41, + PM8607_CHIP_B0 = 0x48, +}; + + +struct pm8607_chip { + struct device *dev; + struct mutex io_lock; + struct i2c_client *client; + + int (*read)(struct pm8607_chip *chip, int reg, int bytes, void *dest); + int (*write)(struct pm8607_chip *chip, int reg, int bytes, void *src); + + int buck3_double; /* DVC ramp slope double */ + unsigned char chip_id; + +}; + +#define PM8607_MAX_REGULATOR 15 /* 3 Bucks, 12 LDOs */ + +enum { + GI2C_PORT = 0, + PI2C_PORT, +}; + +struct pm8607_platform_data { + int i2c_port; /* Controlled by GI2C or PI2C */ + struct regulator_init_data *regulator[PM8607_MAX_REGULATOR]; +}; + +extern int pm8607_reg_read(struct pm8607_chip *, int); +extern int pm8607_reg_write(struct pm8607_chip *, int, unsigned char); +extern int pm8607_bulk_read(struct pm8607_chip *, int, int, + unsigned char *); +extern int pm8607_bulk_write(struct pm8607_chip *, int, int, + unsigned char *); +extern int pm8607_set_bits(struct pm8607_chip *, int, unsigned char, + unsigned char); +#endif /* __LINUX_MFD_88PM8607_H */ -- cgit v1.2.3 From 0c41839e98272a317d4af4dfcb54b599b2c3dcba Mon Sep 17 00:00:00 2001 From: Srinidhi Kasagar Date: Mon, 12 Oct 2009 17:11:52 +0200 Subject: mfd: add AB4500 driver This adds core driver support for AB4500 mixed signal multimedia & power management chip. This connects to U8500 on the SSP (pl022) and exports read/write functions for the device to get access to this chip. This also registers the client devices and sets the parent. Signed-off-by: srinidhi kasagar Acked-by: Andrea Gallo Reviewed-by: Mark Brown Reviewed-by: Jean-Christophe Signed-off-by: Samuel Ortiz --- drivers/mfd/Kconfig | 10 ++ drivers/mfd/Makefile | 1 + drivers/mfd/ab4500-core.c | 207 +++++++++++++++++++++++++++++++++++ include/linux/mfd/ab4500.h | 262 +++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 480 insertions(+) create mode 100644 drivers/mfd/ab4500-core.c create mode 100644 include/linux/mfd/ab4500.h (limited to 'include') diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 26a36f887357..2aea25ba7ba0 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -329,6 +329,16 @@ config MFD_88PM8607 individual components like voltage regulators, RTC and battery-charger under the corresponding menus. +config AB4500_CORE + tristate "ST-Ericsson's AB4500 Mixed Signal Power management chip" + depends on SPI + default y + help + Select this option to enable access to AB4500 power management + chip. 
This connects to U8500 on the SSP/SPI bus and exports + read/write functions for the devices to get access to this chip. + This chip embeds various other multimedia funtionalities as well. + endmenu menu "Multimedia Capabilities Port drivers" diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 2596add427a2..1792b1ff8ffc 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -52,4 +52,5 @@ obj-$(CONFIG_PCF50633_ADC) += pcf50633-adc.o obj-$(CONFIG_PCF50633_GPIO) += pcf50633-gpio.o obj-$(CONFIG_AB3100_CORE) += ab3100-core.o obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o +obj-$(CONFIG_AB4500_CORE) += ab4500-core.o obj-$(CONFIG_MFD_88PM8607) += 88pm8607.o diff --git a/drivers/mfd/ab4500-core.c b/drivers/mfd/ab4500-core.c new file mode 100644 index 000000000000..3ad91f7ee22b --- /dev/null +++ b/drivers/mfd/ab4500-core.c @@ -0,0 +1,207 @@ +/* + * Copyright (C) 2009 ST-Ericsson + * + * Author: Srinidhi KASAGAR + * + * This program is free software; you can redistribute it + * and/or modify it under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation. + * + * AB4500 is a companion power management chip used with U8500. + * On this platform, this is interfaced with SSP0 controller + * which is a ARM primecell pl022. + * + * At the moment the module just exports read/write features. + * Interrupt management to be added - TODO. + */ +#include +#include +#include +#include +#include +#include + +/* just required if probe fails, we need to + * unregister the device + */ +static struct spi_driver ab4500_driver; + +/* + * This funtion writes to any AB4500 registers using + * SPI protocol & before it writes it packs the data + * in the below 24 bit frame format + * + * *|------------------------------------| + * *| 23|22...18|17.......10|9|8|7......0| + * *| r/w bank adr data | + * * ------------------------------------ + * + * This function shouldn't be called from interrupt + * context + */ +int ab4500_write(struct ab4500 *ab4500, unsigned char block, + unsigned long addr, unsigned char data) +{ + struct spi_transfer xfer; + struct spi_message msg; + int err; + unsigned long spi_data = + block << 18 | addr << 10 | data; + + mutex_lock(&ab4500->lock); + ab4500->tx_buf[0] = spi_data; + ab4500->rx_buf[0] = 0; + + xfer.tx_buf = ab4500->tx_buf; + xfer.rx_buf = NULL; + xfer.len = sizeof(unsigned long); + + spi_message_init(&msg); + spi_message_add_tail(&xfer, &msg); + + err = spi_sync(ab4500->spi, &msg); + mutex_unlock(&ab4500->lock); + + return err; +} +EXPORT_SYMBOL(ab4500_write); + +int ab4500_read(struct ab4500 *ab4500, unsigned char block, + unsigned long addr) +{ + struct spi_transfer xfer; + struct spi_message msg; + unsigned long spi_data = + 1 << 23 | block << 18 | addr << 10; + + mutex_lock(&ab4500->lock); + ab4500->tx_buf[0] = spi_data; + ab4500->rx_buf[0] = 0; + + xfer.tx_buf = ab4500->tx_buf; + xfer.rx_buf = ab4500->rx_buf; + xfer.len = sizeof(unsigned long); + + spi_message_init(&msg); + spi_message_add_tail(&xfer, &msg); + + spi_sync(ab4500->spi, &msg); + mutex_unlock(&ab4500->lock); + + return ab4500->rx_buf[0]; +} +EXPORT_SYMBOL(ab4500_read); + +/* ref: ab3100 core */ +#define AB4500_DEVICE(devname, devid) \ +static struct platform_device ab4500_##devname##_device = { \ + .name = devid, \ + .id = -1, \ +} + +/* list of childern devices of ab4500 - all are + * not populated here - TODO + */ +AB4500_DEVICE(charger, "ab4500-charger"); +AB4500_DEVICE(audio, "ab4500-audio"); +AB4500_DEVICE(usb, "ab4500-usb"); +AB4500_DEVICE(tvout, 
"ab4500-tvout"); +AB4500_DEVICE(sim, "ab4500-sim"); +AB4500_DEVICE(gpadc, "ab4500-gpadc"); +AB4500_DEVICE(clkmgt, "ab4500-clkmgt"); +AB4500_DEVICE(misc, "ab4500-misc"); + +static struct platform_device *ab4500_platform_devs[] = { + &ab4500_charger_device, + &ab4500_audio_device, + &ab4500_usb_device, + &ab4500_tvout_device, + &ab4500_sim_device, + &ab4500_gpadc_device, + &ab4500_clkmgt_device, + &ab4500_misc_device, +}; + +static int __init ab4500_probe(struct spi_device *spi) +{ + struct ab4500 *ab4500; + unsigned char revision; + int err = 0; + int i; + + ab4500 = kzalloc(sizeof *ab4500, GFP_KERNEL); + if (!ab4500) { + dev_err(&spi->dev, "could not allocate AB4500\n"); + err = -ENOMEM; + goto not_detect; + } + + ab4500->spi = spi; + spi_set_drvdata(spi, ab4500); + + mutex_init(&ab4500->lock); + + /* read the revision register */ + revision = ab4500_read(ab4500, AB4500_MISC, AB4500_REV_REG); + + /* revision id 0x0 is for early drop, 0x10 is for cut1.0 */ + if (revision == 0x0 || revision == 0x10) + dev_info(&spi->dev, "Detected chip: %s, revision = %x\n", + ab4500_driver.driver.name, revision); + else { + dev_err(&spi->dev, "unknown chip: 0x%x\n", revision); + goto not_detect; + } + + for (i = 0; i < ARRAY_SIZE(ab4500_platform_devs); i++) { + ab4500_platform_devs[i]->dev.parent = + &spi->dev; + platform_set_drvdata(ab4500_platform_devs[i], ab4500); + } + + /* register the ab4500 platform devices */ + platform_add_devices(ab4500_platform_devs, + ARRAY_SIZE(ab4500_platform_devs)); + + return err; + + not_detect: + spi_unregister_driver(&ab4500_driver); + kfree(ab4500); + return err; +} + +static int __devexit ab4500_remove(struct spi_device *spi) +{ + struct ab4500 *ab4500 = + spi_get_drvdata(spi); + + kfree(ab4500); + + return 0; +} + +static struct spi_driver ab4500_driver = { + .driver = { + .name = "ab4500", + .owner = THIS_MODULE, + }, + .probe = ab4500_probe, + .remove = __devexit_p(ab4500_remove) +}; + +static int __devinit ab4500_init(void) +{ + return spi_register_driver(&ab4500_driver); +} + +static void __exit ab4500_exit(void) +{ + spi_unregister_driver(&ab4500_driver); +} + +subsys_initcall_sync(ab4500_init); + +MODULE_AUTHOR("Srinidhi KASAGAR + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2, as + * published by the Free Software Foundation. + * + * AB4500 device core funtions, for client access + */ +#ifndef MFD_AB4500_H +#define MFD_AB4500_H + +#include + +/* + * AB4500 bank addresses + */ +#define AB4500_SYS_CTRL1_BLOCK 0x1 +#define AB4500_SYS_CTRL2_BLOCK 0x2 +#define AB4500_REGU_CTRL1 0x3 +#define AB4500_REGU_CTRL2 0x4 +#define AB4500_USB 0x5 +#define AB4500_TVOUT 0x6 +#define AB4500_DBI 0x7 +#define AB4500_ECI_AV_ACC 0x8 +#define AB4500_RESERVED 0x9 +#define AB4500_GPADC 0xA +#define AB4500_CHARGER 0xB +#define AB4500_GAS_GAUGE 0xC +#define AB4500_AUDIO 0xD +#define AB4500_INTERRUPT 0xE +#define AB4500_RTC 0xF +#define AB4500_MISC 0x10 +#define AB4500_DEBUG 0x12 +#define AB4500_PROD_TEST 0x13 +#define AB4500_OTP_EMUL 0x15 + +/* + * System control 1 register offsets. 
+ * Bank = 0x01 + */ +#define AB4500_TURNON_STAT_REG 0x0100 +#define AB4500_RESET_STAT_REG 0x0101 +#define AB4500_PONKEY1_PRESS_STAT_REG 0x0102 + +#define AB4500_FSM_STAT1_REG 0x0140 +#define AB4500_FSM_STAT2_REG 0x0141 +#define AB4500_SYSCLK_REQ_STAT_REG 0x0142 +#define AB4500_USB_STAT1_REG 0x0143 +#define AB4500_USB_STAT2_REG 0x0144 +#define AB4500_STATUS_SPARE1_REG 0x0145 +#define AB4500_STATUS_SPARE2_REG 0x0146 + +#define AB4500_CTRL1_REG 0x0180 +#define AB4500_CTRL2_REG 0x0181 + +/* + * System control 2 register offsets. + * bank = 0x02 + */ +#define AB4500_CTRL3_REG 0x0200 +#define AB4500_MAIN_WDOG_CTRL_REG 0x0201 +#define AB4500_MAIN_WDOG_TIMER_REG 0x0202 +#define AB4500_LOW_BAT_REG 0x0203 +#define AB4500_BATT_OK_REG 0x0204 +#define AB4500_SYSCLK_TIMER_REG 0x0205 +#define AB4500_SMPSCLK_CTRL_REG 0x0206 +#define AB4500_SMPSCLK_SEL1_REG 0x0207 +#define AB4500_SMPSCLK_SEL2_REG 0x0208 +#define AB4500_SMPSCLK_SEL3_REG 0x0209 +#define AB4500_SYSULPCLK_CONF_REG 0x020A +#define AB4500_SYSULPCLK_CTRL1_REG 0x020B +#define AB4500_SYSCLK_CTRL_REG 0x020C +#define AB4500_SYSCLK_REQ1_VALID_REG 0x020D +#define AB4500_SYSCLK_REQ_VALID_REG 0x020E +#define AB4500_SYSCTRL_SPARE_REG 0x020F +#define AB4500_PAD_CONF_REG 0x0210 + +/* + * Regu control1 register offsets + * Bank = 0x03 + */ +#define AB4500_REGU_SERIAL_CTRL1_REG 0x0300 +#define AB4500_REGU_SERIAL_CTRL2_REG 0x0301 +#define AB4500_REGU_SERIAL_CTRL3_REG 0x0302 +#define AB4500_REGU_REQ_CTRL1_REG 0x0303 +#define AB4500_REGU_REQ_CTRL2_REG 0x0304 +#define AB4500_REGU_REQ_CTRL3_REG 0x0305 +#define AB4500_REGU_REQ_CTRL4_REG 0x0306 +#define AB4500_REGU_MISC1_REG 0x0380 +#define AB4500_REGU_OTGSUPPLY_CTRL_REG 0x0381 +#define AB4500_REGU_VUSB_CTRL_REG 0x0382 +#define AB4500_REGU_VAUDIO_SUPPLY_REG 0x0383 +#define AB4500_REGU_CTRL1_SPARE_REG 0x0384 + +/* + * Regu control2 Vmod register offsets + */ +#define AB4500_REGU_VMOD_REGU_REG 0x0440 +#define AB4500_REGU_VMOD_SEL1_REG 0x0441 +#define AB4500_REGU_VMOD_SEL2_REG 0x0442 +#define AB4500_REGU_CTRL_DISCH_REG 0x0443 +#define AB4500_REGU_CTRL_DISCH2_REG 0x0444 + +/* + * USB/ULPI register offsets + * Bank : 0x5 + */ +#define AB4500_USB_LINE_STAT_REG 0x0580 +#define AB4500_USB_LINE_CTRL1_REG 0x0581 +#define AB4500_USB_LINE_CTRL2_REG 0x0582 +#define AB4500_USB_LINE_CTRL3_REG 0x0583 +#define AB4500_USB_LINE_CTRL4_REG 0x0584 +#define AB4500_USB_LINE_CTRL5_REG 0x0585 +#define AB4500_USB_OTG_CTRL_REG 0x0587 +#define AB4500_USB_OTG_STAT_REG 0x0588 +#define AB4500_USB_OTG_STAT_REG 0x0588 +#define AB4500_USB_CTRL_SPARE_REG 0x0589 +#define AB4500_USB_PHY_CTRL_REG 0x058A + +/* + * TVOUT / CTRL register offsets + * Bank : 0x06 + */ +#define AB4500_TVOUT_CTRL_REG 0x0680 + +/* + * DBI register offsets + * Bank : 0x07 + */ +#define AB4500_DBI_REG1_REG 0x0700 +#define AB4500_DBI_REG2_REG 0x0701 + +/* + * ECI regsiter offsets + * Bank : 0x08 + */ +#define AB4500_ECI_CTRL_REG 0x0800 +#define AB4500_ECI_HOOKLEVEL_REG 0x0801 +#define AB4500_ECI_DATAOUT_REG 0x0802 +#define AB4500_ECI_DATAIN_REG 0x0803 + +/* + * AV Connector register offsets + * Bank : 0x08 + */ +#define AB4500_AV_CONN_REG 0x0840 + +/* + * Accessory detection register offsets + * Bank : 0x08 + */ +#define AB4500_ACC_DET_DB1_REG 0x0880 +#define AB4500_ACC_DET_DB2_REG 0x0881 + +/* + * GPADC register offsets + * Bank : 0x0A + */ +#define AB4500_GPADC_CTRL1_REG 0x0A00 +#define AB4500_GPADC_CTRL2_REG 0x0A01 +#define AB4500_GPADC_CTRL3_REG 0x0A02 +#define AB4500_GPADC_AUTO_TIMER_REG 0x0A03 +#define AB4500_GPADC_STAT_REG 0x0A04 +#define AB4500_GPADC_MANDATAL_REG 0x0A05 
+#define AB4500_GPADC_MANDATAH_REG 0x0A06 +#define AB4500_GPADC_AUTODATAL_REG 0x0A07 +#define AB4500_GPADC_AUTODATAH_REG 0x0A08 +#define AB4500_GPADC_MUX_CTRL_REG 0x0A09 + +/* + * Charger / status register offfsets + * Bank : 0x0B + */ +#define AB4500_CH_STATUS1_REG 0x0B00 +#define AB4500_CH_STATUS2_REG 0x0B01 +#define AB4500_CH_USBCH_STAT1_REG 0x0B02 +#define AB4500_CH_USBCH_STAT2_REG 0x0B03 +#define AB4500_CH_FSM_STAT_REG 0x0B04 +#define AB4500_CH_STAT_REG 0x0B05 + +/* + * Charger / control register offfsets + * Bank : 0x0B + */ +#define AB4500_CH_VOLT_LVL_REG 0x0B40 + +/* + * Charger / main control register offfsets + * Bank : 0x0B + */ +#define AB4500_MCH_CTRL1 0x0B80 +#define AB4500_MCH_CTRL2 0x0B81 +#define AB4500_MCH_IPT_CURLVL_REG 0x0B82 +#define AB4500_CH_WD_REG 0x0B83 + +/* + * Charger / USB control register offsets + * Bank : 0x0B + */ +#define AB4500_USBCH_CTRL1_REG 0x0BC0 +#define AB4500_USBCH_CTRL2_REG 0x0BC1 +#define AB4500_USBCH_IPT_CRNTLVL_REG 0x0BC2 + +/* + * RTC bank register offsets + * Bank : 0xF + */ +#define AB4500_RTC_SOFF_STAT_REG 0x0F00 +#define AB4500_RTC_CC_CONF_REG 0x0F01 +#define AB4500_RTC_READ_REQ_REG 0x0F02 +#define AB4500_RTC_WATCH_TSECMID_REG 0x0F03 +#define AB4500_RTC_WATCH_TSECHI_REG 0x0F04 +#define AB4500_RTC_WATCH_TMIN_LOW_REG 0x0F05 +#define AB4500_RTC_WATCH_TMIN_MID_REG 0x0F06 +#define AB4500_RTC_WATCH_TMIN_HI_REG 0x0F07 +#define AB4500_RTC_ALRM_MIN_LOW_REG 0x0F08 +#define AB4500_RTC_ALRM_MIN_MID_REG 0x0F09 +#define AB4500_RTC_ALRM_MIN_HI_REG 0x0F0A +#define AB4500_RTC_STAT_REG 0x0F0B +#define AB4500_RTC_BKUP_CHG_REG 0x0F0C +#define AB4500_RTC_FORCE_BKUP_REG 0x0F0D +#define AB4500_RTC_CALIB_REG 0x0F0E +#define AB4500_RTC_SWITCH_STAT_REG 0x0F0F + +/* + * PWM Out generators + * Bank: 0x10 + */ +#define AB4500_PWM_OUT_CTRL1_REG 0x1060 +#define AB4500_PWM_OUT_CTRL2_REG 0x1061 +#define AB4500_PWM_OUT_CTRL3_REG 0x1062 +#define AB4500_PWM_OUT_CTRL4_REG 0x1063 +#define AB4500_PWM_OUT_CTRL5_REG 0x1064 +#define AB4500_PWM_OUT_CTRL6_REG 0x1065 +#define AB4500_PWM_OUT_CTRL7_REG 0x1066 + +#define AB4500_I2C_PAD_CTRL_REG 0x1067 +#define AB4500_REV_REG 0x1080 + +/** + * struct ab4500 + * @spi: spi device structure + * @tx_buf: transmit buffer + * @rx_buf: receive buffer + * @lock: sync primitive + */ +struct ab4500 { + struct spi_device *spi; + unsigned long tx_buf[4]; + unsigned long rx_buf[4]; + struct mutex lock; +}; + +int ab4500_write(struct ab4500 *ab4500, unsigned char block, + unsigned long addr, unsigned char data); +int ab4500_read(struct ab4500 *ab4500, unsigned char block, + unsigned long addr); + +#endif /* MFD_AB4500_H */ -- cgit v1.2.3 From 6f2ecaae72910211034c4f1955da97b2ff994265 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 1 Oct 2009 15:41:05 +0100 Subject: gpiolib: Make WM831x GPIO count dynamic This supports future devices with fewer GPIOs. 
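The mechanism is the usual one for supporting device variants in a shared driver: the core records the capabilities of the detected chip in the wm831x structure at probe time, and the GPIO sub-driver sizes its gpio_chip from that field instead of a compile-time constant. Condensed (the smaller-variant case is hypothetical at this point in the series):

/* mfd core, after reading the chip ID register */
switch (chip_id) {
case WM8310:
case WM8311:
case WM8312:
	wm831x->num_gpio = 16;
	break;
	/* a later variant with fewer GPIOs would set e.g. 12 here */
}

/* gpio sub-driver probe */
wm831x_gpio->gpio_chip.ngpio = wm831x->num_gpio;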
Signed-off-by: Mark Brown Signed-off-by: Samuel Ortiz --- drivers/gpio/wm831x-gpio.c | 4 +--- drivers/mfd/wm831x-core.c | 3 +++ include/linux/mfd/wm831x/core.h | 2 ++ 3 files changed, 6 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/gpio/wm831x-gpio.c b/drivers/gpio/wm831x-gpio.c index f9c09a54ec7f..f5e4934f1da1 100644 --- a/drivers/gpio/wm831x-gpio.c +++ b/drivers/gpio/wm831x-gpio.c @@ -23,8 +23,6 @@ #include #include -#define WM831X_GPIO_MAX 16 - struct wm831x_gpio { struct wm831x *wm831x; struct gpio_chip gpio_chip; @@ -192,7 +190,7 @@ static int __devinit wm831x_gpio_probe(struct platform_device *pdev) wm831x_gpio->wm831x = wm831x; wm831x_gpio->gpio_chip = template_chip; - wm831x_gpio->gpio_chip.ngpio = WM831X_GPIO_MAX; + wm831x_gpio->gpio_chip.ngpio = wm831x->num_gpio; wm831x_gpio->gpio_chip.dev = &pdev->dev; if (pdata && pdata->gpio_base) wm831x_gpio->gpio_chip.base = pdata->gpio_base; diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c index 8504c6ef4a16..8d386c0c8027 100644 --- a/drivers/mfd/wm831x-core.c +++ b/drivers/mfd/wm831x-core.c @@ -1293,16 +1293,19 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq) switch (ret) { case WM8310: parent = WM8310; + wm831x->num_gpio = 16; dev_info(wm831x->dev, "WM8310 revision %c\n", 'A' + rev); break; case WM8311: parent = WM8311; + wm831x->num_gpio = 16; dev_info(wm831x->dev, "WM8311 revision %c\n", 'A' + rev); break; case WM8312: parent = WM8312; + wm831x->num_gpio = 16; dev_info(wm831x->dev, "WM8312 revision %c\n", 'A' + rev); break; diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h index 91eb493bf14c..c1bc59f6cbf2 100644 --- a/include/linux/mfd/wm831x/core.h +++ b/include/linux/mfd/wm831x/core.h @@ -253,6 +253,8 @@ struct wm831x { unsigned int irq_base; int irq_masks[5]; + int num_gpio; + struct mutex auxadc_lock; /* The WM831x has a security key blocking access to certain -- cgit v1.2.3 From d4e0a89e3d170affa896efcdb4320e38a2f3a546 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 1 Oct 2009 15:41:07 +0100 Subject: mfd: Add support for WM8320 PMICs The WM8320 is an integrated power management subsystem providing voltage regulators, RTC, watchdog and other functionality. The WM8320 is derived from the WM831x and therefore shares most of the driver code with the WM831x. Signed-off-by: Mark Brown Signed-off-by: Samuel Ortiz --- drivers/mfd/Kconfig | 6 +- drivers/mfd/wm831x-core.c | 159 ++++++++++++++++++++++++++++++++++++++++ include/linux/mfd/wm831x/core.h | 1 + 3 files changed, 163 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 2aea25ba7ba0..b0b3e42e7ee6 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -185,12 +185,12 @@ config MFD_WM8400 the functionality of the device. config MFD_WM831X - tristate "Support Wolfson Microelectronics WM831x PMICs" + tristate "Support Wolfson Microelectronics WM831x/2x PMICs" select MFD_CORE depends on I2C help - Support for the Wolfson Microelecronics WM831x PMICs. This - driver provides common support for accessing the device, + Support for the Wolfson Microelecronics WM831x and WM832x PMICs. + This driver provides common support for accessing the device, additional drivers must be enabled in order to use the functionality of the device. 
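Because the WM8320 reuses the WM831x core, adding it is mostly a matter of describing its set of sub-functions as an mfd_cell table and letting the MFD core spawn them as platform devices once the chip ID has been identified. A condensed sketch of that mechanism (the wm83xx_devs table below is a hypothetical, much shorter stand-in for the full wm8320_devs array in the patch):

static struct mfd_cell wm83xx_devs[] = {
	{ .name = "wm831x-rtc" },
	{ .name = "wm831x-watchdog" },
	{
		.name		= "wm831x-gpio",
		.num_resources	= ARRAY_SIZE(wm831x_gpio_resources),
		.resources	= wm831x_gpio_resources,
	},
};

/* in wm831x_device_init(), once the chip has been identified as a WM8320 */
ret = mfd_add_devices(wm831x->dev, -1,
		      wm83xx_devs, ARRAY_SIZE(wm83xx_devs),
		      NULL, 0);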
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c index 8d386c0c8027..163029f06185 100644 --- a/drivers/mfd/wm831x-core.c +++ b/drivers/mfd/wm831x-core.c @@ -93,6 +93,7 @@ enum wm831x_parent { WM8310 = 0x8310, WM8311 = 0x8311, WM8312 = 0x8312, + WM8320 = 0x8320, }; static int wm831x_reg_locked(struct wm831x *wm831x, unsigned short reg) @@ -478,6 +479,20 @@ static struct resource wm831x_dcdc4_resources[] = { }, }; +static struct resource wm8320_dcdc4_buck_resources[] = { + { + .start = WM831X_DC4_CONTROL, + .end = WM832X_DC4_SLEEP_CONTROL, + .flags = IORESOURCE_IO, + }, + { + .name = "UV", + .start = WM831X_IRQ_UV_DC4, + .end = WM831X_IRQ_UV_DC4, + .flags = IORESOURCE_IRQ, + }, +}; + static struct resource wm831x_gpio_resources[] = { { .start = WM831X_IRQ_GPIO_1, @@ -1237,6 +1252,137 @@ static struct mfd_cell wm8312_devs[] = { }, }; +static struct mfd_cell wm8320_devs[] = { + { + .name = "wm831x-backup", + }, + { + .name = "wm831x-buckv", + .id = 1, + .num_resources = ARRAY_SIZE(wm831x_dcdc1_resources), + .resources = wm831x_dcdc1_resources, + }, + { + .name = "wm831x-buckv", + .id = 2, + .num_resources = ARRAY_SIZE(wm831x_dcdc2_resources), + .resources = wm831x_dcdc2_resources, + }, + { + .name = "wm831x-buckp", + .id = 3, + .num_resources = ARRAY_SIZE(wm831x_dcdc3_resources), + .resources = wm831x_dcdc3_resources, + }, + { + .name = "wm831x-buckp", + .id = 4, + .num_resources = ARRAY_SIZE(wm8320_dcdc4_buck_resources), + .resources = wm8320_dcdc4_buck_resources, + }, + { + .name = "wm831x-gpio", + .num_resources = ARRAY_SIZE(wm831x_gpio_resources), + .resources = wm831x_gpio_resources, + }, + { + .name = "wm831x-hwmon", + }, + { + .name = "wm831x-ldo", + .id = 1, + .num_resources = ARRAY_SIZE(wm831x_ldo1_resources), + .resources = wm831x_ldo1_resources, + }, + { + .name = "wm831x-ldo", + .id = 2, + .num_resources = ARRAY_SIZE(wm831x_ldo2_resources), + .resources = wm831x_ldo2_resources, + }, + { + .name = "wm831x-ldo", + .id = 3, + .num_resources = ARRAY_SIZE(wm831x_ldo3_resources), + .resources = wm831x_ldo3_resources, + }, + { + .name = "wm831x-ldo", + .id = 4, + .num_resources = ARRAY_SIZE(wm831x_ldo4_resources), + .resources = wm831x_ldo4_resources, + }, + { + .name = "wm831x-ldo", + .id = 5, + .num_resources = ARRAY_SIZE(wm831x_ldo5_resources), + .resources = wm831x_ldo5_resources, + }, + { + .name = "wm831x-ldo", + .id = 6, + .num_resources = ARRAY_SIZE(wm831x_ldo6_resources), + .resources = wm831x_ldo6_resources, + }, + { + .name = "wm831x-aldo", + .id = 7, + .num_resources = ARRAY_SIZE(wm831x_ldo7_resources), + .resources = wm831x_ldo7_resources, + }, + { + .name = "wm831x-aldo", + .id = 8, + .num_resources = ARRAY_SIZE(wm831x_ldo8_resources), + .resources = wm831x_ldo8_resources, + }, + { + .name = "wm831x-aldo", + .id = 9, + .num_resources = ARRAY_SIZE(wm831x_ldo9_resources), + .resources = wm831x_ldo9_resources, + }, + { + .name = "wm831x-aldo", + .id = 10, + .num_resources = ARRAY_SIZE(wm831x_ldo10_resources), + .resources = wm831x_ldo10_resources, + }, + { + .name = "wm831x-alive-ldo", + .id = 11, + .num_resources = ARRAY_SIZE(wm831x_ldo11_resources), + .resources = wm831x_ldo11_resources, + }, + { + .name = "wm831x-on", + .num_resources = ARRAY_SIZE(wm831x_on_resources), + .resources = wm831x_on_resources, + }, + { + .name = "wm831x-rtc", + .num_resources = ARRAY_SIZE(wm831x_rtc_resources), + .resources = wm831x_rtc_resources, + }, + { + .name = "wm831x-status", + .id = 1, + .num_resources = ARRAY_SIZE(wm831x_status1_resources), + .resources = 
wm831x_status1_resources, + }, + { + .name = "wm831x-status", + .id = 2, + .num_resources = ARRAY_SIZE(wm831x_status2_resources), + .resources = wm831x_status2_resources, + }, + { + .name = "wm831x-watchdog", + .num_resources = ARRAY_SIZE(wm831x_wdt_resources), + .resources = wm831x_wdt_resources, + }, +}; + static struct mfd_cell backlight_devs[] = { { .name = "wm831x-backlight", @@ -1309,6 +1455,12 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq) dev_info(wm831x->dev, "WM8312 revision %c\n", 'A' + rev); break; + case WM8320: + parent = WM8320; + wm831x->num_gpio = 12; + dev_info(wm831x->dev, "WM8320 revision %c\n", 'A' + rev); + break; + default: dev_err(wm831x->dev, "Unknown WM831x device %04x\n", ret); ret = -EINVAL; @@ -1367,6 +1519,12 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq) NULL, 0); break; + case WM8320: + ret = mfd_add_devices(wm831x->dev, -1, + wm8320_devs, ARRAY_SIZE(wm8320_devs), + NULL, 0); + break; + default: /* If this happens the bus probe function is buggy */ BUG(); @@ -1492,6 +1650,7 @@ static const struct i2c_device_id wm831x_i2c_id[] = { { "wm8310", WM8310 }, { "wm8311", WM8311 }, { "wm8312", WM8312 }, + { "wm8320", WM8320 }, { } }; MODULE_DEVICE_TABLE(i2c, wm831x_i2c_id); diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h index c1bc59f6cbf2..d01d293a6b25 100644 --- a/include/linux/mfd/wm831x/core.h +++ b/include/linux/mfd/wm831x/core.h @@ -117,6 +117,7 @@ #define WM831X_DC3_SLEEP_CONTROL 0x4063 #define WM831X_DC4_CONTROL 0x4064 #define WM831X_DC4_SLEEP_CONTROL 0x4065 +#define WM832X_DC4_SLEEP_CONTROL 0x4067 #define WM831X_EPE1_CONTROL 0x4066 #define WM831X_EPE2_CONTROL 0x4067 #define WM831X_LDO1_CONTROL 0x4068 -- cgit v1.2.3 From a5736e0b62fcb7c1b20892c62e1c5fe5e9387c86 Mon Sep 17 00:00:00 2001 From: Michael Hennerich Date: Mon, 12 Oct 2009 17:22:38 +0200 Subject: mfd: Add ADP5520/ADP5501 driver Base driver for Analog Devices ADP5520/ADP5501 MFD PMICs Subdevs: LCD Backlight : drivers/video/backlight/adp5520_bl.c LEDs : drivers/led/leds-adp5520.c GPIO : drivers/gpio/adp5520-gpio.c (ADP5520 only) Keys : drivers/input/keyboard/adp5520-keys.c (ADP5520 only) Signed-off-by: Michael Hennerich Signed-off-by: Bryan Wu Signed-off-by: Mike Frysinger Signed-off-by: Samuel Ortiz --- drivers/mfd/Kconfig | 10 ++ drivers/mfd/Makefile | 1 + drivers/mfd/adp5520.c | 378 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/mfd/adp5520.h | 299 +++++++++++++++++++++++++++++++++++ 4 files changed, 688 insertions(+) create mode 100644 drivers/mfd/adp5520.c create mode 100644 include/linux/mfd/adp5520.h (limited to 'include') diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 7df3bcf79490..b1d537053faf 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -174,6 +174,16 @@ config PMIC_DA903X individual components like LCD backlight, voltage regulators, LEDs and battery-charger under the corresponding menus. +config PMIC_ADP5520 + bool "Analog Devices ADP5520/01 MFD PMIC Core Support" + depends on I2C=y + help + Say yes here to add support for Analog Devices AD5520 and ADP5501, + Multifunction Power Management IC. This includes + the I2C driver and the core APIs _only_, you have to select + individual components like LCD backlight, LEDs, GPIOs and Kepad + under the corresponding menus. 
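The split between the base driver and its children is visible in the exported interface: sub-drivers never touch the I2C client directly but go through adp5520_read()/adp5520_write()/adp5520_set_bits(), and interrupt-driven children subscribe to the blocking notifier chain that the threaded IRQ handler feeds. A rough usage sketch from the point of view of a hypothetical child driver (the my_child_* names are illustrative, not part of the patch):

static int my_child_notify(struct notifier_block *nb,
			   unsigned long events, void *unused)
{
	if (events & ADP5520_KP_INT)
		;		/* a key-press event was latched */
	return NOTIFY_OK;
}

static struct notifier_block my_child_nb = {
	.notifier_call = my_child_notify,
};

static int my_child_probe(struct platform_device *pdev)
{
	struct device *mfd = pdev->dev.parent;	/* the adp5520 core device */
	uint8_t stat;
	int ret;

	ret = adp5520_read(mfd, ADP5520_MODE_STATUS, &stat);
	if (ret)
		return ret;

	/* unmask the key-press interrupt and hook into the event chain */
	return adp5520_register_notifier(mfd, &my_child_nb, ADP5520_KP_IEN);
}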
+ config MFD_WM8400 tristate "Support Wolfson Microelectronics WM8400" select MFD_CORE diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 1792b1ff8ffc..24558574a737 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -54,3 +54,4 @@ obj-$(CONFIG_AB3100_CORE) += ab3100-core.o obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o obj-$(CONFIG_AB4500_CORE) += ab4500-core.o obj-$(CONFIG_MFD_88PM8607) += 88pm8607.o +obj-$(CONFIG_PMIC_ADP5520) += adp5520.o \ No newline at end of file diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c new file mode 100644 index 000000000000..401a9b6029e3 --- /dev/null +++ b/drivers/mfd/adp5520.c @@ -0,0 +1,378 @@ +/* + * Base driver for Analog Devices ADP5520/ADP5501 MFD PMICs + * LCD Backlight: drivers/video/backlight/adp5520_bl + * LEDs : drivers/led/leds-adp5520 + * GPIO : drivers/gpio/adp5520-gpio (ADP5520 only) + * Keys : drivers/input/keyboard/adp5520-keys (ADP5520 only) + * + * Copyright 2009 Analog Devices Inc. + * + * Derived from da903x: + * Copyright (C) 2008 Compulab, Ltd. + * Mike Rapoport + * + * Copyright (C) 2006-2008 Marvell International Ltd. + * Eric Miao + * + * Licensed under the GPL-2 or later. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +struct adp5520_chip { + struct i2c_client *client; + struct device *dev; + struct mutex lock; + struct blocking_notifier_head notifier_list; + int irq; + unsigned long id; +}; + +static int __adp5520_read(struct i2c_client *client, + int reg, uint8_t *val) +{ + int ret; + + ret = i2c_smbus_read_byte_data(client, reg); + if (ret < 0) { + dev_err(&client->dev, "failed reading at 0x%02x\n", reg); + return ret; + } + + *val = (uint8_t)ret; + return 0; +} + +static int __adp5520_write(struct i2c_client *client, + int reg, uint8_t val) +{ + int ret; + + ret = i2c_smbus_write_byte_data(client, reg, val); + if (ret < 0) { + dev_err(&client->dev, "failed writing 0x%02x to 0x%02x\n", + val, reg); + return ret; + } + return 0; +} + +int __adp5520_ack_bits(struct i2c_client *client, int reg, uint8_t bit_mask) +{ + struct adp5520_chip *chip = i2c_get_clientdata(client); + uint8_t reg_val; + int ret; + + mutex_lock(&chip->lock); + + ret = __adp5520_read(client, reg, ®_val); + + if (!ret) { + reg_val |= bit_mask; + ret = __adp5520_write(client, reg, reg_val); + } + + mutex_unlock(&chip->lock); + return ret; +} + +int adp5520_write(struct device *dev, int reg, uint8_t val) +{ + return __adp5520_write(to_i2c_client(dev), reg, val); +} +EXPORT_SYMBOL_GPL(adp5520_write); + +int adp5520_read(struct device *dev, int reg, uint8_t *val) +{ + return __adp5520_read(to_i2c_client(dev), reg, val); +} +EXPORT_SYMBOL_GPL(adp5520_read); + +int adp5520_set_bits(struct device *dev, int reg, uint8_t bit_mask) +{ + struct adp5520_chip *chip = dev_get_drvdata(dev); + uint8_t reg_val; + int ret; + + mutex_lock(&chip->lock); + + ret = __adp5520_read(chip->client, reg, ®_val); + + if (!ret && ((reg_val & bit_mask) == 0)) { + reg_val |= bit_mask; + ret = __adp5520_write(chip->client, reg, reg_val); + } + + mutex_unlock(&chip->lock); + return ret; +} +EXPORT_SYMBOL_GPL(adp5520_set_bits); + +int adp5520_clr_bits(struct device *dev, int reg, uint8_t bit_mask) +{ + struct adp5520_chip *chip = dev_get_drvdata(dev); + uint8_t reg_val; + int ret; + + mutex_lock(&chip->lock); + + ret = __adp5520_read(chip->client, reg, ®_val); + + if (!ret && (reg_val & bit_mask)) { + reg_val &= ~bit_mask; + ret = __adp5520_write(chip->client, reg, reg_val); + } + + mutex_unlock(&chip->lock); + 
return ret; +} +EXPORT_SYMBOL_GPL(adp5520_clr_bits); + +int adp5520_register_notifier(struct device *dev, struct notifier_block *nb, + unsigned int events) +{ + struct adp5520_chip *chip = dev_get_drvdata(dev); + + if (chip->irq) { + adp5520_set_bits(chip->dev, ADP5520_INTERRUPT_ENABLE, + events & (ADP5520_KP_IEN | ADP5520_KR_IEN | + ADP5520_OVP_IEN | ADP5520_CMPR_IEN)); + + return blocking_notifier_chain_register(&chip->notifier_list, + nb); + } + + return -ENODEV; +} +EXPORT_SYMBOL_GPL(adp5520_register_notifier); + +int adp5520_unregister_notifier(struct device *dev, struct notifier_block *nb, + unsigned int events) +{ + struct adp5520_chip *chip = dev_get_drvdata(dev); + + adp5520_clr_bits(chip->dev, ADP5520_INTERRUPT_ENABLE, + events & (ADP5520_KP_IEN | ADP5520_KR_IEN | + ADP5520_OVP_IEN | ADP5520_CMPR_IEN)); + + return blocking_notifier_chain_unregister(&chip->notifier_list, nb); +} +EXPORT_SYMBOL_GPL(adp5520_unregister_notifier); + +static irqreturn_t adp5520_irq_thread(int irq, void *data) +{ + struct adp5520_chip *chip = data; + unsigned int events; + uint8_t reg_val; + int ret; + + ret = __adp5520_read(chip->client, ADP5520_MODE_STATUS, ®_val); + if (ret) + goto out; + + events = reg_val & (ADP5520_OVP_INT | ADP5520_CMPR_INT | + ADP5520_GPI_INT | ADP5520_KR_INT | ADP5520_KP_INT); + + blocking_notifier_call_chain(&chip->notifier_list, events, NULL); + /* ACK, Sticky bits are W1C */ + __adp5520_ack_bits(chip->client, ADP5520_MODE_STATUS, events); + +out: + return IRQ_HANDLED; +} + +static int __remove_subdev(struct device *dev, void *unused) +{ + platform_device_unregister(to_platform_device(dev)); + return 0; +} + +static int adp5520_remove_subdevs(struct adp5520_chip *chip) +{ + return device_for_each_child(chip->dev, NULL, __remove_subdev); +} + +static int __devinit adp5520_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct adp5520_platform_data *pdata = client->dev.platform_data; + struct platform_device *pdev; + struct adp5520_chip *chip; + int ret; + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_BYTE_DATA)) { + dev_err(&client->dev, "SMBUS Word Data not Supported\n"); + return -EIO; + } + + if (pdata == NULL) { + dev_err(&client->dev, "missing platform data\n"); + return -ENODEV; + } + + chip = kzalloc(sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + i2c_set_clientdata(client, chip); + chip->client = client; + + chip->dev = &client->dev; + chip->irq = client->irq; + chip->id = id->driver_data; + mutex_init(&chip->lock); + + if (chip->irq) { + BLOCKING_INIT_NOTIFIER_HEAD(&chip->notifier_list); + + ret = request_threaded_irq(chip->irq, NULL, adp5520_irq_thread, + IRQF_TRIGGER_LOW | IRQF_ONESHOT, + "adp5520", chip); + if (ret) { + dev_err(&client->dev, "failed to request irq %d\n", + chip->irq); + goto out_free_chip; + } + } + + ret = adp5520_write(chip->dev, ADP5520_MODE_STATUS, ADP5520_nSTNBY); + if (ret) { + dev_err(&client->dev, "failed to write\n"); + goto out_free_irq; + } + + if (pdata->keys) { + pdev = platform_device_register_data(chip->dev, "adp5520-keys", + chip->id, pdata->keys, sizeof(*pdata->keys)); + if (IS_ERR(pdev)) { + ret = PTR_ERR(pdev); + goto out_remove_subdevs; + } + } + + if (pdata->gpio) { + pdev = platform_device_register_data(chip->dev, "adp5520-gpio", + chip->id, pdata->gpio, sizeof(*pdata->gpio)); + if (IS_ERR(pdev)) { + ret = PTR_ERR(pdev); + goto out_remove_subdevs; + } + } + + if (pdata->leds) { + pdev = platform_device_register_data(chip->dev, "adp5520-led", + chip->id, pdata->leds, 
sizeof(*pdata->leds)); + if (IS_ERR(pdev)) { + ret = PTR_ERR(pdev); + goto out_remove_subdevs; + } + } + + if (pdata->backlight) { + pdev = platform_device_register_data(chip->dev, + "adp5520-backlight", + chip->id, + pdata->backlight, + sizeof(*pdata->backlight)); + if (IS_ERR(pdev)) { + ret = PTR_ERR(pdev); + goto out_remove_subdevs; + } + } + + return 0; + +out_remove_subdevs: + adp5520_remove_subdevs(chip); + +out_free_irq: + if (chip->irq) + free_irq(chip->irq, chip); + +out_free_chip: + i2c_set_clientdata(client, NULL); + kfree(chip); + + return ret; +} + +static int __devexit adp5520_remove(struct i2c_client *client) +{ + struct adp5520_chip *chip = dev_get_drvdata(&client->dev); + + if (chip->irq) + free_irq(chip->irq, chip); + + adp5520_remove_subdevs(chip); + adp5520_write(chip->dev, ADP5520_MODE_STATUS, 0); + i2c_set_clientdata(client, NULL); + kfree(chip); + return 0; +} + +#ifdef CONFIG_PM +static int adp5520_suspend(struct i2c_client *client, + pm_message_t state) +{ + struct adp5520_chip *chip = dev_get_drvdata(&client->dev); + + adp5520_clr_bits(chip->dev, ADP5520_MODE_STATUS, ADP5520_nSTNBY); + return 0; +} + +static int adp5520_resume(struct i2c_client *client) +{ + struct adp5520_chip *chip = dev_get_drvdata(&client->dev); + + adp5520_set_bits(chip->dev, ADP5520_MODE_STATUS, ADP5520_nSTNBY); + return 0; +} +#else +#define adp5520_suspend NULL +#define adp5520_resume NULL +#endif + +static const struct i2c_device_id adp5520_id[] = { + { "pmic-adp5520", ID_ADP5520 }, + { "pmic-adp5501", ID_ADP5501 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, adp5520_id); + +static struct i2c_driver adp5520_driver = { + .driver = { + .name = "adp5520", + .owner = THIS_MODULE, + }, + .probe = adp5520_probe, + .remove = __devexit_p(adp5520_remove), + .suspend = adp5520_suspend, + .resume = adp5520_resume, + .id_table = adp5520_id, +}; + +static int __init adp5520_init(void) +{ + return i2c_add_driver(&adp5520_driver); +} +module_init(adp5520_init); + +static void __exit adp5520_exit(void) +{ + i2c_del_driver(&adp5520_driver); +} +module_exit(adp5520_exit); + +MODULE_AUTHOR("Michael Hennerich "); +MODULE_DESCRIPTION("ADP5520(01) PMIC-MFD Driver"); +MODULE_LICENSE("GPL"); diff --git a/include/linux/mfd/adp5520.h b/include/linux/mfd/adp5520.h new file mode 100644 index 000000000000..ac37558a4673 --- /dev/null +++ b/include/linux/mfd/adp5520.h @@ -0,0 +1,299 @@ +/* + * Definitions and platform data for Analog Devices + * ADP5520/ADP5501 MFD PMICs (Backlight, LED, GPIO and Keys) + * + * Copyright 2009 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. 
+ */ + + +#ifndef __LINUX_MFD_ADP5520_H +#define __LINUX_MFD_ADP5520_H + +#define ID_ADP5520 5520 +#define ID_ADP5501 5501 + +/* + * ADP5520/ADP5501 Register Map + */ + +#define ADP5520_MODE_STATUS 0x00 +#define ADP5520_INTERRUPT_ENABLE 0x01 +#define ADP5520_BL_CONTROL 0x02 +#define ADP5520_BL_TIME 0x03 +#define ADP5520_BL_FADE 0x04 +#define ADP5520_DAYLIGHT_MAX 0x05 +#define ADP5520_DAYLIGHT_DIM 0x06 +#define ADP5520_OFFICE_MAX 0x07 +#define ADP5520_OFFICE_DIM 0x08 +#define ADP5520_DARK_MAX 0x09 +#define ADP5520_DARK_DIM 0x0A +#define ADP5520_BL_VALUE 0x0B +#define ADP5520_ALS_CMPR_CFG 0x0C +#define ADP5520_L2_TRIP 0x0D +#define ADP5520_L2_HYS 0x0E +#define ADP5520_L3_TRIP 0x0F +#define ADP5520_L3_HYS 0x10 +#define ADP5520_LED_CONTROL 0x11 +#define ADP5520_LED_TIME 0x12 +#define ADP5520_LED_FADE 0x13 +#define ADP5520_LED1_CURRENT 0x14 +#define ADP5520_LED2_CURRENT 0x15 +#define ADP5520_LED3_CURRENT 0x16 + +/* + * ADP5520 Register Map + */ + +#define ADP5520_GPIO_CFG_1 0x17 +#define ADP5520_GPIO_CFG_2 0x18 +#define ADP5520_GPIO_IN 0x19 +#define ADP5520_GPIO_OUT 0x1A +#define ADP5520_GPIO_INT_EN 0x1B +#define ADP5520_GPIO_INT_STAT 0x1C +#define ADP5520_GPIO_INT_LVL 0x1D +#define ADP5520_GPIO_DEBOUNCE 0x1E +#define ADP5520_GPIO_PULLUP 0x1F +#define ADP5520_KP_INT_STAT_1 0x20 +#define ADP5520_KP_INT_STAT_2 0x21 +#define ADP5520_KR_INT_STAT_1 0x22 +#define ADP5520_KR_INT_STAT_2 0x23 +#define ADP5520_KEY_STAT_1 0x24 +#define ADP5520_KEY_STAT_2 0x25 + +/* + * MODE_STATUS bits + */ + +#define ADP5520_nSTNBY (1 << 7) +#define ADP5520_BL_EN (1 << 6) +#define ADP5520_DIM_EN (1 << 5) +#define ADP5520_OVP_INT (1 << 4) +#define ADP5520_CMPR_INT (1 << 3) +#define ADP5520_GPI_INT (1 << 2) +#define ADP5520_KR_INT (1 << 1) +#define ADP5520_KP_INT (1 << 0) + +/* + * INTERRUPT_ENABLE bits + */ + +#define ADP5520_AUTO_LD_EN (1 << 4) +#define ADP5520_CMPR_IEN (1 << 3) +#define ADP5520_OVP_IEN (1 << 2) +#define ADP5520_KR_IEN (1 << 1) +#define ADP5520_KP_IEN (1 << 0) + +/* + * BL_CONTROL bits + */ + +#define ADP5520_BL_LVL ((x) << 5) +#define ADP5520_BL_LAW ((x) << 4) +#define ADP5520_BL_AUTO_ADJ (1 << 3) +#define ADP5520_OVP_EN (1 << 2) +#define ADP5520_FOVR (1 << 1) +#define ADP5520_KP_BL_EN (1 << 0) + +/* + * ALS_CMPR_CFG bits + */ + +#define ADP5520_L3_OUT (1 << 3) +#define ADP5520_L2_OUT (1 << 2) +#define ADP5520_L3_EN (1 << 1) + +#define ADP5020_MAX_BRIGHTNESS 0x7F + +#define FADE_VAL(in, out) ((0xF & (in)) | ((0xF & (out)) << 4)) +#define BL_CTRL_VAL(law, auto) (((1 & (auto)) << 3) | ((0x3 & (law)) << 4)) +#define ALS_CMPR_CFG_VAL(filt, l3_en) (((0x7 & filt) << 5) | l3_en) + +/* + * LEDs subdevice bits and masks + */ + +#define ADP5520_01_MAXLEDS 3 + +#define ADP5520_FLAG_LED_MASK 0x3 +#define ADP5520_FLAG_OFFT_SHIFT 8 +#define ADP5520_FLAG_OFFT_MASK 0x3 + +#define ADP5520_R3_MODE (1 << 5) +#define ADP5520_C3_MODE (1 << 4) +#define ADP5520_LED_LAW (1 << 3) +#define ADP5520_LED3_EN (1 << 2) +#define ADP5520_LED2_EN (1 << 1) +#define ADP5520_LED1_EN (1 << 0) + +/* + * GPIO subdevice bits and masks + */ + +#define ADP5520_MAXGPIOS 8 + +#define ADP5520_GPIO_C3 (1 << 7) /* LED2 or GPIO7 aka C3 */ +#define ADP5520_GPIO_C2 (1 << 6) +#define ADP5520_GPIO_C1 (1 << 5) +#define ADP5520_GPIO_C0 (1 << 4) +#define ADP5520_GPIO_R3 (1 << 3) /* LED3 or GPIO3 aka R3 */ +#define ADP5520_GPIO_R2 (1 << 2) +#define ADP5520_GPIO_R1 (1 << 1) +#define ADP5520_GPIO_R0 (1 << 0) + +struct adp5520_gpio_platform_data { + unsigned gpio_start; + u8 gpio_en_mask; + u8 gpio_pullup_mask; +}; + +/* + * Keypad subdevice bits and masks + */ 
+ +#define ADP5520_MAXKEYS 16 + +#define ADP5520_COL_C3 (1 << 7) /* LED2 or GPIO7 aka C3 */ +#define ADP5520_COL_C2 (1 << 6) +#define ADP5520_COL_C1 (1 << 5) +#define ADP5520_COL_C0 (1 << 4) +#define ADP5520_ROW_R3 (1 << 3) /* LED3 or GPIO3 aka R3 */ +#define ADP5520_ROW_R2 (1 << 2) +#define ADP5520_ROW_R1 (1 << 1) +#define ADP5520_ROW_R0 (1 << 0) + +#define ADP5520_KEY(row, col) (col + row * 4) +#define ADP5520_KEYMAPSIZE ADP5520_MAXKEYS + +struct adp5520_keys_platform_data { + int rows_en_mask; /* Number of rows */ + int cols_en_mask; /* Number of columns */ + const unsigned short *keymap; /* Pointer to keymap */ + unsigned short keymapsize; /* Keymap size */ + unsigned repeat:1; /* Enable key repeat */ +}; + + +/* + * LEDs subdevice platform data + */ + +#define FLAG_ID_ADP5520_LED1_ADP5501_LED0 1 /* ADP5520 PIN ILED */ +#define FLAG_ID_ADP5520_LED2_ADP5501_LED1 2 /* ADP5520 PIN C3 */ +#define FLAG_ID_ADP5520_LED3_ADP5501_LED2 3 /* ADP5520 PIN R3 */ + +#define ADP5520_LED_DIS_BLINK (0 << ADP5520_FLAG_OFFT_SHIFT) +#define ADP5520_LED_OFFT_600ms (1 << ADP5520_FLAG_OFFT_SHIFT) +#define ADP5520_LED_OFFT_800ms (2 << ADP5520_FLAG_OFFT_SHIFT) +#define ADP5520_LED_OFFT_1200ms (3 << ADP5520_FLAG_OFFT_SHIFT) + +#define ADP5520_LED_ONT_200ms 0 +#define ADP5520_LED_ONT_600ms 1 +#define ADP5520_LED_ONT_800ms 2 +#define ADP5520_LED_ONT_1200ms 3 + +struct adp5520_leds_platform_data { + int num_leds; + struct led_info *leds; + u8 fade_in; /* Backlight Fade-In Timer */ + u8 fade_out; /* Backlight Fade-Out Timer */ + u8 led_on_time; +}; + +/* + * Backlight subdevice platform data + */ + +#define ADP5520_FADE_T_DIS 0 /* Fade Timer Disabled */ +#define ADP5520_FADE_T_300ms 1 /* 0.3 Sec */ +#define ADP5520_FADE_T_600ms 2 +#define ADP5520_FADE_T_900ms 3 +#define ADP5520_FADE_T_1200ms 4 +#define ADP5520_FADE_T_1500ms 5 +#define ADP5520_FADE_T_1800ms 6 +#define ADP5520_FADE_T_2100ms 7 +#define ADP5520_FADE_T_2400ms 8 +#define ADP5520_FADE_T_2700ms 9 +#define ADP5520_FADE_T_3000ms 10 +#define ADP5520_FADE_T_3500ms 11 +#define ADP5520_FADE_T_4000ms 12 +#define ADP5520_FADE_T_4500ms 13 +#define ADP5520_FADE_T_5000ms 14 +#define ADP5520_FADE_T_5500ms 15 /* 5.5 Sec */ + +#define ADP5520_BL_LAW_LINEAR 0 +#define ADP5520_BL_LAW_SQUARE 1 +#define ADP5520_BL_LAW_CUBIC1 2 +#define ADP5520_BL_LAW_CUBIC2 3 + +#define ADP5520_BL_AMBL_FILT_80ms 0 /* Light sensor filter time */ +#define ADP5520_BL_AMBL_FILT_160ms 1 +#define ADP5520_BL_AMBL_FILT_320ms 2 +#define ADP5520_BL_AMBL_FILT_640ms 3 +#define ADP5520_BL_AMBL_FILT_1280ms 4 +#define ADP5520_BL_AMBL_FILT_2560ms 5 +#define ADP5520_BL_AMBL_FILT_5120ms 6 +#define ADP5520_BL_AMBL_FILT_10240ms 7 /* 10.24 sec */ + + /* + * Blacklight current 0..30mA + */ +#define ADP5520_BL_CUR_mA(I) ((I * 127) / 30) + + /* + * L2 comparator current 0..1000uA + */ +#define ADP5520_L2_COMP_CURR_uA(I) ((I * 255) / 1000) + + /* + * L3 comparator current 0..127uA + */ +#define ADP5520_L3_COMP_CURR_uA(I) ((I * 255) / 127) + +struct adp5520_backlight_platform_data { + u8 fade_in; /* Backlight Fade-In Timer */ + u8 fade_out; /* Backlight Fade-Out Timer */ + u8 fade_led_law; /* fade-on/fade-off transfer characteristic */ + + u8 en_ambl_sens; /* 1 = enable ambient light sensor */ + u8 abml_filt; /* Light sensor filter time */ + u8 l1_daylight_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l1_daylight_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l2_office_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l2_office_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l3_dark_max; /* use 
BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l3_dark_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l2_trip; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1000 uA */ + u8 l2_hyst; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1000 uA */ + u8 l3_trip; /* use L3_COMP_CURR_uA(I) 0 <= I <= 127 uA */ + u8 l3_hyst; /* use L3_COMP_CURR_uA(I) 0 <= I <= 127 uA */ +}; + +/* + * MFD chip platform data + */ + +struct adp5520_platform_data { + struct adp5520_keys_platform_data *keys; + struct adp5520_gpio_platform_data *gpio; + struct adp5520_leds_platform_data *leds; + struct adp5520_backlight_platform_data *backlight; +}; + +/* + * MFD chip functions + */ + +extern int adp5520_read(struct device *dev, int reg, uint8_t *val); +extern int adp5520_write(struct device *dev, int reg, u8 val); +extern int adp5520_clr_bits(struct device *dev, int reg, uint8_t bit_mask); +extern int adp5520_set_bits(struct device *dev, int reg, uint8_t bit_mask); + +extern int adp5520_register_notifier(struct device *dev, + struct notifier_block *nb, unsigned int events); + +extern int adp5520_unregister_notifier(struct device *dev, + struct notifier_block *nb, unsigned int events); + +#endif /* __LINUX_MFD_ADP5520_H */ -- cgit v1.2.3 From e0a3389ab9cb08813bf325616249abb29c4d2302 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 12 Oct 2009 16:15:09 +0100 Subject: mfd: Split wm8350 IRQ code into a separate file In preparation for refactoring - it's over 700 lines of well-isolated code and having it in a file by itself makes things more manageable. While we're at it, make sure that we clean up the IRQ if we fail after acquiring it on init. Signed-off-by: Mark Brown Signed-off-by: Samuel Ortiz --- drivers/mfd/Makefile | 1 + drivers/mfd/wm8350-core.c | 769 +------------------------------------- drivers/mfd/wm8350-irq.c | 804 ++++++++++++++++++++++++++++++++++++++++ include/linux/mfd/wm8350/core.h | 4 +- 4 files changed, 815 insertions(+), 763 deletions(-) create mode 100644 drivers/mfd/wm8350-irq.c (limited to 'include') diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 24558574a737..75638ca8288b 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -19,6 +19,7 @@ obj-$(CONFIG_MFD_WM8400) += wm8400-core.o wm831x-objs := wm831x-core.o wm831x-irq.o wm831x-otp.o obj-$(CONFIG_MFD_WM831X) += wm831x.o wm8350-objs := wm8350-core.o wm8350-regmap.o wm8350-gpio.o +wm8350-objs += wm8350-irq.o obj-$(CONFIG_MFD_WM8350) += wm8350.o obj-$(CONFIG_MFD_WM8350_I2C) += wm8350-i2c.o diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c index ba27c9dc1ad3..242795feb90d 100644 --- a/drivers/mfd/wm8350-core.c +++ b/drivers/mfd/wm8350-core.c @@ -337,733 +337,6 @@ int wm8350_reg_unlock(struct wm8350 *wm8350) } EXPORT_SYMBOL_GPL(wm8350_reg_unlock); -static void wm8350_irq_call_handler(struct wm8350 *wm8350, int irq) -{ - mutex_lock(&wm8350->irq_mutex); - - if (wm8350->irq[irq].handler) - wm8350->irq[irq].handler(wm8350, irq, wm8350->irq[irq].data); - else { - dev_err(wm8350->dev, "irq %d nobody cared. now masked.\n", - irq); - wm8350_mask_irq(wm8350, irq); - } - - mutex_unlock(&wm8350->irq_mutex); -} - -/* - * This is a threaded IRQ handler so can access I2C/SPI. Since all - * interrupts are clear on read the IRQ line will be reasserted and - * the physical IRQ will be handled again if another interrupt is - * asserted while we run - in the normal course of events this is a - * rare occurrence so we save I2C/SPI reads.
- */ -static irqreturn_t wm8350_irq(int irq, void *data) -{ - struct wm8350 *wm8350 = data; - u16 level_one, status1, status2, comp; - - /* TODO: Use block reads to improve performance? */ - level_one = wm8350_reg_read(wm8350, WM8350_SYSTEM_INTERRUPTS) - & ~wm8350_reg_read(wm8350, WM8350_SYSTEM_INTERRUPTS_MASK); - status1 = wm8350_reg_read(wm8350, WM8350_INT_STATUS_1) - & ~wm8350_reg_read(wm8350, WM8350_INT_STATUS_1_MASK); - status2 = wm8350_reg_read(wm8350, WM8350_INT_STATUS_2) - & ~wm8350_reg_read(wm8350, WM8350_INT_STATUS_2_MASK); - comp = wm8350_reg_read(wm8350, WM8350_COMPARATOR_INT_STATUS) - & ~wm8350_reg_read(wm8350, WM8350_COMPARATOR_INT_STATUS_MASK); - - /* over current */ - if (level_one & WM8350_OC_INT) { - u16 oc; - - oc = wm8350_reg_read(wm8350, WM8350_OVER_CURRENT_INT_STATUS); - oc &= ~wm8350_reg_read(wm8350, - WM8350_OVER_CURRENT_INT_STATUS_MASK); - - if (oc & WM8350_OC_LS_EINT) /* limit switch */ - wm8350_irq_call_handler(wm8350, WM8350_IRQ_OC_LS); - } - - /* under voltage */ - if (level_one & WM8350_UV_INT) { - u16 uv; - - uv = wm8350_reg_read(wm8350, WM8350_UNDER_VOLTAGE_INT_STATUS); - uv &= ~wm8350_reg_read(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK); - - if (uv & WM8350_UV_DC1_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_DC1); - if (uv & WM8350_UV_DC2_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_DC2); - if (uv & WM8350_UV_DC3_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_DC3); - if (uv & WM8350_UV_DC4_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_DC4); - if (uv & WM8350_UV_DC5_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_DC5); - if (uv & WM8350_UV_DC6_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_DC6); - if (uv & WM8350_UV_LDO1_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_LDO1); - if (uv & WM8350_UV_LDO2_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_LDO2); - if (uv & WM8350_UV_LDO3_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_LDO3); - if (uv & WM8350_UV_LDO4_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_LDO4); - } - - /* charger, RTC */ - if (status1) { - if (status1 & WM8350_CHG_BAT_HOT_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_CHG_BAT_HOT); - if (status1 & WM8350_CHG_BAT_COLD_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_CHG_BAT_COLD); - if (status1 & WM8350_CHG_BAT_FAIL_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_CHG_BAT_FAIL); - if (status1 & WM8350_CHG_TO_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_CHG_TO); - if (status1 & WM8350_CHG_END_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_CHG_END); - if (status1 & WM8350_CHG_START_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_CHG_START); - if (status1 & WM8350_CHG_FAST_RDY_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_CHG_FAST_RDY); - if (status1 & WM8350_CHG_VBATT_LT_3P9_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_CHG_VBATT_LT_3P9); - if (status1 & WM8350_CHG_VBATT_LT_3P1_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_CHG_VBATT_LT_3P1); - if (status1 & WM8350_CHG_VBATT_LT_2P85_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_CHG_VBATT_LT_2P85); - if (status1 & WM8350_RTC_ALM_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_RTC_ALM); - if (status1 & WM8350_RTC_SEC_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_RTC_SEC); - if (status1 & WM8350_RTC_PER_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_RTC_PER); - } - - /* current sink, system, aux adc */ - if (status2) { - if (status2 & WM8350_CS1_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_CS1); - if 
(status2 & WM8350_CS2_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_CS2); - - if (status2 & WM8350_SYS_HYST_COMP_FAIL_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_SYS_HYST_COMP_FAIL); - if (status2 & WM8350_SYS_CHIP_GT115_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_SYS_CHIP_GT115); - if (status2 & WM8350_SYS_CHIP_GT140_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_SYS_CHIP_GT140); - if (status2 & WM8350_SYS_WDOG_TO_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_SYS_WDOG_TO); - - if (status2 & WM8350_AUXADC_DATARDY_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_AUXADC_DATARDY); - if (status2 & WM8350_AUXADC_DCOMP4_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_AUXADC_DCOMP4); - if (status2 & WM8350_AUXADC_DCOMP3_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_AUXADC_DCOMP3); - if (status2 & WM8350_AUXADC_DCOMP2_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_AUXADC_DCOMP2); - if (status2 & WM8350_AUXADC_DCOMP1_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_AUXADC_DCOMP1); - - if (status2 & WM8350_USB_LIMIT_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_USB_LIMIT); - } - - /* wake, codec, ext */ - if (comp) { - if (comp & WM8350_WKUP_OFF_STATE_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_WKUP_OFF_STATE); - if (comp & WM8350_WKUP_HIB_STATE_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_WKUP_HIB_STATE); - if (comp & WM8350_WKUP_CONV_FAULT_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_WKUP_CONV_FAULT); - if (comp & WM8350_WKUP_WDOG_RST_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_WKUP_WDOG_RST); - if (comp & WM8350_WKUP_GP_PWR_ON_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_WKUP_GP_PWR_ON); - if (comp & WM8350_WKUP_ONKEY_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_WKUP_ONKEY); - if (comp & WM8350_WKUP_GP_WAKEUP_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_WKUP_GP_WAKEUP); - - if (comp & WM8350_CODEC_JCK_DET_L_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_CODEC_JCK_DET_L); - if (comp & WM8350_CODEC_JCK_DET_R_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_CODEC_JCK_DET_R); - if (comp & WM8350_CODEC_MICSCD_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_CODEC_MICSCD); - if (comp & WM8350_CODEC_MICD_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_CODEC_MICD); - - if (comp & WM8350_EXT_USB_FB_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_EXT_USB_FB); - if (comp & WM8350_EXT_WALL_FB_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_EXT_WALL_FB); - if (comp & WM8350_EXT_BAT_FB_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_EXT_BAT_FB); - } - - if (level_one & WM8350_GP_INT) { - int i; - u16 gpio; - - gpio = wm8350_reg_read(wm8350, WM8350_GPIO_INT_STATUS); - gpio &= ~wm8350_reg_read(wm8350, - WM8350_GPIO_INT_STATUS_MASK); - - for (i = 0; i < 12; i++) { - if (gpio & (1 << i)) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_GPIO(i)); - } - } - - return IRQ_HANDLED; -} - -int wm8350_register_irq(struct wm8350 *wm8350, int irq, - void (*handler) (struct wm8350 *, int, void *), - void *data) -{ - if (irq < 0 || irq > WM8350_NUM_IRQ || !handler) - return -EINVAL; - - if (wm8350->irq[irq].handler) - return -EBUSY; - - mutex_lock(&wm8350->irq_mutex); - wm8350->irq[irq].handler = handler; - wm8350->irq[irq].data = data; - mutex_unlock(&wm8350->irq_mutex); - - return 0; -} -EXPORT_SYMBOL_GPL(wm8350_register_irq); - -int wm8350_free_irq(struct wm8350 *wm8350, int irq) -{ - if (irq < 0 || irq > WM8350_NUM_IRQ) - return -EINVAL; - - mutex_lock(&wm8350->irq_mutex); - 
wm8350->irq[irq].handler = NULL; - mutex_unlock(&wm8350->irq_mutex); - return 0; -} -EXPORT_SYMBOL_GPL(wm8350_free_irq); - -int wm8350_mask_irq(struct wm8350 *wm8350, int irq) -{ - switch (irq) { - case WM8350_IRQ_CHG_BAT_HOT: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_BAT_HOT_EINT); - case WM8350_IRQ_CHG_BAT_COLD: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_BAT_COLD_EINT); - case WM8350_IRQ_CHG_BAT_FAIL: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_BAT_FAIL_EINT); - case WM8350_IRQ_CHG_TO: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_TO_EINT); - case WM8350_IRQ_CHG_END: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_END_EINT); - case WM8350_IRQ_CHG_START: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_START_EINT); - case WM8350_IRQ_CHG_FAST_RDY: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_FAST_RDY_EINT); - case WM8350_IRQ_RTC_PER: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_RTC_PER_EINT); - case WM8350_IRQ_RTC_SEC: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_RTC_SEC_EINT); - case WM8350_IRQ_RTC_ALM: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_RTC_ALM_EINT); - case WM8350_IRQ_CHG_VBATT_LT_3P9: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_VBATT_LT_3P9_EINT); - case WM8350_IRQ_CHG_VBATT_LT_3P1: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_VBATT_LT_3P1_EINT); - case WM8350_IRQ_CHG_VBATT_LT_2P85: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_VBATT_LT_2P85_EINT); - case WM8350_IRQ_CS1: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_CS1_EINT); - case WM8350_IRQ_CS2: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_CS2_EINT); - case WM8350_IRQ_USB_LIMIT: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_USB_LIMIT_EINT); - case WM8350_IRQ_AUXADC_DATARDY: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_AUXADC_DATARDY_EINT); - case WM8350_IRQ_AUXADC_DCOMP4: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_AUXADC_DCOMP4_EINT); - case WM8350_IRQ_AUXADC_DCOMP3: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_AUXADC_DCOMP3_EINT); - case WM8350_IRQ_AUXADC_DCOMP2: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_AUXADC_DCOMP2_EINT); - case WM8350_IRQ_AUXADC_DCOMP1: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_AUXADC_DCOMP1_EINT); - case WM8350_IRQ_SYS_HYST_COMP_FAIL: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_SYS_HYST_COMP_FAIL_EINT); - case WM8350_IRQ_SYS_CHIP_GT115: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_SYS_CHIP_GT115_EINT); - case WM8350_IRQ_SYS_CHIP_GT140: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_SYS_CHIP_GT140_EINT); - case WM8350_IRQ_SYS_WDOG_TO: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_SYS_WDOG_TO_EINT); - case WM8350_IRQ_UV_LDO4: - return wm8350_set_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_LDO4_EINT); - case WM8350_IRQ_UV_LDO3: - return wm8350_set_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_LDO3_EINT); - case WM8350_IRQ_UV_LDO2: - return wm8350_set_bits(wm8350, - 
WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_LDO2_EINT); - case WM8350_IRQ_UV_LDO1: - return wm8350_set_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_LDO1_EINT); - case WM8350_IRQ_UV_DC6: - return wm8350_set_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_DC6_EINT); - case WM8350_IRQ_UV_DC5: - return wm8350_set_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_DC5_EINT); - case WM8350_IRQ_UV_DC4: - return wm8350_set_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_DC4_EINT); - case WM8350_IRQ_UV_DC3: - return wm8350_set_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_DC3_EINT); - case WM8350_IRQ_UV_DC2: - return wm8350_set_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_DC2_EINT); - case WM8350_IRQ_UV_DC1: - return wm8350_set_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_DC1_EINT); - case WM8350_IRQ_OC_LS: - return wm8350_set_bits(wm8350, - WM8350_OVER_CURRENT_INT_STATUS_MASK, - WM8350_IM_OC_LS_EINT); - case WM8350_IRQ_EXT_USB_FB: - return wm8350_set_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_EXT_USB_FB_EINT); - case WM8350_IRQ_EXT_WALL_FB: - return wm8350_set_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_EXT_WALL_FB_EINT); - case WM8350_IRQ_EXT_BAT_FB: - return wm8350_set_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_EXT_BAT_FB_EINT); - case WM8350_IRQ_CODEC_JCK_DET_L: - return wm8350_set_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_CODEC_JCK_DET_L_EINT); - case WM8350_IRQ_CODEC_JCK_DET_R: - return wm8350_set_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_CODEC_JCK_DET_R_EINT); - case WM8350_IRQ_CODEC_MICSCD: - return wm8350_set_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_CODEC_MICSCD_EINT); - case WM8350_IRQ_CODEC_MICD: - return wm8350_set_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_CODEC_MICD_EINT); - case WM8350_IRQ_WKUP_OFF_STATE: - return wm8350_set_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_WKUP_OFF_STATE_EINT); - case WM8350_IRQ_WKUP_HIB_STATE: - return wm8350_set_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_WKUP_HIB_STATE_EINT); - case WM8350_IRQ_WKUP_CONV_FAULT: - return wm8350_set_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_WKUP_CONV_FAULT_EINT); - case WM8350_IRQ_WKUP_WDOG_RST: - return wm8350_set_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_WKUP_OFF_STATE_EINT); - case WM8350_IRQ_WKUP_GP_PWR_ON: - return wm8350_set_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_WKUP_GP_PWR_ON_EINT); - case WM8350_IRQ_WKUP_ONKEY: - return wm8350_set_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_WKUP_ONKEY_EINT); - case WM8350_IRQ_WKUP_GP_WAKEUP: - return wm8350_set_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_WKUP_GP_WAKEUP_EINT); - case WM8350_IRQ_GPIO(0): - return wm8350_set_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP0_EINT); - case WM8350_IRQ_GPIO(1): - return wm8350_set_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP1_EINT); - case WM8350_IRQ_GPIO(2): - return wm8350_set_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP2_EINT); - case WM8350_IRQ_GPIO(3): - return wm8350_set_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP3_EINT); - case WM8350_IRQ_GPIO(4): - return wm8350_set_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP4_EINT); - case 
WM8350_IRQ_GPIO(5): - return wm8350_set_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP5_EINT); - case WM8350_IRQ_GPIO(6): - return wm8350_set_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP6_EINT); - case WM8350_IRQ_GPIO(7): - return wm8350_set_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP7_EINT); - case WM8350_IRQ_GPIO(8): - return wm8350_set_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP8_EINT); - case WM8350_IRQ_GPIO(9): - return wm8350_set_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP9_EINT); - case WM8350_IRQ_GPIO(10): - return wm8350_set_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP10_EINT); - case WM8350_IRQ_GPIO(11): - return wm8350_set_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP11_EINT); - case WM8350_IRQ_GPIO(12): - return wm8350_set_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP12_EINT); - default: - dev_warn(wm8350->dev, "Attempting to mask unknown IRQ %d\n", - irq); - return -EINVAL; - } - return 0; -} -EXPORT_SYMBOL_GPL(wm8350_mask_irq); - -int wm8350_unmask_irq(struct wm8350 *wm8350, int irq) -{ - switch (irq) { - case WM8350_IRQ_CHG_BAT_HOT: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_BAT_HOT_EINT); - case WM8350_IRQ_CHG_BAT_COLD: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_BAT_COLD_EINT); - case WM8350_IRQ_CHG_BAT_FAIL: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_BAT_FAIL_EINT); - case WM8350_IRQ_CHG_TO: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_TO_EINT); - case WM8350_IRQ_CHG_END: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_END_EINT); - case WM8350_IRQ_CHG_START: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_START_EINT); - case WM8350_IRQ_CHG_FAST_RDY: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_FAST_RDY_EINT); - case WM8350_IRQ_RTC_PER: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_RTC_PER_EINT); - case WM8350_IRQ_RTC_SEC: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_RTC_SEC_EINT); - case WM8350_IRQ_RTC_ALM: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_RTC_ALM_EINT); - case WM8350_IRQ_CHG_VBATT_LT_3P9: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_VBATT_LT_3P9_EINT); - case WM8350_IRQ_CHG_VBATT_LT_3P1: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_VBATT_LT_3P1_EINT); - case WM8350_IRQ_CHG_VBATT_LT_2P85: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_VBATT_LT_2P85_EINT); - case WM8350_IRQ_CS1: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_CS1_EINT); - case WM8350_IRQ_CS2: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_CS2_EINT); - case WM8350_IRQ_USB_LIMIT: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_USB_LIMIT_EINT); - case WM8350_IRQ_AUXADC_DATARDY: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_AUXADC_DATARDY_EINT); - case WM8350_IRQ_AUXADC_DCOMP4: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_AUXADC_DCOMP4_EINT); - case WM8350_IRQ_AUXADC_DCOMP3: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_AUXADC_DCOMP3_EINT); - case WM8350_IRQ_AUXADC_DCOMP2: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, - 
WM8350_IM_AUXADC_DCOMP2_EINT); - case WM8350_IRQ_AUXADC_DCOMP1: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_AUXADC_DCOMP1_EINT); - case WM8350_IRQ_SYS_HYST_COMP_FAIL: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_SYS_HYST_COMP_FAIL_EINT); - case WM8350_IRQ_SYS_CHIP_GT115: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_SYS_CHIP_GT115_EINT); - case WM8350_IRQ_SYS_CHIP_GT140: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_SYS_CHIP_GT140_EINT); - case WM8350_IRQ_SYS_WDOG_TO: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_SYS_WDOG_TO_EINT); - case WM8350_IRQ_UV_LDO4: - return wm8350_clear_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_LDO4_EINT); - case WM8350_IRQ_UV_LDO3: - return wm8350_clear_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_LDO3_EINT); - case WM8350_IRQ_UV_LDO2: - return wm8350_clear_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_LDO2_EINT); - case WM8350_IRQ_UV_LDO1: - return wm8350_clear_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_LDO1_EINT); - case WM8350_IRQ_UV_DC6: - return wm8350_clear_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_DC6_EINT); - case WM8350_IRQ_UV_DC5: - return wm8350_clear_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_DC5_EINT); - case WM8350_IRQ_UV_DC4: - return wm8350_clear_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_DC4_EINT); - case WM8350_IRQ_UV_DC3: - return wm8350_clear_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_DC3_EINT); - case WM8350_IRQ_UV_DC2: - return wm8350_clear_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_DC2_EINT); - case WM8350_IRQ_UV_DC1: - return wm8350_clear_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_DC1_EINT); - case WM8350_IRQ_OC_LS: - return wm8350_clear_bits(wm8350, - WM8350_OVER_CURRENT_INT_STATUS_MASK, - WM8350_IM_OC_LS_EINT); - case WM8350_IRQ_EXT_USB_FB: - return wm8350_clear_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_EXT_USB_FB_EINT); - case WM8350_IRQ_EXT_WALL_FB: - return wm8350_clear_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_EXT_WALL_FB_EINT); - case WM8350_IRQ_EXT_BAT_FB: - return wm8350_clear_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_EXT_BAT_FB_EINT); - case WM8350_IRQ_CODEC_JCK_DET_L: - return wm8350_clear_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_CODEC_JCK_DET_L_EINT); - case WM8350_IRQ_CODEC_JCK_DET_R: - return wm8350_clear_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_CODEC_JCK_DET_R_EINT); - case WM8350_IRQ_CODEC_MICSCD: - return wm8350_clear_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_CODEC_MICSCD_EINT); - case WM8350_IRQ_CODEC_MICD: - return wm8350_clear_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_CODEC_MICD_EINT); - case WM8350_IRQ_WKUP_OFF_STATE: - return wm8350_clear_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_WKUP_OFF_STATE_EINT); - case WM8350_IRQ_WKUP_HIB_STATE: - return wm8350_clear_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_WKUP_HIB_STATE_EINT); - case WM8350_IRQ_WKUP_CONV_FAULT: - return wm8350_clear_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_WKUP_CONV_FAULT_EINT); - case WM8350_IRQ_WKUP_WDOG_RST: - return wm8350_clear_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, 
- WM8350_IM_WKUP_OFF_STATE_EINT); - case WM8350_IRQ_WKUP_GP_PWR_ON: - return wm8350_clear_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_WKUP_GP_PWR_ON_EINT); - case WM8350_IRQ_WKUP_ONKEY: - return wm8350_clear_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_WKUP_ONKEY_EINT); - case WM8350_IRQ_WKUP_GP_WAKEUP: - return wm8350_clear_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_WKUP_GP_WAKEUP_EINT); - case WM8350_IRQ_GPIO(0): - return wm8350_clear_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP0_EINT); - case WM8350_IRQ_GPIO(1): - return wm8350_clear_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP1_EINT); - case WM8350_IRQ_GPIO(2): - return wm8350_clear_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP2_EINT); - case WM8350_IRQ_GPIO(3): - return wm8350_clear_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP3_EINT); - case WM8350_IRQ_GPIO(4): - return wm8350_clear_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP4_EINT); - case WM8350_IRQ_GPIO(5): - return wm8350_clear_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP5_EINT); - case WM8350_IRQ_GPIO(6): - return wm8350_clear_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP6_EINT); - case WM8350_IRQ_GPIO(7): - return wm8350_clear_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP7_EINT); - case WM8350_IRQ_GPIO(8): - return wm8350_clear_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP8_EINT); - case WM8350_IRQ_GPIO(9): - return wm8350_clear_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP9_EINT); - case WM8350_IRQ_GPIO(10): - return wm8350_clear_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP10_EINT); - case WM8350_IRQ_GPIO(11): - return wm8350_clear_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP11_EINT); - case WM8350_IRQ_GPIO(12): - return wm8350_clear_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP12_EINT); - default: - dev_warn(wm8350->dev, "Attempting to unmask unknown IRQ %d\n", - irq); - return -EINVAL; - } - return 0; -} -EXPORT_SYMBOL_GPL(wm8350_unmask_irq); - int wm8350_read_auxadc(struct wm8350 *wm8350, int channel, int scale, int vref) { u16 reg, result = 0; @@ -1409,49 +682,18 @@ int wm8350_device_init(struct wm8350 *wm8350, int irq, return ret; } - wm8350_reg_write(wm8350, WM8350_SYSTEM_INTERRUPTS_MASK, 0xFFFF); - wm8350_reg_write(wm8350, WM8350_INT_STATUS_1_MASK, 0xFFFF); - wm8350_reg_write(wm8350, WM8350_INT_STATUS_2_MASK, 0xFFFF); - wm8350_reg_write(wm8350, WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, 0xFFFF); - wm8350_reg_write(wm8350, WM8350_GPIO_INT_STATUS_MASK, 0xFFFF); - wm8350_reg_write(wm8350, WM8350_COMPARATOR_INT_STATUS_MASK, 0xFFFF); - mutex_init(&wm8350->auxadc_mutex); - mutex_init(&wm8350->irq_mutex); - if (irq) { - int flags = IRQF_ONESHOT; - - if (pdata && pdata->irq_high) { - flags |= IRQF_TRIGGER_HIGH; - - wm8350_set_bits(wm8350, WM8350_SYSTEM_CONTROL_1, - WM8350_IRQ_POL); - } else { - flags |= IRQF_TRIGGER_LOW; - - wm8350_clear_bits(wm8350, WM8350_SYSTEM_CONTROL_1, - WM8350_IRQ_POL); - } - ret = request_threaded_irq(irq, NULL, wm8350_irq, flags, - "wm8350", wm8350); - if (ret != 0) { - dev_err(wm8350->dev, "Failed to request IRQ: %d\n", - ret); - goto err; - } - } else { - dev_err(wm8350->dev, "No IRQ configured\n"); + ret = wm8350_irq_init(wm8350, irq, pdata); + if (ret < 0) goto err; - } - wm8350->chip_irq = irq; if (pdata && pdata->init) { ret = pdata->init(wm8350); if (ret != 0) { dev_err(wm8350->dev, "Platform init() failed: %d\n", ret); - 
goto err; + goto err_irq; } } @@ -1470,6 +712,8 @@ int wm8350_device_init(struct wm8350 *wm8350, int irq, return 0; +err_irq: + wm8350_irq_exit(wm8350); err: kfree(wm8350->reg_cache); return ret; @@ -1493,7 +737,8 @@ void wm8350_device_exit(struct wm8350 *wm8350) platform_device_unregister(wm8350->gpio.pdev); platform_device_unregister(wm8350->codec.pdev); - free_irq(wm8350->chip_irq, wm8350); + wm8350_irq_exit(wm8350); + kfree(wm8350->reg_cache); } EXPORT_SYMBOL_GPL(wm8350_device_exit); diff --git a/drivers/mfd/wm8350-irq.c b/drivers/mfd/wm8350-irq.c new file mode 100644 index 000000000000..a432e2b65c48 --- /dev/null +++ b/drivers/mfd/wm8350-irq.c @@ -0,0 +1,804 @@ +/* + * wm8350-irq.c -- IRQ support for Wolfson WM8350 + * + * Copyright 2007, 2008, 2009 Wolfson Microelectronics PLC. + * + * Author: Liam Girdwood, Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +static void wm8350_irq_call_handler(struct wm8350 *wm8350, int irq) +{ + mutex_lock(&wm8350->irq_mutex); + + if (wm8350->irq[irq].handler) + wm8350->irq[irq].handler(wm8350, irq, wm8350->irq[irq].data); + else { + dev_err(wm8350->dev, "irq %d nobody cared. now masked.\n", + irq); + wm8350_mask_irq(wm8350, irq); + } + + mutex_unlock(&wm8350->irq_mutex); +} + +/* + * This is a threaded IRQ handler so can access I2C/SPI. Since all + * interrupts are clear on read the IRQ line will be reasserted and + * the physical IRQ will be handled again if another interrupt is + * asserted while we run - in the normal course of events this is a + * rare occurrence so we save I2C/SPI reads. + */ +static irqreturn_t wm8350_irq(int irq, void *data) +{ + struct wm8350 *wm8350 = data; + u16 level_one, status1, status2, comp; + + /* TODO: Use block reads to improve performance? 
*/ + level_one = wm8350_reg_read(wm8350, WM8350_SYSTEM_INTERRUPTS) + & ~wm8350_reg_read(wm8350, WM8350_SYSTEM_INTERRUPTS_MASK); + status1 = wm8350_reg_read(wm8350, WM8350_INT_STATUS_1) + & ~wm8350_reg_read(wm8350, WM8350_INT_STATUS_1_MASK); + status2 = wm8350_reg_read(wm8350, WM8350_INT_STATUS_2) + & ~wm8350_reg_read(wm8350, WM8350_INT_STATUS_2_MASK); + comp = wm8350_reg_read(wm8350, WM8350_COMPARATOR_INT_STATUS) + & ~wm8350_reg_read(wm8350, WM8350_COMPARATOR_INT_STATUS_MASK); + + /* over current */ + if (level_one & WM8350_OC_INT) { + u16 oc; + + oc = wm8350_reg_read(wm8350, WM8350_OVER_CURRENT_INT_STATUS); + oc &= ~wm8350_reg_read(wm8350, + WM8350_OVER_CURRENT_INT_STATUS_MASK); + + if (oc & WM8350_OC_LS_EINT) /* limit switch */ + wm8350_irq_call_handler(wm8350, WM8350_IRQ_OC_LS); + } + + /* under voltage */ + if (level_one & WM8350_UV_INT) { + u16 uv; + + uv = wm8350_reg_read(wm8350, WM8350_UNDER_VOLTAGE_INT_STATUS); + uv &= ~wm8350_reg_read(wm8350, + WM8350_UNDER_VOLTAGE_INT_STATUS_MASK); + + if (uv & WM8350_UV_DC1_EINT) + wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_DC1); + if (uv & WM8350_UV_DC2_EINT) + wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_DC2); + if (uv & WM8350_UV_DC3_EINT) + wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_DC3); + if (uv & WM8350_UV_DC4_EINT) + wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_DC4); + if (uv & WM8350_UV_DC5_EINT) + wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_DC5); + if (uv & WM8350_UV_DC6_EINT) + wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_DC6); + if (uv & WM8350_UV_LDO1_EINT) + wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_LDO1); + if (uv & WM8350_UV_LDO2_EINT) + wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_LDO2); + if (uv & WM8350_UV_LDO3_EINT) + wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_LDO3); + if (uv & WM8350_UV_LDO4_EINT) + wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_LDO4); + } + + /* charger, RTC */ + if (status1) { + if (status1 & WM8350_CHG_BAT_HOT_EINT) + wm8350_irq_call_handler(wm8350, + WM8350_IRQ_CHG_BAT_HOT); + if (status1 & WM8350_CHG_BAT_COLD_EINT) + wm8350_irq_call_handler(wm8350, + WM8350_IRQ_CHG_BAT_COLD); + if (status1 & WM8350_CHG_BAT_FAIL_EINT) + wm8350_irq_call_handler(wm8350, + WM8350_IRQ_CHG_BAT_FAIL); + if (status1 & WM8350_CHG_TO_EINT) + wm8350_irq_call_handler(wm8350, WM8350_IRQ_CHG_TO); + if (status1 & WM8350_CHG_END_EINT) + wm8350_irq_call_handler(wm8350, WM8350_IRQ_CHG_END); + if (status1 & WM8350_CHG_START_EINT) + wm8350_irq_call_handler(wm8350, WM8350_IRQ_CHG_START); + if (status1 & WM8350_CHG_FAST_RDY_EINT) + wm8350_irq_call_handler(wm8350, + WM8350_IRQ_CHG_FAST_RDY); + if (status1 & WM8350_CHG_VBATT_LT_3P9_EINT) + wm8350_irq_call_handler(wm8350, + WM8350_IRQ_CHG_VBATT_LT_3P9); + if (status1 & WM8350_CHG_VBATT_LT_3P1_EINT) + wm8350_irq_call_handler(wm8350, + WM8350_IRQ_CHG_VBATT_LT_3P1); + if (status1 & WM8350_CHG_VBATT_LT_2P85_EINT) + wm8350_irq_call_handler(wm8350, + WM8350_IRQ_CHG_VBATT_LT_2P85); + if (status1 & WM8350_RTC_ALM_EINT) + wm8350_irq_call_handler(wm8350, WM8350_IRQ_RTC_ALM); + if (status1 & WM8350_RTC_SEC_EINT) + wm8350_irq_call_handler(wm8350, WM8350_IRQ_RTC_SEC); + if (status1 & WM8350_RTC_PER_EINT) + wm8350_irq_call_handler(wm8350, WM8350_IRQ_RTC_PER); + } + + /* current sink, system, aux adc */ + if (status2) { + if (status2 & WM8350_CS1_EINT) + wm8350_irq_call_handler(wm8350, WM8350_IRQ_CS1); + if (status2 & WM8350_CS2_EINT) + wm8350_irq_call_handler(wm8350, WM8350_IRQ_CS2); + + if (status2 & WM8350_SYS_HYST_COMP_FAIL_EINT) + wm8350_irq_call_handler(wm8350, + 
WM8350_IRQ_SYS_HYST_COMP_FAIL); + if (status2 & WM8350_SYS_CHIP_GT115_EINT) + wm8350_irq_call_handler(wm8350, + WM8350_IRQ_SYS_CHIP_GT115); + if (status2 & WM8350_SYS_CHIP_GT140_EINT) + wm8350_irq_call_handler(wm8350, + WM8350_IRQ_SYS_CHIP_GT140); + if (status2 & WM8350_SYS_WDOG_TO_EINT) + wm8350_irq_call_handler(wm8350, + WM8350_IRQ_SYS_WDOG_TO); + + if (status2 & WM8350_AUXADC_DATARDY_EINT) + wm8350_irq_call_handler(wm8350, + WM8350_IRQ_AUXADC_DATARDY); + if (status2 & WM8350_AUXADC_DCOMP4_EINT) + wm8350_irq_call_handler(wm8350, + WM8350_IRQ_AUXADC_DCOMP4); + if (status2 & WM8350_AUXADC_DCOMP3_EINT) + wm8350_irq_call_handler(wm8350, + WM8350_IRQ_AUXADC_DCOMP3); + if (status2 & WM8350_AUXADC_DCOMP2_EINT) + wm8350_irq_call_handler(wm8350, + WM8350_IRQ_AUXADC_DCOMP2); + if (status2 & WM8350_AUXADC_DCOMP1_EINT) + wm8350_irq_call_handler(wm8350, + WM8350_IRQ_AUXADC_DCOMP1); + + if (status2 & WM8350_USB_LIMIT_EINT) + wm8350_irq_call_handler(wm8350, WM8350_IRQ_USB_LIMIT); + } + + /* wake, codec, ext */ + if (comp) { + if (comp & WM8350_WKUP_OFF_STATE_EINT) + wm8350_irq_call_handler(wm8350, + WM8350_IRQ_WKUP_OFF_STATE); + if (comp & WM8350_WKUP_HIB_STATE_EINT) + wm8350_irq_call_handler(wm8350, + WM8350_IRQ_WKUP_HIB_STATE); + if (comp & WM8350_WKUP_CONV_FAULT_EINT) + wm8350_irq_call_handler(wm8350, + WM8350_IRQ_WKUP_CONV_FAULT); + if (comp & WM8350_WKUP_WDOG_RST_EINT) + wm8350_irq_call_handler(wm8350, + WM8350_IRQ_WKUP_WDOG_RST); + if (comp & WM8350_WKUP_GP_PWR_ON_EINT) + wm8350_irq_call_handler(wm8350, + WM8350_IRQ_WKUP_GP_PWR_ON); + if (comp & WM8350_WKUP_ONKEY_EINT) + wm8350_irq_call_handler(wm8350, WM8350_IRQ_WKUP_ONKEY); + if (comp & WM8350_WKUP_GP_WAKEUP_EINT) + wm8350_irq_call_handler(wm8350, + WM8350_IRQ_WKUP_GP_WAKEUP); + + if (comp & WM8350_CODEC_JCK_DET_L_EINT) + wm8350_irq_call_handler(wm8350, + WM8350_IRQ_CODEC_JCK_DET_L); + if (comp & WM8350_CODEC_JCK_DET_R_EINT) + wm8350_irq_call_handler(wm8350, + WM8350_IRQ_CODEC_JCK_DET_R); + if (comp & WM8350_CODEC_MICSCD_EINT) + wm8350_irq_call_handler(wm8350, + WM8350_IRQ_CODEC_MICSCD); + if (comp & WM8350_CODEC_MICD_EINT) + wm8350_irq_call_handler(wm8350, WM8350_IRQ_CODEC_MICD); + + if (comp & WM8350_EXT_USB_FB_EINT) + wm8350_irq_call_handler(wm8350, WM8350_IRQ_EXT_USB_FB); + if (comp & WM8350_EXT_WALL_FB_EINT) + wm8350_irq_call_handler(wm8350, + WM8350_IRQ_EXT_WALL_FB); + if (comp & WM8350_EXT_BAT_FB_EINT) + wm8350_irq_call_handler(wm8350, WM8350_IRQ_EXT_BAT_FB); + } + + if (level_one & WM8350_GP_INT) { + int i; + u16 gpio; + + gpio = wm8350_reg_read(wm8350, WM8350_GPIO_INT_STATUS); + gpio &= ~wm8350_reg_read(wm8350, + WM8350_GPIO_INT_STATUS_MASK); + + for (i = 0; i < 12; i++) { + if (gpio & (1 << i)) + wm8350_irq_call_handler(wm8350, + WM8350_IRQ_GPIO(i)); + } + } + + return IRQ_HANDLED; +} + +int wm8350_register_irq(struct wm8350 *wm8350, int irq, + void (*handler) (struct wm8350 *, int, void *), + void *data) +{ + if (irq < 0 || irq > WM8350_NUM_IRQ || !handler) + return -EINVAL; + + if (wm8350->irq[irq].handler) + return -EBUSY; + + mutex_lock(&wm8350->irq_mutex); + wm8350->irq[irq].handler = handler; + wm8350->irq[irq].data = data; + mutex_unlock(&wm8350->irq_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(wm8350_register_irq); + +int wm8350_free_irq(struct wm8350 *wm8350, int irq) +{ + if (irq < 0 || irq > WM8350_NUM_IRQ) + return -EINVAL; + + mutex_lock(&wm8350->irq_mutex); + wm8350->irq[irq].handler = NULL; + mutex_unlock(&wm8350->irq_mutex); + return 0; +} +EXPORT_SYMBOL_GPL(wm8350_free_irq); + +int wm8350_mask_irq(struct wm8350 
*wm8350, int irq) +{ + switch (irq) { + case WM8350_IRQ_CHG_BAT_HOT: + return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, + WM8350_IM_CHG_BAT_HOT_EINT); + case WM8350_IRQ_CHG_BAT_COLD: + return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, + WM8350_IM_CHG_BAT_COLD_EINT); + case WM8350_IRQ_CHG_BAT_FAIL: + return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, + WM8350_IM_CHG_BAT_FAIL_EINT); + case WM8350_IRQ_CHG_TO: + return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, + WM8350_IM_CHG_TO_EINT); + case WM8350_IRQ_CHG_END: + return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, + WM8350_IM_CHG_END_EINT); + case WM8350_IRQ_CHG_START: + return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, + WM8350_IM_CHG_START_EINT); + case WM8350_IRQ_CHG_FAST_RDY: + return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, + WM8350_IM_CHG_FAST_RDY_EINT); + case WM8350_IRQ_RTC_PER: + return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, + WM8350_IM_RTC_PER_EINT); + case WM8350_IRQ_RTC_SEC: + return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, + WM8350_IM_RTC_SEC_EINT); + case WM8350_IRQ_RTC_ALM: + return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, + WM8350_IM_RTC_ALM_EINT); + case WM8350_IRQ_CHG_VBATT_LT_3P9: + return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, + WM8350_IM_CHG_VBATT_LT_3P9_EINT); + case WM8350_IRQ_CHG_VBATT_LT_3P1: + return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, + WM8350_IM_CHG_VBATT_LT_3P1_EINT); + case WM8350_IRQ_CHG_VBATT_LT_2P85: + return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, + WM8350_IM_CHG_VBATT_LT_2P85_EINT); + case WM8350_IRQ_CS1: + return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, + WM8350_IM_CS1_EINT); + case WM8350_IRQ_CS2: + return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, + WM8350_IM_CS2_EINT); + case WM8350_IRQ_USB_LIMIT: + return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, + WM8350_IM_USB_LIMIT_EINT); + case WM8350_IRQ_AUXADC_DATARDY: + return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, + WM8350_IM_AUXADC_DATARDY_EINT); + case WM8350_IRQ_AUXADC_DCOMP4: + return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, + WM8350_IM_AUXADC_DCOMP4_EINT); + case WM8350_IRQ_AUXADC_DCOMP3: + return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, + WM8350_IM_AUXADC_DCOMP3_EINT); + case WM8350_IRQ_AUXADC_DCOMP2: + return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, + WM8350_IM_AUXADC_DCOMP2_EINT); + case WM8350_IRQ_AUXADC_DCOMP1: + return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, + WM8350_IM_AUXADC_DCOMP1_EINT); + case WM8350_IRQ_SYS_HYST_COMP_FAIL: + return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, + WM8350_IM_SYS_HYST_COMP_FAIL_EINT); + case WM8350_IRQ_SYS_CHIP_GT115: + return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, + WM8350_IM_SYS_CHIP_GT115_EINT); + case WM8350_IRQ_SYS_CHIP_GT140: + return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, + WM8350_IM_SYS_CHIP_GT140_EINT); + case WM8350_IRQ_SYS_WDOG_TO: + return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, + WM8350_IM_SYS_WDOG_TO_EINT); + case WM8350_IRQ_UV_LDO4: + return wm8350_set_bits(wm8350, + WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, + WM8350_IM_UV_LDO4_EINT); + case WM8350_IRQ_UV_LDO3: + return wm8350_set_bits(wm8350, + WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, + WM8350_IM_UV_LDO3_EINT); + case WM8350_IRQ_UV_LDO2: + return wm8350_set_bits(wm8350, + WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, + WM8350_IM_UV_LDO2_EINT); + case WM8350_IRQ_UV_LDO1: + return wm8350_set_bits(wm8350, + WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, + 
WM8350_IM_UV_LDO1_EINT); + case WM8350_IRQ_UV_DC6: + return wm8350_set_bits(wm8350, + WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, + WM8350_IM_UV_DC6_EINT); + case WM8350_IRQ_UV_DC5: + return wm8350_set_bits(wm8350, + WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, + WM8350_IM_UV_DC5_EINT); + case WM8350_IRQ_UV_DC4: + return wm8350_set_bits(wm8350, + WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, + WM8350_IM_UV_DC4_EINT); + case WM8350_IRQ_UV_DC3: + return wm8350_set_bits(wm8350, + WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, + WM8350_IM_UV_DC3_EINT); + case WM8350_IRQ_UV_DC2: + return wm8350_set_bits(wm8350, + WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, + WM8350_IM_UV_DC2_EINT); + case WM8350_IRQ_UV_DC1: + return wm8350_set_bits(wm8350, + WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, + WM8350_IM_UV_DC1_EINT); + case WM8350_IRQ_OC_LS: + return wm8350_set_bits(wm8350, + WM8350_OVER_CURRENT_INT_STATUS_MASK, + WM8350_IM_OC_LS_EINT); + case WM8350_IRQ_EXT_USB_FB: + return wm8350_set_bits(wm8350, + WM8350_COMPARATOR_INT_STATUS_MASK, + WM8350_IM_EXT_USB_FB_EINT); + case WM8350_IRQ_EXT_WALL_FB: + return wm8350_set_bits(wm8350, + WM8350_COMPARATOR_INT_STATUS_MASK, + WM8350_IM_EXT_WALL_FB_EINT); + case WM8350_IRQ_EXT_BAT_FB: + return wm8350_set_bits(wm8350, + WM8350_COMPARATOR_INT_STATUS_MASK, + WM8350_IM_EXT_BAT_FB_EINT); + case WM8350_IRQ_CODEC_JCK_DET_L: + return wm8350_set_bits(wm8350, + WM8350_COMPARATOR_INT_STATUS_MASK, + WM8350_IM_CODEC_JCK_DET_L_EINT); + case WM8350_IRQ_CODEC_JCK_DET_R: + return wm8350_set_bits(wm8350, + WM8350_COMPARATOR_INT_STATUS_MASK, + WM8350_IM_CODEC_JCK_DET_R_EINT); + case WM8350_IRQ_CODEC_MICSCD: + return wm8350_set_bits(wm8350, + WM8350_COMPARATOR_INT_STATUS_MASK, + WM8350_IM_CODEC_MICSCD_EINT); + case WM8350_IRQ_CODEC_MICD: + return wm8350_set_bits(wm8350, + WM8350_COMPARATOR_INT_STATUS_MASK, + WM8350_IM_CODEC_MICD_EINT); + case WM8350_IRQ_WKUP_OFF_STATE: + return wm8350_set_bits(wm8350, + WM8350_COMPARATOR_INT_STATUS_MASK, + WM8350_IM_WKUP_OFF_STATE_EINT); + case WM8350_IRQ_WKUP_HIB_STATE: + return wm8350_set_bits(wm8350, + WM8350_COMPARATOR_INT_STATUS_MASK, + WM8350_IM_WKUP_HIB_STATE_EINT); + case WM8350_IRQ_WKUP_CONV_FAULT: + return wm8350_set_bits(wm8350, + WM8350_COMPARATOR_INT_STATUS_MASK, + WM8350_IM_WKUP_CONV_FAULT_EINT); + case WM8350_IRQ_WKUP_WDOG_RST: + return wm8350_set_bits(wm8350, + WM8350_COMPARATOR_INT_STATUS_MASK, + WM8350_IM_WKUP_OFF_STATE_EINT); + case WM8350_IRQ_WKUP_GP_PWR_ON: + return wm8350_set_bits(wm8350, + WM8350_COMPARATOR_INT_STATUS_MASK, + WM8350_IM_WKUP_GP_PWR_ON_EINT); + case WM8350_IRQ_WKUP_ONKEY: + return wm8350_set_bits(wm8350, + WM8350_COMPARATOR_INT_STATUS_MASK, + WM8350_IM_WKUP_ONKEY_EINT); + case WM8350_IRQ_WKUP_GP_WAKEUP: + return wm8350_set_bits(wm8350, + WM8350_COMPARATOR_INT_STATUS_MASK, + WM8350_IM_WKUP_GP_WAKEUP_EINT); + case WM8350_IRQ_GPIO(0): + return wm8350_set_bits(wm8350, + WM8350_GPIO_INT_STATUS_MASK, + WM8350_IM_GP0_EINT); + case WM8350_IRQ_GPIO(1): + return wm8350_set_bits(wm8350, + WM8350_GPIO_INT_STATUS_MASK, + WM8350_IM_GP1_EINT); + case WM8350_IRQ_GPIO(2): + return wm8350_set_bits(wm8350, + WM8350_GPIO_INT_STATUS_MASK, + WM8350_IM_GP2_EINT); + case WM8350_IRQ_GPIO(3): + return wm8350_set_bits(wm8350, + WM8350_GPIO_INT_STATUS_MASK, + WM8350_IM_GP3_EINT); + case WM8350_IRQ_GPIO(4): + return wm8350_set_bits(wm8350, + WM8350_GPIO_INT_STATUS_MASK, + WM8350_IM_GP4_EINT); + case WM8350_IRQ_GPIO(5): + return wm8350_set_bits(wm8350, + WM8350_GPIO_INT_STATUS_MASK, + WM8350_IM_GP5_EINT); + case WM8350_IRQ_GPIO(6): + return wm8350_set_bits(wm8350, + 
WM8350_GPIO_INT_STATUS_MASK, + WM8350_IM_GP6_EINT); + case WM8350_IRQ_GPIO(7): + return wm8350_set_bits(wm8350, + WM8350_GPIO_INT_STATUS_MASK, + WM8350_IM_GP7_EINT); + case WM8350_IRQ_GPIO(8): + return wm8350_set_bits(wm8350, + WM8350_GPIO_INT_STATUS_MASK, + WM8350_IM_GP8_EINT); + case WM8350_IRQ_GPIO(9): + return wm8350_set_bits(wm8350, + WM8350_GPIO_INT_STATUS_MASK, + WM8350_IM_GP9_EINT); + case WM8350_IRQ_GPIO(10): + return wm8350_set_bits(wm8350, + WM8350_GPIO_INT_STATUS_MASK, + WM8350_IM_GP10_EINT); + case WM8350_IRQ_GPIO(11): + return wm8350_set_bits(wm8350, + WM8350_GPIO_INT_STATUS_MASK, + WM8350_IM_GP11_EINT); + case WM8350_IRQ_GPIO(12): + return wm8350_set_bits(wm8350, + WM8350_GPIO_INT_STATUS_MASK, + WM8350_IM_GP12_EINT); + default: + dev_warn(wm8350->dev, "Attempting to mask unknown IRQ %d\n", + irq); + return -EINVAL; + } + return 0; +} +EXPORT_SYMBOL_GPL(wm8350_mask_irq); + +int wm8350_unmask_irq(struct wm8350 *wm8350, int irq) +{ + switch (irq) { + case WM8350_IRQ_CHG_BAT_HOT: + return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, + WM8350_IM_CHG_BAT_HOT_EINT); + case WM8350_IRQ_CHG_BAT_COLD: + return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, + WM8350_IM_CHG_BAT_COLD_EINT); + case WM8350_IRQ_CHG_BAT_FAIL: + return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, + WM8350_IM_CHG_BAT_FAIL_EINT); + case WM8350_IRQ_CHG_TO: + return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, + WM8350_IM_CHG_TO_EINT); + case WM8350_IRQ_CHG_END: + return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, + WM8350_IM_CHG_END_EINT); + case WM8350_IRQ_CHG_START: + return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, + WM8350_IM_CHG_START_EINT); + case WM8350_IRQ_CHG_FAST_RDY: + return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, + WM8350_IM_CHG_FAST_RDY_EINT); + case WM8350_IRQ_RTC_PER: + return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, + WM8350_IM_RTC_PER_EINT); + case WM8350_IRQ_RTC_SEC: + return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, + WM8350_IM_RTC_SEC_EINT); + case WM8350_IRQ_RTC_ALM: + return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, + WM8350_IM_RTC_ALM_EINT); + case WM8350_IRQ_CHG_VBATT_LT_3P9: + return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, + WM8350_IM_CHG_VBATT_LT_3P9_EINT); + case WM8350_IRQ_CHG_VBATT_LT_3P1: + return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, + WM8350_IM_CHG_VBATT_LT_3P1_EINT); + case WM8350_IRQ_CHG_VBATT_LT_2P85: + return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, + WM8350_IM_CHG_VBATT_LT_2P85_EINT); + case WM8350_IRQ_CS1: + return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, + WM8350_IM_CS1_EINT); + case WM8350_IRQ_CS2: + return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, + WM8350_IM_CS2_EINT); + case WM8350_IRQ_USB_LIMIT: + return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, + WM8350_IM_USB_LIMIT_EINT); + case WM8350_IRQ_AUXADC_DATARDY: + return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, + WM8350_IM_AUXADC_DATARDY_EINT); + case WM8350_IRQ_AUXADC_DCOMP4: + return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, + WM8350_IM_AUXADC_DCOMP4_EINT); + case WM8350_IRQ_AUXADC_DCOMP3: + return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, + WM8350_IM_AUXADC_DCOMP3_EINT); + case WM8350_IRQ_AUXADC_DCOMP2: + return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, + WM8350_IM_AUXADC_DCOMP2_EINT); + case WM8350_IRQ_AUXADC_DCOMP1: + return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, + WM8350_IM_AUXADC_DCOMP1_EINT); + case 
WM8350_IRQ_SYS_HYST_COMP_FAIL: + return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, + WM8350_IM_SYS_HYST_COMP_FAIL_EINT); + case WM8350_IRQ_SYS_CHIP_GT115: + return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, + WM8350_IM_SYS_CHIP_GT115_EINT); + case WM8350_IRQ_SYS_CHIP_GT140: + return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, + WM8350_IM_SYS_CHIP_GT140_EINT); + case WM8350_IRQ_SYS_WDOG_TO: + return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, + WM8350_IM_SYS_WDOG_TO_EINT); + case WM8350_IRQ_UV_LDO4: + return wm8350_clear_bits(wm8350, + WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, + WM8350_IM_UV_LDO4_EINT); + case WM8350_IRQ_UV_LDO3: + return wm8350_clear_bits(wm8350, + WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, + WM8350_IM_UV_LDO3_EINT); + case WM8350_IRQ_UV_LDO2: + return wm8350_clear_bits(wm8350, + WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, + WM8350_IM_UV_LDO2_EINT); + case WM8350_IRQ_UV_LDO1: + return wm8350_clear_bits(wm8350, + WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, + WM8350_IM_UV_LDO1_EINT); + case WM8350_IRQ_UV_DC6: + return wm8350_clear_bits(wm8350, + WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, + WM8350_IM_UV_DC6_EINT); + case WM8350_IRQ_UV_DC5: + return wm8350_clear_bits(wm8350, + WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, + WM8350_IM_UV_DC5_EINT); + case WM8350_IRQ_UV_DC4: + return wm8350_clear_bits(wm8350, + WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, + WM8350_IM_UV_DC4_EINT); + case WM8350_IRQ_UV_DC3: + return wm8350_clear_bits(wm8350, + WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, + WM8350_IM_UV_DC3_EINT); + case WM8350_IRQ_UV_DC2: + return wm8350_clear_bits(wm8350, + WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, + WM8350_IM_UV_DC2_EINT); + case WM8350_IRQ_UV_DC1: + return wm8350_clear_bits(wm8350, + WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, + WM8350_IM_UV_DC1_EINT); + case WM8350_IRQ_OC_LS: + return wm8350_clear_bits(wm8350, + WM8350_OVER_CURRENT_INT_STATUS_MASK, + WM8350_IM_OC_LS_EINT); + case WM8350_IRQ_EXT_USB_FB: + return wm8350_clear_bits(wm8350, + WM8350_COMPARATOR_INT_STATUS_MASK, + WM8350_IM_EXT_USB_FB_EINT); + case WM8350_IRQ_EXT_WALL_FB: + return wm8350_clear_bits(wm8350, + WM8350_COMPARATOR_INT_STATUS_MASK, + WM8350_IM_EXT_WALL_FB_EINT); + case WM8350_IRQ_EXT_BAT_FB: + return wm8350_clear_bits(wm8350, + WM8350_COMPARATOR_INT_STATUS_MASK, + WM8350_IM_EXT_BAT_FB_EINT); + case WM8350_IRQ_CODEC_JCK_DET_L: + return wm8350_clear_bits(wm8350, + WM8350_COMPARATOR_INT_STATUS_MASK, + WM8350_IM_CODEC_JCK_DET_L_EINT); + case WM8350_IRQ_CODEC_JCK_DET_R: + return wm8350_clear_bits(wm8350, + WM8350_COMPARATOR_INT_STATUS_MASK, + WM8350_IM_CODEC_JCK_DET_R_EINT); + case WM8350_IRQ_CODEC_MICSCD: + return wm8350_clear_bits(wm8350, + WM8350_COMPARATOR_INT_STATUS_MASK, + WM8350_IM_CODEC_MICSCD_EINT); + case WM8350_IRQ_CODEC_MICD: + return wm8350_clear_bits(wm8350, + WM8350_COMPARATOR_INT_STATUS_MASK, + WM8350_IM_CODEC_MICD_EINT); + case WM8350_IRQ_WKUP_OFF_STATE: + return wm8350_clear_bits(wm8350, + WM8350_COMPARATOR_INT_STATUS_MASK, + WM8350_IM_WKUP_OFF_STATE_EINT); + case WM8350_IRQ_WKUP_HIB_STATE: + return wm8350_clear_bits(wm8350, + WM8350_COMPARATOR_INT_STATUS_MASK, + WM8350_IM_WKUP_HIB_STATE_EINT); + case WM8350_IRQ_WKUP_CONV_FAULT: + return wm8350_clear_bits(wm8350, + WM8350_COMPARATOR_INT_STATUS_MASK, + WM8350_IM_WKUP_CONV_FAULT_EINT); + case WM8350_IRQ_WKUP_WDOG_RST: + return wm8350_clear_bits(wm8350, + WM8350_COMPARATOR_INT_STATUS_MASK, + WM8350_IM_WKUP_OFF_STATE_EINT); + case WM8350_IRQ_WKUP_GP_PWR_ON: + return wm8350_clear_bits(wm8350, + WM8350_COMPARATOR_INT_STATUS_MASK, + 
WM8350_IM_WKUP_GP_PWR_ON_EINT); + case WM8350_IRQ_WKUP_ONKEY: + return wm8350_clear_bits(wm8350, + WM8350_COMPARATOR_INT_STATUS_MASK, + WM8350_IM_WKUP_ONKEY_EINT); + case WM8350_IRQ_WKUP_GP_WAKEUP: + return wm8350_clear_bits(wm8350, + WM8350_COMPARATOR_INT_STATUS_MASK, + WM8350_IM_WKUP_GP_WAKEUP_EINT); + case WM8350_IRQ_GPIO(0): + return wm8350_clear_bits(wm8350, + WM8350_GPIO_INT_STATUS_MASK, + WM8350_IM_GP0_EINT); + case WM8350_IRQ_GPIO(1): + return wm8350_clear_bits(wm8350, + WM8350_GPIO_INT_STATUS_MASK, + WM8350_IM_GP1_EINT); + case WM8350_IRQ_GPIO(2): + return wm8350_clear_bits(wm8350, + WM8350_GPIO_INT_STATUS_MASK, + WM8350_IM_GP2_EINT); + case WM8350_IRQ_GPIO(3): + return wm8350_clear_bits(wm8350, + WM8350_GPIO_INT_STATUS_MASK, + WM8350_IM_GP3_EINT); + case WM8350_IRQ_GPIO(4): + return wm8350_clear_bits(wm8350, + WM8350_GPIO_INT_STATUS_MASK, + WM8350_IM_GP4_EINT); + case WM8350_IRQ_GPIO(5): + return wm8350_clear_bits(wm8350, + WM8350_GPIO_INT_STATUS_MASK, + WM8350_IM_GP5_EINT); + case WM8350_IRQ_GPIO(6): + return wm8350_clear_bits(wm8350, + WM8350_GPIO_INT_STATUS_MASK, + WM8350_IM_GP6_EINT); + case WM8350_IRQ_GPIO(7): + return wm8350_clear_bits(wm8350, + WM8350_GPIO_INT_STATUS_MASK, + WM8350_IM_GP7_EINT); + case WM8350_IRQ_GPIO(8): + return wm8350_clear_bits(wm8350, + WM8350_GPIO_INT_STATUS_MASK, + WM8350_IM_GP8_EINT); + case WM8350_IRQ_GPIO(9): + return wm8350_clear_bits(wm8350, + WM8350_GPIO_INT_STATUS_MASK, + WM8350_IM_GP9_EINT); + case WM8350_IRQ_GPIO(10): + return wm8350_clear_bits(wm8350, + WM8350_GPIO_INT_STATUS_MASK, + WM8350_IM_GP10_EINT); + case WM8350_IRQ_GPIO(11): + return wm8350_clear_bits(wm8350, + WM8350_GPIO_INT_STATUS_MASK, + WM8350_IM_GP11_EINT); + case WM8350_IRQ_GPIO(12): + return wm8350_clear_bits(wm8350, + WM8350_GPIO_INT_STATUS_MASK, + WM8350_IM_GP12_EINT); + default: + dev_warn(wm8350->dev, "Attempting to unmask unknown IRQ %d\n", + irq); + return -EINVAL; + } + return 0; +} +EXPORT_SYMBOL_GPL(wm8350_unmask_irq); + +int wm8350_irq_init(struct wm8350 *wm8350, int irq, + struct wm8350_platform_data *pdata) +{ + int ret; + int flags = IRQF_ONESHOT; + + if (!irq) { + dev_err(wm8350->dev, "No IRQ configured\n"); + return -EINVAL; + } + + wm8350_reg_write(wm8350, WM8350_SYSTEM_INTERRUPTS_MASK, 0xFFFF); + wm8350_reg_write(wm8350, WM8350_INT_STATUS_1_MASK, 0xFFFF); + wm8350_reg_write(wm8350, WM8350_INT_STATUS_2_MASK, 0xFFFF); + wm8350_reg_write(wm8350, WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, 0xFFFF); + wm8350_reg_write(wm8350, WM8350_GPIO_INT_STATUS_MASK, 0xFFFF); + wm8350_reg_write(wm8350, WM8350_COMPARATOR_INT_STATUS_MASK, 0xFFFF); + + mutex_init(&wm8350->irq_mutex); + wm8350->chip_irq = irq; + + if (pdata && pdata->irq_high) { + flags |= IRQF_TRIGGER_HIGH; + + wm8350_set_bits(wm8350, WM8350_SYSTEM_CONTROL_1, + WM8350_IRQ_POL); + } else { + flags |= IRQF_TRIGGER_LOW; + + wm8350_clear_bits(wm8350, WM8350_SYSTEM_CONTROL_1, + WM8350_IRQ_POL); + } + + ret = request_threaded_irq(irq, NULL, wm8350_irq, flags, + "wm8350", wm8350); + if (ret != 0) + dev_err(wm8350->dev, "Failed to request IRQ: %d\n", ret); + + return ret; +} + +int wm8350_irq_exit(struct wm8350 *wm8350) +{ + free_irq(wm8350->chip_irq, wm8350); + return 0; +} diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h index 1d595de6a055..32197fde904d 100644 --- a/include/linux/mfd/wm8350/core.h +++ b/include/linux/mfd/wm8350/core.h @@ -681,6 +681,8 @@ int wm8350_register_irq(struct wm8350 *wm8350, int irq, int wm8350_free_irq(struct wm8350 *wm8350, int irq); int wm8350_mask_irq(struct 
wm8350 *wm8350, int irq); int wm8350_unmask_irq(struct wm8350 *wm8350, int irq); - +int wm8350_irq_init(struct wm8350 *wm8350, int irq, + struct wm8350_platform_data *pdata); +int wm8350_irq_exit(struct wm8350 *wm8350); #endif -- cgit v1.2.3 From 0c7229f93a529145d52e1bd7b29e6c98a3a3294d Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 12 Oct 2009 16:15:10 +0100 Subject: mfd: Convert WM835x IRQ handling to use a data table Rather than open coding individual IRQs in each function which manipulates them, store data for IRQs in a table which is then referenced by the users. This is a substantial code shrink and should be a performance win in cases where only a single IRQ goes off at once, since instead of reading four of the second level IRQ registers for each interrupt we read only the sub-registers which have had an interrupt flagged. Signed-off-by: Mark Brown Signed-off-by: Samuel Ortiz --- drivers/mfd/wm8350-irq.c | 1017 ++++++++++++++------------------------- include/linux/mfd/wm8350/gpio.h | 18 + 2 files changed, 387 insertions(+), 648 deletions(-) (limited to 'include') diff --git a/drivers/mfd/wm8350-irq.c b/drivers/mfd/wm8350-irq.c index a432e2b65c48..d9abfc94c685 100644 --- a/drivers/mfd/wm8350-irq.c +++ b/drivers/mfd/wm8350-irq.c @@ -29,6 +29,343 @@ #include #include +#define WM8350_NUM_IRQ_REGS 7 + +#define WM8350_INT_OFFSET_1 0 +#define WM8350_INT_OFFSET_2 1 +#define WM8350_POWER_UP_INT_OFFSET 2 +#define WM8350_UNDER_VOLTAGE_INT_OFFSET 3 +#define WM8350_OVER_CURRENT_INT_OFFSET 4 +#define WM8350_GPIO_INT_OFFSET 5 +#define WM8350_COMPARATOR_INT_OFFSET 6 + +struct wm8350_irq_data { + int primary; + int reg; + int mask; + int primary_only; +}; + +static struct wm8350_irq_data wm8350_irqs[] = { + [WM8350_IRQ_OC_LS] = { + .primary = WM8350_OC_INT, + .reg = WM8350_OVER_CURRENT_INT_OFFSET, + .mask = WM8350_OC_LS_EINT, + .primary_only = 1, + }, + [WM8350_IRQ_UV_DC1] = { + .primary = WM8350_UV_INT, + .reg = WM8350_UNDER_VOLTAGE_INT_OFFSET, + .mask = WM8350_UV_DC1_EINT, + }, + [WM8350_IRQ_UV_DC2] = { + .primary = WM8350_UV_INT, + .reg = WM8350_UNDER_VOLTAGE_INT_OFFSET, + .mask = WM8350_UV_DC2_EINT, + }, + [WM8350_IRQ_UV_DC3] = { + .primary = WM8350_UV_INT, + .reg = WM8350_UNDER_VOLTAGE_INT_OFFSET, + .mask = WM8350_UV_DC3_EINT, + }, + [WM8350_IRQ_UV_DC4] = { + .primary = WM8350_UV_INT, + .reg = WM8350_UNDER_VOLTAGE_INT_OFFSET, + .mask = WM8350_UV_DC4_EINT, + }, + [WM8350_IRQ_UV_DC5] = { + .primary = WM8350_UV_INT, + .reg = WM8350_UNDER_VOLTAGE_INT_OFFSET, + .mask = WM8350_UV_DC5_EINT, + }, + [WM8350_IRQ_UV_DC6] = { + .primary = WM8350_UV_INT, + .reg = WM8350_UNDER_VOLTAGE_INT_OFFSET, + .mask = WM8350_UV_DC6_EINT, + }, + [WM8350_IRQ_UV_LDO1] = { + .primary = WM8350_UV_INT, + .reg = WM8350_UNDER_VOLTAGE_INT_OFFSET, + .mask = WM8350_UV_LDO1_EINT, + }, + [WM8350_IRQ_UV_LDO2] = { + .primary = WM8350_UV_INT, + .reg = WM8350_UNDER_VOLTAGE_INT_OFFSET, + .mask = WM8350_UV_LDO2_EINT, + }, + [WM8350_IRQ_UV_LDO3] = { + .primary = WM8350_UV_INT, + .reg = WM8350_UNDER_VOLTAGE_INT_OFFSET, + .mask = WM8350_UV_LDO3_EINT, + }, + [WM8350_IRQ_UV_LDO4] = { + .primary = WM8350_UV_INT, + .reg = WM8350_UNDER_VOLTAGE_INT_OFFSET, + .mask = WM8350_UV_LDO4_EINT, + }, + [WM8350_IRQ_CHG_BAT_HOT] = { + .primary = WM8350_CHG_INT, + .reg = WM8350_INT_OFFSET_1, + .mask = WM8350_CHG_BAT_HOT_EINT, + }, + [WM8350_IRQ_CHG_BAT_COLD] = { + .primary = WM8350_CHG_INT, + .reg = WM8350_INT_OFFSET_1, + .mask = WM8350_CHG_BAT_COLD_EINT, + }, + [WM8350_IRQ_CHG_BAT_FAIL] = { + .primary = WM8350_CHG_INT, + .reg = WM8350_INT_OFFSET_1, +
.mask = WM8350_CHG_BAT_FAIL_EINT, + }, + [WM8350_IRQ_CHG_TO] = { + .primary = WM8350_CHG_INT, + .reg = WM8350_INT_OFFSET_1, + .mask = WM8350_CHG_TO_EINT, + }, + [WM8350_IRQ_CHG_END] = { + .primary = WM8350_CHG_INT, + .reg = WM8350_INT_OFFSET_1, + .mask = WM8350_CHG_END_EINT, + }, + [WM8350_IRQ_CHG_START] = { + .primary = WM8350_CHG_INT, + .reg = WM8350_INT_OFFSET_1, + .mask = WM8350_CHG_START_EINT, + }, + [WM8350_IRQ_CHG_FAST_RDY] = { + .primary = WM8350_CHG_INT, + .reg = WM8350_INT_OFFSET_1, + .mask = WM8350_CHG_FAST_RDY_EINT, + }, + [WM8350_IRQ_CHG_VBATT_LT_3P9] = { + .primary = WM8350_CHG_INT, + .reg = WM8350_INT_OFFSET_1, + .mask = WM8350_CHG_VBATT_LT_3P9_EINT, + }, + [WM8350_IRQ_CHG_VBATT_LT_3P1] = { + .primary = WM8350_CHG_INT, + .reg = WM8350_INT_OFFSET_1, + .mask = WM8350_CHG_VBATT_LT_3P1_EINT, + }, + [WM8350_IRQ_CHG_VBATT_LT_2P85] = { + .primary = WM8350_CHG_INT, + .reg = WM8350_INT_OFFSET_1, + .mask = WM8350_CHG_VBATT_LT_2P85_EINT, + }, + [WM8350_IRQ_RTC_ALM] = { + .primary = WM8350_RTC_INT, + .reg = WM8350_INT_OFFSET_1, + .mask = WM8350_RTC_ALM_EINT, + }, + [WM8350_IRQ_RTC_SEC] = { + .primary = WM8350_RTC_INT, + .reg = WM8350_INT_OFFSET_1, + .mask = WM8350_RTC_SEC_EINT, + }, + [WM8350_IRQ_RTC_PER] = { + .primary = WM8350_RTC_INT, + .reg = WM8350_INT_OFFSET_1, + .mask = WM8350_RTC_PER_EINT, + }, + [WM8350_IRQ_CS1] = { + .primary = WM8350_CS_INT, + .reg = WM8350_INT_OFFSET_2, + .mask = WM8350_CS1_EINT, + }, + [WM8350_IRQ_CS2] = { + .primary = WM8350_CS_INT, + .reg = WM8350_INT_OFFSET_2, + .mask = WM8350_CS2_EINT, + }, + [WM8350_IRQ_SYS_HYST_COMP_FAIL] = { + .primary = WM8350_SYS_INT, + .reg = WM8350_INT_OFFSET_2, + .mask = WM8350_SYS_HYST_COMP_FAIL_EINT, + }, + [WM8350_IRQ_SYS_CHIP_GT115] = { + .primary = WM8350_SYS_INT, + .reg = WM8350_INT_OFFSET_2, + .mask = WM8350_SYS_CHIP_GT115_EINT, + }, + [WM8350_IRQ_SYS_CHIP_GT140] = { + .primary = WM8350_SYS_INT, + .reg = WM8350_INT_OFFSET_2, + .mask = WM8350_SYS_CHIP_GT140_EINT, + }, + [WM8350_IRQ_SYS_WDOG_TO] = { + .primary = WM8350_SYS_INT, + .reg = WM8350_INT_OFFSET_2, + .mask = WM8350_SYS_WDOG_TO_EINT, + }, + [WM8350_IRQ_AUXADC_DATARDY] = { + .primary = WM8350_AUXADC_INT, + .reg = WM8350_INT_OFFSET_2, + .mask = WM8350_AUXADC_DATARDY_EINT, + }, + [WM8350_IRQ_AUXADC_DCOMP4] = { + .primary = WM8350_AUXADC_INT, + .reg = WM8350_INT_OFFSET_2, + .mask = WM8350_AUXADC_DCOMP4_EINT, + }, + [WM8350_IRQ_AUXADC_DCOMP3] = { + .primary = WM8350_AUXADC_INT, + .reg = WM8350_INT_OFFSET_2, + .mask = WM8350_AUXADC_DCOMP3_EINT, + }, + [WM8350_IRQ_AUXADC_DCOMP2] = { + .primary = WM8350_AUXADC_INT, + .reg = WM8350_INT_OFFSET_2, + .mask = WM8350_AUXADC_DCOMP2_EINT, + }, + [WM8350_IRQ_AUXADC_DCOMP1] = { + .primary = WM8350_AUXADC_INT, + .reg = WM8350_INT_OFFSET_2, + .mask = WM8350_AUXADC_DCOMP1_EINT, + }, + [WM8350_IRQ_USB_LIMIT] = { + .primary = WM8350_USB_INT, + .reg = WM8350_INT_OFFSET_2, + .mask = WM8350_USB_LIMIT_EINT, + .primary_only = 1, + }, + [WM8350_IRQ_WKUP_OFF_STATE] = { + .primary = WM8350_WKUP_INT, + .reg = WM8350_COMPARATOR_INT_OFFSET, + .mask = WM8350_WKUP_OFF_STATE_EINT, + }, + [WM8350_IRQ_WKUP_HIB_STATE] = { + .primary = WM8350_WKUP_INT, + .reg = WM8350_COMPARATOR_INT_OFFSET, + .mask = WM8350_WKUP_HIB_STATE_EINT, + }, + [WM8350_IRQ_WKUP_CONV_FAULT] = { + .primary = WM8350_WKUP_INT, + .reg = WM8350_COMPARATOR_INT_OFFSET, + .mask = WM8350_WKUP_CONV_FAULT_EINT, + }, + [WM8350_IRQ_WKUP_WDOG_RST] = { + .primary = WM8350_WKUP_INT, + .reg = WM8350_COMPARATOR_INT_OFFSET, + .mask = WM8350_WKUP_WDOG_RST_EINT, + }, + [WM8350_IRQ_WKUP_GP_PWR_ON] = { + 
.primary = WM8350_WKUP_INT, + .reg = WM8350_COMPARATOR_INT_OFFSET, + .mask = WM8350_WKUP_GP_PWR_ON_EINT, + }, + [WM8350_IRQ_WKUP_ONKEY] = { + .primary = WM8350_WKUP_INT, + .reg = WM8350_COMPARATOR_INT_OFFSET, + .mask = WM8350_WKUP_ONKEY_EINT, + }, + [WM8350_IRQ_WKUP_GP_WAKEUP] = { + .primary = WM8350_WKUP_INT, + .reg = WM8350_COMPARATOR_INT_OFFSET, + .mask = WM8350_WKUP_GP_WAKEUP_EINT, + }, + [WM8350_IRQ_CODEC_JCK_DET_L] = { + .primary = WM8350_CODEC_INT, + .reg = WM8350_COMPARATOR_INT_OFFSET, + .mask = WM8350_CODEC_JCK_DET_L_EINT, + }, + [WM8350_IRQ_CODEC_JCK_DET_R] = { + .primary = WM8350_CODEC_INT, + .reg = WM8350_COMPARATOR_INT_OFFSET, + .mask = WM8350_CODEC_JCK_DET_R_EINT, + }, + [WM8350_IRQ_CODEC_MICSCD] = { + .primary = WM8350_CODEC_INT, + .reg = WM8350_COMPARATOR_INT_OFFSET, + .mask = WM8350_CODEC_MICSCD_EINT, + }, + [WM8350_IRQ_CODEC_MICD] = { + .primary = WM8350_CODEC_INT, + .reg = WM8350_COMPARATOR_INT_OFFSET, + .mask = WM8350_CODEC_MICD_EINT, + }, + [WM8350_IRQ_EXT_USB_FB] = { + .primary = WM8350_EXT_INT, + .reg = WM8350_COMPARATOR_INT_OFFSET, + .mask = WM8350_EXT_USB_FB_EINT, + }, + [WM8350_IRQ_EXT_WALL_FB] = { + .primary = WM8350_EXT_INT, + .reg = WM8350_COMPARATOR_INT_OFFSET, + .mask = WM8350_EXT_WALL_FB_EINT, + }, + [WM8350_IRQ_EXT_BAT_FB] = { + .primary = WM8350_EXT_INT, + .reg = WM8350_COMPARATOR_INT_OFFSET, + .mask = WM8350_EXT_BAT_FB_EINT, + }, + [WM8350_IRQ_GPIO(0)] = { + .primary = WM8350_GP_INT, + .reg = WM8350_GPIO_INT_OFFSET, + .mask = WM8350_GP0_EINT, + }, + [WM8350_IRQ_GPIO(1)] = { + .primary = WM8350_GP_INT, + .reg = WM8350_GPIO_INT_OFFSET, + .mask = WM8350_GP1_EINT, + }, + [WM8350_IRQ_GPIO(2)] = { + .primary = WM8350_GP_INT, + .reg = WM8350_GPIO_INT_OFFSET, + .mask = WM8350_GP2_EINT, + }, + [WM8350_IRQ_GPIO(3)] = { + .primary = WM8350_GP_INT, + .reg = WM8350_GPIO_INT_OFFSET, + .mask = WM8350_GP3_EINT, + }, + [WM8350_IRQ_GPIO(4)] = { + .primary = WM8350_GP_INT, + .reg = WM8350_GPIO_INT_OFFSET, + .mask = WM8350_GP4_EINT, + }, + [WM8350_IRQ_GPIO(5)] = { + .primary = WM8350_GP_INT, + .reg = WM8350_GPIO_INT_OFFSET, + .mask = WM8350_GP5_EINT, + }, + [WM8350_IRQ_GPIO(6)] = { + .primary = WM8350_GP_INT, + .reg = WM8350_GPIO_INT_OFFSET, + .mask = WM8350_GP6_EINT, + }, + [WM8350_IRQ_GPIO(7)] = { + .primary = WM8350_GP_INT, + .reg = WM8350_GPIO_INT_OFFSET, + .mask = WM8350_GP7_EINT, + }, + [WM8350_IRQ_GPIO(8)] = { + .primary = WM8350_GP_INT, + .reg = WM8350_GPIO_INT_OFFSET, + .mask = WM8350_GP8_EINT, + }, + [WM8350_IRQ_GPIO(9)] = { + .primary = WM8350_GP_INT, + .reg = WM8350_GPIO_INT_OFFSET, + .mask = WM8350_GP9_EINT, + }, + [WM8350_IRQ_GPIO(10)] = { + .primary = WM8350_GP_INT, + .reg = WM8350_GPIO_INT_OFFSET, + .mask = WM8350_GP10_EINT, + }, + [WM8350_IRQ_GPIO(11)] = { + .primary = WM8350_GP_INT, + .reg = WM8350_GPIO_INT_OFFSET, + .mask = WM8350_GP11_EINT, + }, + [WM8350_IRQ_GPIO(12)] = { + .primary = WM8350_GP_INT, + .reg = WM8350_GPIO_INT_OFFSET, + .mask = WM8350_GP12_EINT, + }, +}; + static void wm8350_irq_call_handler(struct wm8350 *wm8350, int irq) { mutex_lock(&wm8350->irq_mutex); @@ -51,197 +388,43 @@ static void wm8350_irq_call_handler(struct wm8350 *wm8350, int irq) * asserted while we run - in the normal course of events this is a * rare occurrence so we save I2C/SPI reads. 
*/ -static irqreturn_t wm8350_irq(int irq, void *data) +static irqreturn_t wm8350_irq(int irq, void *irq_data) { - struct wm8350 *wm8350 = data; - u16 level_one, status1, status2, comp; + struct wm8350 *wm8350 = irq_data; + u16 level_one; + u16 sub_reg[WM8350_NUM_IRQ_REGS]; + int read_done[WM8350_NUM_IRQ_REGS]; + struct wm8350_irq_data *data; + int i; /* TODO: Use block reads to improve performance? */ level_one = wm8350_reg_read(wm8350, WM8350_SYSTEM_INTERRUPTS) & ~wm8350_reg_read(wm8350, WM8350_SYSTEM_INTERRUPTS_MASK); - status1 = wm8350_reg_read(wm8350, WM8350_INT_STATUS_1) - & ~wm8350_reg_read(wm8350, WM8350_INT_STATUS_1_MASK); - status2 = wm8350_reg_read(wm8350, WM8350_INT_STATUS_2) - & ~wm8350_reg_read(wm8350, WM8350_INT_STATUS_2_MASK); - comp = wm8350_reg_read(wm8350, WM8350_COMPARATOR_INT_STATUS) - & ~wm8350_reg_read(wm8350, WM8350_COMPARATOR_INT_STATUS_MASK); - - /* over current */ - if (level_one & WM8350_OC_INT) { - u16 oc; - - oc = wm8350_reg_read(wm8350, WM8350_OVER_CURRENT_INT_STATUS); - oc &= ~wm8350_reg_read(wm8350, - WM8350_OVER_CURRENT_INT_STATUS_MASK); - - if (oc & WM8350_OC_LS_EINT) /* limit switch */ - wm8350_irq_call_handler(wm8350, WM8350_IRQ_OC_LS); - } - - /* under voltage */ - if (level_one & WM8350_UV_INT) { - u16 uv; - - uv = wm8350_reg_read(wm8350, WM8350_UNDER_VOLTAGE_INT_STATUS); - uv &= ~wm8350_reg_read(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK); - - if (uv & WM8350_UV_DC1_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_DC1); - if (uv & WM8350_UV_DC2_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_DC2); - if (uv & WM8350_UV_DC3_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_DC3); - if (uv & WM8350_UV_DC4_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_DC4); - if (uv & WM8350_UV_DC5_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_DC5); - if (uv & WM8350_UV_DC6_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_DC6); - if (uv & WM8350_UV_LDO1_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_LDO1); - if (uv & WM8350_UV_LDO2_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_LDO2); - if (uv & WM8350_UV_LDO3_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_LDO3); - if (uv & WM8350_UV_LDO4_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_LDO4); - } - /* charger, RTC */ - if (status1) { - if (status1 & WM8350_CHG_BAT_HOT_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_CHG_BAT_HOT); - if (status1 & WM8350_CHG_BAT_COLD_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_CHG_BAT_COLD); - if (status1 & WM8350_CHG_BAT_FAIL_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_CHG_BAT_FAIL); - if (status1 & WM8350_CHG_TO_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_CHG_TO); - if (status1 & WM8350_CHG_END_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_CHG_END); - if (status1 & WM8350_CHG_START_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_CHG_START); - if (status1 & WM8350_CHG_FAST_RDY_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_CHG_FAST_RDY); - if (status1 & WM8350_CHG_VBATT_LT_3P9_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_CHG_VBATT_LT_3P9); - if (status1 & WM8350_CHG_VBATT_LT_3P1_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_CHG_VBATT_LT_3P1); - if (status1 & WM8350_CHG_VBATT_LT_2P85_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_CHG_VBATT_LT_2P85); - if (status1 & WM8350_RTC_ALM_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_RTC_ALM); - if (status1 & WM8350_RTC_SEC_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_RTC_SEC); - if (status1 & 
WM8350_RTC_PER_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_RTC_PER); - } - - /* current sink, system, aux adc */ - if (status2) { - if (status2 & WM8350_CS1_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_CS1); - if (status2 & WM8350_CS2_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_CS2); - - if (status2 & WM8350_SYS_HYST_COMP_FAIL_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_SYS_HYST_COMP_FAIL); - if (status2 & WM8350_SYS_CHIP_GT115_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_SYS_CHIP_GT115); - if (status2 & WM8350_SYS_CHIP_GT140_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_SYS_CHIP_GT140); - if (status2 & WM8350_SYS_WDOG_TO_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_SYS_WDOG_TO); - - if (status2 & WM8350_AUXADC_DATARDY_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_AUXADC_DATARDY); - if (status2 & WM8350_AUXADC_DCOMP4_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_AUXADC_DCOMP4); - if (status2 & WM8350_AUXADC_DCOMP3_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_AUXADC_DCOMP3); - if (status2 & WM8350_AUXADC_DCOMP2_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_AUXADC_DCOMP2); - if (status2 & WM8350_AUXADC_DCOMP1_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_AUXADC_DCOMP1); - - if (status2 & WM8350_USB_LIMIT_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_USB_LIMIT); - } + if (!level_one) + return IRQ_NONE; - /* wake, codec, ext */ - if (comp) { - if (comp & WM8350_WKUP_OFF_STATE_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_WKUP_OFF_STATE); - if (comp & WM8350_WKUP_HIB_STATE_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_WKUP_HIB_STATE); - if (comp & WM8350_WKUP_CONV_FAULT_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_WKUP_CONV_FAULT); - if (comp & WM8350_WKUP_WDOG_RST_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_WKUP_WDOG_RST); - if (comp & WM8350_WKUP_GP_PWR_ON_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_WKUP_GP_PWR_ON); - if (comp & WM8350_WKUP_ONKEY_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_WKUP_ONKEY); - if (comp & WM8350_WKUP_GP_WAKEUP_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_WKUP_GP_WAKEUP); - - if (comp & WM8350_CODEC_JCK_DET_L_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_CODEC_JCK_DET_L); - if (comp & WM8350_CODEC_JCK_DET_R_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_CODEC_JCK_DET_R); - if (comp & WM8350_CODEC_MICSCD_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_CODEC_MICSCD); - if (comp & WM8350_CODEC_MICD_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_CODEC_MICD); - - if (comp & WM8350_EXT_USB_FB_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_EXT_USB_FB); - if (comp & WM8350_EXT_WALL_FB_EINT) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_EXT_WALL_FB); - if (comp & WM8350_EXT_BAT_FB_EINT) - wm8350_irq_call_handler(wm8350, WM8350_IRQ_EXT_BAT_FB); - } + memset(&read_done, 0, sizeof(read_done)); - if (level_one & WM8350_GP_INT) { - int i; - u16 gpio; + for (i = 0; i < ARRAY_SIZE(wm8350_irqs); i++) { + data = &wm8350_irqs[i]; - gpio = wm8350_reg_read(wm8350, WM8350_GPIO_INT_STATUS); - gpio &= ~wm8350_reg_read(wm8350, - WM8350_GPIO_INT_STATUS_MASK); + if (!(level_one & data->primary)) + continue; - for (i = 0; i < 12; i++) { - if (gpio & (1 << i)) - wm8350_irq_call_handler(wm8350, - WM8350_IRQ_GPIO(i)); + if (!read_done[data->reg]) { + sub_reg[data->reg] = + wm8350_reg_read(wm8350, WM8350_INT_STATUS_1 + + data->reg); + sub_reg[data->reg] &= + ~wm8350_reg_read(wm8350, + WM8350_INT_STATUS_1_MASK + + data->reg); + 
read_done[data->reg] = 1; } + + if (sub_reg[data->reg] & data->mask) + wm8350_irq_call_handler(wm8350, i); } return IRQ_HANDLED; @@ -280,479 +463,17 @@ EXPORT_SYMBOL_GPL(wm8350_free_irq); int wm8350_mask_irq(struct wm8350 *wm8350, int irq) { - switch (irq) { - case WM8350_IRQ_CHG_BAT_HOT: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_BAT_HOT_EINT); - case WM8350_IRQ_CHG_BAT_COLD: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_BAT_COLD_EINT); - case WM8350_IRQ_CHG_BAT_FAIL: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_BAT_FAIL_EINT); - case WM8350_IRQ_CHG_TO: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_TO_EINT); - case WM8350_IRQ_CHG_END: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_END_EINT); - case WM8350_IRQ_CHG_START: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_START_EINT); - case WM8350_IRQ_CHG_FAST_RDY: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_FAST_RDY_EINT); - case WM8350_IRQ_RTC_PER: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_RTC_PER_EINT); - case WM8350_IRQ_RTC_SEC: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_RTC_SEC_EINT); - case WM8350_IRQ_RTC_ALM: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_RTC_ALM_EINT); - case WM8350_IRQ_CHG_VBATT_LT_3P9: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_VBATT_LT_3P9_EINT); - case WM8350_IRQ_CHG_VBATT_LT_3P1: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_VBATT_LT_3P1_EINT); - case WM8350_IRQ_CHG_VBATT_LT_2P85: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_VBATT_LT_2P85_EINT); - case WM8350_IRQ_CS1: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_CS1_EINT); - case WM8350_IRQ_CS2: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_CS2_EINT); - case WM8350_IRQ_USB_LIMIT: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_USB_LIMIT_EINT); - case WM8350_IRQ_AUXADC_DATARDY: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_AUXADC_DATARDY_EINT); - case WM8350_IRQ_AUXADC_DCOMP4: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_AUXADC_DCOMP4_EINT); - case WM8350_IRQ_AUXADC_DCOMP3: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_AUXADC_DCOMP3_EINT); - case WM8350_IRQ_AUXADC_DCOMP2: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_AUXADC_DCOMP2_EINT); - case WM8350_IRQ_AUXADC_DCOMP1: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_AUXADC_DCOMP1_EINT); - case WM8350_IRQ_SYS_HYST_COMP_FAIL: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_SYS_HYST_COMP_FAIL_EINT); - case WM8350_IRQ_SYS_CHIP_GT115: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_SYS_CHIP_GT115_EINT); - case WM8350_IRQ_SYS_CHIP_GT140: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_SYS_CHIP_GT140_EINT); - case WM8350_IRQ_SYS_WDOG_TO: - return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_SYS_WDOG_TO_EINT); - case WM8350_IRQ_UV_LDO4: - return wm8350_set_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_LDO4_EINT); - case WM8350_IRQ_UV_LDO3: - return wm8350_set_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_LDO3_EINT); - case 
WM8350_IRQ_UV_LDO2: - return wm8350_set_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_LDO2_EINT); - case WM8350_IRQ_UV_LDO1: - return wm8350_set_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_LDO1_EINT); - case WM8350_IRQ_UV_DC6: - return wm8350_set_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_DC6_EINT); - case WM8350_IRQ_UV_DC5: - return wm8350_set_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_DC5_EINT); - case WM8350_IRQ_UV_DC4: - return wm8350_set_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_DC4_EINT); - case WM8350_IRQ_UV_DC3: - return wm8350_set_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_DC3_EINT); - case WM8350_IRQ_UV_DC2: - return wm8350_set_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_DC2_EINT); - case WM8350_IRQ_UV_DC1: - return wm8350_set_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_DC1_EINT); - case WM8350_IRQ_OC_LS: - return wm8350_set_bits(wm8350, - WM8350_OVER_CURRENT_INT_STATUS_MASK, - WM8350_IM_OC_LS_EINT); - case WM8350_IRQ_EXT_USB_FB: - return wm8350_set_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_EXT_USB_FB_EINT); - case WM8350_IRQ_EXT_WALL_FB: - return wm8350_set_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_EXT_WALL_FB_EINT); - case WM8350_IRQ_EXT_BAT_FB: - return wm8350_set_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_EXT_BAT_FB_EINT); - case WM8350_IRQ_CODEC_JCK_DET_L: - return wm8350_set_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_CODEC_JCK_DET_L_EINT); - case WM8350_IRQ_CODEC_JCK_DET_R: - return wm8350_set_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_CODEC_JCK_DET_R_EINT); - case WM8350_IRQ_CODEC_MICSCD: - return wm8350_set_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_CODEC_MICSCD_EINT); - case WM8350_IRQ_CODEC_MICD: - return wm8350_set_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_CODEC_MICD_EINT); - case WM8350_IRQ_WKUP_OFF_STATE: - return wm8350_set_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_WKUP_OFF_STATE_EINT); - case WM8350_IRQ_WKUP_HIB_STATE: - return wm8350_set_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_WKUP_HIB_STATE_EINT); - case WM8350_IRQ_WKUP_CONV_FAULT: - return wm8350_set_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_WKUP_CONV_FAULT_EINT); - case WM8350_IRQ_WKUP_WDOG_RST: - return wm8350_set_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_WKUP_OFF_STATE_EINT); - case WM8350_IRQ_WKUP_GP_PWR_ON: - return wm8350_set_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_WKUP_GP_PWR_ON_EINT); - case WM8350_IRQ_WKUP_ONKEY: - return wm8350_set_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_WKUP_ONKEY_EINT); - case WM8350_IRQ_WKUP_GP_WAKEUP: - return wm8350_set_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_WKUP_GP_WAKEUP_EINT); - case WM8350_IRQ_GPIO(0): - return wm8350_set_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP0_EINT); - case WM8350_IRQ_GPIO(1): - return wm8350_set_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP1_EINT); - case WM8350_IRQ_GPIO(2): - return wm8350_set_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP2_EINT); - case WM8350_IRQ_GPIO(3): - return wm8350_set_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP3_EINT); - case WM8350_IRQ_GPIO(4): - return wm8350_set_bits(wm8350, - 
WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP4_EINT); - case WM8350_IRQ_GPIO(5): - return wm8350_set_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP5_EINT); - case WM8350_IRQ_GPIO(6): - return wm8350_set_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP6_EINT); - case WM8350_IRQ_GPIO(7): - return wm8350_set_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP7_EINT); - case WM8350_IRQ_GPIO(8): - return wm8350_set_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP8_EINT); - case WM8350_IRQ_GPIO(9): - return wm8350_set_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP9_EINT); - case WM8350_IRQ_GPIO(10): - return wm8350_set_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP10_EINT); - case WM8350_IRQ_GPIO(11): - return wm8350_set_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP11_EINT); - case WM8350_IRQ_GPIO(12): - return wm8350_set_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP12_EINT); - default: - dev_warn(wm8350->dev, "Attempting to mask unknown IRQ %d\n", - irq); - return -EINVAL; - } - return 0; + return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK + + wm8350_irqs[irq].reg, + wm8350_irqs[irq].mask); } EXPORT_SYMBOL_GPL(wm8350_mask_irq); int wm8350_unmask_irq(struct wm8350 *wm8350, int irq) { - switch (irq) { - case WM8350_IRQ_CHG_BAT_HOT: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_BAT_HOT_EINT); - case WM8350_IRQ_CHG_BAT_COLD: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_BAT_COLD_EINT); - case WM8350_IRQ_CHG_BAT_FAIL: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_BAT_FAIL_EINT); - case WM8350_IRQ_CHG_TO: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_TO_EINT); - case WM8350_IRQ_CHG_END: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_END_EINT); - case WM8350_IRQ_CHG_START: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_START_EINT); - case WM8350_IRQ_CHG_FAST_RDY: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_FAST_RDY_EINT); - case WM8350_IRQ_RTC_PER: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_RTC_PER_EINT); - case WM8350_IRQ_RTC_SEC: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_RTC_SEC_EINT); - case WM8350_IRQ_RTC_ALM: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_RTC_ALM_EINT); - case WM8350_IRQ_CHG_VBATT_LT_3P9: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_VBATT_LT_3P9_EINT); - case WM8350_IRQ_CHG_VBATT_LT_3P1: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_VBATT_LT_3P1_EINT); - case WM8350_IRQ_CHG_VBATT_LT_2P85: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK, - WM8350_IM_CHG_VBATT_LT_2P85_EINT); - case WM8350_IRQ_CS1: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_CS1_EINT); - case WM8350_IRQ_CS2: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_CS2_EINT); - case WM8350_IRQ_USB_LIMIT: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_USB_LIMIT_EINT); - case WM8350_IRQ_AUXADC_DATARDY: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_AUXADC_DATARDY_EINT); - case WM8350_IRQ_AUXADC_DCOMP4: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_AUXADC_DCOMP4_EINT); - case WM8350_IRQ_AUXADC_DCOMP3: - return wm8350_clear_bits(wm8350, 
WM8350_INT_STATUS_2_MASK, - WM8350_IM_AUXADC_DCOMP3_EINT); - case WM8350_IRQ_AUXADC_DCOMP2: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_AUXADC_DCOMP2_EINT); - case WM8350_IRQ_AUXADC_DCOMP1: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_AUXADC_DCOMP1_EINT); - case WM8350_IRQ_SYS_HYST_COMP_FAIL: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_SYS_HYST_COMP_FAIL_EINT); - case WM8350_IRQ_SYS_CHIP_GT115: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_SYS_CHIP_GT115_EINT); - case WM8350_IRQ_SYS_CHIP_GT140: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_SYS_CHIP_GT140_EINT); - case WM8350_IRQ_SYS_WDOG_TO: - return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK, - WM8350_IM_SYS_WDOG_TO_EINT); - case WM8350_IRQ_UV_LDO4: - return wm8350_clear_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_LDO4_EINT); - case WM8350_IRQ_UV_LDO3: - return wm8350_clear_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_LDO3_EINT); - case WM8350_IRQ_UV_LDO2: - return wm8350_clear_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_LDO2_EINT); - case WM8350_IRQ_UV_LDO1: - return wm8350_clear_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_LDO1_EINT); - case WM8350_IRQ_UV_DC6: - return wm8350_clear_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_DC6_EINT); - case WM8350_IRQ_UV_DC5: - return wm8350_clear_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_DC5_EINT); - case WM8350_IRQ_UV_DC4: - return wm8350_clear_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_DC4_EINT); - case WM8350_IRQ_UV_DC3: - return wm8350_clear_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_DC3_EINT); - case WM8350_IRQ_UV_DC2: - return wm8350_clear_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_DC2_EINT); - case WM8350_IRQ_UV_DC1: - return wm8350_clear_bits(wm8350, - WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, - WM8350_IM_UV_DC1_EINT); - case WM8350_IRQ_OC_LS: - return wm8350_clear_bits(wm8350, - WM8350_OVER_CURRENT_INT_STATUS_MASK, - WM8350_IM_OC_LS_EINT); - case WM8350_IRQ_EXT_USB_FB: - return wm8350_clear_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_EXT_USB_FB_EINT); - case WM8350_IRQ_EXT_WALL_FB: - return wm8350_clear_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_EXT_WALL_FB_EINT); - case WM8350_IRQ_EXT_BAT_FB: - return wm8350_clear_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_EXT_BAT_FB_EINT); - case WM8350_IRQ_CODEC_JCK_DET_L: - return wm8350_clear_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_CODEC_JCK_DET_L_EINT); - case WM8350_IRQ_CODEC_JCK_DET_R: - return wm8350_clear_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_CODEC_JCK_DET_R_EINT); - case WM8350_IRQ_CODEC_MICSCD: - return wm8350_clear_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_CODEC_MICSCD_EINT); - case WM8350_IRQ_CODEC_MICD: - return wm8350_clear_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_CODEC_MICD_EINT); - case WM8350_IRQ_WKUP_OFF_STATE: - return wm8350_clear_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_WKUP_OFF_STATE_EINT); - case WM8350_IRQ_WKUP_HIB_STATE: - return wm8350_clear_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_WKUP_HIB_STATE_EINT); - case WM8350_IRQ_WKUP_CONV_FAULT: - return wm8350_clear_bits(wm8350, - 
WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_WKUP_CONV_FAULT_EINT); - case WM8350_IRQ_WKUP_WDOG_RST: - return wm8350_clear_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_WKUP_OFF_STATE_EINT); - case WM8350_IRQ_WKUP_GP_PWR_ON: - return wm8350_clear_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_WKUP_GP_PWR_ON_EINT); - case WM8350_IRQ_WKUP_ONKEY: - return wm8350_clear_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_WKUP_ONKEY_EINT); - case WM8350_IRQ_WKUP_GP_WAKEUP: - return wm8350_clear_bits(wm8350, - WM8350_COMPARATOR_INT_STATUS_MASK, - WM8350_IM_WKUP_GP_WAKEUP_EINT); - case WM8350_IRQ_GPIO(0): - return wm8350_clear_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP0_EINT); - case WM8350_IRQ_GPIO(1): - return wm8350_clear_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP1_EINT); - case WM8350_IRQ_GPIO(2): - return wm8350_clear_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP2_EINT); - case WM8350_IRQ_GPIO(3): - return wm8350_clear_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP3_EINT); - case WM8350_IRQ_GPIO(4): - return wm8350_clear_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP4_EINT); - case WM8350_IRQ_GPIO(5): - return wm8350_clear_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP5_EINT); - case WM8350_IRQ_GPIO(6): - return wm8350_clear_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP6_EINT); - case WM8350_IRQ_GPIO(7): - return wm8350_clear_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP7_EINT); - case WM8350_IRQ_GPIO(8): - return wm8350_clear_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP8_EINT); - case WM8350_IRQ_GPIO(9): - return wm8350_clear_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP9_EINT); - case WM8350_IRQ_GPIO(10): - return wm8350_clear_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP10_EINT); - case WM8350_IRQ_GPIO(11): - return wm8350_clear_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP11_EINT); - case WM8350_IRQ_GPIO(12): - return wm8350_clear_bits(wm8350, - WM8350_GPIO_INT_STATUS_MASK, - WM8350_IM_GP12_EINT); - default: - dev_warn(wm8350->dev, "Attempting to unmask unknown IRQ %d\n", - irq); - return -EINVAL; - } - return 0; + return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK + + wm8350_irqs[irq].reg, + wm8350_irqs[irq].mask); } EXPORT_SYMBOL_GPL(wm8350_unmask_irq); diff --git a/include/linux/mfd/wm8350/gpio.h b/include/linux/mfd/wm8350/gpio.h index ed91e8f5d298..71af3d6ebe9d 100644 --- a/include/linux/mfd/wm8350/gpio.h +++ b/include/linux/mfd/wm8350/gpio.h @@ -172,6 +172,24 @@ #define WM8350_GPIO_DEBOUNCE_OFF 0 #define WM8350_GPIO_DEBOUNCE_ON 1 +/* + * R30 (0x1E) - GPIO Interrupt Status + */ +#define WM8350_GP12_EINT 0x1000 +#define WM8350_GP11_EINT 0x0800 +#define WM8350_GP10_EINT 0x0400 +#define WM8350_GP9_EINT 0x0200 +#define WM8350_GP8_EINT 0x0100 +#define WM8350_GP7_EINT 0x0080 +#define WM8350_GP6_EINT 0x0040 +#define WM8350_GP5_EINT 0x0020 +#define WM8350_GP4_EINT 0x0010 +#define WM8350_GP3_EINT 0x0008 +#define WM8350_GP2_EINT 0x0004 +#define WM8350_GP1_EINT 0x0002 +#define WM8350_GP0_EINT 0x0001 + + /* * R128 (0x80) - GPIO Debounce */ -- cgit v1.2.3 From 68d641efd86d901d000b888eeab5481257d49f12 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Wed, 14 Oct 2009 02:12:33 +0400 Subject: mfd: Fix memleak in pcf50633_client_dev_register Since platform_device_add_data copies the passed data, the allocated subdev_pdata is never freed. 
A simple fix would be to either free subdev_pdata or put it onto the stack. But since the pcf50633 child devices can rely on beeing children of the pcf50633 core device it's much more elegant to get access to pcf50633 core structure through that link. This allows to get completly rid of pcf5033_subdev_pdata. Signed-off-by: Lars-Peter Clausen Signed-off-by: Paul Fertser Signed-off-by: Samuel Ortiz --- drivers/input/misc/pcf50633-input.c | 7 +++---- drivers/mfd/pcf50633-adc.c | 5 ++--- drivers/mfd/pcf50633-core.c | 10 ---------- drivers/power/pcf50633-charger.c | 3 +-- drivers/rtc/rtc-pcf50633.c | 5 +---- include/linux/mfd/pcf50633/core.h | 10 +++++----- 6 files changed, 12 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/drivers/input/misc/pcf50633-input.c b/drivers/input/misc/pcf50633-input.c index 039dcb00ebd9..008de0c5834b 100644 --- a/drivers/input/misc/pcf50633-input.c +++ b/drivers/input/misc/pcf50633-input.c @@ -55,7 +55,6 @@ pcf50633_input_irq(int irq, void *data) static int __devinit pcf50633_input_probe(struct platform_device *pdev) { struct pcf50633_input *input; - struct pcf50633_subdev_pdata *pdata = pdev->dev.platform_data; struct input_dev *input_dev; int ret; @@ -71,7 +70,7 @@ static int __devinit pcf50633_input_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, input); - input->pcf = pdata->pcf; + input->pcf = dev_to_pcf50633(pdev->dev.parent); input->input_dev = input_dev; input_dev->name = "PCF50633 PMU events"; @@ -85,9 +84,9 @@ static int __devinit pcf50633_input_probe(struct platform_device *pdev) kfree(input); return ret; } - pcf50633_register_irq(pdata->pcf, PCF50633_IRQ_ONKEYR, + pcf50633_register_irq(input->pcf, PCF50633_IRQ_ONKEYR, pcf50633_input_irq, input); - pcf50633_register_irq(pdata->pcf, PCF50633_IRQ_ONKEYF, + pcf50633_register_irq(input->pcf, PCF50633_IRQ_ONKEYF, pcf50633_input_irq, input); return 0; diff --git a/drivers/mfd/pcf50633-adc.c b/drivers/mfd/pcf50633-adc.c index 3d31e97d6a45..6d2e8466df1d 100644 --- a/drivers/mfd/pcf50633-adc.c +++ b/drivers/mfd/pcf50633-adc.c @@ -209,17 +209,16 @@ static void pcf50633_adc_irq(int irq, void *data) static int __devinit pcf50633_adc_probe(struct platform_device *pdev) { - struct pcf50633_subdev_pdata *pdata = pdev->dev.platform_data; struct pcf50633_adc *adc; adc = kzalloc(sizeof(*adc), GFP_KERNEL); if (!adc) return -ENOMEM; - adc->pcf = pdata->pcf; + adc->pcf = dev_to_pcf50633(pdev->dev.parent); platform_set_drvdata(pdev, adc); - pcf50633_register_irq(pdata->pcf, PCF50633_IRQ_ADCRDY, + pcf50633_register_irq(adc->pcf, PCF50633_IRQ_ADCRDY, pcf50633_adc_irq, adc); mutex_init(&adc->queue_mutex); diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c index 48776d3018ed..69cdbdcd2e82 100644 --- a/drivers/mfd/pcf50633-core.c +++ b/drivers/mfd/pcf50633-core.c @@ -456,7 +456,6 @@ static void pcf50633_client_dev_register(struct pcf50633 *pcf, const char *name, struct platform_device **pdev) { - struct pcf50633_subdev_pdata *subdev_pdata; int ret; *pdev = platform_device_alloc(name, -1); @@ -465,15 +464,6 @@ pcf50633_client_dev_register(struct pcf50633 *pcf, const char *name, return; } - subdev_pdata = kmalloc(sizeof(*subdev_pdata), GFP_KERNEL); - if (!subdev_pdata) { - dev_err(pcf->dev, "Error allocating subdev pdata\n"); - platform_device_put(*pdev); - } - - subdev_pdata->pcf = pcf; - platform_device_add_data(*pdev, subdev_pdata, sizeof(*subdev_pdata)); - (*pdev)->dev.parent = pcf->dev; ret = platform_device_add(*pdev); diff --git a/drivers/power/pcf50633-charger.c 
b/drivers/power/pcf50633-charger.c index e8b278f71781..6a84a8eb8d7a 100644 --- a/drivers/power/pcf50633-charger.c +++ b/drivers/power/pcf50633-charger.c @@ -303,7 +303,6 @@ static const u8 mbc_irq_handlers[] = { static int __devinit pcf50633_mbc_probe(struct platform_device *pdev) { struct pcf50633_mbc *mbc; - struct pcf50633_subdev_pdata *pdata = pdev->dev.platform_data; int ret; int i; u8 mbcs1; @@ -313,7 +312,7 @@ static int __devinit pcf50633_mbc_probe(struct platform_device *pdev) return -ENOMEM; platform_set_drvdata(pdev, mbc); - mbc->pcf = pdata->pcf; + mbc->pcf = dev_to_pcf50633(pdev->dev.parent); /* Set up IRQ handlers */ for (i = 0; i < ARRAY_SIZE(mbc_irq_handlers); i++) diff --git a/drivers/rtc/rtc-pcf50633.c b/drivers/rtc/rtc-pcf50633.c index 4c5d5d0c4cfc..9b74e9c9151c 100644 --- a/drivers/rtc/rtc-pcf50633.c +++ b/drivers/rtc/rtc-pcf50633.c @@ -277,16 +277,13 @@ static void pcf50633_rtc_irq(int irq, void *data) static int __devinit pcf50633_rtc_probe(struct platform_device *pdev) { - struct pcf50633_subdev_pdata *pdata; struct pcf50633_rtc *rtc; - rtc = kzalloc(sizeof(*rtc), GFP_KERNEL); if (!rtc) return -ENOMEM; - pdata = pdev->dev.platform_data; - rtc->pcf = pdata->pcf; + rtc->pcf = dev_to_pcf50633(pdev->dev.parent); platform_set_drvdata(pdev, rtc); rtc->rtc_dev = rtc_device_register("pcf50633-rtc", &pdev->dev, &pcf50633_rtc_ops, THIS_MODULE); diff --git a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h index 9aba7b779fbc..d9034cc87f18 100644 --- a/include/linux/mfd/pcf50633/core.h +++ b/include/linux/mfd/pcf50633/core.h @@ -40,10 +40,6 @@ struct pcf50633_platform_data { u8 resumers[5]; }; -struct pcf50633_subdev_pdata { - struct pcf50633 *pcf; -}; - struct pcf50633_irq { void (*handler) (int, void *); void *data; @@ -217,5 +213,9 @@ enum pcf50633_reg_int5 { #define PCF50633_REG_LEDCTL 0x2a #define PCF50633_REG_LEDDIM 0x2b -#endif +static inline struct pcf50633 *dev_to_pcf50633(struct device *dev) +{ + return dev_get_drvdata(dev); +} +#endif -- cgit v1.2.3 From b4ead61e570d7b7bcf20a5a1733dd0bc37236c99 Mon Sep 17 00:00:00 2001 From: Amit Kucheria Date: Mon, 19 Oct 2009 15:11:00 +0300 Subject: mfd: Add support for remapping twl4030-power power states The _REMAP register allows configuration of the in case of a sleep or off transition. Allow this property of resources to be configured (through twl4030_resconfig) and add code to parse these values to program the registers accordingly. 
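For illustration only, a board-side sketch that is not part of the patch: the desired remap states are described through the two new twl4030_resconfig fields, and the new code in twl4030_configure_resource() below then writes them into bits 7:4 (off) and 3:0 (sleep) of the resource's _REMAP register. RES_VAUX1, DEV_GRP_P1 and RES_STATE_SLEEP are existing twl4030.h definitions, RES_STATE_OFF is assumed to exist alongside them, TWL4030_RESCONFIG_UNDEF is only introduced by a later patch in this series, and the array name and terminator are illustrative. (The sleep branch below appears to OR in remap_off where remap_sleep was presumably intended.)

#include <linux/i2c/twl4030.h>

/* Hypothetical board data, not part of this patch */
static struct twl4030_resconfig board_resconfig[] = {
	{
		.resource	= RES_VAUX1,
		.devgroup	= DEV_GRP_P1,
		.type		= TWL4030_RESCONFIG_UNDEF,	/* leave TYPE as-is */
		.type2		= TWL4030_RESCONFIG_UNDEF,
		.remap_off	= RES_STATE_OFF,	/* _REMAP bits 7:4 */
		.remap_sleep	= RES_STATE_SLEEP,	/* _REMAP bits 3:0 */
	},
	{ 0, 0 },	/* terminator */
};
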
Signed-off-by: Amit Kucheria Cc: linux-omap@vger.kernel.org Signed-off-by: Samuel Ortiz --- drivers/mfd/twl4030-power.c | 36 ++++++++++++++++++++++++++++++++++++ include/linux/i2c/twl4030.h | 3 +++ 2 files changed, 39 insertions(+) (limited to 'include') diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c index 2c38ac17ab64..3e41e0c0e4c6 100644 --- a/drivers/mfd/twl4030-power.c +++ b/drivers/mfd/twl4030-power.c @@ -75,6 +75,8 @@ static u8 twl4030_start_script_address = 0x2b; */ #define DEV_GRP_OFFSET 0 #define TYPE_OFFSET 1 +#define REMAP_OFFSET 2 +#define DEDICATED_OFFSET 3 /* Bit positions in the registers */ @@ -88,6 +90,12 @@ static u8 twl4030_start_script_address = 0x2b; #define TYPE2_SHIFT 3 #define TYPE2_MASK (3 << TYPE2_SHIFT) +/* _REMAP */ +#define SLEEP_STATE_SHIFT 0 +#define SLEEP_STATE_MASK (0xf << SLEEP_STATE_SHIFT) +#define OFF_STATE_SHIFT 4 +#define OFF_STATE_MASK (0xf << OFF_STATE_SHIFT) + static u8 res_config_addrs[] = { [RES_VAUX1] = 0x17, [RES_VAUX2] = 0x1b, @@ -325,6 +333,7 @@ static int __init twl4030_configure_resource(struct twl4030_resconfig *rconfig) int err; u8 type; u8 grp; + u8 remap; if (rconfig->resource > TOTAL_RESOURCES) { pr_err("TWL4030 Resource %d does not exist\n", @@ -380,6 +389,33 @@ static int __init twl4030_configure_resource(struct twl4030_resconfig *rconfig) return err; } + /* Set remap states */ + err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &remap, + rconfig_addr + REMAP_OFFSET); + if (err < 0) { + pr_err("TWL4030 Resource %d remap could not be read\n", + rconfig->resource); + return err; + } + + if (rconfig->remap_off >= 0) { + remap &= ~OFF_STATE_MASK; + remap |= rconfig->remap_off << OFF_STATE_SHIFT; + } + + if (rconfig->remap_sleep >= 0) { + remap &= ~SLEEP_STATE_MASK; + remap |= rconfig->remap_off << SLEEP_STATE_SHIFT; + } + + err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, + remap, + rconfig_addr + REMAP_OFFSET); + if (err < 0) { + pr_err("TWL4030 failed to program remap\n"); + return err; + } + return 0; } diff --git a/include/linux/i2c/twl4030.h b/include/linux/i2c/twl4030.h index 5306a759cbde..e87cb270d8a1 100644 --- a/include/linux/i2c/twl4030.h +++ b/include/linux/i2c/twl4030.h @@ -250,6 +250,7 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); #define RES_TYPE_ALL 0x7 +/* Resource states */ #define RES_STATE_WRST 0xF #define RES_STATE_ACTIVE 0xE #define RES_STATE_SLEEP 0x8 @@ -391,6 +392,8 @@ struct twl4030_resconfig { u8 devgroup; /* Processor group that Power resource belongs to */ u8 type; /* Power resource addressed, 6 / broadcast message */ u8 type2; /* Power resource addressed, 3 / broadcast message */ + u8 remap_off; /* off state remapping */ + u8 remap_sleep; /* sleep state remapping */ }; struct twl4030_power_data { -- cgit v1.2.3 From 56baa667973e53d6d38af2ad3731d558566d818b Mon Sep 17 00:00:00 2001 From: Aaro Koskinen Date: Mon, 19 Oct 2009 21:24:02 +0200 Subject: mfd: fix undefined twl4030-power resconfig value checks The code tries to skip values initialized with -1, but since the values are unsigned the comparison is always true. 
The patch eliminates the following compiler warnings: drivers/mfd/twl4030-power.c: In function 'twl4030_configure_resource': drivers/mfd/twl4030-power.c:338: warning: comparison is always true due to limited range of data type drivers/mfd/twl4030-power.c:358: warning: comparison is always true due to limited range of data type drivers/mfd/twl4030-power.c:363: warning: comparison is always true due to limited range of data type Signed-off-by: Aaro Koskinen Signed-off-by: Samuel Ortiz --- drivers/mfd/twl4030-power.c | 6 +++--- include/linux/i2c/twl4030.h | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c index 3e41e0c0e4c6..9f98c36273d8 100644 --- a/drivers/mfd/twl4030-power.c +++ b/drivers/mfd/twl4030-power.c @@ -352,7 +352,7 @@ static int __init twl4030_configure_resource(struct twl4030_resconfig *rconfig) return err; } - if (rconfig->devgroup >= 0) { + if (rconfig->devgroup != TWL4030_RESCONFIG_UNDEF) { grp &= ~DEV_GRP_MASK; grp |= rconfig->devgroup << DEV_GRP_SHIFT; err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, @@ -372,12 +372,12 @@ static int __init twl4030_configure_resource(struct twl4030_resconfig *rconfig) return err; } - if (rconfig->type >= 0) { + if (rconfig->type != TWL4030_RESCONFIG_UNDEF) { type &= ~TYPE_MASK; type |= rconfig->type << TYPE_SHIFT; } - if (rconfig->type2 >= 0) { + if (rconfig->type2 != TWL4030_RESCONFIG_UNDEF) { type &= ~TYPE2_MASK; type |= rconfig->type2 << TYPE2_SHIFT; } diff --git a/include/linux/i2c/twl4030.h b/include/linux/i2c/twl4030.h index e87cb270d8a1..8a4a58ff8dad 100644 --- a/include/linux/i2c/twl4030.h +++ b/include/linux/i2c/twl4030.h @@ -400,6 +400,7 @@ struct twl4030_power_data { struct twl4030_script **scripts; unsigned num; struct twl4030_resconfig *resource_config; +#define TWL4030_RESCONFIG_UNDEF ((u8)-1) }; extern void twl4030_power_init(struct twl4030_power_data *triton2_scripts); -- cgit v1.2.3 From 75b75722b4eb1032b3fe2e56b7015f23c6080529 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 21 Oct 2009 19:11:34 +0100 Subject: mfd: Allow platforms to specify an IRQ base for WM8350 This is currently unused by the wm8350 drivers but getting it merged now will reduce merge issues in the future when implementing wm8350 genirq support. Signed-off-by: Mark Brown Signed-off-by: Samuel Ortiz --- include/linux/mfd/wm8350/core.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h index 32197fde904d..ffce508a9109 100644 --- a/include/linux/mfd/wm8350/core.h +++ b/include/linux/mfd/wm8350/core.h @@ -646,10 +646,12 @@ struct wm8350 { * @init: Function called during driver initialisation. Should be * used by the platform to configure GPIO functions and similar. * @irq_high: Set if WM8350 IRQ is active high. + * @irq_base: Base IRQ for genirq (not currently used). */ struct wm8350_platform_data { int (*init)(struct wm8350 *wm8350); int irq_high; + int irq_base; }; -- cgit v1.2.3 From 38a684963f619eb9117cb898b92bde92cdd09127 Mon Sep 17 00:00:00 2001 From: Ilkka Koskinen Date: Thu, 22 Oct 2009 14:14:09 +0300 Subject: mfd: Enable twl4030 32kHz oscillator low-power mode Allows TWL's 32kHz oscillator to go in low-power mode when main battery voltage is running low. 
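A minimal board-side sketch, not part of the patch, showing how the new flag is meant to be wired up; the variable names are illustrative and the rest of the platform data is elided:

#include <linux/i2c/twl4030.h>

static struct twl4030_clock_init_data board_clock = {
	.ck32k_lowpwr_enable = true,	/* allow the 32kHz osc to drop to low power */
};

static struct twl4030_platform_data board_twl_data = {
	/* irq_base, irq_end, bci, gpio, ... as already set up for the board */
	.clock	= &board_clock,
};

clocks_init() then ORs CK32K_LOWPWR_EN into R_CFG_BOOT when this flag is set, as the hunk below shows.
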
Signed-off-by: Ilkka Koskinen Signed-off-by: Samuel Ortiz --- drivers/mfd/twl4030-core.c | 9 +++++++-- include/linux/i2c/twl4030.h | 5 +++++ 2 files changed, 12 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/mfd/twl4030-core.c b/drivers/mfd/twl4030-core.c index e3abbf66cc5c..90a38e43c313 100644 --- a/drivers/mfd/twl4030-core.c +++ b/drivers/mfd/twl4030-core.c @@ -183,6 +183,7 @@ #define HFCLK_FREQ_26_MHZ (2 << 0) #define HFCLK_FREQ_38p4_MHZ (3 << 0) #define HIGH_PERF_SQ (1 << 3) +#define CK32K_LOWPWR_EN (1 << 7) /* chip-specific feature flags, for i2c_device_id.driver_data */ @@ -695,7 +696,8 @@ static inline int __init unprotect_pm_master(void) return e; } -static void clocks_init(struct device *dev) +static void clocks_init(struct device *dev, + struct twl4030_clock_init_data *clock) { int e = 0; struct clk *osc; @@ -742,6 +744,9 @@ static void clocks_init(struct device *dev) } ctrl |= HIGH_PERF_SQ; + if (clock && clock->ck32k_lowpwr_enable) + ctrl |= CK32K_LOWPWR_EN; + e |= unprotect_pm_master(); /* effect->MADC+USB ck en */ e |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, ctrl, R_CFG_BOOT); @@ -820,7 +825,7 @@ twl4030_probe(struct i2c_client *client, const struct i2c_device_id *id) inuse = true; /* setup clock framework */ - clocks_init(&client->dev); + clocks_init(&client->dev, pdata->clock); /* load power event scripts */ if (twl_has_power() && pdata->power) diff --git a/include/linux/i2c/twl4030.h b/include/linux/i2c/twl4030.h index 8a4a58ff8dad..0c84cfa059e9 100644 --- a/include/linux/i2c/twl4030.h +++ b/include/linux/i2c/twl4030.h @@ -313,6 +313,10 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); /*----------------------------------------------------------------------*/ +struct twl4030_clock_init_data { + bool ck32k_lowpwr_enable; +}; + struct twl4030_bci_platform_data { int *battery_tmp_tbl; unsigned int tblsize; @@ -425,6 +429,7 @@ struct twl4030_codec_data { struct twl4030_platform_data { unsigned irq_base, irq_end; + struct twl4030_clock_init_data *clock; struct twl4030_bci_platform_data *bci; struct twl4030_gpio_platform_data *gpio; struct twl4030_madc_platform_data *madc; -- cgit v1.2.3 From b45440c33a0bdd824b98e4a4c56767c50d3275eb Mon Sep 17 00:00:00 2001 From: Ben Dooks Date: Mon, 2 Nov 2009 16:52:30 +0000 Subject: mfd: Allow configuration of VDCDC2 for tps65010 Add function to allow the configuation fo the VDCDC2 register by external users, to allow changing of the standard and low-power running modes. This is needed, for example, for the Simtec IM2440D20 where we need to use the low-power mode to shutdown the LDO/DCDC that are not needed during suspend (saving substantial power) and the runtime use of the low-power mode to change VCore. 
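As a hedged illustration of what the new helper enables, not code from the patch: the function name and the particular voltage pair are made up, but tps65010_config_vdcdc2() and the bit definitions are the ones added below.

#include <linux/i2c/tps65010.h>

/* Hypothetical board code: 1.3V core while running, 1.0V core in the
 * low-power state; TPS_LP_COREOFF is left clear so the core supply
 * stays up when low-power mode is entered.
 */
static int board_vcore_setup(void)
{
	return tps65010_config_vdcdc2(TPS_VCORE_1_3V | TPS_VCORE_LP_1_0V);
}
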
Signed-off-by: Ben Dooks Signed-off-by: Simtec Linux Team Signed-off-by: Samuel Ortiz --- drivers/mfd/tps65010.c | 28 ++++++++++++++++++++++++++++ include/linux/i2c/tps65010.h | 19 +++++++++++++++++++ 2 files changed, 47 insertions(+) (limited to 'include') diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c index 755c4030ea31..e5955306c2fa 100644 --- a/drivers/mfd/tps65010.c +++ b/drivers/mfd/tps65010.c @@ -964,6 +964,34 @@ int tps65010_config_vregs1(unsigned value) } EXPORT_SYMBOL(tps65010_config_vregs1); +int tps65010_config_vdcdc2(unsigned value) +{ + struct i2c_client *c; + int status; + + if (!the_tps) + return -ENODEV; + + c = the_tps->client; + mutex_lock(&the_tps->lock); + + pr_debug("%s: vdcdc2 0x%02x\n", DRIVER_NAME, + i2c_smbus_read_byte_data(c, TPS_VDCDC2)); + + status = i2c_smbus_write_byte_data(c, TPS_VDCDC2, value); + + if (status != 0) + printk(KERN_ERR "%s: Failed to write vdcdc2 register\n", + DRIVER_NAME); + else + pr_debug("%s: vregs1 0x%02x\n", DRIVER_NAME, + i2c_smbus_read_byte_data(c, TPS_VDCDC2)); + + mutex_unlock(&the_tps->lock); + return status; +} +EXPORT_SYMBOL(tps65010_config_vdcdc2); + /*-------------------------------------------------------------------------*/ /* tps65013_set_low_pwr parameter: * mode: ON or OFF diff --git a/include/linux/i2c/tps65010.h b/include/linux/i2c/tps65010.h index 918c5354d9b8..08aa92278d71 100644 --- a/include/linux/i2c/tps65010.h +++ b/include/linux/i2c/tps65010.h @@ -72,6 +72,21 @@ #define TPS_VDCDC1 0x0c # define TPS_ENABLE_LP (1 << 3) #define TPS_VDCDC2 0x0d +# define TPS_LP_COREOFF (1 << 7) +# define TPS_VCORE_1_8V (7<<4) +# define TPS_VCORE_1_5V (6 << 4) +# define TPS_VCORE_1_4V (5 << 4) +# define TPS_VCORE_1_3V (4 << 4) +# define TPS_VCORE_1_2V (3 << 4) +# define TPS_VCORE_1_1V (2 << 4) +# define TPS_VCORE_1_0V (1 << 4) +# define TPS_VCORE_0_85V (0 << 4) +# define TPS_VCORE_LP_1_2V (3 << 2) +# define TPS_VCORE_LP_1_1V (2 << 2) +# define TPS_VCORE_LP_1_0V (1 << 2) +# define TPS_VCORE_LP_0_85V (0 << 2) +# define TPS_VIB (1 << 1) +# define TPS_VCORE_DISCH (1 << 0) #define TPS_VREGS1 0x0e # define TPS_LDO2_ENABLE (1 << 7) # define TPS_LDO2_OFF (1 << 6) @@ -152,6 +167,10 @@ extern int tps65010_config_vregs1(unsigned value); */ extern int tps65013_set_low_pwr(unsigned mode); +/* tps65010_set_vdcdc2 + * value to be written to VDCDC2 + */ +extern int tps65010_config_vdcdc2(unsigned value); struct i2c_client; -- cgit v1.2.3 From 5a65edbc12b6b34ef912114f1fc8215786f85b25 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 4 Nov 2009 16:10:51 +0000 Subject: mfd: Convert wm8350 IRQ handlers to irq_handler_t This is done as simple code transformation, the semantics of the IRQ API provided by the core are are still very different to those of genirq (mainly with regard to masking). 
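For clients outside this diff the conversion is mechanical; a sketch (names illustrative, assuming nothing beyond what this patch changes) of a handler and its registration after the change:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/mfd/wm8350/core.h>

/* was: void handler(struct wm8350 *wm8350, int irq, void *data) */
static irqreturn_t board_wall_fb_handler(int irq, void *data)
{
	struct wm8350 *wm8350 = data;

	dev_dbg(wm8350->dev, "wall supply changed\n");
	return IRQ_HANDLED;
}

	/* in the client's probe(): registration now takes flags and a name */
	wm8350_register_irq(wm8350, WM8350_IRQ_EXT_WALL_FB,
			    board_wall_fb_handler, 0, "Wall supply", wm8350);
	wm8350_unmask_irq(wm8350, WM8350_IRQ_EXT_WALL_FB);
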
Signed-off-by: Mark Brown Signed-off-by: Samuel Ortiz --- drivers/mfd/wm8350-irq.c | 6 +++--- drivers/power/wm8350_power.c | 39 +++++++++++++++++++++++------------- drivers/regulator/wm8350-regulator.c | 7 +++++-- drivers/rtc/rtc-wm8350.c | 18 +++++++++++------ include/linux/mfd/wm8350/core.h | 8 ++++---- sound/soc/codecs/wm8350.c | 15 +++++++++----- 6 files changed, 59 insertions(+), 34 deletions(-) (limited to 'include') diff --git a/drivers/mfd/wm8350-irq.c b/drivers/mfd/wm8350-irq.c index d9abfc94c685..2ea2b8b4c72a 100644 --- a/drivers/mfd/wm8350-irq.c +++ b/drivers/mfd/wm8350-irq.c @@ -371,7 +371,7 @@ static void wm8350_irq_call_handler(struct wm8350 *wm8350, int irq) mutex_lock(&wm8350->irq_mutex); if (wm8350->irq[irq].handler) - wm8350->irq[irq].handler(wm8350, irq, wm8350->irq[irq].data); + wm8350->irq[irq].handler(irq, wm8350->irq[irq].data); else { dev_err(wm8350->dev, "irq %d nobody cared. now masked.\n", irq); @@ -431,8 +431,8 @@ static irqreturn_t wm8350_irq(int irq, void *irq_data) } int wm8350_register_irq(struct wm8350 *wm8350, int irq, - void (*handler) (struct wm8350 *, int, void *), - void *data) + irq_handler_t handler, unsigned long flags, + const char *name, void *data) { if (irq < 0 || irq > WM8350_NUM_IRQ || !handler) return -EINVAL; diff --git a/drivers/power/wm8350_power.c b/drivers/power/wm8350_power.c index 28b0299c0043..6e634cf7fc14 100644 --- a/drivers/power/wm8350_power.c +++ b/drivers/power/wm8350_power.c @@ -184,8 +184,9 @@ static ssize_t charger_state_show(struct device *dev, static DEVICE_ATTR(charger_state, 0444, charger_state_show, NULL); -static void wm8350_charger_handler(struct wm8350 *wm8350, int irq, void *data) +static irqreturn_t wm8350_charger_handler(int irq, void *data) { + struct wm8350 *wm8350 = data; struct wm8350_power *power = &wm8350->power; struct wm8350_charger_policy *policy = power->policy; @@ -238,6 +239,8 @@ static void wm8350_charger_handler(struct wm8350 *wm8350, int irq, void *data) default: dev_err(wm8350->dev, "Unknown interrupt %d\n", irq); } + + return IRQ_HANDLED; } /********************************************************************* @@ -387,45 +390,53 @@ static void wm8350_init_charger(struct wm8350 *wm8350) { /* register our interest in charger events */ wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_HOT, - wm8350_charger_handler, NULL); + wm8350_charger_handler, 0, "Battery hot", wm8350); wm8350_unmask_irq(wm8350, WM8350_IRQ_CHG_BAT_HOT); wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_COLD, - wm8350_charger_handler, NULL); + wm8350_charger_handler, 0, "Battery cold", wm8350); wm8350_unmask_irq(wm8350, WM8350_IRQ_CHG_BAT_COLD); wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_FAIL, - wm8350_charger_handler, NULL); + wm8350_charger_handler, 0, "Battery fail", wm8350); wm8350_unmask_irq(wm8350, WM8350_IRQ_CHG_BAT_FAIL); wm8350_register_irq(wm8350, WM8350_IRQ_CHG_TO, - wm8350_charger_handler, NULL); + wm8350_charger_handler, 0, + "Charger timeout", wm8350); wm8350_unmask_irq(wm8350, WM8350_IRQ_CHG_TO); wm8350_register_irq(wm8350, WM8350_IRQ_CHG_END, - wm8350_charger_handler, NULL); + wm8350_charger_handler, 0, + "Charge end", wm8350); wm8350_unmask_irq(wm8350, WM8350_IRQ_CHG_END); wm8350_register_irq(wm8350, WM8350_IRQ_CHG_START, - wm8350_charger_handler, NULL); + wm8350_charger_handler, 0, + "Charge start", wm8350); wm8350_unmask_irq(wm8350, WM8350_IRQ_CHG_START); wm8350_register_irq(wm8350, WM8350_IRQ_CHG_FAST_RDY, - wm8350_charger_handler, NULL); + wm8350_charger_handler, 0, + "Fast charge ready", wm8350); 
wm8350_unmask_irq(wm8350, WM8350_IRQ_CHG_FAST_RDY); wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9, - wm8350_charger_handler, NULL); + wm8350_charger_handler, 0, + "Battery <3.9V", wm8350); wm8350_unmask_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9); wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1, - wm8350_charger_handler, NULL); + wm8350_charger_handler, 0, + "Battery <3.1V", wm8350); wm8350_unmask_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1); wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85, - wm8350_charger_handler, NULL); + wm8350_charger_handler, 0, + "Battery <2.85V", wm8350); + wm8350_unmask_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85); /* and supply change events */ wm8350_register_irq(wm8350, WM8350_IRQ_EXT_USB_FB, - wm8350_charger_handler, NULL); + wm8350_charger_handler, 0, "USB", wm8350); wm8350_unmask_irq(wm8350, WM8350_IRQ_EXT_USB_FB); wm8350_register_irq(wm8350, WM8350_IRQ_EXT_WALL_FB, - wm8350_charger_handler, NULL); + wm8350_charger_handler, 0, "Wall", wm8350); wm8350_unmask_irq(wm8350, WM8350_IRQ_EXT_WALL_FB); wm8350_register_irq(wm8350, WM8350_IRQ_EXT_BAT_FB, - wm8350_charger_handler, NULL); + wm8350_charger_handler, 0, "Battery", wm8350); wm8350_unmask_irq(wm8350, WM8350_IRQ_EXT_BAT_FB); } diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c index 768bd0e5b48b..8c289fd4add2 100644 --- a/drivers/regulator/wm8350-regulator.c +++ b/drivers/regulator/wm8350-regulator.c @@ -1330,9 +1330,10 @@ static struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = { }, }; -static void pmic_uv_handler(struct wm8350 *wm8350, int irq, void *data) +static irqreturn_t pmic_uv_handler(int irq, void *data) { struct regulator_dev *rdev = (struct regulator_dev *)data; + struct wm8350 *wm8350 = rdev_get_drvdata(rdev); mutex_lock(&rdev->mutex); if (irq == WM8350_IRQ_CS1 || irq == WM8350_IRQ_CS2) @@ -1344,6 +1345,8 @@ static void pmic_uv_handler(struct wm8350 *wm8350, int irq, void *data) REGULATOR_EVENT_UNDER_VOLTAGE, wm8350); mutex_unlock(&rdev->mutex); + + return IRQ_HANDLED; } static int wm8350_regulator_probe(struct platform_device *pdev) @@ -1388,7 +1391,7 @@ static int wm8350_regulator_probe(struct platform_device *pdev) /* register regulator IRQ */ ret = wm8350_register_irq(wm8350, wm8350_reg[pdev->id].irq, - pmic_uv_handler, rdev); + pmic_uv_handler, 0, "UV", rdev); if (ret < 0) { regulator_unregister(rdev); dev_err(&pdev->dev, "failed to register regulator %s IRQ\n", diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c index c91edc572eb6..56e56e552813 100644 --- a/drivers/rtc/rtc-wm8350.c +++ b/drivers/rtc/rtc-wm8350.c @@ -315,9 +315,9 @@ static int wm8350_rtc_update_irq_enable(struct device *dev, return 0; } -static void wm8350_rtc_alarm_handler(struct wm8350 *wm8350, int irq, - void *data) +static irqreturn_t wm8350_rtc_alarm_handler(int irq, void *data) { + struct wm8350 *wm8350 = data; struct rtc_device *rtc = wm8350->rtc.rtc; int ret; @@ -330,14 +330,18 @@ static void wm8350_rtc_alarm_handler(struct wm8350 *wm8350, int irq, dev_err(&(wm8350->rtc.pdev->dev), "Failed to disable alarm: %d\n", ret); } + + return IRQ_HANDLED; } -static void wm8350_rtc_update_handler(struct wm8350 *wm8350, int irq, - void *data) +static irqreturn_t wm8350_rtc_update_handler(int irq, void *data) { + struct wm8350 *wm8350 = data; struct rtc_device *rtc = wm8350->rtc.rtc; rtc_update_irq(rtc, 1, RTC_IRQF | RTC_UF); + + return IRQ_HANDLED; } static const struct rtc_class_ops wm8350_rtc_ops = { @@ -459,10 +463,12 @@ static int 
wm8350_rtc_probe(struct platform_device *pdev) wm8350_mask_irq(wm8350, WM8350_IRQ_RTC_PER); wm8350_register_irq(wm8350, WM8350_IRQ_RTC_SEC, - wm8350_rtc_update_handler, NULL); + wm8350_rtc_update_handler, 0, + "RTC Seconds", wm8350); wm8350_register_irq(wm8350, WM8350_IRQ_RTC_ALM, - wm8350_rtc_alarm_handler, NULL); + wm8350_rtc_alarm_handler, 0, + "RTC Alarm", wm8350); wm8350_unmask_irq(wm8350, WM8350_IRQ_RTC_ALM); return 0; diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h index ffce508a9109..43868899bf49 100644 --- a/include/linux/mfd/wm8350/core.h +++ b/include/linux/mfd/wm8350/core.h @@ -15,7 +15,7 @@ #include #include -#include +#include #include #include @@ -601,7 +601,7 @@ extern const u16 wm8352_mode3_defaults[]; struct wm8350; struct wm8350_irq { - void (*handler) (struct wm8350 *, int, void *); + irq_handler_t handler; void *data; }; @@ -678,8 +678,8 @@ int wm8350_block_write(struct wm8350 *wm8350, int reg, int size, u16 *src); * WM8350 internal interrupts */ int wm8350_register_irq(struct wm8350 *wm8350, int irq, - void (*handler) (struct wm8350 *, int, void *), - void *data); + irq_handler_t handler, unsigned long flags, + const char *name, void *data); int wm8350_free_irq(struct wm8350 *wm8350, int irq); int wm8350_mask_irq(struct wm8350 *wm8350, int irq); int wm8350_unmask_irq(struct wm8350 *wm8350, int irq); diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c index f82125d9e85a..17a327d67fd5 100644 --- a/sound/soc/codecs/wm8350.c +++ b/sound/soc/codecs/wm8350.c @@ -1340,9 +1340,10 @@ static int wm8350_resume(struct platform_device *pdev) return 0; } -static void wm8350_hp_jack_handler(struct wm8350 *wm8350, int irq, void *data) +static irqreturn_t wm8350_hp_jack_handler(int irq, void *data) { struct wm8350_data *priv = data; + struct wm8350 *wm8350 = priv->codec.control_data; u16 reg; int report; int mask; @@ -1365,7 +1366,7 @@ static void wm8350_hp_jack_handler(struct wm8350 *wm8350, int irq, void *data) if (!jack->jack) { dev_warn(wm8350->dev, "Jack interrupt called with no jack\n"); - return; + return IRQ_NONE; } /* Debounce */ @@ -1378,6 +1379,8 @@ static void wm8350_hp_jack_handler(struct wm8350 *wm8350, int irq, void *data) report = 0; snd_soc_jack_report(jack->jack, report, jack->report); + + return IRQ_HANDLED; } /** @@ -1421,7 +1424,7 @@ int wm8350_hp_jack_detect(struct snd_soc_codec *codec, enum wm8350_jack which, wm8350_set_bits(wm8350, WM8350_JACK_DETECT, ena); /* Sync status */ - wm8350_hp_jack_handler(wm8350, irq, priv); + wm8350_hp_jack_handler(irq, priv); wm8350_unmask_irq(wm8350, irq); @@ -1485,9 +1488,11 @@ static int wm8350_probe(struct platform_device *pdev) wm8350_mask_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_L); wm8350_mask_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_R); wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_L, - wm8350_hp_jack_handler, priv); + wm8350_hp_jack_handler, 0, "Left jack detect", + priv); wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_R, - wm8350_hp_jack_handler, priv); + wm8350_hp_jack_handler, 0, "Right jack detect", + priv); ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1); if (ret < 0) { -- cgit v1.2.3 From 1920a61e208fac73d1a30a7cf4005701802fe69f Mon Sep 17 00:00:00 2001 From: Ilkka Koskinen Date: Tue, 10 Nov 2009 17:26:15 +0200 Subject: mfd: Initial support for twl5031 TWL5031 introduces two new interrupts in PIH. Moreover, BCI has changed remarkably and, thus, it's disabled when TWL5031 is in use. 
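By way of illustration, a hypothetical board registration that is not part of the patch: the variant is selected purely by the I2C device name, which twl4030_probe() now hands to twl_init_chip_irq() so that the twl5031 SIH layout (sih_modules_twl5031, added below) can be selected. BOARD_TWL_IRQ and board_twl_data are placeholders for the board's PMIC interrupt line and platform data.

#include <linux/i2c.h>
#include <linux/i2c/twl4030.h>

static struct i2c_board_info __initdata board_pmic_info[] = {
	{
		I2C_BOARD_INFO("twl5031", 0x48),	/* matches the new twl4030_ids[] entry */
		.irq		= BOARD_TWL_IRQ,	/* placeholder */
		.platform_data	= &board_twl_data,
	},
};
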
Signed-off-by: Ilkka Koskinen Signed-off-by: Samuel Ortiz --- drivers/mfd/twl4030-core.c | 13 ++++- drivers/mfd/twl4030-irq.c | 131 ++++++++++++++++++++++++++++++++++++++++++-- include/linux/i2c/twl4030.h | 47 ++++++++++++++-- 3 files changed, 180 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/drivers/mfd/twl4030-core.c b/drivers/mfd/twl4030-core.c index 90a38e43c313..334c86fccede 100644 --- a/drivers/mfd/twl4030-core.c +++ b/drivers/mfd/twl4030-core.c @@ -158,6 +158,10 @@ #define TWL4030_BASEADD_PWMB 0x00F1 #define TWL4030_BASEADD_KEYPAD 0x00D2 +#define TWL5031_BASEADD_ACCESSORY 0x0074 /* Replaces Main Charge */ +#define TWL5031_BASEADD_INTERRUPTS 0x00B9 /* Different than TWL4030's + one */ + /* subchip/slave 3 - POWER ID */ #define TWL4030_BASEADD_BACKUP 0x0014 #define TWL4030_BASEADD_INT 0x002E @@ -189,6 +193,7 @@ /* chip-specific feature flags, for i2c_device_id.driver_data */ #define TWL4030_VAUX2 BIT(0) /* pre-5030 voltage ranges */ #define TPS_SUBSET BIT(1) /* tps659[23]0 have fewer LDOs */ +#define TWL5031 BIT(2) /* twl5031 has different registers */ /*----------------------------------------------------------------------*/ @@ -241,6 +246,8 @@ static struct twl4030mapping twl4030_map[TWL4030_MODULE_LAST + 1] = { { 2, TWL4030_BASEADD_PWM1 }, { 2, TWL4030_BASEADD_PWMA }, { 2, TWL4030_BASEADD_PWMB }, + { 2, TWL5031_BASEADD_ACCESSORY }, + { 2, TWL5031_BASEADD_INTERRUPTS }, { 3, TWL4030_BASEADD_BACKUP }, { 3, TWL4030_BASEADD_INT }, @@ -488,7 +495,8 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features) { struct device *child; - if (twl_has_bci() && pdata->bci && !(features & TPS_SUBSET)) { + if (twl_has_bci() && pdata->bci && + !(features & (TPS_SUBSET | TWL5031))) { child = add_child(3, "twl4030_bci", pdata->bci, sizeof(*pdata->bci), false, @@ -760,6 +768,7 @@ static void clocks_init(struct device *dev, int twl_init_irq(int irq_num, unsigned irq_base, unsigned irq_end); int twl_exit_irq(void); +int twl_init_chip_irq(const char *chip); static int twl4030_remove(struct i2c_client *client) { @@ -835,6 +844,7 @@ twl4030_probe(struct i2c_client *client, const struct i2c_device_id *id) if (client->irq && pdata->irq_base && pdata->irq_end > pdata->irq_base) { + twl_init_chip_irq(id->name); status = twl_init_irq(client->irq, pdata->irq_base, pdata->irq_end); if (status < 0) goto fail; @@ -850,6 +860,7 @@ fail: static const struct i2c_device_id twl4030_ids[] = { { "twl4030", TWL4030_VAUX2 }, /* "Triton 2" */ { "twl5030", 0 }, /* T2 updated */ + { "twl5031", TWL5031 }, /* TWL5030 updated */ { "tps65950", 0 }, /* catalog version of twl5030 */ { "tps65930", TPS_SUBSET }, /* fewer LDOs and DACs; no charger */ { "tps65920", TPS_SUBSET }, /* fewer LDOs; no codec or charger */ diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c index fb194fe244c1..0b733028828f 100644 --- a/drivers/mfd/twl4030-irq.c +++ b/drivers/mfd/twl4030-irq.c @@ -74,6 +74,8 @@ struct sih { u8 edr_offset; u8 bytes_edr; /* bytelen of EDR */ + u8 irq_lines; /* number of supported irq lines */ + /* SIR ignored -- set interrupt, for testing only */ struct irq_data { u8 isr_offset; @@ -82,6 +84,9 @@ struct sih { /* + 2 bytes padding */ }; +static const struct sih *sih_modules; +static int nr_sih_modules; + #define SIH_INITIALIZER(modname, nbits) \ .module = TWL4030_MODULE_ ## modname, \ .control_offset = TWL4030_ ## modname ## _SIH_CTRL, \ @@ -89,6 +94,7 @@ struct sih { .bytes_ixr = DIV_ROUND_UP(nbits, 8), \ .edr_offset = TWL4030_ ## modname ## _EDR, \ .bytes_edr = 
DIV_ROUND_UP((2*(nbits)), 8), \ + .irq_lines = 2, \ .mask = { { \ .isr_offset = TWL4030_ ## modname ## _ISR1, \ .imr_offset = TWL4030_ ## modname ## _IMR1, \ @@ -107,7 +113,8 @@ struct sih { /* Order in this table matches order in PIH_ISR. That is, * BIT(n) in PIH_ISR is sih_modules[n]. */ -static const struct sih sih_modules[6] = { +/* sih_modules_twl4030 is used both in twl4030 and twl5030 */ +static const struct sih sih_modules_twl4030[6] = { [0] = { .name = "gpio", .module = TWL4030_MODULE_GPIO, @@ -118,6 +125,7 @@ static const struct sih sih_modules[6] = { /* Note: *all* of these IRQs default to no-trigger */ .edr_offset = REG_GPIO_EDR1, .bytes_edr = 5, + .irq_lines = 2, .mask = { { .isr_offset = REG_GPIO_ISR1A, .imr_offset = REG_GPIO_IMR1A, @@ -140,6 +148,7 @@ static const struct sih sih_modules[6] = { .edr_offset = TWL4030_INTERRUPTS_BCIEDR1, /* Note: most of these IRQs default to no-trigger */ .bytes_edr = 3, + .irq_lines = 2, .mask = { { .isr_offset = TWL4030_INTERRUPTS_BCIISR1A, .imr_offset = TWL4030_INTERRUPTS_BCIIMR1A, @@ -164,6 +173,99 @@ static const struct sih sih_modules[6] = { /* there are no SIH modules #6 or #7 ... */ }; +static const struct sih sih_modules_twl5031[8] = { + [0] = { + .name = "gpio", + .module = TWL4030_MODULE_GPIO, + .control_offset = REG_GPIO_SIH_CTRL, + .set_cor = true, + .bits = TWL4030_GPIO_MAX, + .bytes_ixr = 3, + /* Note: *all* of these IRQs default to no-trigger */ + .edr_offset = REG_GPIO_EDR1, + .bytes_edr = 5, + .irq_lines = 2, + .mask = { { + .isr_offset = REG_GPIO_ISR1A, + .imr_offset = REG_GPIO_IMR1A, + }, { + .isr_offset = REG_GPIO_ISR1B, + .imr_offset = REG_GPIO_IMR1B, + }, }, + }, + [1] = { + .name = "keypad", + .set_cor = true, + SIH_INITIALIZER(KEYPAD_KEYP, 4) + }, + [2] = { + .name = "bci", + .module = TWL5031_MODULE_INTERRUPTS, + .control_offset = TWL5031_INTERRUPTS_BCISIHCTRL, + .bits = 7, + .bytes_ixr = 1, + .edr_offset = TWL5031_INTERRUPTS_BCIEDR1, + /* Note: most of these IRQs default to no-trigger */ + .bytes_edr = 2, + .irq_lines = 2, + .mask = { { + .isr_offset = TWL5031_INTERRUPTS_BCIISR1, + .imr_offset = TWL5031_INTERRUPTS_BCIIMR1, + }, { + .isr_offset = TWL5031_INTERRUPTS_BCIISR2, + .imr_offset = TWL5031_INTERRUPTS_BCIIMR2, + }, }, + }, + [3] = { + .name = "madc", + SIH_INITIALIZER(MADC, 4) + }, + [4] = { + /* USB doesn't use the same SIH organization */ + .name = "usb", + }, + [5] = { + .name = "power", + .set_cor = true, + SIH_INITIALIZER(INT_PWR, 8) + }, + [6] = { + /* + * ACI doesn't use the same SIH organization. 
+ * For example, it supports only one interrupt line + */ + .name = "aci", + .module = TWL5031_MODULE_ACCESSORY, + .bits = 9, + .bytes_ixr = 2, + .irq_lines = 1, + .mask = { { + .isr_offset = TWL5031_ACIIDR_LSB, + .imr_offset = TWL5031_ACIIMR_LSB, + }, }, + + }, + [7] = { + /* Accessory */ + .name = "acc", + .module = TWL5031_MODULE_ACCESSORY, + .control_offset = TWL5031_ACCSIHCTRL, + .bits = 2, + .bytes_ixr = 1, + .edr_offset = TWL5031_ACCEDR1, + /* Note: most of these IRQs default to no-trigger */ + .bytes_edr = 1, + .irq_lines = 2, + .mask = { { + .isr_offset = TWL5031_ACCISR1, + .imr_offset = TWL5031_ACCIMR1, + }, { + .isr_offset = TWL5031_ACCISR2, + .imr_offset = TWL5031_ACCIMR2, + }, }, + }, +}; + #undef TWL4030_MODULE_KEYPAD_KEYP #undef TWL4030_MODULE_INT_PWR #undef TWL4030_INT_PWR_EDR @@ -284,12 +386,16 @@ static int twl4030_init_sih_modules(unsigned line) /* disable all interrupts on our line */ memset(buf, 0xff, sizeof buf); sih = sih_modules; - for (i = 0; i < ARRAY_SIZE(sih_modules); i++, sih++) { + for (i = 0; i < nr_sih_modules; i++, sih++) { /* skip USB -- it's funky */ if (!sih->bytes_ixr) continue; + /* Not all the SIH modules support multiple interrupt lines */ + if (sih->irq_lines <= line) + continue; + status = twl4030_i2c_write(sih->module, buf, sih->mask[line].imr_offset, sih->bytes_ixr); if (status < 0) @@ -314,7 +420,7 @@ static int twl4030_init_sih_modules(unsigned line) } sih = sih_modules; - for (i = 0; i < ARRAY_SIZE(sih_modules); i++, sih++) { + for (i = 0; i < nr_sih_modules; i++, sih++) { u8 rxbuf[4]; int j; @@ -322,6 +428,10 @@ static int twl4030_init_sih_modules(unsigned line) if (!sih->bytes_ixr) continue; + /* Not all the SIH modules support multiple interrupt lines */ + if (sih->irq_lines <= line) + continue; + /* Clear pending interrupt status. Either the read was * enough, or we need to write those bits. Repeat, in * case an IRQ is pending (PENDDIS=0) ... 
that's not @@ -611,7 +721,7 @@ int twl4030_sih_setup(int module) /* only support modules with standard clear-on-read for now */ for (sih_mod = 0, sih = sih_modules; - sih_mod < ARRAY_SIZE(sih_modules); + sih_mod < nr_sih_modules; sih_mod++, sih++) { if (sih->module == module && sih->set_cor) { if (!WARN((irq_base + sih->bits) > NR_IRQS, @@ -756,3 +866,16 @@ int twl_exit_irq(void) } return 0; } + +int twl_init_chip_irq(const char *chip) +{ + if (!strcmp(chip, "twl5031")) { + sih_modules = sih_modules_twl5031; + nr_sih_modules = ARRAY_SIZE(sih_modules_twl5031); + } else { + sih_modules = sih_modules_twl4030; + nr_sih_modules = ARRAY_SIZE(sih_modules_twl4030); + } + + return 0; +} diff --git a/include/linux/i2c/twl4030.h b/include/linux/i2c/twl4030.h index 0c84cfa059e9..efa62eb497b8 100644 --- a/include/linux/i2c/twl4030.h +++ b/include/linux/i2c/twl4030.h @@ -61,13 +61,16 @@ #define TWL4030_MODULE_PWMA 0x0E #define TWL4030_MODULE_PWMB 0x0F +#define TWL5031_MODULE_ACCESSORY 0x10 +#define TWL5031_MODULE_INTERRUPTS 0x11 + /* Slave 3 (i2c address 0x4b) */ -#define TWL4030_MODULE_BACKUP 0x10 -#define TWL4030_MODULE_INT 0x11 -#define TWL4030_MODULE_PM_MASTER 0x12 -#define TWL4030_MODULE_PM_RECEIVER 0x13 -#define TWL4030_MODULE_RTC 0x14 -#define TWL4030_MODULE_SECURED_REG 0x15 +#define TWL4030_MODULE_BACKUP 0x12 +#define TWL4030_MODULE_INT 0x13 +#define TWL4030_MODULE_PM_MASTER 0x14 +#define TWL4030_MODULE_PM_RECEIVER 0x15 +#define TWL4030_MODULE_RTC 0x16 +#define TWL4030_MODULE_SECURED_REG 0x17 /* * Read and write single 8-bit registers @@ -221,6 +224,38 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); /*----------------------------------------------------------------------*/ +/* + * Accessory Interrupts + */ +#define TWL5031_ACIIMR_LSB 0x05 +#define TWL5031_ACIIMR_MSB 0x06 +#define TWL5031_ACIIDR_LSB 0x07 +#define TWL5031_ACIIDR_MSB 0x08 +#define TWL5031_ACCISR1 0x0F +#define TWL5031_ACCIMR1 0x10 +#define TWL5031_ACCISR2 0x11 +#define TWL5031_ACCIMR2 0x12 +#define TWL5031_ACCSIR 0x13 +#define TWL5031_ACCEDR1 0x14 +#define TWL5031_ACCSIHCTRL 0x15 + +/*----------------------------------------------------------------------*/ + +/* + * Battery Charger Controller + */ + +#define TWL5031_INTERRUPTS_BCIISR1 0x0 +#define TWL5031_INTERRUPTS_BCIIMR1 0x1 +#define TWL5031_INTERRUPTS_BCIISR2 0x2 +#define TWL5031_INTERRUPTS_BCIIMR2 0x3 +#define TWL5031_INTERRUPTS_BCISIR 0x4 +#define TWL5031_INTERRUPTS_BCIEDR1 0x5 +#define TWL5031_INTERRUPTS_BCIEDR2 0x6 +#define TWL5031_INTERRUPTS_BCISIHCTRL 0x7 + +/*----------------------------------------------------------------------*/ + /* Power bus message definitions */ /* The TWL4030/5030 splits its power-management resources (the various -- cgit v1.2.3 From 5fb4d38b19d95a5f980f0a10adba798f5b92128c Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 11 Nov 2009 16:10:22 +0000 Subject: mfd: Move WM831x to generic IRQ Replace the wm831x-local IRQ infrastructure with genirq, allowing access to the diagnostic infrastructure of genirq and allowing us to implement interrupt support for the GPIOs. The switchover is done within the wm831x specific IRQ API, further patches will convert the individual drivers to use genirq directly. 
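For illustration only (not part of this patch), a wm831x sub-driver keeps working through the compatibility wrapper, which is now a thin shim over request_threaded_irq(); its IRQ number comes from the platform resources that mfd_add_devices() now offsets by wm831x->irq_base. The function names and the "example" label below are assumptions, not code from this series.

/* Illustrative sub-driver snippet only; names are assumed. */
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/mfd/wm831x/core.h>

static irqreturn_t example_wm831x_irq(int irq, void *data)
{
	struct wm831x *wm831x = data;

	dev_dbg(wm831x->dev, "IRQ %d fired\n", irq);
	return IRQ_HANDLED;
}

static int example_wm831x_probe(struct platform_device *pdev)
{
	struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
	int irq = platform_get_irq(pdev, 0);
	int ret;

	if (irq < 0)
		return irq;

	/* Runs as a nested thread under the chip's primary IRQ thread. */
	ret = wm831x_request_irq(wm831x, irq, example_wm831x_irq, 0,
				 "example", wm831x);
	if (ret != 0)
		dev_err(&pdev->dev, "Failed to request IRQ %d: %d\n", irq, ret);

	return ret;
}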
Signed-off-by: Mark Brown Signed-off-by: Samuel Ortiz --- drivers/mfd/wm831x-core.c | 9 +- drivers/mfd/wm831x-irq.c | 209 ++++++++++++++++----------------------- include/linux/mfd/wm831x/core.h | 40 +++++--- include/linux/mfd/wm831x/pdata.h | 1 + 4 files changed, 122 insertions(+), 137 deletions(-) (limited to 'include') diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c index 163029f06185..223a90c7492f 100644 --- a/drivers/mfd/wm831x-core.c +++ b/drivers/mfd/wm831x-core.c @@ -1504,19 +1504,19 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq) case WM8310: ret = mfd_add_devices(wm831x->dev, -1, wm8310_devs, ARRAY_SIZE(wm8310_devs), - NULL, 0); + NULL, wm831x->irq_base); break; case WM8311: ret = mfd_add_devices(wm831x->dev, -1, wm8311_devs, ARRAY_SIZE(wm8311_devs), - NULL, 0); + NULL, wm831x->irq_base); break; case WM8312: ret = mfd_add_devices(wm831x->dev, -1, wm8312_devs, ARRAY_SIZE(wm8312_devs), - NULL, 0); + NULL, wm831x->irq_base); break; case WM8320: @@ -1538,7 +1538,8 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq) if (pdata && pdata->backlight) { /* Treat errors as non-critical */ ret = mfd_add_devices(wm831x->dev, -1, backlight_devs, - ARRAY_SIZE(backlight_devs), NULL, 0); + ARRAY_SIZE(backlight_devs), NULL, + wm831x->irq_base); if (ret < 0) dev_err(wm831x->dev, "Failed to add backlight: %d\n", ret); diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c index ac056ea6b66e..301327697117 100644 --- a/drivers/mfd/wm831x-irq.c +++ b/drivers/mfd/wm831x-irq.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -339,110 +340,71 @@ static inline int irq_data_to_mask_reg(struct wm831x_irq_data *irq_data) return WM831X_INTERRUPT_STATUS_1_MASK - 1 + irq_data->reg; } -static void __wm831x_enable_irq(struct wm831x *wm831x, int irq) +static inline struct wm831x_irq_data *irq_to_wm831x_irq(struct wm831x *wm831x, + int irq) { - struct wm831x_irq_data *irq_data = &wm831x_irqs[irq]; - - wm831x->irq_masks[irq_data->reg - 1] &= ~irq_data->mask; - wm831x_reg_write(wm831x, irq_data_to_mask_reg(irq_data), - wm831x->irq_masks[irq_data->reg - 1]); + return &wm831x_irqs[irq - wm831x->irq_base]; } -void wm831x_enable_irq(struct wm831x *wm831x, int irq) +static void wm831x_irq_lock(unsigned int irq) { - mutex_lock(&wm831x->irq_lock); - __wm831x_enable_irq(wm831x, irq); - mutex_unlock(&wm831x->irq_lock); -} -EXPORT_SYMBOL_GPL(wm831x_enable_irq); + struct wm831x *wm831x = get_irq_chip_data(irq); -static void __wm831x_disable_irq(struct wm831x *wm831x, int irq) -{ - struct wm831x_irq_data *irq_data = &wm831x_irqs[irq]; - - wm831x->irq_masks[irq_data->reg - 1] |= irq_data->mask; - wm831x_reg_write(wm831x, irq_data_to_mask_reg(irq_data), - wm831x->irq_masks[irq_data->reg - 1]); -} - -void wm831x_disable_irq(struct wm831x *wm831x, int irq) -{ mutex_lock(&wm831x->irq_lock); - __wm831x_disable_irq(wm831x, irq); - mutex_unlock(&wm831x->irq_lock); } -EXPORT_SYMBOL_GPL(wm831x_disable_irq); -int wm831x_request_irq(struct wm831x *wm831x, - unsigned int irq, irq_handler_t handler, - unsigned long flags, const char *name, - void *dev) +static void wm831x_irq_sync_unlock(unsigned int irq) { - int ret = 0; - - if (irq < 0 || irq >= WM831X_NUM_IRQS) - return -EINVAL; - - mutex_lock(&wm831x->irq_lock); - - if (wm831x_irqs[irq].handler) { - dev_err(wm831x->dev, "Already have handler for IRQ %d\n", irq); - ret = -EINVAL; - goto out; + struct wm831x *wm831x = get_irq_chip_data(irq); + int i; + + for (i = 0; i < 
ARRAY_SIZE(wm831x->irq_masks_cur); i++) { + /* If there's been a change in the mask write it back + * to the hardware. */ + if (wm831x->irq_masks_cur[i] != wm831x->irq_masks_cache[i]) { + wm831x->irq_masks_cache[i] = wm831x->irq_masks_cur[i]; + wm831x_reg_write(wm831x, + WM831X_INTERRUPT_STATUS_1_MASK + i, + wm831x->irq_masks_cur[i]); + } } - wm831x_irqs[irq].handler = handler; - wm831x_irqs[irq].handler_data = dev; - - __wm831x_enable_irq(wm831x, irq); - -out: mutex_unlock(&wm831x->irq_lock); - - return ret; } -EXPORT_SYMBOL_GPL(wm831x_request_irq); -void wm831x_free_irq(struct wm831x *wm831x, unsigned int irq, void *data) +static void wm831x_irq_unmask(unsigned int irq) { - if (irq < 0 || irq >= WM831X_NUM_IRQS) - return; - - mutex_lock(&wm831x->irq_lock); + struct wm831x *wm831x = get_irq_chip_data(irq); + struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x, irq); - wm831x_irqs[irq].handler = NULL; - wm831x_irqs[irq].handler_data = NULL; - - __wm831x_disable_irq(wm831x, irq); - - mutex_unlock(&wm831x->irq_lock); + wm831x->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask; } -EXPORT_SYMBOL_GPL(wm831x_free_irq); - -static void wm831x_handle_irq(struct wm831x *wm831x, int irq, int status) +static void wm831x_irq_mask(unsigned int irq) { - struct wm831x_irq_data *irq_data = &wm831x_irqs[irq]; - - if (irq_data->handler) { - irq_data->handler(irq, irq_data->handler_data); - wm831x_reg_write(wm831x, irq_data_to_status_reg(irq_data), - irq_data->mask); - } else { - dev_err(wm831x->dev, "Unhandled IRQ %d, masking\n", irq); - __wm831x_disable_irq(wm831x, irq); - } + struct wm831x *wm831x = get_irq_chip_data(irq); + struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x, irq); + + wm831x->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask; } -/* Main interrupt handling occurs in a workqueue since we need - * interrupts enabled to interact with the chip. */ -static void wm831x_irq_worker(struct work_struct *work) +static struct irq_chip wm831x_irq_chip = { + .name = "wm831x", + .bus_lock = wm831x_irq_lock, + .bus_sync_unlock = wm831x_irq_sync_unlock, + .mask = wm831x_irq_mask, + .unmask = wm831x_irq_unmask, +}; + +/* The processing of the primary interrupt occurs in a thread so that + * we can interact with the device over I2C or SPI. */ +static irqreturn_t wm831x_irq_thread(int irq, void *data) { - struct wm831x *wm831x = container_of(work, struct wm831x, irq_work); + struct wm831x *wm831x = data; unsigned int i; int primary; - int status_regs[5]; - int read[5] = { 0 }; + int status_regs[WM831X_NUM_IRQ_REGS] = { 0 }; + int read[WM831X_NUM_IRQ_REGS] = { 0 }; int *status; primary = wm831x_reg_read(wm831x, WM831X_SYSTEM_INTERRUPTS); @@ -452,8 +414,6 @@ static void wm831x_irq_worker(struct work_struct *work) goto out; } - mutex_lock(&wm831x->irq_lock); - for (i = 0; i < ARRAY_SIZE(wm831x_irqs); i++) { int offset = wm831x_irqs[i].reg - 1; @@ -471,41 +431,34 @@ static void wm831x_irq_worker(struct work_struct *work) dev_err(wm831x->dev, "Failed to read IRQ status: %d\n", *status); - goto out_lock; + goto out; } - /* Mask out the disabled IRQs */ - *status &= ~wm831x->irq_masks[offset]; read[offset] = 1; } - if (*status & wm831x_irqs[i].mask) - wm831x_handle_irq(wm831x, i, *status); + /* Report it if it isn't masked, or forget the status. 
*/ + if ((*status & ~wm831x->irq_masks_cur[offset]) + & wm831x_irqs[i].mask) + handle_nested_irq(wm831x->irq_base + i); + else + *status &= ~wm831x_irqs[i].mask; } -out_lock: - mutex_unlock(&wm831x->irq_lock); out: - enable_irq(wm831x->irq); -} - - -static irqreturn_t wm831x_cpu_irq(int irq, void *data) -{ - struct wm831x *wm831x = data; - - /* Shut the interrupt to the CPU up and schedule the actual - * handler; we can't check that the IRQ is asserted. */ - disable_irq_nosync(irq); - - queue_work(wm831x->irq_wq, &wm831x->irq_work); + for (i = 0; i < ARRAY_SIZE(status_regs); i++) { + if (status_regs[i]) + wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1 + i, + status_regs[i]); + } return IRQ_HANDLED; } int wm831x_irq_init(struct wm831x *wm831x, int irq) { - int i, ret; + struct wm831x_pdata *pdata = wm831x->dev->platform_data; + int i, cur_irq, ret; mutex_init(&wm831x->irq_lock); @@ -515,41 +468,53 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq) return 0; } - - wm831x->irq_wq = create_singlethread_workqueue("wm831x-irq"); - if (!wm831x->irq_wq) { - dev_err(wm831x->dev, "Failed to allocate IRQ worker\n"); - return -ESRCH; + if (!pdata || !pdata->irq_base) { + dev_err(wm831x->dev, + "No interrupt base specified, no interrupts\n"); + return 0; } wm831x->irq = irq; - INIT_WORK(&wm831x->irq_work, wm831x_irq_worker); + wm831x->irq_base = pdata->irq_base; /* Mask the individual interrupt sources */ - for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks); i++) { - wm831x->irq_masks[i] = 0xffff; + for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks_cur); i++) { + wm831x->irq_masks_cur[i] = 0xffff; + wm831x->irq_masks_cache[i] = 0xffff; wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1_MASK + i, 0xffff); } - /* Enable top level interrupts, we mask at secondary level */ - wm831x_reg_write(wm831x, WM831X_SYSTEM_INTERRUPTS_MASK, 0); + /* Register them with genirq */ + for (cur_irq = wm831x->irq_base; + cur_irq < ARRAY_SIZE(wm831x_irqs) + wm831x->irq_base; + cur_irq++) { + set_irq_chip_data(cur_irq, wm831x); + set_irq_chip_and_handler(cur_irq, &wm831x_irq_chip, + handle_edge_irq); + set_irq_nested_thread(cur_irq, 1); + + /* ARM needs us to explicitly flag the IRQ as valid + * and will set them noprobe when we do so. */ +#ifdef CONFIG_ARM + set_irq_flags(cur_irq, IRQF_VALID); +#else + set_irq_noprobe(cur_irq); +#endif + } - /* We're good to go. We set IRQF_SHARED since there's a - * chance the driver will interoperate with another driver but - * the need to disable the IRQ while handing via I2C/SPI means - * that this may break and performance will be impacted. If - * this does happen it's a hardware design issue and the only - * other alternative would be polling. - */ - ret = request_irq(irq, wm831x_cpu_irq, IRQF_TRIGGER_LOW | IRQF_SHARED, - "wm831x", wm831x); + ret = request_threaded_irq(irq, NULL, wm831x_irq_thread, + IRQF_TRIGGER_LOW | IRQF_ONESHOT, + "wm831x", wm831x); if (ret != 0) { dev_err(wm831x->dev, "Failed to request IRQ %d: %d\n", irq, ret); return ret; } + /* Enable top level interrupts, we mask at secondary level */ + wm831x_reg_write(wm831x, WM831X_SYSTEM_INTERRUPTS_MASK, 0); + return 0; } diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h index d01d293a6b25..5184b79c700b 100644 --- a/include/linux/mfd/wm831x/core.h +++ b/include/linux/mfd/wm831x/core.h @@ -16,7 +16,6 @@ #define __MFD_WM831X_CORE_H__ #include -#include /* * Register values. 
@@ -236,6 +235,8 @@ struct regulator_dev; +#define WM831X_NUM_IRQ_REGS 5 + struct wm831x { struct mutex io_lock; @@ -249,10 +250,9 @@ struct wm831x { int irq; /* Our chip IRQ */ struct mutex irq_lock; - struct workqueue_struct *irq_wq; - struct work_struct irq_work; unsigned int irq_base; - int irq_masks[5]; + int irq_masks_cur[WM831X_NUM_IRQ_REGS]; /* Currently active value */ + int irq_masks_cache[WM831X_NUM_IRQ_REGS]; /* Cached hardware value */ int num_gpio; @@ -281,12 +281,30 @@ int wm831x_bulk_read(struct wm831x *wm831x, unsigned short reg, int wm831x_irq_init(struct wm831x *wm831x, int irq); void wm831x_irq_exit(struct wm831x *wm831x); -int __must_check wm831x_request_irq(struct wm831x *wm831x, - unsigned int irq, irq_handler_t handler, - unsigned long flags, const char *name, - void *dev); -void wm831x_free_irq(struct wm831x *wm831x, unsigned int, void *); -void wm831x_disable_irq(struct wm831x *wm831x, int irq); -void wm831x_enable_irq(struct wm831x *wm831x, int irq); +static inline int __must_check wm831x_request_irq(struct wm831x *wm831x, + unsigned int irq, + irq_handler_t handler, + unsigned long flags, + const char *name, + void *dev) +{ + return request_threaded_irq(irq, NULL, handler, flags, name, dev); +} + +static inline void wm831x_free_irq(struct wm831x *wm831x, + unsigned int irq, void *dev) +{ + free_irq(irq, dev); +} + +static inline void wm831x_disable_irq(struct wm831x *wm831x, int irq) +{ + disable_irq(irq); +} + +static inline void wm831x_enable_irq(struct wm831x *wm831x, int irq) +{ + enable_irq(irq); +} #endif diff --git a/include/linux/mfd/wm831x/pdata.h b/include/linux/mfd/wm831x/pdata.h index 90d820260aad..415c228743d5 100644 --- a/include/linux/mfd/wm831x/pdata.h +++ b/include/linux/mfd/wm831x/pdata.h @@ -91,6 +91,7 @@ struct wm831x_pdata { /** Called after subdevices are set up */ int (*post_init)(struct wm831x *wm831x); + int irq_base; int gpio_base; struct wm831x_backlight_pdata *backlight; struct wm831x_backup_pdata *backup; -- cgit v1.2.3 From 9e2726776d45b1e383625b3ce4f8b511456e09ad Mon Sep 17 00:00:00 2001 From: Uwe Kleine-König Date: Mon, 30 Nov 2009 00:53:17 +0100 Subject: mfd: Near complete mc13783 rewrite MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This fixes several things while still providing the old API: - simplify and fix locking - better error handling - don't ack all irqs making it impossible to detect a reset of the rtc - use a timeout variant to wait for completion of ADC conversion - provide platform-data to regulator subdevice (This allows making struct mc13783 opaque for other drivers after the regulator driver is updated to use its platform_data.) - expose all interrupts - use threaded irq After all users in mainline are converted to the new API, some things (e.g. mc13783-private.h) can go away. 
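As a usage illustration only (not part of this patch), a client of the reworked API takes the chip lock around IRQ registration and acknowledges the event from its own handler; the 1Hz RTC tick and the "example" names below are chosen purely for the sketch.

/* Illustrative client snippet only; everything except the mc13783 API is assumed. */
#include <linux/interrupt.h>
#include <linux/mfd/mc13783.h>

static irqreturn_t example_1hz_handler(int irq, void *data)
{
	struct mc13783 *mc13783 = data;

	/* The core no longer acks blindly; clear the status bit ourselves. */
	mc13783_ackirq(mc13783, irq);

	return IRQ_HANDLED;
}

static int example_setup(struct mc13783 *mc13783)
{
	int ret;

	/* mc13783_irq_request() must be called with the chip lock held. */
	mc13783_lock(mc13783);
	ret = mc13783_irq_request(mc13783, MC13783_IRQ_1HZ,
				  example_1hz_handler, "example-1hz", mc13783);
	mc13783_unlock(mc13783);

	return ret;
}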
Signed-off-by: Uwe Kleine-König Cc: Sascha Hauer Cc: Mark Brown Signed-off-by: Samuel Ortiz --- drivers/mfd/mc13783-core.c | 755 ++++++++++++++++++++++++------------ include/linux/mfd/mc13783-private.h | 208 +--------- include/linux/mfd/mc13783.h | 120 ++++-- 3 files changed, 617 insertions(+), 466 deletions(-) (limited to 'include') diff --git a/drivers/mfd/mc13783-core.c b/drivers/mfd/mc13783-core.c index e354d2912ef1..dc1add0c4949 100644 --- a/drivers/mfd/mc13783-core.c +++ b/drivers/mfd/mc13783-core.c @@ -1,286 +1,549 @@ /* - * Copyright 2009 Pengutronix, Sascha Hauer - * - * This code is in parts based on wm8350-core.c and pcf50633-core.c - * - * Initial development of this code was funded by - * Phytec Messtechnik GmbH, http://www.phytec.de + * Copyright 2009 Pengutronix + * Uwe Kleine-Koenig * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. + * loosely based on an earlier driver that has + * Copyright 2009 Pengutronix, Sascha Hauer * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 as published by the + * Free Software Foundation. 
*/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include #include -#include -#include -#include +#include +#include +#include + +#define MC13783_IRQSTAT0 0 +#define MC13783_IRQSTAT0_ADCDONEI (1 << 0) +#define MC13783_IRQSTAT0_ADCBISDONEI (1 << 1) +#define MC13783_IRQSTAT0_TSI (1 << 2) +#define MC13783_IRQSTAT0_WHIGHI (1 << 3) +#define MC13783_IRQSTAT0_WLOWI (1 << 4) +#define MC13783_IRQSTAT0_CHGDETI (1 << 6) +#define MC13783_IRQSTAT0_CHGOVI (1 << 7) +#define MC13783_IRQSTAT0_CHGREVI (1 << 8) +#define MC13783_IRQSTAT0_CHGSHORTI (1 << 9) +#define MC13783_IRQSTAT0_CCCVI (1 << 10) +#define MC13783_IRQSTAT0_CHGCURRI (1 << 11) +#define MC13783_IRQSTAT0_BPONI (1 << 12) +#define MC13783_IRQSTAT0_LOBATLI (1 << 13) +#define MC13783_IRQSTAT0_LOBATHI (1 << 14) +#define MC13783_IRQSTAT0_UDPI (1 << 15) +#define MC13783_IRQSTAT0_USBI (1 << 16) +#define MC13783_IRQSTAT0_IDI (1 << 19) +#define MC13783_IRQSTAT0_SE1I (1 << 21) +#define MC13783_IRQSTAT0_CKDETI (1 << 22) +#define MC13783_IRQSTAT0_UDMI (1 << 23) + +#define MC13783_IRQMASK0 1 +#define MC13783_IRQMASK0_ADCDONEM MC13783_IRQSTAT0_ADCDONEI +#define MC13783_IRQMASK0_ADCBISDONEM MC13783_IRQSTAT0_ADCBISDONEI +#define MC13783_IRQMASK0_TSM MC13783_IRQSTAT0_TSI +#define MC13783_IRQMASK0_WHIGHM MC13783_IRQSTAT0_WHIGHI +#define MC13783_IRQMASK0_WLOWM MC13783_IRQSTAT0_WLOWI +#define MC13783_IRQMASK0_CHGDETM MC13783_IRQSTAT0_CHGDETI +#define MC13783_IRQMASK0_CHGOVM MC13783_IRQSTAT0_CHGOVI +#define MC13783_IRQMASK0_CHGREVM MC13783_IRQSTAT0_CHGREVI +#define MC13783_IRQMASK0_CHGSHORTM MC13783_IRQSTAT0_CHGSHORTI +#define MC13783_IRQMASK0_CCCVM MC13783_IRQSTAT0_CCCVI +#define MC13783_IRQMASK0_CHGCURRM MC13783_IRQSTAT0_CHGCURRI +#define MC13783_IRQMASK0_BPONM MC13783_IRQSTAT0_BPONI +#define MC13783_IRQMASK0_LOBATLM MC13783_IRQSTAT0_LOBATLI +#define MC13783_IRQMASK0_LOBATHM MC13783_IRQSTAT0_LOBATHI +#define MC13783_IRQMASK0_UDPM MC13783_IRQSTAT0_UDPI +#define MC13783_IRQMASK0_USBM MC13783_IRQSTAT0_USBI +#define MC13783_IRQMASK0_IDM MC13783_IRQSTAT0_IDI +#define MC13783_IRQMASK0_SE1M MC13783_IRQSTAT0_SE1I +#define MC13783_IRQMASK0_CKDETM MC13783_IRQSTAT0_CKDETI +#define MC13783_IRQMASK0_UDMM MC13783_IRQSTAT0_UDMI + +#define MC13783_IRQSTAT1 3 +#define MC13783_IRQSTAT1_1HZI (1 << 0) +#define MC13783_IRQSTAT1_TODAI (1 << 1) +#define MC13783_IRQSTAT1_ONOFD1I (1 << 3) +#define MC13783_IRQSTAT1_ONOFD2I (1 << 4) +#define MC13783_IRQSTAT1_ONOFD3I (1 << 5) +#define MC13783_IRQSTAT1_SYSRSTI (1 << 6) +#define MC13783_IRQSTAT1_RTCRSTI (1 << 7) +#define MC13783_IRQSTAT1_PCI (1 << 8) +#define MC13783_IRQSTAT1_WARMI (1 << 9) +#define MC13783_IRQSTAT1_MEMHLDI (1 << 10) +#define MC13783_IRQSTAT1_PWRRDYI (1 << 11) +#define MC13783_IRQSTAT1_THWARNLI (1 << 12) +#define MC13783_IRQSTAT1_THWARNHI (1 << 13) +#define MC13783_IRQSTAT1_CLKI (1 << 14) +#define MC13783_IRQSTAT1_SEMAFI (1 << 15) +#define MC13783_IRQSTAT1_MC2BI (1 << 17) +#define MC13783_IRQSTAT1_HSDETI (1 << 18) +#define MC13783_IRQSTAT1_HSLI (1 << 19) +#define MC13783_IRQSTAT1_ALSPTHI (1 << 20) +#define MC13783_IRQSTAT1_AHSSHORTI (1 << 21) + +#define MC13783_IRQMASK1 4 +#define MC13783_IRQMASK1_1HZM MC13783_IRQSTAT1_1HZI +#define MC13783_IRQMASK1_TODAM MC13783_IRQSTAT1_TODAI +#define MC13783_IRQMASK1_ONOFD1M MC13783_IRQSTAT1_ONOFD1I +#define MC13783_IRQMASK1_ONOFD2M MC13783_IRQSTAT1_ONOFD2I +#define MC13783_IRQMASK1_ONOFD3M MC13783_IRQSTAT1_ONOFD3I +#define MC13783_IRQMASK1_SYSRSTM MC13783_IRQSTAT1_SYSRSTI +#define MC13783_IRQMASK1_RTCRSTM MC13783_IRQSTAT1_RTCRSTI +#define 
MC13783_IRQMASK1_PCM MC13783_IRQSTAT1_PCI +#define MC13783_IRQMASK1_WARMM MC13783_IRQSTAT1_WARMI +#define MC13783_IRQMASK1_MEMHLDM MC13783_IRQSTAT1_MEMHLDI +#define MC13783_IRQMASK1_PWRRDYM MC13783_IRQSTAT1_PWRRDYI +#define MC13783_IRQMASK1_THWARNLM MC13783_IRQSTAT1_THWARNLI +#define MC13783_IRQMASK1_THWARNHM MC13783_IRQSTAT1_THWARNHI +#define MC13783_IRQMASK1_CLKM MC13783_IRQSTAT1_CLKI +#define MC13783_IRQMASK1_SEMAFM MC13783_IRQSTAT1_SEMAFI +#define MC13783_IRQMASK1_MC2BM MC13783_IRQSTAT1_MC2BI +#define MC13783_IRQMASK1_HSDETM MC13783_IRQSTAT1_HSDETI +#define MC13783_IRQMASK1_HSLM MC13783_IRQSTAT1_HSLI +#define MC13783_IRQMASK1_ALSPTHM MC13783_IRQSTAT1_ALSPTHI +#define MC13783_IRQMASK1_AHSSHORTM MC13783_IRQSTAT1_AHSSHORTI + +#define MC13783_ADC1 44 +#define MC13783_ADC1_ADEN (1 << 0) +#define MC13783_ADC1_RAND (1 << 1) +#define MC13783_ADC1_ADSEL (1 << 3) +#define MC13783_ADC1_ASC (1 << 20) +#define MC13783_ADC1_ADTRIGIGN (1 << 21) + +#define MC13783_NUMREGS 0x3f + +void mc13783_lock(struct mc13783 *mc13783) +{ + if (!mutex_trylock(&mc13783->lock)) { + dev_dbg(&mc13783->spidev->dev, "wait for %s from %pf\n", + __func__, __builtin_return_address(0)); + + mutex_lock(&mc13783->lock); + } + dev_dbg(&mc13783->spidev->dev, "%s from %pf\n", + __func__, __builtin_return_address(0)); +} +EXPORT_SYMBOL(mc13783_lock); -#define MC13783_MAX_REG_NUM 0x3f -#define MC13783_FRAME_MASK 0x00ffffff -#define MC13783_MAX_REG_NUM 0x3f -#define MC13783_REG_NUM_SHIFT 0x19 -#define MC13783_WRITE_BIT_SHIFT 31 +void mc13783_unlock(struct mc13783 *mc13783) +{ + dev_dbg(&mc13783->spidev->dev, "%s from %pf\n", + __func__, __builtin_return_address(0)); + mutex_unlock(&mc13783->lock); +} +EXPORT_SYMBOL(mc13783_unlock); -static inline int spi_rw(struct spi_device *spi, u8 * buf, size_t len) +#define MC13783_REGOFFSET_SHIFT 25 +int mc13783_reg_read(struct mc13783 *mc13783, unsigned int offset, u32 *val) { - struct spi_transfer t = { - .tx_buf = (const void *)buf, - .rx_buf = buf, - .len = len, - .cs_change = 0, - .delay_usecs = 0, - }; + struct spi_transfer t; struct spi_message m; + int ret; + + BUG_ON(!mutex_is_locked(&mc13783->lock)); + + if (offset > MC13783_NUMREGS) + return -EINVAL; + + *val = offset << MC13783_REGOFFSET_SHIFT; + + memset(&t, 0, sizeof(t)); + + t.tx_buf = val; + t.rx_buf = val; + t.len = sizeof(u32); spi_message_init(&m); spi_message_add_tail(&t, &m); - if (spi_sync(spi, &m) != 0 || m.status != 0) - return -EINVAL; - return len - m.actual_length; -} -static int mc13783_read(struct mc13783 *mc13783, int reg_num, u32 *reg_val) -{ - unsigned int frame = 0; - int ret = 0; + ret = spi_sync(mc13783->spidev, &m); - if (reg_num > MC13783_MAX_REG_NUM) - return -EINVAL; + /* error in message.status implies error return from spi_sync */ + BUG_ON(!ret && m.status); - frame |= reg_num << MC13783_REG_NUM_SHIFT; + if (ret) + return ret; - ret = spi_rw(mc13783->spi_device, (u8 *)&frame, 4); + *val &= 0xffffff; - *reg_val = frame & MC13783_FRAME_MASK; + dev_vdbg(&mc13783->spidev->dev, "[0x%02x] -> 0x%06x\n", offset, *val); - return ret; + return 0; } +EXPORT_SYMBOL(mc13783_reg_read); -static int mc13783_write(struct mc13783 *mc13783, int reg_num, u32 reg_val) +int mc13783_reg_write(struct mc13783 *mc13783, unsigned int offset, u32 val) { - unsigned int frame = 0; + u32 buf; + struct spi_transfer t; + struct spi_message m; + int ret; + + BUG_ON(!mutex_is_locked(&mc13783->lock)); - if (reg_num > MC13783_MAX_REG_NUM) + dev_vdbg(&mc13783->spidev->dev, "[0x%02x] <- 0x%06x\n", offset, val); + + if (offset > 
MC13783_NUMREGS || val > 0xffffff) return -EINVAL; - frame |= (1 << MC13783_WRITE_BIT_SHIFT); - frame |= reg_num << MC13783_REG_NUM_SHIFT; - frame |= reg_val & MC13783_FRAME_MASK; + buf = 1 << 31 | offset << MC13783_REGOFFSET_SHIFT | val; + + memset(&t, 0, sizeof(t)); - return spi_rw(mc13783->spi_device, (u8 *)&frame, 4); + t.tx_buf = &buf; + t.rx_buf = &buf; + t.len = sizeof(u32); + + spi_message_init(&m); + spi_message_add_tail(&t, &m); + + ret = spi_sync(mc13783->spidev, &m); + + BUG_ON(!ret && m.status); + + if (ret) + return ret; + + return 0; } +EXPORT_SYMBOL(mc13783_reg_write); -int mc13783_reg_read(struct mc13783 *mc13783, int reg_num, u32 *reg_val) +int mc13783_reg_rmw(struct mc13783 *mc13783, unsigned int offset, + u32 mask, u32 val) { int ret; + u32 valread; - mutex_lock(&mc13783->io_lock); - ret = mc13783_read(mc13783, reg_num, reg_val); - mutex_unlock(&mc13783->io_lock); + BUG_ON(val & ~mask); - return ret; + ret = mc13783_reg_read(mc13783, offset, &valread); + if (ret) + return ret; + + valread = (valread & ~mask) | val; + + return mc13783_reg_write(mc13783, offset, valread); } -EXPORT_SYMBOL_GPL(mc13783_reg_read); +EXPORT_SYMBOL(mc13783_reg_rmw); -int mc13783_reg_write(struct mc13783 *mc13783, int reg_num, u32 reg_val) +int mc13783_mask(struct mc13783 *mc13783, int irq) { int ret; + unsigned int offmask = irq < 24 ? MC13783_IRQMASK0 : MC13783_IRQMASK1; + u32 irqbit = 1 << (irq < 24 ? irq : irq - 24); + u32 mask; - mutex_lock(&mc13783->io_lock); - ret = mc13783_write(mc13783, reg_num, reg_val); - mutex_unlock(&mc13783->io_lock); + if (irq < 0 || irq >= MC13783_NUM_IRQ) + return -EINVAL; - return ret; + ret = mc13783_reg_read(mc13783, offmask, &mask); + if (ret) + return ret; + + if (mask & irqbit) + /* already masked */ + return 0; + + return mc13783_reg_write(mc13783, offmask, mask | irqbit); } -EXPORT_SYMBOL_GPL(mc13783_reg_write); +EXPORT_SYMBOL(mc13783_mask); -/** - * mc13783_set_bits - Bitmask write - * - * @mc13783: Pointer to mc13783 control structure - * @reg: Register to access - * @mask: Mask of bits to change - * @val: Value to set for masked bits - */ -int mc13783_set_bits(struct mc13783 *mc13783, int reg, u32 mask, u32 val) +int mc13783_unmask(struct mc13783 *mc13783, int irq) { - u32 tmp; int ret; + unsigned int offmask = irq < 24 ? MC13783_IRQMASK0 : MC13783_IRQMASK1; + u32 irqbit = 1 << (irq < 24 ? 
irq : irq - 24); + u32 mask; - mutex_lock(&mc13783->io_lock); + if (irq < 0 || irq >= MC13783_NUM_IRQ) + return -EINVAL; - ret = mc13783_read(mc13783, reg, &tmp); - tmp = (tmp & ~mask) | val; - if (ret == 0) - ret = mc13783_write(mc13783, reg, tmp); + ret = mc13783_reg_read(mc13783, offmask, &mask); + if (ret) + return ret; - mutex_unlock(&mc13783->io_lock); + if (!(mask & irqbit)) + /* already unmasked */ + return 0; - return ret; + return mc13783_reg_write(mc13783, offmask, mask & ~irqbit); } -EXPORT_SYMBOL_GPL(mc13783_set_bits); +EXPORT_SYMBOL(mc13783_unmask); -int mc13783_register_irq(struct mc13783 *mc13783, int irq, - void (*handler) (int, void *), void *data) +int mc13783_irq_request_nounmask(struct mc13783 *mc13783, int irq, + irq_handler_t handler, const char *name, void *dev) { - if (irq < 0 || irq > MC13783_NUM_IRQ || !handler) + BUG_ON(!mutex_is_locked(&mc13783->lock)); + BUG_ON(!handler); + + if (irq < 0 || irq >= MC13783_NUM_IRQ) return -EINVAL; - if (WARN_ON(mc13783->irq_handler[irq].handler)) + if (mc13783->irqhandler[irq]) return -EBUSY; - mutex_lock(&mc13783->io_lock); - mc13783->irq_handler[irq].handler = handler; - mc13783->irq_handler[irq].data = data; - mutex_unlock(&mc13783->io_lock); + mc13783->irqhandler[irq] = handler; + mc13783->irqdata[irq] = dev; return 0; } -EXPORT_SYMBOL_GPL(mc13783_register_irq); +EXPORT_SYMBOL(mc13783_irq_request_nounmask); -int mc13783_free_irq(struct mc13783 *mc13783, int irq) +int mc13783_irq_request(struct mc13783 *mc13783, int irq, + irq_handler_t handler, const char *name, void *dev) { - if (irq < 0 || irq > MC13783_NUM_IRQ) + int ret; + + ret = mc13783_irq_request_nounmask(mc13783, irq, handler, name, dev); + if (ret) + return ret; + + ret = mc13783_unmask(mc13783, irq); + if (ret) { + mc13783->irqhandler[irq] = NULL; + mc13783->irqdata[irq] = NULL; + return ret; + } + + return 0; +} +EXPORT_SYMBOL(mc13783_irq_request); + +int mc13783_irq_free(struct mc13783 *mc13783, int irq, void *dev) +{ + int ret; + BUG_ON(!mutex_is_locked(&mc13783->lock)); + + if (irq < 0 || irq >= MC13783_NUM_IRQ || !mc13783->irqhandler[irq] || + mc13783->irqdata[irq] != dev) return -EINVAL; - mutex_lock(&mc13783->io_lock); - mc13783->irq_handler[irq].handler = NULL; - mutex_unlock(&mc13783->io_lock); + ret = mc13783_mask(mc13783, irq); + if (ret) + return ret; + + mc13783->irqhandler[irq] = NULL; + mc13783->irqdata[irq] = NULL; return 0; } -EXPORT_SYMBOL_GPL(mc13783_free_irq); +EXPORT_SYMBOL(mc13783_irq_free); -static void mc13783_irq_work(struct work_struct *work) +static inline irqreturn_t mc13783_irqhandler(struct mc13783 *mc13783, int irq) { - struct mc13783 *mc13783 = container_of(work, struct mc13783, work); - int i; - unsigned int adc_sts; - - /* check if the adc has finished any completion */ - mc13783_reg_read(mc13783, MC13783_REG_INTERRUPT_STATUS_0, &adc_sts); - mc13783_reg_write(mc13783, MC13783_REG_INTERRUPT_STATUS_0, - adc_sts & MC13783_INT_STAT_ADCDONEI); - - if (adc_sts & MC13783_INT_STAT_ADCDONEI) - complete_all(&mc13783->adc_done); - - for (i = 0; i < MC13783_NUM_IRQ; i++) - if (mc13783->irq_handler[i].handler) - mc13783->irq_handler[i].handler(i, - mc13783->irq_handler[i].data); - enable_irq(mc13783->irq); + return mc13783->irqhandler[irq](irq, mc13783->irqdata[irq]); } -static irqreturn_t mc13783_interrupt(int irq, void *dev_id) +int mc13783_ackirq(struct mc13783 *mc13783, int irq) { - struct mc13783 *mc13783 = dev_id; + unsigned int offstat = irq < 24 ? MC13783_IRQSTAT0 : MC13783_IRQSTAT1; + unsigned int val = 1 << (irq < 24 ? 
irq : irq - 24); - disable_irq_nosync(irq); + BUG_ON(irq < 0 || irq >= MC13783_NUM_IRQ); - schedule_work(&mc13783->work); - return IRQ_HANDLED; + return mc13783_reg_write(mc13783, offstat, val); } +EXPORT_SYMBOL(mc13783_ackirq); -/* set adc to ts interrupt mode, which generates touchscreen wakeup interrupt */ -static inline void mc13783_adc_set_ts_irq_mode(struct mc13783 *mc13783) +/* + * returns: number of handled irqs or negative error + * locking: holds mc13783->lock + */ +static int mc13783_irq_handle(struct mc13783 *mc13783, + unsigned int offstat, unsigned int offmask, int baseirq) { - unsigned int reg_adc0, reg_adc1; + u32 stat, mask; + int ret = mc13783_reg_read(mc13783, offstat, &stat); + int num_handled = 0; + + if (ret) + return ret; + + ret = mc13783_reg_read(mc13783, offmask, &mask); + if (ret) + return ret; + + while (stat & ~mask) { + int irq = __ffs(stat & ~mask); + + stat &= ~(1 << irq); + + if (likely(mc13783->irqhandler[baseirq + irq])) { + irqreturn_t handled; - reg_adc0 = MC13783_ADC0_ADREFEN | MC13783_ADC0_ADREFMODE - | MC13783_ADC0_TSMOD0; - reg_adc1 = MC13783_ADC1_ADEN | MC13783_ADC1_ADTRIGIGN; + handled = mc13783_irqhandler(mc13783, baseirq + irq); + if (handled == IRQ_HANDLED) + num_handled++; + } else { + dev_err(&mc13783->spidev->dev, + "BUG: irq %u but no handler\n", + baseirq + irq); - mc13783_reg_write(mc13783, MC13783_REG_ADC_0, reg_adc0); - mc13783_reg_write(mc13783, MC13783_REG_ADC_1, reg_adc1); + mask |= 1 << irq; + + ret = mc13783_reg_write(mc13783, offmask, mask); + } + } + + return num_handled; } +static irqreturn_t mc13783_irq_thread(int irq, void *data) +{ + struct mc13783 *mc13783 = data; + irqreturn_t ret; + int handled = 0; + + mc13783_lock(mc13783); + + ret = mc13783_irq_handle(mc13783, MC13783_IRQSTAT0, + MC13783_IRQMASK0, MC13783_IRQ_ADCDONE); + if (ret > 0) + handled = 1; + + ret = mc13783_irq_handle(mc13783, MC13783_IRQSTAT1, + MC13783_IRQMASK1, MC13783_IRQ_1HZ); + if (ret > 0) + handled = 1; + + mc13783_unlock(mc13783); + + return IRQ_RETVAL(handled); +} + +#define MC13783_ADC1_CHAN0_SHIFT 5 +#define MC13783_ADC1_CHAN1_SHIFT 8 + +struct mc13783_adcdone_data { + struct mc13783 *mc13783; + struct completion done; +}; + +static irqreturn_t mc13783_handler_adcdone(int irq, void *data) +{ + struct mc13783_adcdone_data *adcdone_data = data; + + mc13783_ackirq(adcdone_data->mc13783, irq); + + complete_all(&adcdone_data->done); + + return IRQ_HANDLED; +} + +#define MC13783_ADC_WORKING (1 << 16) + int mc13783_adc_do_conversion(struct mc13783 *mc13783, unsigned int mode, unsigned int channel, unsigned int *sample) { - unsigned int reg_adc0, reg_adc1; - int i; + u32 adc0, adc1, old_adc0; + int i, ret; + struct mc13783_adcdone_data adcdone_data = { + .mc13783 = mc13783, + }; + init_completion(&adcdone_data.done); + + dev_dbg(&mc13783->spidev->dev, "%s\n", __func__); + + mc13783_lock(mc13783); + + if (mc13783->flags & MC13783_ADC_WORKING) { + ret = -EBUSY; + goto out; + } + + mc13783->flags |= MC13783_ADC_WORKING; - mutex_lock(&mc13783->adc_conv_lock); + mc13783_reg_read(mc13783, MC13783_ADC0, &old_adc0); - /* set up auto incrementing anyway to make quick read */ - reg_adc0 = MC13783_ADC0_ADINC1 | MC13783_ADC0_ADINC2; - /* enable the adc, ignore external triggering and set ASC to trigger - * conversion */ - reg_adc1 = MC13783_ADC1_ADEN | MC13783_ADC1_ADTRIGIGN - | MC13783_ADC1_ASC; + adc0 = MC13783_ADC0_ADINC1 | MC13783_ADC0_ADINC2; + adc1 = MC13783_ADC1_ADEN | MC13783_ADC1_ADTRIGIGN | MC13783_ADC1_ASC; - /* setup channel number */ if (channel > 7) - 
reg_adc1 |= MC13783_ADC1_ADSEL; + adc1 |= MC13783_ADC1_ADSEL; switch (mode) { case MC13783_ADC_MODE_TS: - /* enables touch screen reference mode and set touchscreen mode - * to position mode */ - reg_adc0 |= MC13783_ADC0_ADREFEN | MC13783_ADC0_ADREFMODE + adc0 |= MC13783_ADC0_ADREFEN | MC13783_ADC0_ADREFMODE | MC13783_ADC0_TSMOD0 | MC13783_ADC0_TSMOD1; - reg_adc1 |= 4 << MC13783_ADC1_CHAN1_SHIFT; + adc1 |= 4 << MC13783_ADC1_CHAN1_SHIFT; break; + case MC13783_ADC_MODE_SINGLE_CHAN: - reg_adc1 |= (channel & 0x7) << MC13783_ADC1_CHAN0_SHIFT; - reg_adc1 |= MC13783_ADC1_RAND; + adc0 |= old_adc0 & MC13783_ADC0_TSMOD_MASK; + adc1 |= (channel & 0x7) << MC13783_ADC1_CHAN0_SHIFT; + adc1 |= MC13783_ADC1_RAND; break; + case MC13783_ADC_MODE_MULT_CHAN: - reg_adc1 |= 4 << MC13783_ADC1_CHAN1_SHIFT; + adc0 |= old_adc0 & MC13783_ADC0_TSMOD_MASK; + adc1 |= 4 << MC13783_ADC1_CHAN1_SHIFT; break; + default: + mc13783_unlock(mc13783); return -EINVAL; } - mc13783_reg_write(mc13783, MC13783_REG_ADC_0, reg_adc0); - mc13783_reg_write(mc13783, MC13783_REG_ADC_1, reg_adc1); + dev_dbg(&mc13783->spidev->dev, "%s: request irq\n", __func__); + mc13783_irq_request(mc13783, MC13783_IRQ_ADCDONE, + mc13783_handler_adcdone, __func__, &adcdone_data); + mc13783_ackirq(mc13783, MC13783_IRQ_ADCDONE); - wait_for_completion_interruptible(&mc13783->adc_done); + mc13783_reg_write(mc13783, MC13783_REG_ADC_0, adc0); + mc13783_reg_write(mc13783, MC13783_REG_ADC_1, adc1); - for (i = 0; i < 4; i++) - mc13783_reg_read(mc13783, MC13783_REG_ADC_2, &sample[i]); + mc13783_unlock(mc13783); - if (mc13783->ts_active) - mc13783_adc_set_ts_irq_mode(mc13783); + ret = wait_for_completion_interruptible_timeout(&adcdone_data.done, HZ); - mutex_unlock(&mc13783->adc_conv_lock); + if (!ret) + ret = -ETIMEDOUT; - return 0; + mc13783_lock(mc13783); + + mc13783_irq_free(mc13783, MC13783_IRQ_ADCDONE, &adcdone_data); + + if (ret > 0) + for (i = 0; i < 4; ++i) { + ret = mc13783_reg_read(mc13783, + MC13783_REG_ADC_2, &sample[i]); + if (ret) + break; + } + + if (mode == MC13783_ADC_MODE_TS) + /* restore TSMOD */ + mc13783_reg_write(mc13783, MC13783_REG_ADC_0, old_adc0); + + mc13783->flags &= ~MC13783_ADC_WORKING; +out: + mc13783_unlock(mc13783); + + return ret; } EXPORT_SYMBOL_GPL(mc13783_adc_do_conversion); -void mc13783_adc_set_ts_status(struct mc13783 *mc13783, unsigned int status) +static int mc13783_add_subdevice_pdata(struct mc13783 *mc13783, + const char *name, void *pdata, size_t pdata_size) { - mc13783->ts_active = status; + struct mfd_cell cell = { + .name = name, + .platform_data = pdata, + .data_size = pdata_size, + }; + + return mfd_add_devices(&mc13783->spidev->dev, -1, &cell, 1, NULL, 0); +} + +static int mc13783_add_subdevice(struct mc13783 *mc13783, const char *name) +{ + return mc13783_add_subdevice_pdata(mc13783, name, NULL, 0); } -EXPORT_SYMBOL_GPL(mc13783_adc_set_ts_status); static int mc13783_check_revision(struct mc13783 *mc13783) { u32 rev_id, rev1, rev2, finid, icid; - mc13783_read(mc13783, MC13783_REG_REVISION, &rev_id); + mc13783_reg_read(mc13783, MC13783_REG_REVISION, &rev_id); rev1 = (rev_id & 0x018) >> 3; rev2 = (rev_id & 0x007); @@ -292,38 +555,24 @@ static int mc13783_check_revision(struct mc13783 *mc13783) rev1 = 3; if (rev1 == 0 || icid != 2) { - dev_err(mc13783->dev, "No MC13783 detected.\n"); + dev_err(&mc13783->spidev->dev, "No MC13783 detected.\n"); return -ENODEV; } - mc13783->revision = ((rev1 * 10) + rev2); - dev_info(mc13783->dev, "MC13783 Rev %d.%d FinVer %x detected\n", rev1, - rev2, finid); + 
dev_info(&mc13783->spidev->dev, + "MC13783 Rev %d.%d FinVer %x detected\n", + rev1, rev2, finid); return 0; } -/* - * Register a client device. This is non-fatal since there is no need to - * fail the entire device init due to a single platform device failing. - */ -static void mc13783_client_dev_register(struct mc13783 *mc13783, - const char *name) -{ - struct mfd_cell cell = {}; - - cell.name = name; - - mfd_add_devices(mc13783->dev, -1, &cell, 1, NULL, 0); -} - -static int __devinit mc13783_probe(struct spi_device *spi) +static int mc13783_probe(struct spi_device *spi) { struct mc13783 *mc13783; - struct mc13783_platform_data *pdata = spi->dev.platform_data; + struct mc13783_platform_data *pdata = dev_get_platdata(&spi->dev); int ret; - mc13783 = kzalloc(sizeof(struct mc13783), GFP_KERNEL); + mc13783 = kzalloc(sizeof(*mc13783), GFP_KERNEL); if (!mc13783) return -ENOMEM; @@ -332,96 +581,104 @@ static int __devinit mc13783_probe(struct spi_device *spi) spi->bits_per_word = 32; spi_setup(spi); - mc13783->spi_device = spi; - mc13783->dev = &spi->dev; - mc13783->irq = spi->irq; + mc13783->spidev = spi; + + mutex_init(&mc13783->lock); + mc13783_lock(mc13783); + + ret = mc13783_check_revision(mc13783); + if (ret) + goto err_revision; + + /* mask all irqs */ + ret = mc13783_reg_write(mc13783, MC13783_IRQMASK0, 0x00ffffff); + if (ret) + goto err_mask; - INIT_WORK(&mc13783->work, mc13783_irq_work); - mutex_init(&mc13783->io_lock); - mutex_init(&mc13783->adc_conv_lock); - init_completion(&mc13783->adc_done); + ret = mc13783_reg_write(mc13783, MC13783_IRQMASK1, 0x00ffffff); + if (ret) + goto err_mask; + + ret = request_threaded_irq(spi->irq, NULL, mc13783_irq_thread, + IRQF_ONESHOT | IRQF_TRIGGER_HIGH, "mc13783", mc13783); + + if (ret) { +err_mask: +err_revision: + mutex_unlock(&mc13783->lock); + dev_set_drvdata(&spi->dev, NULL); + kfree(mc13783); + return ret; + } + /* This should go away (BEGIN) */ if (pdata) { mc13783->flags = pdata->flags; mc13783->regulators = pdata->regulators; mc13783->num_regulators = pdata->num_regulators; } + /* This should go away (END) */ - if (mc13783_check_revision(mc13783)) { - ret = -ENODEV; - goto err_out; + if (pdata->flags & MC13783_USE_ADC) + mc13783_add_subdevice(mc13783, "mc13783-adc"); + + if (pdata->flags & MC13783_USE_CODEC) + mc13783_add_subdevice(mc13783, "mc13783-codec"); + + if (pdata->flags & MC13783_USE_REGULATOR) { + struct mc13783_regulator_platform_data regulator_pdata = { + .num_regulators = pdata->num_regulators, + .regulators = pdata->regulators, + }; + + mc13783_add_subdevice_pdata(mc13783, "mc13783-regulator", + ®ulator_pdata, sizeof(regulator_pdata)); } - /* clear and mask all interrupts */ - mc13783_reg_write(mc13783, MC13783_REG_INTERRUPT_STATUS_0, 0x00ffffff); - mc13783_reg_write(mc13783, MC13783_REG_INTERRUPT_MASK_0, 0x00ffffff); - mc13783_reg_write(mc13783, MC13783_REG_INTERRUPT_STATUS_1, 0x00ffffff); - mc13783_reg_write(mc13783, MC13783_REG_INTERRUPT_MASK_1, 0x00ffffff); + if (pdata->flags & MC13783_USE_RTC) + mc13783_add_subdevice(mc13783, "mc13783-rtc"); - /* unmask adcdone interrupts */ - mc13783_set_bits(mc13783, MC13783_REG_INTERRUPT_MASK_0, - MC13783_INT_MASK_ADCDONEM, 0); + if (pdata->flags & MC13783_USE_TOUCHSCREEN) + mc13783_add_subdevice(mc13783, "mc13783-ts"); - ret = request_irq(mc13783->irq, mc13783_interrupt, - IRQF_DISABLED | IRQF_TRIGGER_HIGH, "mc13783", - mc13783); - if (ret) - goto err_out; - - if (mc13783->flags & MC13783_USE_CODEC) - mc13783_client_dev_register(mc13783, "mc13783-codec"); - if (mc13783->flags & 
MC13783_USE_ADC) - mc13783_client_dev_register(mc13783, "mc13783-adc"); - if (mc13783->flags & MC13783_USE_RTC) - mc13783_client_dev_register(mc13783, "mc13783-rtc"); - if (mc13783->flags & MC13783_USE_REGULATOR) - mc13783_client_dev_register(mc13783, "mc13783-regulator"); - if (mc13783->flags & MC13783_USE_TOUCHSCREEN) - mc13783_client_dev_register(mc13783, "mc13783-ts"); + mc13783_unlock(mc13783); return 0; - -err_out: - kfree(mc13783); - return ret; } static int __devexit mc13783_remove(struct spi_device *spi) { - struct mc13783 *mc13783; + struct mc13783 *mc13783 = dev_get_drvdata(&spi->dev); - mc13783 = dev_get_drvdata(&spi->dev); - - free_irq(mc13783->irq, mc13783); + free_irq(mc13783->spidev->irq, mc13783); mfd_remove_devices(&spi->dev); return 0; } -static struct spi_driver pmic_driver = { +static struct spi_driver mc13783_driver = { .driver = { - .name = "mc13783", - .bus = &spi_bus_type, - .owner = THIS_MODULE, + .name = "mc13783", + .bus = &spi_bus_type, + .owner = THIS_MODULE, }, .probe = mc13783_probe, .remove = __devexit_p(mc13783_remove), }; -static int __init pmic_init(void) +static int __init mc13783_init(void) { - return spi_register_driver(&pmic_driver); + return spi_register_driver(&mc13783_driver); } -subsys_initcall(pmic_init); +subsys_initcall(mc13783_init); -static void __exit pmic_exit(void) +static void __exit mc13783_exit(void) { - spi_unregister_driver(&pmic_driver); + spi_unregister_driver(&mc13783_driver); } -module_exit(pmic_exit); - -MODULE_DESCRIPTION("Core/Protocol driver for Freescale MC13783 PMIC"); -MODULE_AUTHOR("Sascha Hauer "); -MODULE_LICENSE("GPL"); +module_exit(mc13783_exit); +MODULE_DESCRIPTION("Core driver for Freescale MC13783 PMIC"); +MODULE_AUTHOR("Uwe Kleine-Koenig "); +MODULE_LICENSE("GPL v2"); diff --git a/include/linux/mfd/mc13783-private.h b/include/linux/mfd/mc13783-private.h index 47e698cb0f16..95cf9360553f 100644 --- a/include/linux/mfd/mc13783-private.h +++ b/include/linux/mfd/mc13783-private.h @@ -24,52 +24,23 @@ #include #include -#include #include - -struct mc13783_irq { - void (*handler)(int, void *); - void *data; -}; - -#define MC13783_NUM_IRQ 2 -#define MC13783_IRQ_TS 0 -#define MC13783_IRQ_REGULATOR 1 - -#define MC13783_ADC_MODE_TS 1 -#define MC13783_ADC_MODE_SINGLE_CHAN 2 -#define MC13783_ADC_MODE_MULT_CHAN 3 +#include struct mc13783 { - int revision; - struct device *dev; - struct spi_device *spi_device; - - int (*read_dev)(void *data, char reg, int count, u32 *dst); - int (*write_dev)(void *data, char reg, int count, const u32 *src); - - struct mutex io_lock; - void *io_data; + struct spi_device *spidev; + struct mutex lock; int irq; - unsigned int flags; + int flags; - struct mc13783_irq irq_handler[MC13783_NUM_IRQ]; - struct work_struct work; - struct completion adc_done; - unsigned int ts_active; - struct mutex adc_conv_lock; + irq_handler_t irqhandler[MC13783_NUM_IRQ]; + void *irqdata[MC13783_NUM_IRQ]; + /* XXX these should go as platformdata to the regulator subdevice */ struct mc13783_regulator_init_data *regulators; int num_regulators; }; -int mc13783_reg_read(struct mc13783 *, int reg_num, u32 *); -int mc13783_reg_write(struct mc13783 *, int, u32); -int mc13783_set_bits(struct mc13783 *, int, u32, u32); -int mc13783_free_irq(struct mc13783 *mc13783, int irq); -int mc13783_register_irq(struct mc13783 *mc13783, int irq, - void (*handler) (int, void *), void *data); - #define MC13783_REG_INTERRUPT_STATUS_0 0 #define MC13783_REG_INTERRUPT_MASK_0 1 #define MC13783_REG_INTERRUPT_SENSE_0 2 @@ -136,55 +107,6 @@ int 
mc13783_register_irq(struct mc13783 *mc13783, int irq, #define MC13783_REG_TEST_3 63 #define MC13783_REG_NB 64 - -/* - * Interrupt Status - */ -#define MC13783_INT_STAT_ADCDONEI (1 << 0) -#define MC13783_INT_STAT_ADCBISDONEI (1 << 1) -#define MC13783_INT_STAT_TSI (1 << 2) -#define MC13783_INT_STAT_WHIGHI (1 << 3) -#define MC13783_INT_STAT_WLOWI (1 << 4) -#define MC13783_INT_STAT_CHGDETI (1 << 6) -#define MC13783_INT_STAT_CHGOVI (1 << 7) -#define MC13783_INT_STAT_CHGREVI (1 << 8) -#define MC13783_INT_STAT_CHGSHORTI (1 << 9) -#define MC13783_INT_STAT_CCCVI (1 << 10) -#define MC13783_INT_STAT_CHGCURRI (1 << 11) -#define MC13783_INT_STAT_BPONI (1 << 12) -#define MC13783_INT_STAT_LOBATLI (1 << 13) -#define MC13783_INT_STAT_LOBATHI (1 << 14) -#define MC13783_INT_STAT_UDPI (1 << 15) -#define MC13783_INT_STAT_USBI (1 << 16) -#define MC13783_INT_STAT_IDI (1 << 19) -#define MC13783_INT_STAT_Unused (1 << 20) -#define MC13783_INT_STAT_SE1I (1 << 21) -#define MC13783_INT_STAT_CKDETI (1 << 22) -#define MC13783_INT_STAT_UDMI (1 << 23) - -/* - * Interrupt Mask - */ -#define MC13783_INT_MASK_ADCDONEM (1 << 0) -#define MC13783_INT_MASK_ADCBISDONEM (1 << 1) -#define MC13783_INT_MASK_TSM (1 << 2) -#define MC13783_INT_MASK_WHIGHM (1 << 3) -#define MC13783_INT_MASK_WLOWM (1 << 4) -#define MC13783_INT_MASK_CHGDETM (1 << 6) -#define MC13783_INT_MASK_CHGOVM (1 << 7) -#define MC13783_INT_MASK_CHGREVM (1 << 8) -#define MC13783_INT_MASK_CHGSHORTM (1 << 9) -#define MC13783_INT_MASK_CCCVM (1 << 10) -#define MC13783_INT_MASK_CHGCURRM (1 << 11) -#define MC13783_INT_MASK_BPONM (1 << 12) -#define MC13783_INT_MASK_LOBATLM (1 << 13) -#define MC13783_INT_MASK_LOBATHM (1 << 14) -#define MC13783_INT_MASK_UDPM (1 << 15) -#define MC13783_INT_MASK_USBM (1 << 16) -#define MC13783_INT_MASK_IDM (1 << 19) -#define MC13783_INT_MASK_SE1M (1 << 21) -#define MC13783_INT_MASK_CKDETM (1 << 22) - /* * Reg Regulator Mode 0 */ @@ -284,113 +206,15 @@ int mc13783_register_irq(struct mc13783 *mc13783, int irq, #define MC13783_SWCTRL_SW3_STBY (1 << 21) #define MC13783_SWCTRL_SW3_MODE (1 << 22) -/* - * ADC/Touch - */ -#define MC13783_ADC0_LICELLCON (1 << 0) -#define MC13783_ADC0_CHRGICON (1 << 1) -#define MC13783_ADC0_BATICON (1 << 2) -#define MC13783_ADC0_RTHEN (1 << 3) -#define MC13783_ADC0_DTHEN (1 << 4) -#define MC13783_ADC0_UIDEN (1 << 5) -#define MC13783_ADC0_ADOUTEN (1 << 6) -#define MC13783_ADC0_ADOUTPER (1 << 7) -#define MC13783_ADC0_ADREFEN (1 << 10) -#define MC13783_ADC0_ADREFMODE (1 << 11) -#define MC13783_ADC0_TSMOD0 (1 << 12) -#define MC13783_ADC0_TSMOD1 (1 << 13) -#define MC13783_ADC0_TSMOD2 (1 << 14) -#define MC13783_ADC0_CHRGRAWDIV (1 << 15) -#define MC13783_ADC0_ADINC1 (1 << 16) -#define MC13783_ADC0_ADINC2 (1 << 17) -#define MC13783_ADC0_WCOMP (1 << 18) -#define MC13783_ADC0_ADCBIS0 (1 << 23) - -#define MC13783_ADC1_ADEN (1 << 0) -#define MC13783_ADC1_RAND (1 << 1) -#define MC13783_ADC1_ADSEL (1 << 3) -#define MC13783_ADC1_TRIGMASK (1 << 4) -#define MC13783_ADC1_ADA10 (1 << 5) -#define MC13783_ADC1_ADA11 (1 << 6) -#define MC13783_ADC1_ADA12 (1 << 7) -#define MC13783_ADC1_ADA20 (1 << 8) -#define MC13783_ADC1_ADA21 (1 << 9) -#define MC13783_ADC1_ADA22 (1 << 10) -#define MC13783_ADC1_ATO0 (1 << 11) -#define MC13783_ADC1_ATO1 (1 << 12) -#define MC13783_ADC1_ATO2 (1 << 13) -#define MC13783_ADC1_ATO3 (1 << 14) -#define MC13783_ADC1_ATO4 (1 << 15) -#define MC13783_ADC1_ATO5 (1 << 16) -#define MC13783_ADC1_ATO6 (1 << 17) -#define MC13783_ADC1_ATO7 (1 << 18) -#define MC13783_ADC1_ATOX (1 << 19) -#define MC13783_ADC1_ASC (1 << 20) -#define 
MC13783_ADC1_ADTRIGIGN (1 << 21) -#define MC13783_ADC1_ADONESHOT (1 << 22) -#define MC13783_ADC1_ADCBIS1 (1 << 23) - -#define MC13783_ADC1_CHAN0_SHIFT 5 -#define MC13783_ADC1_CHAN1_SHIFT 8 - -#define MC13783_ADC2_ADD10 (1 << 2) -#define MC13783_ADC2_ADD11 (1 << 3) -#define MC13783_ADC2_ADD12 (1 << 4) -#define MC13783_ADC2_ADD13 (1 << 5) -#define MC13783_ADC2_ADD14 (1 << 6) -#define MC13783_ADC2_ADD15 (1 << 7) -#define MC13783_ADC2_ADD16 (1 << 8) -#define MC13783_ADC2_ADD17 (1 << 9) -#define MC13783_ADC2_ADD18 (1 << 10) -#define MC13783_ADC2_ADD19 (1 << 11) -#define MC13783_ADC2_ADD20 (1 << 14) -#define MC13783_ADC2_ADD21 (1 << 15) -#define MC13783_ADC2_ADD22 (1 << 16) -#define MC13783_ADC2_ADD23 (1 << 17) -#define MC13783_ADC2_ADD24 (1 << 18) -#define MC13783_ADC2_ADD25 (1 << 19) -#define MC13783_ADC2_ADD26 (1 << 20) -#define MC13783_ADC2_ADD27 (1 << 21) -#define MC13783_ADC2_ADD28 (1 << 22) -#define MC13783_ADC2_ADD29 (1 << 23) +static inline int mc13783_set_bits(struct mc13783 *mc13783, unsigned int offset, + u32 mask, u32 val) +{ + int ret; + mc13783_lock(mc13783); + ret = mc13783_reg_rmw(mc13783, offset, mask, val); + mc13783_unlock(mc13783); -#define MC13783_ADC3_WHIGH0 (1 << 0) -#define MC13783_ADC3_WHIGH1 (1 << 1) -#define MC13783_ADC3_WHIGH2 (1 << 2) -#define MC13783_ADC3_WHIGH3 (1 << 3) -#define MC13783_ADC3_WHIGH4 (1 << 4) -#define MC13783_ADC3_WHIGH5 (1 << 5) -#define MC13783_ADC3_ICID0 (1 << 6) -#define MC13783_ADC3_ICID1 (1 << 7) -#define MC13783_ADC3_ICID2 (1 << 8) -#define MC13783_ADC3_WLOW0 (1 << 9) -#define MC13783_ADC3_WLOW1 (1 << 10) -#define MC13783_ADC3_WLOW2 (1 << 11) -#define MC13783_ADC3_WLOW3 (1 << 12) -#define MC13783_ADC3_WLOW4 (1 << 13) -#define MC13783_ADC3_WLOW5 (1 << 14) -#define MC13783_ADC3_ADCBIS2 (1 << 23) - -#define MC13783_ADC4_ADDBIS10 (1 << 2) -#define MC13783_ADC4_ADDBIS11 (1 << 3) -#define MC13783_ADC4_ADDBIS12 (1 << 4) -#define MC13783_ADC4_ADDBIS13 (1 << 5) -#define MC13783_ADC4_ADDBIS14 (1 << 6) -#define MC13783_ADC4_ADDBIS15 (1 << 7) -#define MC13783_ADC4_ADDBIS16 (1 << 8) -#define MC13783_ADC4_ADDBIS17 (1 << 9) -#define MC13783_ADC4_ADDBIS18 (1 << 10) -#define MC13783_ADC4_ADDBIS19 (1 << 11) -#define MC13783_ADC4_ADDBIS20 (1 << 14) -#define MC13783_ADC4_ADDBIS21 (1 << 15) -#define MC13783_ADC4_ADDBIS22 (1 << 16) -#define MC13783_ADC4_ADDBIS23 (1 << 17) -#define MC13783_ADC4_ADDBIS24 (1 << 18) -#define MC13783_ADC4_ADDBIS25 (1 << 19) -#define MC13783_ADC4_ADDBIS26 (1 << 20) -#define MC13783_ADC4_ADDBIS27 (1 << 21) -#define MC13783_ADC4_ADDBIS28 (1 << 22) -#define MC13783_ADC4_ADDBIS29 (1 << 23) + return ret; +} #endif /* __LINUX_MFD_MC13783_PRIV_H */ - diff --git a/include/linux/mfd/mc13783.h b/include/linux/mfd/mc13783.h index b3a2a7243573..35680409b8cf 100644 --- a/include/linux/mfd/mc13783.h +++ b/include/linux/mfd/mc13783.h @@ -1,28 +1,50 @@ /* - * Copyright 2009 Pengutronix, Sascha Hauer + * Copyright 2009 Pengutronix + * Uwe Kleine-Koenig * - * Initial development of this code was funded by - * Phytec Messtechnik GmbH, http://www.phytec.de - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 as published by the + * Free Software Foundation. */ +#ifndef __LINUX_MFD_MC13783_H +#define __LINUX_MFD_MC13783_H -#ifndef __INCLUDE_LINUX_MFD_MC13783_H -#define __INCLUDE_LINUX_MFD_MC13783_H +#include struct mc13783; + +void mc13783_lock(struct mc13783 *mc13783); +void mc13783_unlock(struct mc13783 *mc13783); + +int mc13783_reg_read(struct mc13783 *mc13783, unsigned int offset, u32 *val); +int mc13783_reg_write(struct mc13783 *mc13783, unsigned int offset, u32 val); +int mc13783_reg_rmw(struct mc13783 *mc13783, unsigned int offset, + u32 mask, u32 val); + +int mc13783_irq_request(struct mc13783 *mc13783, int irq, + irq_handler_t handler, const char *name, void *dev); +int mc13783_irq_request_nounmask(struct mc13783 *mc13783, int irq, + irq_handler_t handler, const char *name, void *dev); +int mc13783_irq_free(struct mc13783 *mc13783, int irq, void *dev); +int mc13783_ackirq(struct mc13783 *mc13783, int irq); + +int mc13783_mask(struct mc13783 *mc13783, int irq); +int mc13783_unmask(struct mc13783 *mc13783, int irq); + +#define MC13783_ADC0 43 +#define MC13783_ADC0_ADREFEN (1 << 10) +#define MC13783_ADC0_ADREFMODE (1 << 11) +#define MC13783_ADC0_TSMOD0 (1 << 12) +#define MC13783_ADC0_TSMOD1 (1 << 13) +#define MC13783_ADC0_TSMOD2 (1 << 14) +#define MC13783_ADC0_ADINC1 (1 << 16) +#define MC13783_ADC0_ADINC2 (1 << 17) + +#define MC13783_ADC0_TSMOD_MASK (MC13783_ADC0_TSMOD0 | \ + MC13783_ADC0_TSMOD1 | \ + MC13783_ADC0_TSMOD2) + +/* to be cleaned up */ struct regulator_init_data; struct mc13783_regulator_init_data { @@ -30,23 +52,30 @@ struct mc13783_regulator_init_data { struct regulator_init_data *init_data; }; -struct mc13783_platform_data { - struct mc13783_regulator_init_data *regulators; +struct mc13783_regulator_platform_data { int num_regulators; - unsigned int flags; + struct mc13783_regulator_init_data *regulators; }; -/* mc13783_platform_data flags */ +struct mc13783_platform_data { + int num_regulators; + struct mc13783_regulator_init_data *regulators; + #define MC13783_USE_TOUCHSCREEN (1 << 0) #define MC13783_USE_CODEC (1 << 1) #define MC13783_USE_ADC (1 << 2) #define MC13783_USE_RTC (1 << 3) #define MC13783_USE_REGULATOR (1 << 4) + unsigned int flags; +}; + +#define MC13783_ADC_MODE_TS 1 +#define MC13783_ADC_MODE_SINGLE_CHAN 2 +#define MC13783_ADC_MODE_MULT_CHAN 3 int mc13783_adc_do_conversion(struct mc13783 *mc13783, unsigned int mode, unsigned int channel, unsigned int *sample); -void mc13783_adc_set_ts_status(struct mc13783 *mc13783, unsigned int status); #define MC13783_SW_SW1A 0 #define MC13783_SW_SW1B 1 @@ -80,5 +109,46 @@ void mc13783_adc_set_ts_status(struct mc13783 *mc13783, unsigned int status); #define MC13783_REGU_V3 29 #define MC13783_REGU_V4 30 -#endif /* __INCLUDE_LINUX_MFD_MC13783_H */ +#define MC13783_IRQ_ADCDONE 0 +#define MC13783_IRQ_ADCBISDONE 1 +#define MC13783_IRQ_TS 2 +#define MC13783_IRQ_WHIGH 3 +#define MC13783_IRQ_WLOW 4 +#define MC13783_IRQ_CHGDET 6 +#define MC13783_IRQ_CHGOV 7 +#define MC13783_IRQ_CHGREV 8 +#define MC13783_IRQ_CHGSHORT 9 +#define MC13783_IRQ_CCCV 10 +#define MC13783_IRQ_CHGCURR 11 +#define MC13783_IRQ_BPON 12 +#define MC13783_IRQ_LOBATL 13 
+#define MC13783_IRQ_LOBATH 14 +#define MC13783_IRQ_UDP 15 +#define MC13783_IRQ_USB 16 +#define MC13783_IRQ_ID 19 +#define MC13783_IRQ_SE1 21 +#define MC13783_IRQ_CKDET 22 +#define MC13783_IRQ_UDM 23 +#define MC13783_IRQ_1HZ 24 +#define MC13783_IRQ_TODA 25 +#define MC13783_IRQ_ONOFD1 27 +#define MC13783_IRQ_ONOFD2 28 +#define MC13783_IRQ_ONOFD3 29 +#define MC13783_IRQ_SYSRST 30 +#define MC13783_IRQ_RTCRST 31 +#define MC13783_IRQ_PC 32 +#define MC13783_IRQ_WARM 33 +#define MC13783_IRQ_MEMHLD 34 +#define MC13783_IRQ_PWRRDY 35 +#define MC13783_IRQ_THWARNL 36 +#define MC13783_IRQ_THWARNH 37 +#define MC13783_IRQ_CLK 38 +#define MC13783_IRQ_SEMAF 39 +#define MC13783_IRQ_MC2B 41 +#define MC13783_IRQ_HSDET 42 +#define MC13783_IRQ_HSL 43 +#define MC13783_IRQ_ALSPTH 44 +#define MC13783_IRQ_AHSSHORT 45 +#define MC13783_NUM_IRQ 46 +#endif /* __LINUX_MFD_MC13783_H */ -- cgit v1.2.3 From ed89a7551c65e1db44f1a6b7be6efce178fc87d0 Mon Sep 17 00:00:00 2001 From: Antonio Ospite Date: Sun, 29 Nov 2009 13:36:10 +0100 Subject: mfd: Remove ezx-pcap defines for custom led gpio encoding We used these, in a first version of leds-pcap driver, in order to encode gpio enabling and gpio inversion for a led inside the variable used for the gpio number. In the new leds-pcap driver we rely on gpio_is_valid() to derive if a led is gpio enabled and we have a dedicated flag to tell if the gpio value has to be inverted. Signed-off-by: Antonio Ospite Signed-off-by: Samuel Ortiz --- include/linux/mfd/ezx-pcap.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'include') diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h index 3402042ddc31..40c372165f3e 100644 --- a/include/linux/mfd/ezx-pcap.h +++ b/include/linux/mfd/ezx-pcap.h @@ -231,9 +231,6 @@ void pcap_set_ts_bits(struct pcap_chip *, u32); #define PCAP_LED_4MA 1 #define PCAP_LED_5MA 2 #define PCAP_LED_9MA 3 -#define PCAP_LED_GPIO_VAL_MASK 0x00ffffff -#define PCAP_LED_GPIO_EN 0x01000000 -#define PCAP_LED_GPIO_INVERT 0x02000000 #define PCAP_LED_T_MASK 0xf #define PCAP_LED_C_MASK 0x3 #define PCAP_BL_MASK 0x1f -- cgit v1.2.3 From ab4abe056d8828341d2a7d6463b13eafaf210181 Mon Sep 17 00:00:00 2001 From: Juha Keski-Saari Date: Fri, 11 Dec 2009 11:12:15 +0100 Subject: mfd: Add all twl4030 regulators to the twl4030 mfd driver Add all twl4030 regulators to the twl4030 mfd driver and twl4030_platform_data Signed-off-by: Juha Keski-Saari Acked-by: Mark Brown Signed-off-by: Samuel Ortiz --- drivers/mfd/twl4030-core.c | 26 ++++++++++++++++++++++++-- include/linux/i2c/twl4030.h | 8 ++++++-- 2 files changed, 30 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/mfd/twl4030-core.c b/drivers/mfd/twl4030-core.c index 334c86fccede..e4a5d489b8e7 100644 --- a/drivers/mfd/twl4030-core.c +++ b/drivers/mfd/twl4030-core.c @@ -625,11 +625,21 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features) } if (twl_has_regulator()) { - /* child = add_regulator(TWL4030_REG_VPLL1, pdata->vpll1); if (IS_ERR(child)) return PTR_ERR(child); - */ + + child = add_regulator(TWL4030_REG_VIO, pdata->vio); + if (IS_ERR(child)) + return PTR_ERR(child); + + child = add_regulator(TWL4030_REG_VDD1, pdata->vdd1); + if (IS_ERR(child)) + return PTR_ERR(child); + + child = add_regulator(TWL4030_REG_VDD2, pdata->vdd2); + if (IS_ERR(child)) + return PTR_ERR(child); child = add_regulator(TWL4030_REG_VMMC1, pdata->vmmc1); if (IS_ERR(child)) @@ -645,6 +655,18 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features) pdata->vaux2); if 
(IS_ERR(child)) return PTR_ERR(child); + + child = add_regulator(TWL4030_REG_VINTANA1, pdata->vintana1); + if (IS_ERR(child)) + return PTR_ERR(child); + + child = add_regulator(TWL4030_REG_VINTANA2, pdata->vintana2); + if (IS_ERR(child)) + return PTR_ERR(child); + + child = add_regulator(TWL4030_REG_VINTDIG, pdata->vintdig); + if (IS_ERR(child)) + return PTR_ERR(child); } /* maybe add LDOs that are omitted on cost-reduced parts */ diff --git a/include/linux/i2c/twl4030.h b/include/linux/i2c/twl4030.h index efa62eb497b8..a50bcf8a4048 100644 --- a/include/linux/i2c/twl4030.h +++ b/include/linux/i2c/twl4030.h @@ -484,8 +484,12 @@ struct twl4030_platform_data { struct regulator_init_data *vaux2; struct regulator_init_data *vaux3; struct regulator_init_data *vaux4; - - /* REVISIT more to come ... _nothing_ should be hard-wired */ + struct regulator_init_data *vio; + struct regulator_init_data *vdd1; + struct regulator_init_data *vdd2; + struct regulator_init_data *vintana1; + struct regulator_init_data *vintana2; + struct regulator_init_data *vintdig; }; /*----------------------------------------------------------------------*/ -- cgit v1.2.3 From b07682b6056eb6701f8cb86aa5800e6f2ea7919b Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Sun, 13 Dec 2009 20:05:51 +0100 Subject: mfd: Rename twl4030* driver files to enable re-use The upcoming TWL6030 is companion chip for OMAP4 like the current TWL4030 for OMAP3. The common modules like RTC, Regulator creates opportunity to re-use the most of the code from twl4030. This patch renames few common drivers twl4030* files to twl* to enable the code re-use. Signed-off-by: Rajendra Nayak Signed-off-by: Balaji T K Signed-off-by: Santosh Shilimkar Acked-by: Kevin Hilman Signed-off-by: Samuel Ortiz --- arch/arm/mach-omap2/board-2430sdp.c | 2 +- arch/arm/mach-omap2/board-3430sdp.c | 2 +- arch/arm/mach-omap2/board-ldp.c | 2 +- arch/arm/mach-omap2/board-omap3beagle.c | 2 +- arch/arm/mach-omap2/board-omap3pandora.c | 2 +- arch/arm/mach-omap2/board-overo.c | 2 +- drivers/gpio/twl4030-gpio.c | 2 +- drivers/input/keyboard/twl4030_keypad.c | 2 +- drivers/input/misc/twl4030-pwrbutton.c | 2 +- drivers/mfd/Makefile | 2 +- drivers/mfd/twl-core.c | 929 +++++++++++++++++++++++++++++++ drivers/mfd/twl4030-core.c | 929 ------------------------------- drivers/mfd/twl4030-irq.c | 2 +- drivers/mfd/twl4030-power.c | 2 +- drivers/regulator/Makefile | 2 +- drivers/regulator/twl-regulator.c | 500 +++++++++++++++++ drivers/regulator/twl4030-regulator.c | 500 ----------------- drivers/rtc/Makefile | 2 +- drivers/rtc/rtc-twl.c | 540 ++++++++++++++++++ drivers/rtc/rtc-twl4030.c | 540 ------------------ drivers/usb/otg/twl4030-usb.c | 2 +- drivers/video/omap/lcd_2430sdp.c | 2 +- drivers/watchdog/twl4030_wdt.c | 2 +- include/linux/i2c/twl.h | 550 ++++++++++++++++++ include/linux/i2c/twl4030.h | 550 ------------------ sound/soc/codecs/twl4030.c | 2 +- 26 files changed, 2537 insertions(+), 2537 deletions(-) create mode 100644 drivers/mfd/twl-core.c delete mode 100644 drivers/mfd/twl4030-core.c create mode 100644 drivers/regulator/twl-regulator.c delete mode 100644 drivers/regulator/twl4030-regulator.c create mode 100644 drivers/rtc/rtc-twl.c delete mode 100644 drivers/rtc/rtc-twl4030.c create mode 100644 include/linux/i2c/twl.h delete mode 100644 include/linux/i2c/twl4030.h (limited to 'include') diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c index db9374bc528b..e508904fb67e 100644 --- a/arch/arm/mach-omap2/board-2430sdp.c +++ 
b/arch/arm/mach-omap2/board-2430sdp.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c index 4cfb7b68dfad..c90b0d0b1927 100644 --- a/arch/arm/mach-omap2/board-3430sdp.c +++ b/arch/arm/mach-omap2/board-3430sdp.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c index 37431738f1c2..995d4a2b2dfd 100644 --- a/arch/arm/mach-omap2/board-ldp.c +++ b/arch/arm/mach-omap2/board-ldp.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c index 6ada8029f9a8..231cb4ec1847 100644 --- a/arch/arm/mach-omap2/board-omap3beagle.c +++ b/arch/arm/mach-omap2/board-omap3beagle.c @@ -29,7 +29,7 @@ #include #include -#include +#include #include #include diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c index 6f6c601eeab7..ef17cf1ab6d7 100644 --- a/arch/arm/mach-omap2/board-omap3pandora.c +++ b/arch/arm/mach-omap2/board-omap3pandora.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c index 5b78a87217e0..d192dd98a591 100644 --- a/arch/arm/mach-omap2/board-overo.c +++ b/arch/arm/mach-omap2/board-overo.c @@ -26,7 +26,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/gpio/twl4030-gpio.c b/drivers/gpio/twl4030-gpio.c index 49384a7c5492..a320d7bfe67c 100644 --- a/drivers/gpio/twl4030-gpio.c +++ b/drivers/gpio/twl4030-gpio.c @@ -34,7 +34,7 @@ #include #include -#include +#include /* diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c index 9a2977c21696..1d1536a2fe46 100644 --- a/drivers/input/keyboard/twl4030_keypad.c +++ b/drivers/input/keyboard/twl4030_keypad.c @@ -31,7 +31,7 @@ #include #include #include -#include +#include /* diff --git a/drivers/input/misc/twl4030-pwrbutton.c b/drivers/input/misc/twl4030-pwrbutton.c index f5fc9974a111..a73b889fff79 100644 --- a/drivers/input/misc/twl4030-pwrbutton.c +++ b/drivers/input/misc/twl4030-pwrbutton.c @@ -27,7 +27,7 @@ #include #include #include -#include +#include #define PWR_PWRON_IRQ (1 << 0) diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 75638ca8288b..f4d14b7589bf 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -26,7 +26,7 @@ obj-$(CONFIG_MFD_WM8350_I2C) += wm8350-i2c.o obj-$(CONFIG_TPS65010) += tps65010.o obj-$(CONFIG_MENELAUS) += menelaus.o -obj-$(CONFIG_TWL4030_CORE) += twl4030-core.o twl4030-irq.o +obj-$(CONFIG_TWL4030_CORE) += twl-core.o twl4030-irq.o obj-$(CONFIG_TWL4030_POWER) += twl4030-power.o obj-$(CONFIG_TWL4030_CODEC) += twl4030-codec.o diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c new file mode 100644 index 000000000000..44714f5cf495 --- /dev/null +++ b/drivers/mfd/twl-core.c @@ -0,0 +1,929 @@ +/* + * twl4030_core.c - driver for TWL4030/TPS659x0 PM and audio CODEC devices + * + * Copyright (C) 2005-2006 Texas Instruments, Inc. + * + * Modifications to defer interrupt handling to a kernel thread: + * Copyright (C) 2006 MontaVista Software, Inc. + * + * Based on tlv320aic23.c: + * Copyright (c) by Kai Svahn + * + * Code cleanup and modifications to IRQ handler. 
+ * by syed khasim + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include + +#include + +#include +#include + +#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3) +#include +#endif + +/* + * The TWL4030 "Triton 2" is one of a family of a multi-function "Power + * Management and System Companion Device" chips originally designed for + * use in OMAP2 and OMAP 3 based systems. Its control interfaces use I2C, + * often at around 3 Mbit/sec, including for interrupt handling. + * + * This driver core provides genirq support for the interrupts emitted, + * by the various modules, and exports register access primitives. + * + * FIXME this driver currently requires use of the first interrupt line + * (and associated registers). + */ + +#define DRIVER_NAME "twl4030" + +#if defined(CONFIG_TWL4030_BCI_BATTERY) || \ + defined(CONFIG_TWL4030_BCI_BATTERY_MODULE) +#define twl_has_bci() true +#else +#define twl_has_bci() false +#endif + +#if defined(CONFIG_KEYBOARD_TWL4030) || defined(CONFIG_KEYBOARD_TWL4030_MODULE) +#define twl_has_keypad() true +#else +#define twl_has_keypad() false +#endif + +#if defined(CONFIG_GPIO_TWL4030) || defined(CONFIG_GPIO_TWL4030_MODULE) +#define twl_has_gpio() true +#else +#define twl_has_gpio() false +#endif + +#if defined(CONFIG_REGULATOR_TWL4030) \ + || defined(CONFIG_REGULATOR_TWL4030_MODULE) +#define twl_has_regulator() true +#else +#define twl_has_regulator() false +#endif + +#if defined(CONFIG_TWL4030_MADC) || defined(CONFIG_TWL4030_MADC_MODULE) +#define twl_has_madc() true +#else +#define twl_has_madc() false +#endif + +#ifdef CONFIG_TWL4030_POWER +#define twl_has_power() true +#else +#define twl_has_power() false +#endif + +#if defined(CONFIG_RTC_DRV_TWL4030) || defined(CONFIG_RTC_DRV_TWL4030_MODULE) +#define twl_has_rtc() true +#else +#define twl_has_rtc() false +#endif + +#if defined(CONFIG_TWL4030_USB) || defined(CONFIG_TWL4030_USB_MODULE) +#define twl_has_usb() true +#else +#define twl_has_usb() false +#endif + +#if defined(CONFIG_TWL4030_WATCHDOG) || \ + defined(CONFIG_TWL4030_WATCHDOG_MODULE) +#define twl_has_watchdog() true +#else +#define twl_has_watchdog() false +#endif + +#if defined(CONFIG_TWL4030_CODEC) || defined(CONFIG_TWL4030_CODEC_MODULE) +#define twl_has_codec() true +#else +#define twl_has_codec() false +#endif + +/* Triton Core internal information (BEGIN) */ + +/* Last - for index max*/ +#define TWL4030_MODULE_LAST TWL4030_MODULE_SECURED_REG + +#define TWL4030_NUM_SLAVES 4 + +#if defined(CONFIG_INPUT_TWL4030_PWRBUTTON) \ + || defined(CONFIG_INPUT_TWL4030_PWBUTTON_MODULE) +#define twl_has_pwrbutton() true +#else +#define twl_has_pwrbutton() false +#endif + +/* Base Address defns for twl4030_map[] */ + +/* subchip/slave 0 - USB ID */ +#define TWL4030_BASEADD_USB 0x0000 + +/* subchip/slave 1 - AUD ID */ +#define 
TWL4030_BASEADD_AUDIO_VOICE 0x0000 +#define TWL4030_BASEADD_GPIO 0x0098 +#define TWL4030_BASEADD_INTBR 0x0085 +#define TWL4030_BASEADD_PIH 0x0080 +#define TWL4030_BASEADD_TEST 0x004C + +/* subchip/slave 2 - AUX ID */ +#define TWL4030_BASEADD_INTERRUPTS 0x00B9 +#define TWL4030_BASEADD_LED 0x00EE +#define TWL4030_BASEADD_MADC 0x0000 +#define TWL4030_BASEADD_MAIN_CHARGE 0x0074 +#define TWL4030_BASEADD_PRECHARGE 0x00AA +#define TWL4030_BASEADD_PWM0 0x00F8 +#define TWL4030_BASEADD_PWM1 0x00FB +#define TWL4030_BASEADD_PWMA 0x00EF +#define TWL4030_BASEADD_PWMB 0x00F1 +#define TWL4030_BASEADD_KEYPAD 0x00D2 + +#define TWL5031_BASEADD_ACCESSORY 0x0074 /* Replaces Main Charge */ +#define TWL5031_BASEADD_INTERRUPTS 0x00B9 /* Different than TWL4030's + one */ + +/* subchip/slave 3 - POWER ID */ +#define TWL4030_BASEADD_BACKUP 0x0014 +#define TWL4030_BASEADD_INT 0x002E +#define TWL4030_BASEADD_PM_MASTER 0x0036 +#define TWL4030_BASEADD_PM_RECEIVER 0x005B +#define TWL4030_BASEADD_RTC 0x001C +#define TWL4030_BASEADD_SECURED_REG 0x0000 + +/* Triton Core internal information (END) */ + + +/* Few power values */ +#define R_CFG_BOOT 0x05 +#define R_PROTECT_KEY 0x0E + +/* access control values for R_PROTECT_KEY */ +#define KEY_UNLOCK1 0xce +#define KEY_UNLOCK2 0xec +#define KEY_LOCK 0x00 + +/* some fields in R_CFG_BOOT */ +#define HFCLK_FREQ_19p2_MHZ (1 << 0) +#define HFCLK_FREQ_26_MHZ (2 << 0) +#define HFCLK_FREQ_38p4_MHZ (3 << 0) +#define HIGH_PERF_SQ (1 << 3) +#define CK32K_LOWPWR_EN (1 << 7) + + +/* chip-specific feature flags, for i2c_device_id.driver_data */ +#define TWL4030_VAUX2 BIT(0) /* pre-5030 voltage ranges */ +#define TPS_SUBSET BIT(1) /* tps659[23]0 have fewer LDOs */ +#define TWL5031 BIT(2) /* twl5031 has different registers */ + +/*----------------------------------------------------------------------*/ + +/* is driver active, bound to a chip? */ +static bool inuse; + +/* Structure for each TWL4030 Slave */ +struct twl4030_client { + struct i2c_client *client; + u8 address; + + /* max numb of i2c_msg required is for read =2 */ + struct i2c_msg xfer_msg[2]; + + /* To lock access to xfer_msg */ + struct mutex xfer_lock; +}; + +static struct twl4030_client twl4030_modules[TWL4030_NUM_SLAVES]; + + +/* mapping the module id to slave id and base address */ +struct twl4030mapping { + unsigned char sid; /* Slave ID */ + unsigned char base; /* base address */ +}; + +static struct twl4030mapping twl4030_map[TWL4030_MODULE_LAST + 1] = { + /* + * NOTE: don't change this table without updating the + * defines for TWL4030_MODULE_* + * so they continue to match the order in this table. 
+ */ + + { 0, TWL4030_BASEADD_USB }, + + { 1, TWL4030_BASEADD_AUDIO_VOICE }, + { 1, TWL4030_BASEADD_GPIO }, + { 1, TWL4030_BASEADD_INTBR }, + { 1, TWL4030_BASEADD_PIH }, + { 1, TWL4030_BASEADD_TEST }, + + { 2, TWL4030_BASEADD_KEYPAD }, + { 2, TWL4030_BASEADD_MADC }, + { 2, TWL4030_BASEADD_INTERRUPTS }, + { 2, TWL4030_BASEADD_LED }, + { 2, TWL4030_BASEADD_MAIN_CHARGE }, + { 2, TWL4030_BASEADD_PRECHARGE }, + { 2, TWL4030_BASEADD_PWM0 }, + { 2, TWL4030_BASEADD_PWM1 }, + { 2, TWL4030_BASEADD_PWMA }, + { 2, TWL4030_BASEADD_PWMB }, + { 2, TWL5031_BASEADD_ACCESSORY }, + { 2, TWL5031_BASEADD_INTERRUPTS }, + + { 3, TWL4030_BASEADD_BACKUP }, + { 3, TWL4030_BASEADD_INT }, + { 3, TWL4030_BASEADD_PM_MASTER }, + { 3, TWL4030_BASEADD_PM_RECEIVER }, + { 3, TWL4030_BASEADD_RTC }, + { 3, TWL4030_BASEADD_SECURED_REG }, +}; + +/*----------------------------------------------------------------------*/ + +/* Exported Functions */ + +/** + * twl4030_i2c_write - Writes a n bit register in TWL4030 + * @mod_no: module number + * @value: an array of num_bytes+1 containing data to write + * @reg: register address (just offset will do) + * @num_bytes: number of bytes to transfer + * + * IMPORTANT: for 'value' parameter: Allocate value num_bytes+1 and + * valid data starts at Offset 1. + * + * Returns the result of operation - 0 is success + */ +int twl4030_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes) +{ + int ret; + int sid; + struct twl4030_client *twl; + struct i2c_msg *msg; + + if (unlikely(mod_no > TWL4030_MODULE_LAST)) { + pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no); + return -EPERM; + } + sid = twl4030_map[mod_no].sid; + twl = &twl4030_modules[sid]; + + if (unlikely(!inuse)) { + pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid); + return -EPERM; + } + mutex_lock(&twl->xfer_lock); + /* + * [MSG1]: fill the register address data + * fill the data Tx buffer + */ + msg = &twl->xfer_msg[0]; + msg->addr = twl->address; + msg->len = num_bytes + 1; + msg->flags = 0; + msg->buf = value; + /* over write the first byte of buffer with the register address */ + *value = twl4030_map[mod_no].base + reg; + ret = i2c_transfer(twl->client->adapter, twl->xfer_msg, 1); + mutex_unlock(&twl->xfer_lock); + + /* i2c_transfer returns number of messages transferred */ + if (ret != 1) { + pr_err("%s: i2c_write failed to transfer all messages\n", + DRIVER_NAME); + if (ret < 0) + return ret; + else + return -EIO; + } else { + return 0; + } +} +EXPORT_SYMBOL(twl4030_i2c_write); + +/** + * twl4030_i2c_read - Reads a n bit register in TWL4030 + * @mod_no: module number + * @value: an array of num_bytes containing data to be read + * @reg: register address (just offset will do) + * @num_bytes: number of bytes to transfer + * + * Returns result of operation - num_bytes is success else failure. 
+ */ +int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes) +{ + int ret; + u8 val; + int sid; + struct twl4030_client *twl; + struct i2c_msg *msg; + + if (unlikely(mod_no > TWL4030_MODULE_LAST)) { + pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no); + return -EPERM; + } + sid = twl4030_map[mod_no].sid; + twl = &twl4030_modules[sid]; + + if (unlikely(!inuse)) { + pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid); + return -EPERM; + } + mutex_lock(&twl->xfer_lock); + /* [MSG1] fill the register address data */ + msg = &twl->xfer_msg[0]; + msg->addr = twl->address; + msg->len = 1; + msg->flags = 0; /* Read the register value */ + val = twl4030_map[mod_no].base + reg; + msg->buf = &val; + /* [MSG2] fill the data rx buffer */ + msg = &twl->xfer_msg[1]; + msg->addr = twl->address; + msg->flags = I2C_M_RD; /* Read the register value */ + msg->len = num_bytes; /* only n bytes */ + msg->buf = value; + ret = i2c_transfer(twl->client->adapter, twl->xfer_msg, 2); + mutex_unlock(&twl->xfer_lock); + + /* i2c_transfer returns number of messages transferred */ + if (ret != 2) { + pr_err("%s: i2c_read failed to transfer all messages\n", + DRIVER_NAME); + if (ret < 0) + return ret; + else + return -EIO; + } else { + return 0; + } +} +EXPORT_SYMBOL(twl4030_i2c_read); + +/** + * twl4030_i2c_write_u8 - Writes a 8 bit register in TWL4030 + * @mod_no: module number + * @value: the value to be written 8 bit + * @reg: register address (just offset will do) + * + * Returns result of operation - 0 is success + */ +int twl4030_i2c_write_u8(u8 mod_no, u8 value, u8 reg) +{ + + /* 2 bytes offset 1 contains the data offset 0 is used by i2c_write */ + u8 temp_buffer[2] = { 0 }; + /* offset 1 contains the data */ + temp_buffer[1] = value; + return twl4030_i2c_write(mod_no, temp_buffer, reg, 1); +} +EXPORT_SYMBOL(twl4030_i2c_write_u8); + +/** + * twl4030_i2c_read_u8 - Reads a 8 bit register from TWL4030 + * @mod_no: module number + * @value: the value read 8 bit + * @reg: register address (just offset will do) + * + * Returns result of operation - 0 is success + */ +int twl4030_i2c_read_u8(u8 mod_no, u8 *value, u8 reg) +{ + return twl4030_i2c_read(mod_no, value, reg, 1); +} +EXPORT_SYMBOL(twl4030_i2c_read_u8); + +/*----------------------------------------------------------------------*/ + +static struct device * +add_numbered_child(unsigned chip, const char *name, int num, + void *pdata, unsigned pdata_len, + bool can_wakeup, int irq0, int irq1) +{ + struct platform_device *pdev; + struct twl4030_client *twl = &twl4030_modules[chip]; + int status; + + pdev = platform_device_alloc(name, num); + if (!pdev) { + dev_dbg(&twl->client->dev, "can't alloc dev\n"); + status = -ENOMEM; + goto err; + } + + device_init_wakeup(&pdev->dev, can_wakeup); + pdev->dev.parent = &twl->client->dev; + + if (pdata) { + status = platform_device_add_data(pdev, pdata, pdata_len); + if (status < 0) { + dev_dbg(&pdev->dev, "can't add platform_data\n"); + goto err; + } + } + + if (irq0) { + struct resource r[2] = { + { .start = irq0, .flags = IORESOURCE_IRQ, }, + { .start = irq1, .flags = IORESOURCE_IRQ, }, + }; + + status = platform_device_add_resources(pdev, r, irq1 ? 
2 : 1); + if (status < 0) { + dev_dbg(&pdev->dev, "can't add irqs\n"); + goto err; + } + } + + status = platform_device_add(pdev); + +err: + if (status < 0) { + platform_device_put(pdev); + dev_err(&twl->client->dev, "can't add %s dev\n", name); + return ERR_PTR(status); + } + return &pdev->dev; +} + +static inline struct device *add_child(unsigned chip, const char *name, + void *pdata, unsigned pdata_len, + bool can_wakeup, int irq0, int irq1) +{ + return add_numbered_child(chip, name, -1, pdata, pdata_len, + can_wakeup, irq0, irq1); +} + +static struct device * +add_regulator_linked(int num, struct regulator_init_data *pdata, + struct regulator_consumer_supply *consumers, + unsigned num_consumers) +{ + /* regulator framework demands init_data ... */ + if (!pdata) + return NULL; + + if (consumers) { + pdata->consumer_supplies = consumers; + pdata->num_consumer_supplies = num_consumers; + } + + /* NOTE: we currently ignore regulator IRQs, e.g. for short circuits */ + return add_numbered_child(3, "twl4030_reg", num, + pdata, sizeof(*pdata), false, 0, 0); +} + +static struct device * +add_regulator(int num, struct regulator_init_data *pdata) +{ + return add_regulator_linked(num, pdata, NULL, 0); +} + +/* + * NOTE: We know the first 8 IRQs after pdata->base_irq are + * for the PIH, and the next are for the PWR_INT SIH, since + * that's how twl_init_irq() sets things up. + */ + +static int +add_children(struct twl4030_platform_data *pdata, unsigned long features) +{ + struct device *child; + + if (twl_has_bci() && pdata->bci && + !(features & (TPS_SUBSET | TWL5031))) { + child = add_child(3, "twl4030_bci", + pdata->bci, sizeof(*pdata->bci), + false, + /* irq0 = CHG_PRES, irq1 = BCI */ + pdata->irq_base + 8 + 1, pdata->irq_base + 2); + if (IS_ERR(child)) + return PTR_ERR(child); + } + + if (twl_has_gpio() && pdata->gpio) { + child = add_child(1, "twl4030_gpio", + pdata->gpio, sizeof(*pdata->gpio), + false, pdata->irq_base + 0, 0); + if (IS_ERR(child)) + return PTR_ERR(child); + } + + if (twl_has_keypad() && pdata->keypad) { + child = add_child(2, "twl4030_keypad", + pdata->keypad, sizeof(*pdata->keypad), + true, pdata->irq_base + 1, 0); + if (IS_ERR(child)) + return PTR_ERR(child); + } + + if (twl_has_madc() && pdata->madc) { + child = add_child(2, "twl4030_madc", + pdata->madc, sizeof(*pdata->madc), + true, pdata->irq_base + 3, 0); + if (IS_ERR(child)) + return PTR_ERR(child); + } + + if (twl_has_rtc()) { + /* + * REVISIT platform_data here currently might expose the + * "msecure" line ... but for now we just expect board + * setup to tell the chip "it's always ok to SET_TIME". + * Eventually, Linux might become more aware of such + * HW security concerns, and "least privilege". 
+ */ + child = add_child(3, "twl4030_rtc", + NULL, 0, + true, pdata->irq_base + 8 + 3, 0); + if (IS_ERR(child)) + return PTR_ERR(child); + } + + if (twl_has_usb() && pdata->usb) { + + static struct regulator_consumer_supply usb1v5 = { + .supply = "usb1v5", + }; + static struct regulator_consumer_supply usb1v8 = { + .supply = "usb1v8", + }; + static struct regulator_consumer_supply usb3v1 = { + .supply = "usb3v1", + }; + + /* First add the regulators so that they can be used by transceiver */ + if (twl_has_regulator()) { + /* this is a template that gets copied */ + struct regulator_init_data usb_fixed = { + .constraints.valid_modes_mask = + REGULATOR_MODE_NORMAL + | REGULATOR_MODE_STANDBY, + .constraints.valid_ops_mask = + REGULATOR_CHANGE_MODE + | REGULATOR_CHANGE_STATUS, + }; + + child = add_regulator_linked(TWL4030_REG_VUSB1V5, + &usb_fixed, &usb1v5, 1); + if (IS_ERR(child)) + return PTR_ERR(child); + + child = add_regulator_linked(TWL4030_REG_VUSB1V8, + &usb_fixed, &usb1v8, 1); + if (IS_ERR(child)) + return PTR_ERR(child); + + child = add_regulator_linked(TWL4030_REG_VUSB3V1, + &usb_fixed, &usb3v1, 1); + if (IS_ERR(child)) + return PTR_ERR(child); + + } + + child = add_child(0, "twl4030_usb", + pdata->usb, sizeof(*pdata->usb), + true, + /* irq0 = USB_PRES, irq1 = USB */ + pdata->irq_base + 8 + 2, pdata->irq_base + 4); + + if (IS_ERR(child)) + return PTR_ERR(child); + + /* we need to connect regulators to this transceiver */ + if (twl_has_regulator() && child) { + usb1v5.dev = child; + usb1v8.dev = child; + usb3v1.dev = child; + } + } + + if (twl_has_watchdog()) { + child = add_child(0, "twl4030_wdt", NULL, 0, false, 0, 0); + if (IS_ERR(child)) + return PTR_ERR(child); + } + + if (twl_has_pwrbutton()) { + child = add_child(1, "twl4030_pwrbutton", + NULL, 0, true, pdata->irq_base + 8 + 0, 0); + if (IS_ERR(child)) + return PTR_ERR(child); + } + + if (twl_has_codec() && pdata->codec) { + child = add_child(1, "twl4030_codec", + pdata->codec, sizeof(*pdata->codec), + false, 0, 0); + if (IS_ERR(child)) + return PTR_ERR(child); + } + + if (twl_has_regulator()) { + child = add_regulator(TWL4030_REG_VPLL1, pdata->vpll1); + if (IS_ERR(child)) + return PTR_ERR(child); + + child = add_regulator(TWL4030_REG_VIO, pdata->vio); + if (IS_ERR(child)) + return PTR_ERR(child); + + child = add_regulator(TWL4030_REG_VDD1, pdata->vdd1); + if (IS_ERR(child)) + return PTR_ERR(child); + + child = add_regulator(TWL4030_REG_VDD2, pdata->vdd2); + if (IS_ERR(child)) + return PTR_ERR(child); + + child = add_regulator(TWL4030_REG_VMMC1, pdata->vmmc1); + if (IS_ERR(child)) + return PTR_ERR(child); + + child = add_regulator(TWL4030_REG_VDAC, pdata->vdac); + if (IS_ERR(child)) + return PTR_ERR(child); + + child = add_regulator((features & TWL4030_VAUX2) + ? 
TWL4030_REG_VAUX2_4030 + : TWL4030_REG_VAUX2, + pdata->vaux2); + if (IS_ERR(child)) + return PTR_ERR(child); + + child = add_regulator(TWL4030_REG_VINTANA1, pdata->vintana1); + if (IS_ERR(child)) + return PTR_ERR(child); + + child = add_regulator(TWL4030_REG_VINTANA2, pdata->vintana2); + if (IS_ERR(child)) + return PTR_ERR(child); + + child = add_regulator(TWL4030_REG_VINTDIG, pdata->vintdig); + if (IS_ERR(child)) + return PTR_ERR(child); + } + + /* maybe add LDOs that are omitted on cost-reduced parts */ + if (twl_has_regulator() && !(features & TPS_SUBSET)) { + child = add_regulator(TWL4030_REG_VPLL2, pdata->vpll2); + if (IS_ERR(child)) + return PTR_ERR(child); + + child = add_regulator(TWL4030_REG_VMMC2, pdata->vmmc2); + if (IS_ERR(child)) + return PTR_ERR(child); + + child = add_regulator(TWL4030_REG_VSIM, pdata->vsim); + if (IS_ERR(child)) + return PTR_ERR(child); + + child = add_regulator(TWL4030_REG_VAUX1, pdata->vaux1); + if (IS_ERR(child)) + return PTR_ERR(child); + + child = add_regulator(TWL4030_REG_VAUX3, pdata->vaux3); + if (IS_ERR(child)) + return PTR_ERR(child); + + child = add_regulator(TWL4030_REG_VAUX4, pdata->vaux4); + if (IS_ERR(child)) + return PTR_ERR(child); + } + + return 0; +} + +/*----------------------------------------------------------------------*/ + +/* + * These three functions initialize the on-chip clock framework, + * letting it generate the right frequencies for USB, MADC, and + * other purposes. + */ +static inline int __init protect_pm_master(void) +{ + int e = 0; + + e = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, KEY_LOCK, + R_PROTECT_KEY); + return e; +} + +static inline int __init unprotect_pm_master(void) +{ + int e = 0; + + e |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, KEY_UNLOCK1, + R_PROTECT_KEY); + e |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, KEY_UNLOCK2, + R_PROTECT_KEY); + return e; +} + +static void clocks_init(struct device *dev, + struct twl4030_clock_init_data *clock) +{ + int e = 0; + struct clk *osc; + u32 rate; + u8 ctrl = HFCLK_FREQ_26_MHZ; + +#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3) + if (cpu_is_omap2430()) + osc = clk_get(dev, "osc_ck"); + else + osc = clk_get(dev, "osc_sys_ck"); + + if (IS_ERR(osc)) { + printk(KERN_WARNING "Skipping twl4030 internal clock init and " + "using bootloader value (unknown osc rate)\n"); + return; + } + + rate = clk_get_rate(osc); + clk_put(osc); + +#else + /* REVISIT for non-OMAP systems, pass the clock rate from + * board init code, using platform_data. 
+ */ + osc = ERR_PTR(-EIO); + + printk(KERN_WARNING "Skipping twl4030 internal clock init and " + "using bootloader value (unknown osc rate)\n"); + + return; +#endif + + switch (rate) { + case 19200000: + ctrl = HFCLK_FREQ_19p2_MHZ; + break; + case 26000000: + ctrl = HFCLK_FREQ_26_MHZ; + break; + case 38400000: + ctrl = HFCLK_FREQ_38p4_MHZ; + break; + } + + ctrl |= HIGH_PERF_SQ; + if (clock && clock->ck32k_lowpwr_enable) + ctrl |= CK32K_LOWPWR_EN; + + e |= unprotect_pm_master(); + /* effect->MADC+USB ck en */ + e |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, ctrl, R_CFG_BOOT); + e |= protect_pm_master(); + + if (e < 0) + pr_err("%s: clock init err [%d]\n", DRIVER_NAME, e); +} + +/*----------------------------------------------------------------------*/ + +int twl_init_irq(int irq_num, unsigned irq_base, unsigned irq_end); +int twl_exit_irq(void); +int twl_init_chip_irq(const char *chip); + +static int twl4030_remove(struct i2c_client *client) +{ + unsigned i; + int status; + + status = twl_exit_irq(); + if (status < 0) + return status; + + for (i = 0; i < TWL4030_NUM_SLAVES; i++) { + struct twl4030_client *twl = &twl4030_modules[i]; + + if (twl->client && twl->client != client) + i2c_unregister_device(twl->client); + twl4030_modules[i].client = NULL; + } + inuse = false; + return 0; +} + +/* NOTE: this driver only handles a single twl4030/tps659x0 chip */ +static int __init +twl4030_probe(struct i2c_client *client, const struct i2c_device_id *id) +{ + int status; + unsigned i; + struct twl4030_platform_data *pdata = client->dev.platform_data; + + if (!pdata) { + dev_dbg(&client->dev, "no platform data?\n"); + return -EINVAL; + } + + if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C) == 0) { + dev_dbg(&client->dev, "can't talk I2C?\n"); + return -EIO; + } + + if (inuse) { + dev_dbg(&client->dev, "driver is already in use\n"); + return -EBUSY; + } + + for (i = 0; i < TWL4030_NUM_SLAVES; i++) { + struct twl4030_client *twl = &twl4030_modules[i]; + + twl->address = client->addr + i; + if (i == 0) + twl->client = client; + else { + twl->client = i2c_new_dummy(client->adapter, + twl->address); + if (!twl->client) { + dev_err(&client->dev, + "can't attach client %d\n", i); + status = -ENOMEM; + goto fail; + } + } + mutex_init(&twl->xfer_lock); + } + inuse = true; + + /* setup clock framework */ + clocks_init(&client->dev, pdata->clock); + + /* load power event scripts */ + if (twl_has_power() && pdata->power) + twl4030_power_init(pdata->power); + + /* Maybe init the T2 Interrupt subsystem */ + if (client->irq + && pdata->irq_base + && pdata->irq_end > pdata->irq_base) { + twl_init_chip_irq(id->name); + status = twl_init_irq(client->irq, pdata->irq_base, pdata->irq_end); + if (status < 0) + goto fail; + } + + status = add_children(pdata, id->driver_data); +fail: + if (status < 0) + twl4030_remove(client); + return status; +} + +static const struct i2c_device_id twl4030_ids[] = { + { "twl4030", TWL4030_VAUX2 }, /* "Triton 2" */ + { "twl5030", 0 }, /* T2 updated */ + { "twl5031", TWL5031 }, /* TWL5030 updated */ + { "tps65950", 0 }, /* catalog version of twl5030 */ + { "tps65930", TPS_SUBSET }, /* fewer LDOs and DACs; no charger */ + { "tps65920", TPS_SUBSET }, /* fewer LDOs; no codec or charger */ + { /* end of list */ }, +}; +MODULE_DEVICE_TABLE(i2c, twl4030_ids); + +/* One Client Driver , 4 Clients */ +static struct i2c_driver twl4030_driver = { + .driver.name = DRIVER_NAME, + .id_table = twl4030_ids, + .probe = twl4030_probe, + .remove = twl4030_remove, +}; + +static int __init 
twl4030_init(void) +{ + return i2c_add_driver(&twl4030_driver); +} +subsys_initcall(twl4030_init); + +static void __exit twl4030_exit(void) +{ + i2c_del_driver(&twl4030_driver); +} +module_exit(twl4030_exit); + +MODULE_AUTHOR("Texas Instruments, Inc."); +MODULE_DESCRIPTION("I2C Core interface for TWL4030"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/twl4030-core.c b/drivers/mfd/twl4030-core.c deleted file mode 100644 index 7c2ec0aa9d87..000000000000 --- a/drivers/mfd/twl4030-core.c +++ /dev/null @@ -1,929 +0,0 @@ -/* - * twl4030_core.c - driver for TWL4030/TPS659x0 PM and audio CODEC devices - * - * Copyright (C) 2005-2006 Texas Instruments, Inc. - * - * Modifications to defer interrupt handling to a kernel thread: - * Copyright (C) 2006 MontaVista Software, Inc. - * - * Based on tlv320aic23.c: - * Copyright (c) by Kai Svahn - * - * Code cleanup and modifications to IRQ handler. - * by syed khasim - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include -#include -#include -#include -#include - -#include - -#include -#include - -#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3) -#include -#endif - -/* - * The TWL4030 "Triton 2" is one of a family of a multi-function "Power - * Management and System Companion Device" chips originally designed for - * use in OMAP2 and OMAP 3 based systems. Its control interfaces use I2C, - * often at around 3 Mbit/sec, including for interrupt handling. - * - * This driver core provides genirq support for the interrupts emitted, - * by the various modules, and exports register access primitives. - * - * FIXME this driver currently requires use of the first interrupt line - * (and associated registers). 
- */ - -#define DRIVER_NAME "twl4030" - -#if defined(CONFIG_TWL4030_BCI_BATTERY) || \ - defined(CONFIG_TWL4030_BCI_BATTERY_MODULE) -#define twl_has_bci() true -#else -#define twl_has_bci() false -#endif - -#if defined(CONFIG_KEYBOARD_TWL4030) || defined(CONFIG_KEYBOARD_TWL4030_MODULE) -#define twl_has_keypad() true -#else -#define twl_has_keypad() false -#endif - -#if defined(CONFIG_GPIO_TWL4030) || defined(CONFIG_GPIO_TWL4030_MODULE) -#define twl_has_gpio() true -#else -#define twl_has_gpio() false -#endif - -#if defined(CONFIG_REGULATOR_TWL4030) \ - || defined(CONFIG_REGULATOR_TWL4030_MODULE) -#define twl_has_regulator() true -#else -#define twl_has_regulator() false -#endif - -#if defined(CONFIG_TWL4030_MADC) || defined(CONFIG_TWL4030_MADC_MODULE) -#define twl_has_madc() true -#else -#define twl_has_madc() false -#endif - -#ifdef CONFIG_TWL4030_POWER -#define twl_has_power() true -#else -#define twl_has_power() false -#endif - -#if defined(CONFIG_RTC_DRV_TWL4030) || defined(CONFIG_RTC_DRV_TWL4030_MODULE) -#define twl_has_rtc() true -#else -#define twl_has_rtc() false -#endif - -#if defined(CONFIG_TWL4030_USB) || defined(CONFIG_TWL4030_USB_MODULE) -#define twl_has_usb() true -#else -#define twl_has_usb() false -#endif - -#if defined(CONFIG_TWL4030_WATCHDOG) || \ - defined(CONFIG_TWL4030_WATCHDOG_MODULE) -#define twl_has_watchdog() true -#else -#define twl_has_watchdog() false -#endif - -#if defined(CONFIG_TWL4030_CODEC) || defined(CONFIG_TWL4030_CODEC_MODULE) -#define twl_has_codec() true -#else -#define twl_has_codec() false -#endif - -/* Triton Core internal information (BEGIN) */ - -/* Last - for index max*/ -#define TWL4030_MODULE_LAST TWL4030_MODULE_SECURED_REG - -#define TWL4030_NUM_SLAVES 4 - -#if defined(CONFIG_INPUT_TWL4030_PWRBUTTON) \ - || defined(CONFIG_INPUT_TWL4030_PWBUTTON_MODULE) -#define twl_has_pwrbutton() true -#else -#define twl_has_pwrbutton() false -#endif - -/* Base Address defns for twl4030_map[] */ - -/* subchip/slave 0 - USB ID */ -#define TWL4030_BASEADD_USB 0x0000 - -/* subchip/slave 1 - AUD ID */ -#define TWL4030_BASEADD_AUDIO_VOICE 0x0000 -#define TWL4030_BASEADD_GPIO 0x0098 -#define TWL4030_BASEADD_INTBR 0x0085 -#define TWL4030_BASEADD_PIH 0x0080 -#define TWL4030_BASEADD_TEST 0x004C - -/* subchip/slave 2 - AUX ID */ -#define TWL4030_BASEADD_INTERRUPTS 0x00B9 -#define TWL4030_BASEADD_LED 0x00EE -#define TWL4030_BASEADD_MADC 0x0000 -#define TWL4030_BASEADD_MAIN_CHARGE 0x0074 -#define TWL4030_BASEADD_PRECHARGE 0x00AA -#define TWL4030_BASEADD_PWM0 0x00F8 -#define TWL4030_BASEADD_PWM1 0x00FB -#define TWL4030_BASEADD_PWMA 0x00EF -#define TWL4030_BASEADD_PWMB 0x00F1 -#define TWL4030_BASEADD_KEYPAD 0x00D2 - -#define TWL5031_BASEADD_ACCESSORY 0x0074 /* Replaces Main Charge */ -#define TWL5031_BASEADD_INTERRUPTS 0x00B9 /* Different than TWL4030's - one */ - -/* subchip/slave 3 - POWER ID */ -#define TWL4030_BASEADD_BACKUP 0x0014 -#define TWL4030_BASEADD_INT 0x002E -#define TWL4030_BASEADD_PM_MASTER 0x0036 -#define TWL4030_BASEADD_PM_RECEIVER 0x005B -#define TWL4030_BASEADD_RTC 0x001C -#define TWL4030_BASEADD_SECURED_REG 0x0000 - -/* Triton Core internal information (END) */ - - -/* Few power values */ -#define R_CFG_BOOT 0x05 -#define R_PROTECT_KEY 0x0E - -/* access control values for R_PROTECT_KEY */ -#define KEY_UNLOCK1 0xce -#define KEY_UNLOCK2 0xec -#define KEY_LOCK 0x00 - -/* some fields in R_CFG_BOOT */ -#define HFCLK_FREQ_19p2_MHZ (1 << 0) -#define HFCLK_FREQ_26_MHZ (2 << 0) -#define HFCLK_FREQ_38p4_MHZ (3 << 0) -#define HIGH_PERF_SQ (1 << 3) -#define 
CK32K_LOWPWR_EN (1 << 7) - - -/* chip-specific feature flags, for i2c_device_id.driver_data */ -#define TWL4030_VAUX2 BIT(0) /* pre-5030 voltage ranges */ -#define TPS_SUBSET BIT(1) /* tps659[23]0 have fewer LDOs */ -#define TWL5031 BIT(2) /* twl5031 has different registers */ - -/*----------------------------------------------------------------------*/ - -/* is driver active, bound to a chip? */ -static bool inuse; - -/* Structure for each TWL4030 Slave */ -struct twl4030_client { - struct i2c_client *client; - u8 address; - - /* max numb of i2c_msg required is for read =2 */ - struct i2c_msg xfer_msg[2]; - - /* To lock access to xfer_msg */ - struct mutex xfer_lock; -}; - -static struct twl4030_client twl4030_modules[TWL4030_NUM_SLAVES]; - - -/* mapping the module id to slave id and base address */ -struct twl4030mapping { - unsigned char sid; /* Slave ID */ - unsigned char base; /* base address */ -}; - -static struct twl4030mapping twl4030_map[TWL4030_MODULE_LAST + 1] = { - /* - * NOTE: don't change this table without updating the - * defines for TWL4030_MODULE_* - * so they continue to match the order in this table. - */ - - { 0, TWL4030_BASEADD_USB }, - - { 1, TWL4030_BASEADD_AUDIO_VOICE }, - { 1, TWL4030_BASEADD_GPIO }, - { 1, TWL4030_BASEADD_INTBR }, - { 1, TWL4030_BASEADD_PIH }, - { 1, TWL4030_BASEADD_TEST }, - - { 2, TWL4030_BASEADD_KEYPAD }, - { 2, TWL4030_BASEADD_MADC }, - { 2, TWL4030_BASEADD_INTERRUPTS }, - { 2, TWL4030_BASEADD_LED }, - { 2, TWL4030_BASEADD_MAIN_CHARGE }, - { 2, TWL4030_BASEADD_PRECHARGE }, - { 2, TWL4030_BASEADD_PWM0 }, - { 2, TWL4030_BASEADD_PWM1 }, - { 2, TWL4030_BASEADD_PWMA }, - { 2, TWL4030_BASEADD_PWMB }, - { 2, TWL5031_BASEADD_ACCESSORY }, - { 2, TWL5031_BASEADD_INTERRUPTS }, - - { 3, TWL4030_BASEADD_BACKUP }, - { 3, TWL4030_BASEADD_INT }, - { 3, TWL4030_BASEADD_PM_MASTER }, - { 3, TWL4030_BASEADD_PM_RECEIVER }, - { 3, TWL4030_BASEADD_RTC }, - { 3, TWL4030_BASEADD_SECURED_REG }, -}; - -/*----------------------------------------------------------------------*/ - -/* Exported Functions */ - -/** - * twl4030_i2c_write - Writes a n bit register in TWL4030 - * @mod_no: module number - * @value: an array of num_bytes+1 containing data to write - * @reg: register address (just offset will do) - * @num_bytes: number of bytes to transfer - * - * IMPORTANT: for 'value' parameter: Allocate value num_bytes+1 and - * valid data starts at Offset 1. 
- * - * Returns the result of operation - 0 is success - */ -int twl4030_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes) -{ - int ret; - int sid; - struct twl4030_client *twl; - struct i2c_msg *msg; - - if (unlikely(mod_no > TWL4030_MODULE_LAST)) { - pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no); - return -EPERM; - } - sid = twl4030_map[mod_no].sid; - twl = &twl4030_modules[sid]; - - if (unlikely(!inuse)) { - pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid); - return -EPERM; - } - mutex_lock(&twl->xfer_lock); - /* - * [MSG1]: fill the register address data - * fill the data Tx buffer - */ - msg = &twl->xfer_msg[0]; - msg->addr = twl->address; - msg->len = num_bytes + 1; - msg->flags = 0; - msg->buf = value; - /* over write the first byte of buffer with the register address */ - *value = twl4030_map[mod_no].base + reg; - ret = i2c_transfer(twl->client->adapter, twl->xfer_msg, 1); - mutex_unlock(&twl->xfer_lock); - - /* i2c_transfer returns number of messages transferred */ - if (ret != 1) { - pr_err("%s: i2c_write failed to transfer all messages\n", - DRIVER_NAME); - if (ret < 0) - return ret; - else - return -EIO; - } else { - return 0; - } -} -EXPORT_SYMBOL(twl4030_i2c_write); - -/** - * twl4030_i2c_read - Reads a n bit register in TWL4030 - * @mod_no: module number - * @value: an array of num_bytes containing data to be read - * @reg: register address (just offset will do) - * @num_bytes: number of bytes to transfer - * - * Returns result of operation - num_bytes is success else failure. - */ -int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes) -{ - int ret; - u8 val; - int sid; - struct twl4030_client *twl; - struct i2c_msg *msg; - - if (unlikely(mod_no > TWL4030_MODULE_LAST)) { - pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no); - return -EPERM; - } - sid = twl4030_map[mod_no].sid; - twl = &twl4030_modules[sid]; - - if (unlikely(!inuse)) { - pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid); - return -EPERM; - } - mutex_lock(&twl->xfer_lock); - /* [MSG1] fill the register address data */ - msg = &twl->xfer_msg[0]; - msg->addr = twl->address; - msg->len = 1; - msg->flags = 0; /* Read the register value */ - val = twl4030_map[mod_no].base + reg; - msg->buf = &val; - /* [MSG2] fill the data rx buffer */ - msg = &twl->xfer_msg[1]; - msg->addr = twl->address; - msg->flags = I2C_M_RD; /* Read the register value */ - msg->len = num_bytes; /* only n bytes */ - msg->buf = value; - ret = i2c_transfer(twl->client->adapter, twl->xfer_msg, 2); - mutex_unlock(&twl->xfer_lock); - - /* i2c_transfer returns number of messages transferred */ - if (ret != 2) { - pr_err("%s: i2c_read failed to transfer all messages\n", - DRIVER_NAME); - if (ret < 0) - return ret; - else - return -EIO; - } else { - return 0; - } -} -EXPORT_SYMBOL(twl4030_i2c_read); - -/** - * twl4030_i2c_write_u8 - Writes a 8 bit register in TWL4030 - * @mod_no: module number - * @value: the value to be written 8 bit - * @reg: register address (just offset will do) - * - * Returns result of operation - 0 is success - */ -int twl4030_i2c_write_u8(u8 mod_no, u8 value, u8 reg) -{ - - /* 2 bytes offset 1 contains the data offset 0 is used by i2c_write */ - u8 temp_buffer[2] = { 0 }; - /* offset 1 contains the data */ - temp_buffer[1] = value; - return twl4030_i2c_write(mod_no, temp_buffer, reg, 1); -} -EXPORT_SYMBOL(twl4030_i2c_write_u8); - -/** - * twl4030_i2c_read_u8 - Reads a 8 bit register from TWL4030 - * @mod_no: module number - * @value: the value 
read 8 bit - * @reg: register address (just offset will do) - * - * Returns result of operation - 0 is success - */ -int twl4030_i2c_read_u8(u8 mod_no, u8 *value, u8 reg) -{ - return twl4030_i2c_read(mod_no, value, reg, 1); -} -EXPORT_SYMBOL(twl4030_i2c_read_u8); - -/*----------------------------------------------------------------------*/ - -static struct device * -add_numbered_child(unsigned chip, const char *name, int num, - void *pdata, unsigned pdata_len, - bool can_wakeup, int irq0, int irq1) -{ - struct platform_device *pdev; - struct twl4030_client *twl = &twl4030_modules[chip]; - int status; - - pdev = platform_device_alloc(name, num); - if (!pdev) { - dev_dbg(&twl->client->dev, "can't alloc dev\n"); - status = -ENOMEM; - goto err; - } - - device_init_wakeup(&pdev->dev, can_wakeup); - pdev->dev.parent = &twl->client->dev; - - if (pdata) { - status = platform_device_add_data(pdev, pdata, pdata_len); - if (status < 0) { - dev_dbg(&pdev->dev, "can't add platform_data\n"); - goto err; - } - } - - if (irq0) { - struct resource r[2] = { - { .start = irq0, .flags = IORESOURCE_IRQ, }, - { .start = irq1, .flags = IORESOURCE_IRQ, }, - }; - - status = platform_device_add_resources(pdev, r, irq1 ? 2 : 1); - if (status < 0) { - dev_dbg(&pdev->dev, "can't add irqs\n"); - goto err; - } - } - - status = platform_device_add(pdev); - -err: - if (status < 0) { - platform_device_put(pdev); - dev_err(&twl->client->dev, "can't add %s dev\n", name); - return ERR_PTR(status); - } - return &pdev->dev; -} - -static inline struct device *add_child(unsigned chip, const char *name, - void *pdata, unsigned pdata_len, - bool can_wakeup, int irq0, int irq1) -{ - return add_numbered_child(chip, name, -1, pdata, pdata_len, - can_wakeup, irq0, irq1); -} - -static struct device * -add_regulator_linked(int num, struct regulator_init_data *pdata, - struct regulator_consumer_supply *consumers, - unsigned num_consumers) -{ - /* regulator framework demands init_data ... */ - if (!pdata) - return NULL; - - if (consumers) { - pdata->consumer_supplies = consumers; - pdata->num_consumer_supplies = num_consumers; - } - - /* NOTE: we currently ignore regulator IRQs, e.g. for short circuits */ - return add_numbered_child(3, "twl4030_reg", num, - pdata, sizeof(*pdata), false, 0, 0); -} - -static struct device * -add_regulator(int num, struct regulator_init_data *pdata) -{ - return add_regulator_linked(num, pdata, NULL, 0); -} - -/* - * NOTE: We know the first 8 IRQs after pdata->base_irq are - * for the PIH, and the next are for the PWR_INT SIH, since - * that's how twl_init_irq() sets things up. 
- */ - -static int -add_children(struct twl4030_platform_data *pdata, unsigned long features) -{ - struct device *child; - - if (twl_has_bci() && pdata->bci && - !(features & (TPS_SUBSET | TWL5031))) { - child = add_child(3, "twl4030_bci", - pdata->bci, sizeof(*pdata->bci), - false, - /* irq0 = CHG_PRES, irq1 = BCI */ - pdata->irq_base + 8 + 1, pdata->irq_base + 2); - if (IS_ERR(child)) - return PTR_ERR(child); - } - - if (twl_has_gpio() && pdata->gpio) { - child = add_child(1, "twl4030_gpio", - pdata->gpio, sizeof(*pdata->gpio), - false, pdata->irq_base + 0, 0); - if (IS_ERR(child)) - return PTR_ERR(child); - } - - if (twl_has_keypad() && pdata->keypad) { - child = add_child(2, "twl4030_keypad", - pdata->keypad, sizeof(*pdata->keypad), - true, pdata->irq_base + 1, 0); - if (IS_ERR(child)) - return PTR_ERR(child); - } - - if (twl_has_madc() && pdata->madc) { - child = add_child(2, "twl4030_madc", - pdata->madc, sizeof(*pdata->madc), - true, pdata->irq_base + 3, 0); - if (IS_ERR(child)) - return PTR_ERR(child); - } - - if (twl_has_rtc()) { - /* - * REVISIT platform_data here currently might expose the - * "msecure" line ... but for now we just expect board - * setup to tell the chip "it's always ok to SET_TIME". - * Eventually, Linux might become more aware of such - * HW security concerns, and "least privilege". - */ - child = add_child(3, "twl4030_rtc", - NULL, 0, - true, pdata->irq_base + 8 + 3, 0); - if (IS_ERR(child)) - return PTR_ERR(child); - } - - if (twl_has_usb() && pdata->usb) { - - static struct regulator_consumer_supply usb1v5 = { - .supply = "usb1v5", - }; - static struct regulator_consumer_supply usb1v8 = { - .supply = "usb1v8", - }; - static struct regulator_consumer_supply usb3v1 = { - .supply = "usb3v1", - }; - - /* First add the regulators so that they can be used by transceiver */ - if (twl_has_regulator()) { - /* this is a template that gets copied */ - struct regulator_init_data usb_fixed = { - .constraints.valid_modes_mask = - REGULATOR_MODE_NORMAL - | REGULATOR_MODE_STANDBY, - .constraints.valid_ops_mask = - REGULATOR_CHANGE_MODE - | REGULATOR_CHANGE_STATUS, - }; - - child = add_regulator_linked(TWL4030_REG_VUSB1V5, - &usb_fixed, &usb1v5, 1); - if (IS_ERR(child)) - return PTR_ERR(child); - - child = add_regulator_linked(TWL4030_REG_VUSB1V8, - &usb_fixed, &usb1v8, 1); - if (IS_ERR(child)) - return PTR_ERR(child); - - child = add_regulator_linked(TWL4030_REG_VUSB3V1, - &usb_fixed, &usb3v1, 1); - if (IS_ERR(child)) - return PTR_ERR(child); - - } - - child = add_child(0, "twl4030_usb", - pdata->usb, sizeof(*pdata->usb), - true, - /* irq0 = USB_PRES, irq1 = USB */ - pdata->irq_base + 8 + 2, pdata->irq_base + 4); - - if (IS_ERR(child)) - return PTR_ERR(child); - - /* we need to connect regulators to this transceiver */ - if (twl_has_regulator() && child) { - usb1v5.dev = child; - usb1v8.dev = child; - usb3v1.dev = child; - } - } - - if (twl_has_watchdog()) { - child = add_child(0, "twl4030_wdt", NULL, 0, false, 0, 0); - if (IS_ERR(child)) - return PTR_ERR(child); - } - - if (twl_has_pwrbutton()) { - child = add_child(1, "twl4030_pwrbutton", - NULL, 0, true, pdata->irq_base + 8 + 0, 0); - if (IS_ERR(child)) - return PTR_ERR(child); - } - - if (twl_has_codec() && pdata->codec) { - child = add_child(1, "twl4030_codec", - pdata->codec, sizeof(*pdata->codec), - false, 0, 0); - if (IS_ERR(child)) - return PTR_ERR(child); - } - - if (twl_has_regulator()) { - child = add_regulator(TWL4030_REG_VPLL1, pdata->vpll1); - if (IS_ERR(child)) - return PTR_ERR(child); - - child = 
add_regulator(TWL4030_REG_VIO, pdata->vio); - if (IS_ERR(child)) - return PTR_ERR(child); - - child = add_regulator(TWL4030_REG_VDD1, pdata->vdd1); - if (IS_ERR(child)) - return PTR_ERR(child); - - child = add_regulator(TWL4030_REG_VDD2, pdata->vdd2); - if (IS_ERR(child)) - return PTR_ERR(child); - - child = add_regulator(TWL4030_REG_VMMC1, pdata->vmmc1); - if (IS_ERR(child)) - return PTR_ERR(child); - - child = add_regulator(TWL4030_REG_VDAC, pdata->vdac); - if (IS_ERR(child)) - return PTR_ERR(child); - - child = add_regulator((features & TWL4030_VAUX2) - ? TWL4030_REG_VAUX2_4030 - : TWL4030_REG_VAUX2, - pdata->vaux2); - if (IS_ERR(child)) - return PTR_ERR(child); - - child = add_regulator(TWL4030_REG_VINTANA1, pdata->vintana1); - if (IS_ERR(child)) - return PTR_ERR(child); - - child = add_regulator(TWL4030_REG_VINTANA2, pdata->vintana2); - if (IS_ERR(child)) - return PTR_ERR(child); - - child = add_regulator(TWL4030_REG_VINTDIG, pdata->vintdig); - if (IS_ERR(child)) - return PTR_ERR(child); - } - - /* maybe add LDOs that are omitted on cost-reduced parts */ - if (twl_has_regulator() && !(features & TPS_SUBSET)) { - child = add_regulator(TWL4030_REG_VPLL2, pdata->vpll2); - if (IS_ERR(child)) - return PTR_ERR(child); - - child = add_regulator(TWL4030_REG_VMMC2, pdata->vmmc2); - if (IS_ERR(child)) - return PTR_ERR(child); - - child = add_regulator(TWL4030_REG_VSIM, pdata->vsim); - if (IS_ERR(child)) - return PTR_ERR(child); - - child = add_regulator(TWL4030_REG_VAUX1, pdata->vaux1); - if (IS_ERR(child)) - return PTR_ERR(child); - - child = add_regulator(TWL4030_REG_VAUX3, pdata->vaux3); - if (IS_ERR(child)) - return PTR_ERR(child); - - child = add_regulator(TWL4030_REG_VAUX4, pdata->vaux4); - if (IS_ERR(child)) - return PTR_ERR(child); - } - - return 0; -} - -/*----------------------------------------------------------------------*/ - -/* - * These three functions initialize the on-chip clock framework, - * letting it generate the right frequencies for USB, MADC, and - * other purposes. - */ -static inline int __init protect_pm_master(void) -{ - int e = 0; - - e = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, KEY_LOCK, - R_PROTECT_KEY); - return e; -} - -static inline int __init unprotect_pm_master(void) -{ - int e = 0; - - e |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, KEY_UNLOCK1, - R_PROTECT_KEY); - e |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, KEY_UNLOCK2, - R_PROTECT_KEY); - return e; -} - -static void clocks_init(struct device *dev, - struct twl4030_clock_init_data *clock) -{ - int e = 0; - struct clk *osc; - u32 rate; - u8 ctrl = HFCLK_FREQ_26_MHZ; - -#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3) - if (cpu_is_omap2430()) - osc = clk_get(dev, "osc_ck"); - else - osc = clk_get(dev, "osc_sys_ck"); - - if (IS_ERR(osc)) { - printk(KERN_WARNING "Skipping twl4030 internal clock init and " - "using bootloader value (unknown osc rate)\n"); - return; - } - - rate = clk_get_rate(osc); - clk_put(osc); - -#else - /* REVISIT for non-OMAP systems, pass the clock rate from - * board init code, using platform_data. 
- */ - osc = ERR_PTR(-EIO); - - printk(KERN_WARNING "Skipping twl4030 internal clock init and " - "using bootloader value (unknown osc rate)\n"); - - return; -#endif - - switch (rate) { - case 19200000: - ctrl = HFCLK_FREQ_19p2_MHZ; - break; - case 26000000: - ctrl = HFCLK_FREQ_26_MHZ; - break; - case 38400000: - ctrl = HFCLK_FREQ_38p4_MHZ; - break; - } - - ctrl |= HIGH_PERF_SQ; - if (clock && clock->ck32k_lowpwr_enable) - ctrl |= CK32K_LOWPWR_EN; - - e |= unprotect_pm_master(); - /* effect->MADC+USB ck en */ - e |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, ctrl, R_CFG_BOOT); - e |= protect_pm_master(); - - if (e < 0) - pr_err("%s: clock init err [%d]\n", DRIVER_NAME, e); -} - -/*----------------------------------------------------------------------*/ - -int twl_init_irq(int irq_num, unsigned irq_base, unsigned irq_end); -int twl_exit_irq(void); -int twl_init_chip_irq(const char *chip); - -static int twl4030_remove(struct i2c_client *client) -{ - unsigned i; - int status; - - status = twl_exit_irq(); - if (status < 0) - return status; - - for (i = 0; i < TWL4030_NUM_SLAVES; i++) { - struct twl4030_client *twl = &twl4030_modules[i]; - - if (twl->client && twl->client != client) - i2c_unregister_device(twl->client); - twl4030_modules[i].client = NULL; - } - inuse = false; - return 0; -} - -/* NOTE: this driver only handles a single twl4030/tps659x0 chip */ -static int __init -twl4030_probe(struct i2c_client *client, const struct i2c_device_id *id) -{ - int status; - unsigned i; - struct twl4030_platform_data *pdata = client->dev.platform_data; - - if (!pdata) { - dev_dbg(&client->dev, "no platform data?\n"); - return -EINVAL; - } - - if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C) == 0) { - dev_dbg(&client->dev, "can't talk I2C?\n"); - return -EIO; - } - - if (inuse) { - dev_dbg(&client->dev, "driver is already in use\n"); - return -EBUSY; - } - - for (i = 0; i < TWL4030_NUM_SLAVES; i++) { - struct twl4030_client *twl = &twl4030_modules[i]; - - twl->address = client->addr + i; - if (i == 0) - twl->client = client; - else { - twl->client = i2c_new_dummy(client->adapter, - twl->address); - if (!twl->client) { - dev_err(&client->dev, - "can't attach client %d\n", i); - status = -ENOMEM; - goto fail; - } - } - mutex_init(&twl->xfer_lock); - } - inuse = true; - - /* setup clock framework */ - clocks_init(&client->dev, pdata->clock); - - /* load power event scripts */ - if (twl_has_power() && pdata->power) - twl4030_power_init(pdata->power); - - /* Maybe init the T2 Interrupt subsystem */ - if (client->irq - && pdata->irq_base - && pdata->irq_end > pdata->irq_base) { - twl_init_chip_irq(id->name); - status = twl_init_irq(client->irq, pdata->irq_base, pdata->irq_end); - if (status < 0) - goto fail; - } - - status = add_children(pdata, id->driver_data); -fail: - if (status < 0) - twl4030_remove(client); - return status; -} - -static const struct i2c_device_id twl4030_ids[] = { - { "twl4030", TWL4030_VAUX2 }, /* "Triton 2" */ - { "twl5030", 0 }, /* T2 updated */ - { "twl5031", TWL5031 }, /* TWL5030 updated */ - { "tps65950", 0 }, /* catalog version of twl5030 */ - { "tps65930", TPS_SUBSET }, /* fewer LDOs and DACs; no charger */ - { "tps65920", TPS_SUBSET }, /* fewer LDOs; no codec or charger */ - { /* end of list */ }, -}; -MODULE_DEVICE_TABLE(i2c, twl4030_ids); - -/* One Client Driver , 4 Clients */ -static struct i2c_driver twl4030_driver = { - .driver.name = DRIVER_NAME, - .id_table = twl4030_ids, - .probe = twl4030_probe, - .remove = twl4030_remove, -}; - -static int __init 
twl4030_init(void) -{ - return i2c_add_driver(&twl4030_driver); -} -subsys_initcall(twl4030_init); - -static void __exit twl4030_exit(void) -{ - i2c_del_driver(&twl4030_driver); -} -module_exit(twl4030_exit); - -MODULE_AUTHOR("Texas Instruments, Inc."); -MODULE_DESCRIPTION("I2C Core interface for TWL4030"); -MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c index 3f7e93ca0514..c4528db549c6 100644 --- a/drivers/mfd/twl4030-irq.c +++ b/drivers/mfd/twl4030-irq.c @@ -32,7 +32,7 @@ #include #include -#include +#include /* diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c index 3048f18e0419..424b255d6f92 100644 --- a/drivers/mfd/twl4030-power.c +++ b/drivers/mfd/twl4030-power.c @@ -26,7 +26,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index 4257a8683778..9ae3cc44e668 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile @@ -11,7 +11,7 @@ obj-$(CONFIG_REGULATOR_USERSPACE_CONSUMER) += userspace-consumer.o obj-$(CONFIG_REGULATOR_BQ24022) += bq24022.o obj-$(CONFIG_REGULATOR_LP3971) += lp3971.o obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o -obj-$(CONFIG_REGULATOR_TWL4030) += twl4030-regulator.o +obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c new file mode 100644 index 000000000000..c8a6e583d773 --- /dev/null +++ b/drivers/regulator/twl-regulator.c @@ -0,0 +1,500 @@ +/* + * twl4030-regulator.c -- support regulators in twl4030 family chips + * + * Copyright (C) 2008 David Brownell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include + + +/* + * The TWL4030/TW5030/TPS659x0 family chips include power management, a + * USB OTG transceiver, an RTC, ADC, PWM, and lots more. Some versions + * include an audio codec, battery charger, and more voltage regulators. + * These chips are often used in OMAP-based systems. + * + * This driver implements software-based resource control for various + * voltage regulators. This is usually augmented with state machine + * based control. + */ + +struct twlreg_info { + /* start of regulator's PM_RECEIVER control register bank */ + u8 base; + + /* twl4030 resource ID, for resource control state machine */ + u8 id; + + /* voltage in mV = table[VSEL]; table_len must be a power-of-two */ + u8 table_len; + const u16 *table; + + /* chip constraints on regulator behavior */ + u16 min_mV; + + /* used by regulator core */ + struct regulator_desc desc; +}; + + +/* LDO control registers ... offset is from the base of its register bank. + * The first three registers of all power resource banks help hardware to + * manage the various resource groups. + */ +#define VREG_GRP 0 +#define VREG_TYPE 1 +#define VREG_REMAP 2 +#define VREG_DEDICATED 3 /* LDO control */ + + +static inline int +twl4030reg_read(struct twlreg_info *info, unsigned offset) +{ + u8 value; + int status; + + status = twl4030_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, + &value, info->base + offset); + return (status < 0) ? 
status : value; +} + +static inline int +twl4030reg_write(struct twlreg_info *info, unsigned offset, u8 value) +{ + return twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, + value, info->base + offset); +} + +/*----------------------------------------------------------------------*/ + +/* generic power resource operations, which work on all regulators */ + +static int twl4030reg_grp(struct regulator_dev *rdev) +{ + return twl4030reg_read(rdev_get_drvdata(rdev), VREG_GRP); +} + +/* + * Enable/disable regulators by joining/leaving the P1 (processor) group. + * We assume nobody else is updating the DEV_GRP registers. + */ + +#define P3_GRP BIT(7) /* "peripherals" */ +#define P2_GRP BIT(6) /* secondary processor, modem, etc */ +#define P1_GRP BIT(5) /* CPU/Linux */ + +static int twl4030reg_is_enabled(struct regulator_dev *rdev) +{ + int state = twl4030reg_grp(rdev); + + if (state < 0) + return state; + + return (state & P1_GRP) != 0; +} + +static int twl4030reg_enable(struct regulator_dev *rdev) +{ + struct twlreg_info *info = rdev_get_drvdata(rdev); + int grp; + + grp = twl4030reg_read(info, VREG_GRP); + if (grp < 0) + return grp; + + grp |= P1_GRP; + return twl4030reg_write(info, VREG_GRP, grp); +} + +static int twl4030reg_disable(struct regulator_dev *rdev) +{ + struct twlreg_info *info = rdev_get_drvdata(rdev); + int grp; + + grp = twl4030reg_read(info, VREG_GRP); + if (grp < 0) + return grp; + + grp &= ~P1_GRP; + return twl4030reg_write(info, VREG_GRP, grp); +} + +static int twl4030reg_get_status(struct regulator_dev *rdev) +{ + int state = twl4030reg_grp(rdev); + + if (state < 0) + return state; + state &= 0x0f; + + /* assume state != WARM_RESET; we'd not be running... */ + if (!state) + return REGULATOR_STATUS_OFF; + return (state & BIT(3)) + ? REGULATOR_STATUS_NORMAL + : REGULATOR_STATUS_STANDBY; +} + +static int twl4030reg_set_mode(struct regulator_dev *rdev, unsigned mode) +{ + struct twlreg_info *info = rdev_get_drvdata(rdev); + unsigned message; + int status; + + /* We can only set the mode through state machine commands... */ + switch (mode) { + case REGULATOR_MODE_NORMAL: + message = MSG_SINGULAR(DEV_GRP_P1, info->id, RES_STATE_ACTIVE); + break; + case REGULATOR_MODE_STANDBY: + message = MSG_SINGULAR(DEV_GRP_P1, info->id, RES_STATE_SLEEP); + break; + default: + return -EINVAL; + } + + /* Ensure the resource is associated with some group */ + status = twl4030reg_grp(rdev); + if (status < 0) + return status; + if (!(status & (P3_GRP | P2_GRP | P1_GRP))) + return -EACCES; + + status = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, + message >> 8, 0x15 /* PB_WORD_MSB */ ); + if (status >= 0) + return status; + + return twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, + message, 0x16 /* PB_WORD_LSB */ ); +} + +/*----------------------------------------------------------------------*/ + +/* + * Support for adjustable-voltage LDOs uses a four bit (or less) voltage + * select field in its control register. We use tables indexed by VSEL + * to record voltages in milliVolts. (Accuracy is about three percent.) + * + * Note that VSEL values for VAUX2 changed in twl5030 and newer silicon; + * currently handled by listing two slightly different VAUX2 regulators, + * only one of which will be configured. + * + * VSEL values documented as "TI cannot support these values" are flagged + * in these tables as UNSUP() values; we normally won't assign them. + * + * VAUX3 at 3V is incorrectly listed in some TI manuals as unsupported. 
+ * TI are revising the twl5030/tps659x0 specs to support that 3.0V setting. + */ +#ifdef CONFIG_TWL4030_ALLOW_UNSUPPORTED +#define UNSUP_MASK 0x0000 +#else +#define UNSUP_MASK 0x8000 +#endif + +#define UNSUP(x) (UNSUP_MASK | (x)) +#define IS_UNSUP(x) (UNSUP_MASK & (x)) +#define LDO_MV(x) (~UNSUP_MASK & (x)) + + +static const u16 VAUX1_VSEL_table[] = { + UNSUP(1500), UNSUP(1800), 2500, 2800, + 3000, 3000, 3000, 3000, +}; +static const u16 VAUX2_4030_VSEL_table[] = { + UNSUP(1000), UNSUP(1000), UNSUP(1200), 1300, + 1500, 1800, UNSUP(1850), 2500, + UNSUP(2600), 2800, UNSUP(2850), UNSUP(3000), + UNSUP(3150), UNSUP(3150), UNSUP(3150), UNSUP(3150), +}; +static const u16 VAUX2_VSEL_table[] = { + 1700, 1700, 1900, 1300, + 1500, 1800, 2000, 2500, + 2100, 2800, 2200, 2300, + 2400, 2400, 2400, 2400, +}; +static const u16 VAUX3_VSEL_table[] = { + 1500, 1800, 2500, 2800, + 3000, 3000, 3000, 3000, +}; +static const u16 VAUX4_VSEL_table[] = { + 700, 1000, 1200, UNSUP(1300), + 1500, 1800, UNSUP(1850), 2500, + UNSUP(2600), 2800, UNSUP(2850), UNSUP(3000), + UNSUP(3150), UNSUP(3150), UNSUP(3150), UNSUP(3150), +}; +static const u16 VMMC1_VSEL_table[] = { + 1850, 2850, 3000, 3150, +}; +static const u16 VMMC2_VSEL_table[] = { + UNSUP(1000), UNSUP(1000), UNSUP(1200), UNSUP(1300), + UNSUP(1500), UNSUP(1800), 1850, UNSUP(2500), + 2600, 2800, 2850, 3000, + 3150, 3150, 3150, 3150, +}; +static const u16 VPLL1_VSEL_table[] = { + 1000, 1200, 1300, 1800, + UNSUP(2800), UNSUP(3000), UNSUP(3000), UNSUP(3000), +}; +static const u16 VPLL2_VSEL_table[] = { + 700, 1000, 1200, 1300, + UNSUP(1500), 1800, UNSUP(1850), UNSUP(2500), + UNSUP(2600), UNSUP(2800), UNSUP(2850), UNSUP(3000), + UNSUP(3150), UNSUP(3150), UNSUP(3150), UNSUP(3150), +}; +static const u16 VSIM_VSEL_table[] = { + UNSUP(1000), UNSUP(1200), UNSUP(1300), 1800, + 2800, 3000, 3000, 3000, +}; +static const u16 VDAC_VSEL_table[] = { + 1200, 1300, 1800, 1800, +}; + + +static int twl4030ldo_list_voltage(struct regulator_dev *rdev, unsigned index) +{ + struct twlreg_info *info = rdev_get_drvdata(rdev); + int mV = info->table[index]; + + return IS_UNSUP(mV) ? 0 : (LDO_MV(mV) * 1000); +} + +static int +twl4030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV) +{ + struct twlreg_info *info = rdev_get_drvdata(rdev); + int vsel; + + for (vsel = 0; vsel < info->table_len; vsel++) { + int mV = info->table[vsel]; + int uV; + + if (IS_UNSUP(mV)) + continue; + uV = LDO_MV(mV) * 1000; + + /* REVISIT for VAUX2, first match may not be best/lowest */ + + /* use the first in-range value */ + if (min_uV <= uV && uV <= max_uV) + return twl4030reg_write(info, VREG_DEDICATED, vsel); + } + + return -EDOM; +} + +static int twl4030ldo_get_voltage(struct regulator_dev *rdev) +{ + struct twlreg_info *info = rdev_get_drvdata(rdev); + int vsel = twl4030reg_read(info, VREG_DEDICATED); + + if (vsel < 0) + return vsel; + + vsel &= info->table_len - 1; + return LDO_MV(info->table[vsel]) * 1000; +} + +static struct regulator_ops twl4030ldo_ops = { + .list_voltage = twl4030ldo_list_voltage, + + .set_voltage = twl4030ldo_set_voltage, + .get_voltage = twl4030ldo_get_voltage, + + .enable = twl4030reg_enable, + .disable = twl4030reg_disable, + .is_enabled = twl4030reg_is_enabled, + + .set_mode = twl4030reg_set_mode, + + .get_status = twl4030reg_get_status, +}; + +/*----------------------------------------------------------------------*/ + +/* + * Fixed voltage LDOs don't have a VSEL field to update. 
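[Editor's illustration, not part of the patch: the UNSUP()/IS_UNSUP()/LDO_MV() encoding and the first-in-range search used by twl4030ldo_set_voltage() above can be exercised stand-alone. The sketch below copies those macros and the VAUX1 table; pick_vsel() and main() exist only for this illustration, and the real driver writes the chosen VSEL to VREG_DEDICATED instead of returning it.]

#include <stdio.h>

#define UNSUP_MASK	0x8000		/* as when CONFIG_TWL4030_ALLOW_UNSUPPORTED is unset */
#define UNSUP(x)	(UNSUP_MASK | (x))
#define IS_UNSUP(x)	(UNSUP_MASK & (x))
#define LDO_MV(x)	(~UNSUP_MASK & (x))

/* copy of VAUX1_VSEL_table from the driver, entries in millivolts */
static const unsigned short vaux1_table[] = {
	UNSUP(1500), UNSUP(1800), 2500, 2800,
	3000, 3000, 3000, 3000,
};

/* first-in-range search, mirroring twl4030ldo_set_voltage() */
static int pick_vsel(int min_uV, int max_uV)
{
	int vsel;

	for (vsel = 0; vsel < (int)(sizeof(vaux1_table) / sizeof(vaux1_table[0])); vsel++) {
		int mV = vaux1_table[vsel];

		if (IS_UNSUP(mV))
			continue;
		if (min_uV <= LDO_MV(mV) * 1000 && LDO_MV(mV) * 1000 <= max_uV)
			return vsel;
	}
	return -1;	/* the driver returns -EDOM here */
}

int main(void)
{
	printf("2.8V -> vsel %d\n", pick_vsel(2800000, 2800000));	/* prints 3 */
	printf("1.8V -> vsel %d\n", pick_vsel(1800000, 1800000));	/* prints -1: entry is UNSUP() */
	return 0;
}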
+ */ +static int twl4030fixed_list_voltage(struct regulator_dev *rdev, unsigned index) +{ + struct twlreg_info *info = rdev_get_drvdata(rdev); + + return info->min_mV * 1000; +} + +static int twl4030fixed_get_voltage(struct regulator_dev *rdev) +{ + struct twlreg_info *info = rdev_get_drvdata(rdev); + + return info->min_mV * 1000; +} + +static struct regulator_ops twl4030fixed_ops = { + .list_voltage = twl4030fixed_list_voltage, + + .get_voltage = twl4030fixed_get_voltage, + + .enable = twl4030reg_enable, + .disable = twl4030reg_disable, + .is_enabled = twl4030reg_is_enabled, + + .set_mode = twl4030reg_set_mode, + + .get_status = twl4030reg_get_status, +}; + +/*----------------------------------------------------------------------*/ + +#define TWL_ADJUSTABLE_LDO(label, offset, num) { \ + .base = offset, \ + .id = num, \ + .table_len = ARRAY_SIZE(label##_VSEL_table), \ + .table = label##_VSEL_table, \ + .desc = { \ + .name = #label, \ + .id = TWL4030_REG_##label, \ + .n_voltages = ARRAY_SIZE(label##_VSEL_table), \ + .ops = &twl4030ldo_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + }, \ + } + +#define TWL_FIXED_LDO(label, offset, mVolts, num) { \ + .base = offset, \ + .id = num, \ + .min_mV = mVolts, \ + .desc = { \ + .name = #label, \ + .id = TWL4030_REG_##label, \ + .n_voltages = 1, \ + .ops = &twl4030fixed_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + }, \ + } + +/* + * We list regulators here if systems need some level of + * software control over them after boot. + */ +static struct twlreg_info twl4030_regs[] = { + TWL_ADJUSTABLE_LDO(VAUX1, 0x17, 1), + TWL_ADJUSTABLE_LDO(VAUX2_4030, 0x1b, 2), + TWL_ADJUSTABLE_LDO(VAUX2, 0x1b, 2), + TWL_ADJUSTABLE_LDO(VAUX3, 0x1f, 3), + TWL_ADJUSTABLE_LDO(VAUX4, 0x23, 4), + TWL_ADJUSTABLE_LDO(VMMC1, 0x27, 5), + TWL_ADJUSTABLE_LDO(VMMC2, 0x2b, 6), + /* + TWL_ADJUSTABLE_LDO(VPLL1, 0x2f, 7), + */ + TWL_ADJUSTABLE_LDO(VPLL2, 0x33, 8), + TWL_ADJUSTABLE_LDO(VSIM, 0x37, 9), + TWL_ADJUSTABLE_LDO(VDAC, 0x3b, 10), + /* + TWL_ADJUSTABLE_LDO(VINTANA1, 0x3f, 11), + TWL_ADJUSTABLE_LDO(VINTANA2, 0x43, 12), + TWL_ADJUSTABLE_LDO(VINTDIG, 0x47, 13), + TWL_SMPS(VIO, 0x4b, 14), + TWL_SMPS(VDD1, 0x55, 15), + TWL_SMPS(VDD2, 0x63, 16), + */ + TWL_FIXED_LDO(VUSB1V5, 0x71, 1500, 17), + TWL_FIXED_LDO(VUSB1V8, 0x74, 1800, 18), + TWL_FIXED_LDO(VUSB3V1, 0x77, 3100, 19), + /* VUSBCP is managed *only* by the USB subchip */ +}; + +static int twl4030reg_probe(struct platform_device *pdev) +{ + int i; + struct twlreg_info *info; + struct regulator_init_data *initdata; + struct regulation_constraints *c; + struct regulator_dev *rdev; + + for (i = 0, info = NULL; i < ARRAY_SIZE(twl4030_regs); i++) { + if (twl4030_regs[i].desc.id != pdev->id) + continue; + info = twl4030_regs + i; + break; + } + if (!info) + return -ENODEV; + + initdata = pdev->dev.platform_data; + if (!initdata) + return -EINVAL; + + /* Constrain board-specific capabilities according to what + * this driver and the chip itself can actually do. 
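[Editor's illustration, not part of the patch: the constraint masking completed just below trims whatever a board supplies. As a hypothetical example of that board-side input, this is the kind of regulator_init_data a board file might pass as pdata->vmmc1 to add_regulator(); the name board_vmmc1 is made up, though the 1850-3150 mV range matches the VMMC1 table above.]

#include <linux/regulator/machine.h>

/* hypothetical board data; probe() ANDs the mode/ops masks down to what
 * this driver supports, so over-broad requests are trimmed, not rejected */
static struct regulator_init_data board_vmmc1 = {
	.constraints = {
		.min_uV			= 1850000,
		.max_uV			= 3150000,
		.valid_modes_mask	= REGULATOR_MODE_NORMAL
					| REGULATOR_MODE_STANDBY,
		.valid_ops_mask		= REGULATOR_CHANGE_VOLTAGE
					| REGULATOR_CHANGE_MODE
					| REGULATOR_CHANGE_STATUS,
	},
};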
+ */ + c = &initdata->constraints; + c->valid_modes_mask &= REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY; + c->valid_ops_mask &= REGULATOR_CHANGE_VOLTAGE + | REGULATOR_CHANGE_MODE + | REGULATOR_CHANGE_STATUS; + + rdev = regulator_register(&info->desc, &pdev->dev, initdata, info); + if (IS_ERR(rdev)) { + dev_err(&pdev->dev, "can't register %s, %ld\n", + info->desc.name, PTR_ERR(rdev)); + return PTR_ERR(rdev); + } + platform_set_drvdata(pdev, rdev); + + /* NOTE: many regulators support short-circuit IRQs (presentable + * as REGULATOR_OVER_CURRENT notifications?) configured via: + * - SC_CONFIG + * - SC_DETECT1 (vintana2, vmmc1/2, vaux1/2/3/4) + * - SC_DETECT2 (vusb, vdac, vio, vdd1/2, vpll2) + * - IT_CONFIG + */ + + return 0; +} + +static int __devexit twl4030reg_remove(struct platform_device *pdev) +{ + regulator_unregister(platform_get_drvdata(pdev)); + return 0; +} + +MODULE_ALIAS("platform:twl4030_reg"); + +static struct platform_driver twl4030reg_driver = { + .probe = twl4030reg_probe, + .remove = __devexit_p(twl4030reg_remove), + /* NOTE: short name, to work around driver model truncation of + * "twl4030_regulator.12" (and friends) to "twl4030_regulator.1". + */ + .driver.name = "twl4030_reg", + .driver.owner = THIS_MODULE, +}; + +static int __init twl4030reg_init(void) +{ + return platform_driver_register(&twl4030reg_driver); +} +subsys_initcall(twl4030reg_init); + +static void __exit twl4030reg_exit(void) +{ + platform_driver_unregister(&twl4030reg_driver); +} +module_exit(twl4030reg_exit) + +MODULE_DESCRIPTION("TWL4030 regulator driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/twl4030-regulator.c b/drivers/regulator/twl4030-regulator.c deleted file mode 100644 index e2032fb60b55..000000000000 --- a/drivers/regulator/twl4030-regulator.c +++ /dev/null @@ -1,500 +0,0 @@ -/* - * twl4030-regulator.c -- support regulators in twl4030 family chips - * - * Copyright (C) 2008 David Brownell - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include -#include -#include -#include -#include -#include -#include - - -/* - * The TWL4030/TW5030/TPS659x0 family chips include power management, a - * USB OTG transceiver, an RTC, ADC, PWM, and lots more. Some versions - * include an audio codec, battery charger, and more voltage regulators. - * These chips are often used in OMAP-based systems. - * - * This driver implements software-based resource control for various - * voltage regulators. This is usually augmented with state machine - * based control. - */ - -struct twlreg_info { - /* start of regulator's PM_RECEIVER control register bank */ - u8 base; - - /* twl4030 resource ID, for resource control state machine */ - u8 id; - - /* voltage in mV = table[VSEL]; table_len must be a power-of-two */ - u8 table_len; - const u16 *table; - - /* chip constraints on regulator behavior */ - u16 min_mV; - - /* used by regulator core */ - struct regulator_desc desc; -}; - - -/* LDO control registers ... offset is from the base of its register bank. - * The first three registers of all power resource banks help hardware to - * manage the various resource groups. 
- */ -#define VREG_GRP 0 -#define VREG_TYPE 1 -#define VREG_REMAP 2 -#define VREG_DEDICATED 3 /* LDO control */ - - -static inline int -twl4030reg_read(struct twlreg_info *info, unsigned offset) -{ - u8 value; - int status; - - status = twl4030_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, - &value, info->base + offset); - return (status < 0) ? status : value; -} - -static inline int -twl4030reg_write(struct twlreg_info *info, unsigned offset, u8 value) -{ - return twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, - value, info->base + offset); -} - -/*----------------------------------------------------------------------*/ - -/* generic power resource operations, which work on all regulators */ - -static int twl4030reg_grp(struct regulator_dev *rdev) -{ - return twl4030reg_read(rdev_get_drvdata(rdev), VREG_GRP); -} - -/* - * Enable/disable regulators by joining/leaving the P1 (processor) group. - * We assume nobody else is updating the DEV_GRP registers. - */ - -#define P3_GRP BIT(7) /* "peripherals" */ -#define P2_GRP BIT(6) /* secondary processor, modem, etc */ -#define P1_GRP BIT(5) /* CPU/Linux */ - -static int twl4030reg_is_enabled(struct regulator_dev *rdev) -{ - int state = twl4030reg_grp(rdev); - - if (state < 0) - return state; - - return (state & P1_GRP) != 0; -} - -static int twl4030reg_enable(struct regulator_dev *rdev) -{ - struct twlreg_info *info = rdev_get_drvdata(rdev); - int grp; - - grp = twl4030reg_read(info, VREG_GRP); - if (grp < 0) - return grp; - - grp |= P1_GRP; - return twl4030reg_write(info, VREG_GRP, grp); -} - -static int twl4030reg_disable(struct regulator_dev *rdev) -{ - struct twlreg_info *info = rdev_get_drvdata(rdev); - int grp; - - grp = twl4030reg_read(info, VREG_GRP); - if (grp < 0) - return grp; - - grp &= ~P1_GRP; - return twl4030reg_write(info, VREG_GRP, grp); -} - -static int twl4030reg_get_status(struct regulator_dev *rdev) -{ - int state = twl4030reg_grp(rdev); - - if (state < 0) - return state; - state &= 0x0f; - - /* assume state != WARM_RESET; we'd not be running... */ - if (!state) - return REGULATOR_STATUS_OFF; - return (state & BIT(3)) - ? REGULATOR_STATUS_NORMAL - : REGULATOR_STATUS_STANDBY; -} - -static int twl4030reg_set_mode(struct regulator_dev *rdev, unsigned mode) -{ - struct twlreg_info *info = rdev_get_drvdata(rdev); - unsigned message; - int status; - - /* We can only set the mode through state machine commands... */ - switch (mode) { - case REGULATOR_MODE_NORMAL: - message = MSG_SINGULAR(DEV_GRP_P1, info->id, RES_STATE_ACTIVE); - break; - case REGULATOR_MODE_STANDBY: - message = MSG_SINGULAR(DEV_GRP_P1, info->id, RES_STATE_SLEEP); - break; - default: - return -EINVAL; - } - - /* Ensure the resource is associated with some group */ - status = twl4030reg_grp(rdev); - if (status < 0) - return status; - if (!(status & (P3_GRP | P2_GRP | P1_GRP))) - return -EACCES; - - status = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, - message >> 8, 0x15 /* PB_WORD_MSB */ ); - if (status >= 0) - return status; - - return twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, - message, 0x16 /* PB_WORD_LSB */ ); -} - -/*----------------------------------------------------------------------*/ - -/* - * Support for adjustable-voltage LDOs uses a four bit (or less) voltage - * select field in its control register. We use tables indexed by VSEL - * to record voltages in milliVolts. (Accuracy is about three percent.) 
- * - * Note that VSEL values for VAUX2 changed in twl5030 and newer silicon; - * currently handled by listing two slightly different VAUX2 regulators, - * only one of which will be configured. - * - * VSEL values documented as "TI cannot support these values" are flagged - * in these tables as UNSUP() values; we normally won't assign them. - * - * VAUX3 at 3V is incorrectly listed in some TI manuals as unsupported. - * TI are revising the twl5030/tps659x0 specs to support that 3.0V setting. - */ -#ifdef CONFIG_TWL4030_ALLOW_UNSUPPORTED -#define UNSUP_MASK 0x0000 -#else -#define UNSUP_MASK 0x8000 -#endif - -#define UNSUP(x) (UNSUP_MASK | (x)) -#define IS_UNSUP(x) (UNSUP_MASK & (x)) -#define LDO_MV(x) (~UNSUP_MASK & (x)) - - -static const u16 VAUX1_VSEL_table[] = { - UNSUP(1500), UNSUP(1800), 2500, 2800, - 3000, 3000, 3000, 3000, -}; -static const u16 VAUX2_4030_VSEL_table[] = { - UNSUP(1000), UNSUP(1000), UNSUP(1200), 1300, - 1500, 1800, UNSUP(1850), 2500, - UNSUP(2600), 2800, UNSUP(2850), UNSUP(3000), - UNSUP(3150), UNSUP(3150), UNSUP(3150), UNSUP(3150), -}; -static const u16 VAUX2_VSEL_table[] = { - 1700, 1700, 1900, 1300, - 1500, 1800, 2000, 2500, - 2100, 2800, 2200, 2300, - 2400, 2400, 2400, 2400, -}; -static const u16 VAUX3_VSEL_table[] = { - 1500, 1800, 2500, 2800, - 3000, 3000, 3000, 3000, -}; -static const u16 VAUX4_VSEL_table[] = { - 700, 1000, 1200, UNSUP(1300), - 1500, 1800, UNSUP(1850), 2500, - UNSUP(2600), 2800, UNSUP(2850), UNSUP(3000), - UNSUP(3150), UNSUP(3150), UNSUP(3150), UNSUP(3150), -}; -static const u16 VMMC1_VSEL_table[] = { - 1850, 2850, 3000, 3150, -}; -static const u16 VMMC2_VSEL_table[] = { - UNSUP(1000), UNSUP(1000), UNSUP(1200), UNSUP(1300), - UNSUP(1500), UNSUP(1800), 1850, UNSUP(2500), - 2600, 2800, 2850, 3000, - 3150, 3150, 3150, 3150, -}; -static const u16 VPLL1_VSEL_table[] = { - 1000, 1200, 1300, 1800, - UNSUP(2800), UNSUP(3000), UNSUP(3000), UNSUP(3000), -}; -static const u16 VPLL2_VSEL_table[] = { - 700, 1000, 1200, 1300, - UNSUP(1500), 1800, UNSUP(1850), UNSUP(2500), - UNSUP(2600), UNSUP(2800), UNSUP(2850), UNSUP(3000), - UNSUP(3150), UNSUP(3150), UNSUP(3150), UNSUP(3150), -}; -static const u16 VSIM_VSEL_table[] = { - UNSUP(1000), UNSUP(1200), UNSUP(1300), 1800, - 2800, 3000, 3000, 3000, -}; -static const u16 VDAC_VSEL_table[] = { - 1200, 1300, 1800, 1800, -}; - - -static int twl4030ldo_list_voltage(struct regulator_dev *rdev, unsigned index) -{ - struct twlreg_info *info = rdev_get_drvdata(rdev); - int mV = info->table[index]; - - return IS_UNSUP(mV) ? 
0 : (LDO_MV(mV) * 1000); -} - -static int -twl4030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV) -{ - struct twlreg_info *info = rdev_get_drvdata(rdev); - int vsel; - - for (vsel = 0; vsel < info->table_len; vsel++) { - int mV = info->table[vsel]; - int uV; - - if (IS_UNSUP(mV)) - continue; - uV = LDO_MV(mV) * 1000; - - /* REVISIT for VAUX2, first match may not be best/lowest */ - - /* use the first in-range value */ - if (min_uV <= uV && uV <= max_uV) - return twl4030reg_write(info, VREG_DEDICATED, vsel); - } - - return -EDOM; -} - -static int twl4030ldo_get_voltage(struct regulator_dev *rdev) -{ - struct twlreg_info *info = rdev_get_drvdata(rdev); - int vsel = twl4030reg_read(info, VREG_DEDICATED); - - if (vsel < 0) - return vsel; - - vsel &= info->table_len - 1; - return LDO_MV(info->table[vsel]) * 1000; -} - -static struct regulator_ops twl4030ldo_ops = { - .list_voltage = twl4030ldo_list_voltage, - - .set_voltage = twl4030ldo_set_voltage, - .get_voltage = twl4030ldo_get_voltage, - - .enable = twl4030reg_enable, - .disable = twl4030reg_disable, - .is_enabled = twl4030reg_is_enabled, - - .set_mode = twl4030reg_set_mode, - - .get_status = twl4030reg_get_status, -}; - -/*----------------------------------------------------------------------*/ - -/* - * Fixed voltage LDOs don't have a VSEL field to update. - */ -static int twl4030fixed_list_voltage(struct regulator_dev *rdev, unsigned index) -{ - struct twlreg_info *info = rdev_get_drvdata(rdev); - - return info->min_mV * 1000; -} - -static int twl4030fixed_get_voltage(struct regulator_dev *rdev) -{ - struct twlreg_info *info = rdev_get_drvdata(rdev); - - return info->min_mV * 1000; -} - -static struct regulator_ops twl4030fixed_ops = { - .list_voltage = twl4030fixed_list_voltage, - - .get_voltage = twl4030fixed_get_voltage, - - .enable = twl4030reg_enable, - .disable = twl4030reg_disable, - .is_enabled = twl4030reg_is_enabled, - - .set_mode = twl4030reg_set_mode, - - .get_status = twl4030reg_get_status, -}; - -/*----------------------------------------------------------------------*/ - -#define TWL_ADJUSTABLE_LDO(label, offset, num) { \ - .base = offset, \ - .id = num, \ - .table_len = ARRAY_SIZE(label##_VSEL_table), \ - .table = label##_VSEL_table, \ - .desc = { \ - .name = #label, \ - .id = TWL4030_REG_##label, \ - .n_voltages = ARRAY_SIZE(label##_VSEL_table), \ - .ops = &twl4030ldo_ops, \ - .type = REGULATOR_VOLTAGE, \ - .owner = THIS_MODULE, \ - }, \ - } - -#define TWL_FIXED_LDO(label, offset, mVolts, num) { \ - .base = offset, \ - .id = num, \ - .min_mV = mVolts, \ - .desc = { \ - .name = #label, \ - .id = TWL4030_REG_##label, \ - .n_voltages = 1, \ - .ops = &twl4030fixed_ops, \ - .type = REGULATOR_VOLTAGE, \ - .owner = THIS_MODULE, \ - }, \ - } - -/* - * We list regulators here if systems need some level of - * software control over them after boot. 
- */ -static struct twlreg_info twl4030_regs[] = { - TWL_ADJUSTABLE_LDO(VAUX1, 0x17, 1), - TWL_ADJUSTABLE_LDO(VAUX2_4030, 0x1b, 2), - TWL_ADJUSTABLE_LDO(VAUX2, 0x1b, 2), - TWL_ADJUSTABLE_LDO(VAUX3, 0x1f, 3), - TWL_ADJUSTABLE_LDO(VAUX4, 0x23, 4), - TWL_ADJUSTABLE_LDO(VMMC1, 0x27, 5), - TWL_ADJUSTABLE_LDO(VMMC2, 0x2b, 6), - /* - TWL_ADJUSTABLE_LDO(VPLL1, 0x2f, 7), - */ - TWL_ADJUSTABLE_LDO(VPLL2, 0x33, 8), - TWL_ADJUSTABLE_LDO(VSIM, 0x37, 9), - TWL_ADJUSTABLE_LDO(VDAC, 0x3b, 10), - /* - TWL_ADJUSTABLE_LDO(VINTANA1, 0x3f, 11), - TWL_ADJUSTABLE_LDO(VINTANA2, 0x43, 12), - TWL_ADJUSTABLE_LDO(VINTDIG, 0x47, 13), - TWL_SMPS(VIO, 0x4b, 14), - TWL_SMPS(VDD1, 0x55, 15), - TWL_SMPS(VDD2, 0x63, 16), - */ - TWL_FIXED_LDO(VUSB1V5, 0x71, 1500, 17), - TWL_FIXED_LDO(VUSB1V8, 0x74, 1800, 18), - TWL_FIXED_LDO(VUSB3V1, 0x77, 3100, 19), - /* VUSBCP is managed *only* by the USB subchip */ -}; - -static int twl4030reg_probe(struct platform_device *pdev) -{ - int i; - struct twlreg_info *info; - struct regulator_init_data *initdata; - struct regulation_constraints *c; - struct regulator_dev *rdev; - - for (i = 0, info = NULL; i < ARRAY_SIZE(twl4030_regs); i++) { - if (twl4030_regs[i].desc.id != pdev->id) - continue; - info = twl4030_regs + i; - break; - } - if (!info) - return -ENODEV; - - initdata = pdev->dev.platform_data; - if (!initdata) - return -EINVAL; - - /* Constrain board-specific capabilities according to what - * this driver and the chip itself can actually do. - */ - c = &initdata->constraints; - c->valid_modes_mask &= REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY; - c->valid_ops_mask &= REGULATOR_CHANGE_VOLTAGE - | REGULATOR_CHANGE_MODE - | REGULATOR_CHANGE_STATUS; - - rdev = regulator_register(&info->desc, &pdev->dev, initdata, info); - if (IS_ERR(rdev)) { - dev_err(&pdev->dev, "can't register %s, %ld\n", - info->desc.name, PTR_ERR(rdev)); - return PTR_ERR(rdev); - } - platform_set_drvdata(pdev, rdev); - - /* NOTE: many regulators support short-circuit IRQs (presentable - * as REGULATOR_OVER_CURRENT notifications?) configured via: - * - SC_CONFIG - * - SC_DETECT1 (vintana2, vmmc1/2, vaux1/2/3/4) - * - SC_DETECT2 (vusb, vdac, vio, vdd1/2, vpll2) - * - IT_CONFIG - */ - - return 0; -} - -static int __devexit twl4030reg_remove(struct platform_device *pdev) -{ - regulator_unregister(platform_get_drvdata(pdev)); - return 0; -} - -MODULE_ALIAS("platform:twl4030_reg"); - -static struct platform_driver twl4030reg_driver = { - .probe = twl4030reg_probe, - .remove = __devexit_p(twl4030reg_remove), - /* NOTE: short name, to work around driver model truncation of - * "twl4030_regulator.12" (and friends) to "twl4030_regulator.1". 
- */ - .driver.name = "twl4030_reg", - .driver.owner = THIS_MODULE, -}; - -static int __init twl4030reg_init(void) -{ - return platform_driver_register(&twl4030reg_driver); -} -subsys_initcall(twl4030reg_init); - -static void __exit twl4030reg_exit(void) -{ - platform_driver_unregister(&twl4030reg_driver); -} -module_exit(twl4030reg_exit) - -MODULE_DESCRIPTION("TWL4030 regulator driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index af1ba7ae2857..7da6efb3e953 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -80,7 +80,7 @@ obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o obj-$(CONFIG_RTC_DRV_STMP) += rtc-stmp3xxx.o obj-$(CONFIG_RTC_DRV_SUN4V) += rtc-sun4v.o obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o -obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl4030.o +obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl.o obj-$(CONFIG_RTC_DRV_TX4939) += rtc-tx4939.o obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c new file mode 100644 index 000000000000..93565be12fae --- /dev/null +++ b/drivers/rtc/rtc-twl.c @@ -0,0 +1,540 @@ +/* + * rtc-twl4030.c -- TWL4030 Real Time Clock interface + * + * Copyright (C) 2007 MontaVista Software, Inc + * Author: Alexandre Rusev + * + * Based on original TI driver twl4030-rtc.c + * Copyright (C) 2006 Texas Instruments, Inc. + * + * Based on rtc-omap.c + * Copyright (C) 2003 MontaVista Software, Inc. + * Author: George G. Davis or + * Copyright (C) 2006 David Brownell + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + + +/* + * RTC block register offsets (use TWL_MODULE_RTC) + */ +#define REG_SECONDS_REG 0x00 +#define REG_MINUTES_REG 0x01 +#define REG_HOURS_REG 0x02 +#define REG_DAYS_REG 0x03 +#define REG_MONTHS_REG 0x04 +#define REG_YEARS_REG 0x05 +#define REG_WEEKS_REG 0x06 + +#define REG_ALARM_SECONDS_REG 0x07 +#define REG_ALARM_MINUTES_REG 0x08 +#define REG_ALARM_HOURS_REG 0x09 +#define REG_ALARM_DAYS_REG 0x0A +#define REG_ALARM_MONTHS_REG 0x0B +#define REG_ALARM_YEARS_REG 0x0C + +#define REG_RTC_CTRL_REG 0x0D +#define REG_RTC_STATUS_REG 0x0E +#define REG_RTC_INTERRUPTS_REG 0x0F + +#define REG_RTC_COMP_LSB_REG 0x10 +#define REG_RTC_COMP_MSB_REG 0x11 + +/* RTC_CTRL_REG bitfields */ +#define BIT_RTC_CTRL_REG_STOP_RTC_M 0x01 +#define BIT_RTC_CTRL_REG_ROUND_30S_M 0x02 +#define BIT_RTC_CTRL_REG_AUTO_COMP_M 0x04 +#define BIT_RTC_CTRL_REG_MODE_12_24_M 0x08 +#define BIT_RTC_CTRL_REG_TEST_MODE_M 0x10 +#define BIT_RTC_CTRL_REG_SET_32_COUNTER_M 0x20 +#define BIT_RTC_CTRL_REG_GET_TIME_M 0x40 + +/* RTC_STATUS_REG bitfields */ +#define BIT_RTC_STATUS_REG_RUN_M 0x02 +#define BIT_RTC_STATUS_REG_1S_EVENT_M 0x04 +#define BIT_RTC_STATUS_REG_1M_EVENT_M 0x08 +#define BIT_RTC_STATUS_REG_1H_EVENT_M 0x10 +#define BIT_RTC_STATUS_REG_1D_EVENT_M 0x20 +#define BIT_RTC_STATUS_REG_ALARM_M 0x40 +#define BIT_RTC_STATUS_REG_POWER_UP_M 0x80 + +/* RTC_INTERRUPTS_REG bitfields */ +#define BIT_RTC_INTERRUPTS_REG_EVERY_M 0x03 +#define BIT_RTC_INTERRUPTS_REG_IT_TIMER_M 0x04 +#define BIT_RTC_INTERRUPTS_REG_IT_ALARM_M 0x08 + + +/* REG_SECONDS_REG through REG_YEARS_REG is how many registers? 
*/ +#define ALL_TIME_REGS 6 + +/*----------------------------------------------------------------------*/ + +/* + * Supports 1 byte read from TWL4030 RTC register. + */ +static int twl4030_rtc_read_u8(u8 *data, u8 reg) +{ + int ret; + + ret = twl4030_i2c_read_u8(TWL4030_MODULE_RTC, data, reg); + if (ret < 0) + pr_err("twl4030_rtc: Could not read TWL4030" + "register %X - error %d\n", reg, ret); + return ret; +} + +/* + * Supports 1 byte write to TWL4030 RTC registers. + */ +static int twl4030_rtc_write_u8(u8 data, u8 reg) +{ + int ret; + + ret = twl4030_i2c_write_u8(TWL4030_MODULE_RTC, data, reg); + if (ret < 0) + pr_err("twl4030_rtc: Could not write TWL4030" + "register %X - error %d\n", reg, ret); + return ret; +} + +/* + * Cache the value for timer/alarm interrupts register; this is + * only changed by callers holding rtc ops lock (or resume). + */ +static unsigned char rtc_irq_bits; + +/* + * Enable 1/second update and/or alarm interrupts. + */ +static int set_rtc_irq_bit(unsigned char bit) +{ + unsigned char val; + int ret; + + val = rtc_irq_bits | bit; + val &= ~BIT_RTC_INTERRUPTS_REG_EVERY_M; + ret = twl4030_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG); + if (ret == 0) + rtc_irq_bits = val; + + return ret; +} + +/* + * Disable update and/or alarm interrupts. + */ +static int mask_rtc_irq_bit(unsigned char bit) +{ + unsigned char val; + int ret; + + val = rtc_irq_bits & ~bit; + ret = twl4030_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG); + if (ret == 0) + rtc_irq_bits = val; + + return ret; +} + +static int twl4030_rtc_alarm_irq_enable(struct device *dev, unsigned enabled) +{ + int ret; + + if (enabled) + ret = set_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M); + else + ret = mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M); + + return ret; +} + +static int twl4030_rtc_update_irq_enable(struct device *dev, unsigned enabled) +{ + int ret; + + if (enabled) + ret = set_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M); + else + ret = mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M); + + return ret; +} + +/* + * Gets current TWL4030 RTC time and date parameters. 
+ * + * The RTC's time/alarm representation is not what gmtime(3) requires + * Linux to use: + * + * - Months are 1..12 vs Linux 0-11 + * - Years are 0..99 vs Linux 1900..N (we assume 21st century) + */ +static int twl4030_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + unsigned char rtc_data[ALL_TIME_REGS + 1]; + int ret; + u8 save_control; + + ret = twl4030_rtc_read_u8(&save_control, REG_RTC_CTRL_REG); + if (ret < 0) + return ret; + + save_control |= BIT_RTC_CTRL_REG_GET_TIME_M; + + ret = twl4030_rtc_write_u8(save_control, REG_RTC_CTRL_REG); + if (ret < 0) + return ret; + + ret = twl4030_i2c_read(TWL4030_MODULE_RTC, rtc_data, + REG_SECONDS_REG, ALL_TIME_REGS); + + if (ret < 0) { + dev_err(dev, "rtc_read_time error %d\n", ret); + return ret; + } + + tm->tm_sec = bcd2bin(rtc_data[0]); + tm->tm_min = bcd2bin(rtc_data[1]); + tm->tm_hour = bcd2bin(rtc_data[2]); + tm->tm_mday = bcd2bin(rtc_data[3]); + tm->tm_mon = bcd2bin(rtc_data[4]) - 1; + tm->tm_year = bcd2bin(rtc_data[5]) + 100; + + return ret; +} + +static int twl4030_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + unsigned char save_control; + unsigned char rtc_data[ALL_TIME_REGS + 1]; + int ret; + + rtc_data[1] = bin2bcd(tm->tm_sec); + rtc_data[2] = bin2bcd(tm->tm_min); + rtc_data[3] = bin2bcd(tm->tm_hour); + rtc_data[4] = bin2bcd(tm->tm_mday); + rtc_data[5] = bin2bcd(tm->tm_mon + 1); + rtc_data[6] = bin2bcd(tm->tm_year - 100); + + /* Stop RTC while updating the TC registers */ + ret = twl4030_rtc_read_u8(&save_control, REG_RTC_CTRL_REG); + if (ret < 0) + goto out; + + save_control &= ~BIT_RTC_CTRL_REG_STOP_RTC_M; + twl4030_rtc_write_u8(save_control, REG_RTC_CTRL_REG); + if (ret < 0) + goto out; + + /* update all the time registers in one shot */ + ret = twl4030_i2c_write(TWL4030_MODULE_RTC, rtc_data, + REG_SECONDS_REG, ALL_TIME_REGS); + if (ret < 0) { + dev_err(dev, "rtc_set_time error %d\n", ret); + goto out; + } + + /* Start back RTC */ + save_control |= BIT_RTC_CTRL_REG_STOP_RTC_M; + ret = twl4030_rtc_write_u8(save_control, REG_RTC_CTRL_REG); + +out: + return ret; +} + +/* + * Gets current TWL4030 RTC alarm time. 
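[Editor's illustration, not part of the patch: the BCD and month/year adjustments performed by twl4030_rtc_read_time() above can be shown stand-alone. The six sample register bytes below are invented for illustration; bcd2bin() here reimplements the kernel helper of the same name.]

#include <stdio.h>

static int bcd2bin(unsigned char val)	/* same conversion as the kernel helper */
{
	return (val & 0x0f) + (val >> 4) * 10;
}

int main(void)
{
	/* pretend twl4030_i2c_read() returned these six BCD bytes,
	 * REG_SECONDS_REG through REG_YEARS_REG */
	unsigned char rtc_data[6] = { 0x04, 0x28, 0x23, 0x20, 0x09, 0x09 };

	int tm_sec  = bcd2bin(rtc_data[0]);
	int tm_min  = bcd2bin(rtc_data[1]);
	int tm_hour = bcd2bin(rtc_data[2]);
	int tm_mday = bcd2bin(rtc_data[3]);
	int tm_mon  = bcd2bin(rtc_data[4]) - 1;		/* chip: 1..12, Linux: 0..11 */
	int tm_year = bcd2bin(rtc_data[5]) + 100;	/* chip: 0..99, Linux: years since 1900 */

	printf("%04d-%02d-%02d %02d:%02d:%02d\n",
	       tm_year + 1900, tm_mon + 1, tm_mday, tm_hour, tm_min, tm_sec);
	return 0;
}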
+ */ +static int twl4030_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm) +{ + unsigned char rtc_data[ALL_TIME_REGS + 1]; + int ret; + + ret = twl4030_i2c_read(TWL4030_MODULE_RTC, rtc_data, + REG_ALARM_SECONDS_REG, ALL_TIME_REGS); + if (ret < 0) { + dev_err(dev, "rtc_read_alarm error %d\n", ret); + return ret; + } + + /* some of these fields may be wildcard/"match all" */ + alm->time.tm_sec = bcd2bin(rtc_data[0]); + alm->time.tm_min = bcd2bin(rtc_data[1]); + alm->time.tm_hour = bcd2bin(rtc_data[2]); + alm->time.tm_mday = bcd2bin(rtc_data[3]); + alm->time.tm_mon = bcd2bin(rtc_data[4]) - 1; + alm->time.tm_year = bcd2bin(rtc_data[5]) + 100; + + /* report cached alarm enable state */ + if (rtc_irq_bits & BIT_RTC_INTERRUPTS_REG_IT_ALARM_M) + alm->enabled = 1; + + return ret; +} + +static int twl4030_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) +{ + unsigned char alarm_data[ALL_TIME_REGS + 1]; + int ret; + + ret = twl4030_rtc_alarm_irq_enable(dev, 0); + if (ret) + goto out; + + alarm_data[1] = bin2bcd(alm->time.tm_sec); + alarm_data[2] = bin2bcd(alm->time.tm_min); + alarm_data[3] = bin2bcd(alm->time.tm_hour); + alarm_data[4] = bin2bcd(alm->time.tm_mday); + alarm_data[5] = bin2bcd(alm->time.tm_mon + 1); + alarm_data[6] = bin2bcd(alm->time.tm_year - 100); + + /* update all the alarm registers in one shot */ + ret = twl4030_i2c_write(TWL4030_MODULE_RTC, alarm_data, + REG_ALARM_SECONDS_REG, ALL_TIME_REGS); + if (ret) { + dev_err(dev, "rtc_set_alarm error %d\n", ret); + goto out; + } + + if (alm->enabled) + ret = twl4030_rtc_alarm_irq_enable(dev, 1); +out: + return ret; +} + +static irqreturn_t twl4030_rtc_interrupt(int irq, void *rtc) +{ + unsigned long events = 0; + int ret = IRQ_NONE; + int res; + u8 rd_reg; + +#ifdef CONFIG_LOCKDEP + /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which + * we don't want and can't tolerate. Although it might be + * friendlier not to borrow this thread context... + */ + local_irq_enable(); +#endif + + res = twl4030_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG); + if (res) + goto out; + /* + * Figure out source of interrupt: ALARM or TIMER in RTC_STATUS_REG. + * only one (ALARM or RTC) interrupt source may be enabled + * at time, we also could check our results + * by reading RTS_INTERRUPTS_REGISTER[IT_TIMER,IT_ALARM] + */ + if (rd_reg & BIT_RTC_STATUS_REG_ALARM_M) + events |= RTC_IRQF | RTC_AF; + else + events |= RTC_IRQF | RTC_UF; + + res = twl4030_rtc_write_u8(rd_reg | BIT_RTC_STATUS_REG_ALARM_M, + REG_RTC_STATUS_REG); + if (res) + goto out; + + /* Clear on Read enabled. RTC_IT bit of TWL4030_INT_PWR_ISR1 + * needs 2 reads to clear the interrupt. One read is done in + * do_twl4030_pwrirq(). Doing the second read, to clear + * the bit. + * + * FIXME the reason PWR_ISR1 needs an extra read is that + * RTC_IF retriggered until we cleared REG_ALARM_M above. + * But re-reading like this is a bad hack; by doing so we + * risk wrongly clearing status for some other IRQ (losing + * the interrupt). Be smarter about handling RTC_UF ... 
+ */ + res = twl4030_i2c_read_u8(TWL4030_MODULE_INT, + &rd_reg, TWL4030_INT_PWR_ISR1); + if (res) + goto out; + + /* Notify RTC core on event */ + rtc_update_irq(rtc, 1, events); + + ret = IRQ_HANDLED; +out: + return ret; +} + +static struct rtc_class_ops twl4030_rtc_ops = { + .read_time = twl4030_rtc_read_time, + .set_time = twl4030_rtc_set_time, + .read_alarm = twl4030_rtc_read_alarm, + .set_alarm = twl4030_rtc_set_alarm, + .alarm_irq_enable = twl4030_rtc_alarm_irq_enable, + .update_irq_enable = twl4030_rtc_update_irq_enable, +}; + +/*----------------------------------------------------------------------*/ + +static int __devinit twl4030_rtc_probe(struct platform_device *pdev) +{ + struct rtc_device *rtc; + int ret = 0; + int irq = platform_get_irq(pdev, 0); + u8 rd_reg; + + if (irq <= 0) + return -EINVAL; + + rtc = rtc_device_register(pdev->name, + &pdev->dev, &twl4030_rtc_ops, THIS_MODULE); + if (IS_ERR(rtc)) { + ret = PTR_ERR(rtc); + dev_err(&pdev->dev, "can't register RTC device, err %ld\n", + PTR_ERR(rtc)); + goto out0; + + } + + platform_set_drvdata(pdev, rtc); + + ret = twl4030_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG); + if (ret < 0) + goto out1; + + if (rd_reg & BIT_RTC_STATUS_REG_POWER_UP_M) + dev_warn(&pdev->dev, "Power up reset detected.\n"); + + if (rd_reg & BIT_RTC_STATUS_REG_ALARM_M) + dev_warn(&pdev->dev, "Pending Alarm interrupt detected.\n"); + + /* Clear RTC Power up reset and pending alarm interrupts */ + ret = twl4030_rtc_write_u8(rd_reg, REG_RTC_STATUS_REG); + if (ret < 0) + goto out1; + + ret = request_irq(irq, twl4030_rtc_interrupt, + IRQF_TRIGGER_RISING, + dev_name(&rtc->dev), rtc); + if (ret < 0) { + dev_err(&pdev->dev, "IRQ is not free.\n"); + goto out1; + } + + /* Check RTC module status, Enable if it is off */ + ret = twl4030_rtc_read_u8(&rd_reg, REG_RTC_CTRL_REG); + if (ret < 0) + goto out2; + + if (!(rd_reg & BIT_RTC_CTRL_REG_STOP_RTC_M)) { + dev_info(&pdev->dev, "Enabling TWL4030-RTC.\n"); + rd_reg = BIT_RTC_CTRL_REG_STOP_RTC_M; + ret = twl4030_rtc_write_u8(rd_reg, REG_RTC_CTRL_REG); + if (ret < 0) + goto out2; + } + + /* init cached IRQ enable bits */ + ret = twl4030_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG); + if (ret < 0) + goto out2; + + return ret; + +out2: + free_irq(irq, rtc); +out1: + rtc_device_unregister(rtc); +out0: + return ret; +} + +/* + * Disable all TWL4030 RTC module interrupts. + * Sets status flag to free. 
+ */ +static int __devexit twl4030_rtc_remove(struct platform_device *pdev) +{ + /* leave rtc running, but disable irqs */ + struct rtc_device *rtc = platform_get_drvdata(pdev); + int irq = platform_get_irq(pdev, 0); + + mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M); + mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M); + + free_irq(irq, rtc); + + rtc_device_unregister(rtc); + platform_set_drvdata(pdev, NULL); + return 0; +} + +static void twl4030_rtc_shutdown(struct platform_device *pdev) +{ + /* mask timer interrupts, but leave alarm interrupts on to enable + power-on when alarm is triggered */ + mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M); +} + +#ifdef CONFIG_PM + +static unsigned char irqstat; + +static int twl4030_rtc_suspend(struct platform_device *pdev, pm_message_t state) +{ + irqstat = rtc_irq_bits; + + mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M); + return 0; +} + +static int twl4030_rtc_resume(struct platform_device *pdev) +{ + set_rtc_irq_bit(irqstat); + return 0; +} + +#else +#define twl4030_rtc_suspend NULL +#define twl4030_rtc_resume NULL +#endif + +MODULE_ALIAS("platform:twl4030_rtc"); + +static struct platform_driver twl4030rtc_driver = { + .probe = twl4030_rtc_probe, + .remove = __devexit_p(twl4030_rtc_remove), + .shutdown = twl4030_rtc_shutdown, + .suspend = twl4030_rtc_suspend, + .resume = twl4030_rtc_resume, + .driver = { + .owner = THIS_MODULE, + .name = "twl4030_rtc", + }, +}; + +static int __init twl4030_rtc_init(void) +{ + return platform_driver_register(&twl4030rtc_driver); +} +module_init(twl4030_rtc_init); + +static void __exit twl4030_rtc_exit(void) +{ + platform_driver_unregister(&twl4030rtc_driver); +} +module_exit(twl4030_rtc_exit); + +MODULE_AUTHOR("Texas Instruments, MontaVista Software"); +MODULE_LICENSE("GPL"); diff --git a/drivers/rtc/rtc-twl4030.c b/drivers/rtc/rtc-twl4030.c deleted file mode 100644 index 9c8c70c497dc..000000000000 --- a/drivers/rtc/rtc-twl4030.c +++ /dev/null @@ -1,540 +0,0 @@ -/* - * rtc-twl4030.c -- TWL4030 Real Time Clock interface - * - * Copyright (C) 2007 MontaVista Software, Inc - * Author: Alexandre Rusev - * - * Based on original TI driver twl4030-rtc.c - * Copyright (C) 2006 Texas Instruments, Inc. - * - * Based on rtc-omap.c - * Copyright (C) 2003 MontaVista Software, Inc. - * Author: George G. Davis or - * Copyright (C) 2006 David Brownell - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - - -/* - * RTC block register offsets (use TWL_MODULE_RTC) - */ -#define REG_SECONDS_REG 0x00 -#define REG_MINUTES_REG 0x01 -#define REG_HOURS_REG 0x02 -#define REG_DAYS_REG 0x03 -#define REG_MONTHS_REG 0x04 -#define REG_YEARS_REG 0x05 -#define REG_WEEKS_REG 0x06 - -#define REG_ALARM_SECONDS_REG 0x07 -#define REG_ALARM_MINUTES_REG 0x08 -#define REG_ALARM_HOURS_REG 0x09 -#define REG_ALARM_DAYS_REG 0x0A -#define REG_ALARM_MONTHS_REG 0x0B -#define REG_ALARM_YEARS_REG 0x0C - -#define REG_RTC_CTRL_REG 0x0D -#define REG_RTC_STATUS_REG 0x0E -#define REG_RTC_INTERRUPTS_REG 0x0F - -#define REG_RTC_COMP_LSB_REG 0x10 -#define REG_RTC_COMP_MSB_REG 0x11 - -/* RTC_CTRL_REG bitfields */ -#define BIT_RTC_CTRL_REG_STOP_RTC_M 0x01 -#define BIT_RTC_CTRL_REG_ROUND_30S_M 0x02 -#define BIT_RTC_CTRL_REG_AUTO_COMP_M 0x04 -#define BIT_RTC_CTRL_REG_MODE_12_24_M 0x08 -#define BIT_RTC_CTRL_REG_TEST_MODE_M 0x10 -#define BIT_RTC_CTRL_REG_SET_32_COUNTER_M 0x20 -#define BIT_RTC_CTRL_REG_GET_TIME_M 0x40 - -/* RTC_STATUS_REG bitfields */ -#define BIT_RTC_STATUS_REG_RUN_M 0x02 -#define BIT_RTC_STATUS_REG_1S_EVENT_M 0x04 -#define BIT_RTC_STATUS_REG_1M_EVENT_M 0x08 -#define BIT_RTC_STATUS_REG_1H_EVENT_M 0x10 -#define BIT_RTC_STATUS_REG_1D_EVENT_M 0x20 -#define BIT_RTC_STATUS_REG_ALARM_M 0x40 -#define BIT_RTC_STATUS_REG_POWER_UP_M 0x80 - -/* RTC_INTERRUPTS_REG bitfields */ -#define BIT_RTC_INTERRUPTS_REG_EVERY_M 0x03 -#define BIT_RTC_INTERRUPTS_REG_IT_TIMER_M 0x04 -#define BIT_RTC_INTERRUPTS_REG_IT_ALARM_M 0x08 - - -/* REG_SECONDS_REG through REG_YEARS_REG is how many registers? */ -#define ALL_TIME_REGS 6 - -/*----------------------------------------------------------------------*/ - -/* - * Supports 1 byte read from TWL4030 RTC register. - */ -static int twl4030_rtc_read_u8(u8 *data, u8 reg) -{ - int ret; - - ret = twl4030_i2c_read_u8(TWL4030_MODULE_RTC, data, reg); - if (ret < 0) - pr_err("twl4030_rtc: Could not read TWL4030" - "register %X - error %d\n", reg, ret); - return ret; -} - -/* - * Supports 1 byte write to TWL4030 RTC registers. - */ -static int twl4030_rtc_write_u8(u8 data, u8 reg) -{ - int ret; - - ret = twl4030_i2c_write_u8(TWL4030_MODULE_RTC, data, reg); - if (ret < 0) - pr_err("twl4030_rtc: Could not write TWL4030" - "register %X - error %d\n", reg, ret); - return ret; -} - -/* - * Cache the value for timer/alarm interrupts register; this is - * only changed by callers holding rtc ops lock (or resume). - */ -static unsigned char rtc_irq_bits; - -/* - * Enable 1/second update and/or alarm interrupts. - */ -static int set_rtc_irq_bit(unsigned char bit) -{ - unsigned char val; - int ret; - - val = rtc_irq_bits | bit; - val &= ~BIT_RTC_INTERRUPTS_REG_EVERY_M; - ret = twl4030_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG); - if (ret == 0) - rtc_irq_bits = val; - - return ret; -} - -/* - * Disable update and/or alarm interrupts. 
- */ -static int mask_rtc_irq_bit(unsigned char bit) -{ - unsigned char val; - int ret; - - val = rtc_irq_bits & ~bit; - ret = twl4030_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG); - if (ret == 0) - rtc_irq_bits = val; - - return ret; -} - -static int twl4030_rtc_alarm_irq_enable(struct device *dev, unsigned enabled) -{ - int ret; - - if (enabled) - ret = set_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M); - else - ret = mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M); - - return ret; -} - -static int twl4030_rtc_update_irq_enable(struct device *dev, unsigned enabled) -{ - int ret; - - if (enabled) - ret = set_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M); - else - ret = mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M); - - return ret; -} - -/* - * Gets current TWL4030 RTC time and date parameters. - * - * The RTC's time/alarm representation is not what gmtime(3) requires - * Linux to use: - * - * - Months are 1..12 vs Linux 0-11 - * - Years are 0..99 vs Linux 1900..N (we assume 21st century) - */ -static int twl4030_rtc_read_time(struct device *dev, struct rtc_time *tm) -{ - unsigned char rtc_data[ALL_TIME_REGS + 1]; - int ret; - u8 save_control; - - ret = twl4030_rtc_read_u8(&save_control, REG_RTC_CTRL_REG); - if (ret < 0) - return ret; - - save_control |= BIT_RTC_CTRL_REG_GET_TIME_M; - - ret = twl4030_rtc_write_u8(save_control, REG_RTC_CTRL_REG); - if (ret < 0) - return ret; - - ret = twl4030_i2c_read(TWL4030_MODULE_RTC, rtc_data, - REG_SECONDS_REG, ALL_TIME_REGS); - - if (ret < 0) { - dev_err(dev, "rtc_read_time error %d\n", ret); - return ret; - } - - tm->tm_sec = bcd2bin(rtc_data[0]); - tm->tm_min = bcd2bin(rtc_data[1]); - tm->tm_hour = bcd2bin(rtc_data[2]); - tm->tm_mday = bcd2bin(rtc_data[3]); - tm->tm_mon = bcd2bin(rtc_data[4]) - 1; - tm->tm_year = bcd2bin(rtc_data[5]) + 100; - - return ret; -} - -static int twl4030_rtc_set_time(struct device *dev, struct rtc_time *tm) -{ - unsigned char save_control; - unsigned char rtc_data[ALL_TIME_REGS + 1]; - int ret; - - rtc_data[1] = bin2bcd(tm->tm_sec); - rtc_data[2] = bin2bcd(tm->tm_min); - rtc_data[3] = bin2bcd(tm->tm_hour); - rtc_data[4] = bin2bcd(tm->tm_mday); - rtc_data[5] = bin2bcd(tm->tm_mon + 1); - rtc_data[6] = bin2bcd(tm->tm_year - 100); - - /* Stop RTC while updating the TC registers */ - ret = twl4030_rtc_read_u8(&save_control, REG_RTC_CTRL_REG); - if (ret < 0) - goto out; - - save_control &= ~BIT_RTC_CTRL_REG_STOP_RTC_M; - twl4030_rtc_write_u8(save_control, REG_RTC_CTRL_REG); - if (ret < 0) - goto out; - - /* update all the time registers in one shot */ - ret = twl4030_i2c_write(TWL4030_MODULE_RTC, rtc_data, - REG_SECONDS_REG, ALL_TIME_REGS); - if (ret < 0) { - dev_err(dev, "rtc_set_time error %d\n", ret); - goto out; - } - - /* Start back RTC */ - save_control |= BIT_RTC_CTRL_REG_STOP_RTC_M; - ret = twl4030_rtc_write_u8(save_control, REG_RTC_CTRL_REG); - -out: - return ret; -} - -/* - * Gets current TWL4030 RTC alarm time. 
- */ -static int twl4030_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm) -{ - unsigned char rtc_data[ALL_TIME_REGS + 1]; - int ret; - - ret = twl4030_i2c_read(TWL4030_MODULE_RTC, rtc_data, - REG_ALARM_SECONDS_REG, ALL_TIME_REGS); - if (ret < 0) { - dev_err(dev, "rtc_read_alarm error %d\n", ret); - return ret; - } - - /* some of these fields may be wildcard/"match all" */ - alm->time.tm_sec = bcd2bin(rtc_data[0]); - alm->time.tm_min = bcd2bin(rtc_data[1]); - alm->time.tm_hour = bcd2bin(rtc_data[2]); - alm->time.tm_mday = bcd2bin(rtc_data[3]); - alm->time.tm_mon = bcd2bin(rtc_data[4]) - 1; - alm->time.tm_year = bcd2bin(rtc_data[5]) + 100; - - /* report cached alarm enable state */ - if (rtc_irq_bits & BIT_RTC_INTERRUPTS_REG_IT_ALARM_M) - alm->enabled = 1; - - return ret; -} - -static int twl4030_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) -{ - unsigned char alarm_data[ALL_TIME_REGS + 1]; - int ret; - - ret = twl4030_rtc_alarm_irq_enable(dev, 0); - if (ret) - goto out; - - alarm_data[1] = bin2bcd(alm->time.tm_sec); - alarm_data[2] = bin2bcd(alm->time.tm_min); - alarm_data[3] = bin2bcd(alm->time.tm_hour); - alarm_data[4] = bin2bcd(alm->time.tm_mday); - alarm_data[5] = bin2bcd(alm->time.tm_mon + 1); - alarm_data[6] = bin2bcd(alm->time.tm_year - 100); - - /* update all the alarm registers in one shot */ - ret = twl4030_i2c_write(TWL4030_MODULE_RTC, alarm_data, - REG_ALARM_SECONDS_REG, ALL_TIME_REGS); - if (ret) { - dev_err(dev, "rtc_set_alarm error %d\n", ret); - goto out; - } - - if (alm->enabled) - ret = twl4030_rtc_alarm_irq_enable(dev, 1); -out: - return ret; -} - -static irqreturn_t twl4030_rtc_interrupt(int irq, void *rtc) -{ - unsigned long events = 0; - int ret = IRQ_NONE; - int res; - u8 rd_reg; - -#ifdef CONFIG_LOCKDEP - /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which - * we don't want and can't tolerate. Although it might be - * friendlier not to borrow this thread context... - */ - local_irq_enable(); -#endif - - res = twl4030_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG); - if (res) - goto out; - /* - * Figure out source of interrupt: ALARM or TIMER in RTC_STATUS_REG. - * only one (ALARM or RTC) interrupt source may be enabled - * at time, we also could check our results - * by reading RTS_INTERRUPTS_REGISTER[IT_TIMER,IT_ALARM] - */ - if (rd_reg & BIT_RTC_STATUS_REG_ALARM_M) - events |= RTC_IRQF | RTC_AF; - else - events |= RTC_IRQF | RTC_UF; - - res = twl4030_rtc_write_u8(rd_reg | BIT_RTC_STATUS_REG_ALARM_M, - REG_RTC_STATUS_REG); - if (res) - goto out; - - /* Clear on Read enabled. RTC_IT bit of TWL4030_INT_PWR_ISR1 - * needs 2 reads to clear the interrupt. One read is done in - * do_twl4030_pwrirq(). Doing the second read, to clear - * the bit. - * - * FIXME the reason PWR_ISR1 needs an extra read is that - * RTC_IF retriggered until we cleared REG_ALARM_M above. - * But re-reading like this is a bad hack; by doing so we - * risk wrongly clearing status for some other IRQ (losing - * the interrupt). Be smarter about handling RTC_UF ... 
- */ - res = twl4030_i2c_read_u8(TWL4030_MODULE_INT, - &rd_reg, TWL4030_INT_PWR_ISR1); - if (res) - goto out; - - /* Notify RTC core on event */ - rtc_update_irq(rtc, 1, events); - - ret = IRQ_HANDLED; -out: - return ret; -} - -static struct rtc_class_ops twl4030_rtc_ops = { - .read_time = twl4030_rtc_read_time, - .set_time = twl4030_rtc_set_time, - .read_alarm = twl4030_rtc_read_alarm, - .set_alarm = twl4030_rtc_set_alarm, - .alarm_irq_enable = twl4030_rtc_alarm_irq_enable, - .update_irq_enable = twl4030_rtc_update_irq_enable, -}; - -/*----------------------------------------------------------------------*/ - -static int __devinit twl4030_rtc_probe(struct platform_device *pdev) -{ - struct rtc_device *rtc; - int ret = 0; - int irq = platform_get_irq(pdev, 0); - u8 rd_reg; - - if (irq <= 0) - return -EINVAL; - - rtc = rtc_device_register(pdev->name, - &pdev->dev, &twl4030_rtc_ops, THIS_MODULE); - if (IS_ERR(rtc)) { - ret = PTR_ERR(rtc); - dev_err(&pdev->dev, "can't register RTC device, err %ld\n", - PTR_ERR(rtc)); - goto out0; - - } - - platform_set_drvdata(pdev, rtc); - - ret = twl4030_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG); - if (ret < 0) - goto out1; - - if (rd_reg & BIT_RTC_STATUS_REG_POWER_UP_M) - dev_warn(&pdev->dev, "Power up reset detected.\n"); - - if (rd_reg & BIT_RTC_STATUS_REG_ALARM_M) - dev_warn(&pdev->dev, "Pending Alarm interrupt detected.\n"); - - /* Clear RTC Power up reset and pending alarm interrupts */ - ret = twl4030_rtc_write_u8(rd_reg, REG_RTC_STATUS_REG); - if (ret < 0) - goto out1; - - ret = request_irq(irq, twl4030_rtc_interrupt, - IRQF_TRIGGER_RISING, - dev_name(&rtc->dev), rtc); - if (ret < 0) { - dev_err(&pdev->dev, "IRQ is not free.\n"); - goto out1; - } - - /* Check RTC module status, Enable if it is off */ - ret = twl4030_rtc_read_u8(&rd_reg, REG_RTC_CTRL_REG); - if (ret < 0) - goto out2; - - if (!(rd_reg & BIT_RTC_CTRL_REG_STOP_RTC_M)) { - dev_info(&pdev->dev, "Enabling TWL4030-RTC.\n"); - rd_reg = BIT_RTC_CTRL_REG_STOP_RTC_M; - ret = twl4030_rtc_write_u8(rd_reg, REG_RTC_CTRL_REG); - if (ret < 0) - goto out2; - } - - /* init cached IRQ enable bits */ - ret = twl4030_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG); - if (ret < 0) - goto out2; - - return ret; - -out2: - free_irq(irq, rtc); -out1: - rtc_device_unregister(rtc); -out0: - return ret; -} - -/* - * Disable all TWL4030 RTC module interrupts. - * Sets status flag to free. 
- */ -static int __devexit twl4030_rtc_remove(struct platform_device *pdev) -{ - /* leave rtc running, but disable irqs */ - struct rtc_device *rtc = platform_get_drvdata(pdev); - int irq = platform_get_irq(pdev, 0); - - mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M); - mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M); - - free_irq(irq, rtc); - - rtc_device_unregister(rtc); - platform_set_drvdata(pdev, NULL); - return 0; -} - -static void twl4030_rtc_shutdown(struct platform_device *pdev) -{ - /* mask timer interrupts, but leave alarm interrupts on to enable - power-on when alarm is triggered */ - mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M); -} - -#ifdef CONFIG_PM - -static unsigned char irqstat; - -static int twl4030_rtc_suspend(struct platform_device *pdev, pm_message_t state) -{ - irqstat = rtc_irq_bits; - - mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M); - return 0; -} - -static int twl4030_rtc_resume(struct platform_device *pdev) -{ - set_rtc_irq_bit(irqstat); - return 0; -} - -#else -#define twl4030_rtc_suspend NULL -#define twl4030_rtc_resume NULL -#endif - -MODULE_ALIAS("platform:twl4030_rtc"); - -static struct platform_driver twl4030rtc_driver = { - .probe = twl4030_rtc_probe, - .remove = __devexit_p(twl4030_rtc_remove), - .shutdown = twl4030_rtc_shutdown, - .suspend = twl4030_rtc_suspend, - .resume = twl4030_rtc_resume, - .driver = { - .owner = THIS_MODULE, - .name = "twl4030_rtc", - }, -}; - -static int __init twl4030_rtc_init(void) -{ - return platform_driver_register(&twl4030rtc_driver); -} -module_init(twl4030_rtc_init); - -static void __exit twl4030_rtc_exit(void) -{ - platform_driver_unregister(&twl4030rtc_driver); -} -module_exit(twl4030_rtc_exit); - -MODULE_AUTHOR("Texas Instruments, MontaVista Software"); -MODULE_LICENSE("GPL"); diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c index bd9883f41e63..3acbdb82bcf9 100644 --- a/drivers/usb/otg/twl4030-usb.c +++ b/drivers/usb/otg/twl4030-usb.c @@ -33,7 +33,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/video/omap/lcd_2430sdp.c b/drivers/video/omap/lcd_2430sdp.c index 760645d9dbb6..3764a36d9142 100644 --- a/drivers/video/omap/lcd_2430sdp.c +++ b/drivers/video/omap/lcd_2430sdp.c @@ -25,7 +25,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/watchdog/twl4030_wdt.c b/drivers/watchdog/twl4030_wdt.c index cb46556f2973..20968b2aadef 100644 --- a/drivers/watchdog/twl4030_wdt.c +++ b/drivers/watchdog/twl4030_wdt.c @@ -26,7 +26,7 @@ #include #include #include -#include +#include #define TWL4030_WATCHDOG_CFG_REG_OFFS 0x3 diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h new file mode 100644 index 000000000000..a50bcf8a4048 --- /dev/null +++ b/include/linux/i2c/twl.h @@ -0,0 +1,550 @@ +/* + * twl4030.h - header for TWL4030 PM and audio CODEC device + * + * Copyright (C) 2005-2006 Texas Instruments, Inc. + * + * Based on tlv320aic23.c: + * Copyright (c) by Kai Svahn + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#ifndef __TWL4030_H_ +#define __TWL4030_H_ + +#include +#include + +/* + * Using the twl4030 core we address registers using a pair + * { module id, relative register offset } + * which that core then maps to the relevant + * { i2c slave, absolute register address } + * + * The module IDs are meaningful only to the twl4030 core code, + * which uses them as array indices to look up the first register + * address each module uses within a given i2c slave. + */ + +/* Slave 0 (i2c address 0x48) */ +#define TWL4030_MODULE_USB 0x00 + +/* Slave 1 (i2c address 0x49) */ +#define TWL4030_MODULE_AUDIO_VOICE 0x01 +#define TWL4030_MODULE_GPIO 0x02 +#define TWL4030_MODULE_INTBR 0x03 +#define TWL4030_MODULE_PIH 0x04 +#define TWL4030_MODULE_TEST 0x05 + +/* Slave 2 (i2c address 0x4a) */ +#define TWL4030_MODULE_KEYPAD 0x06 +#define TWL4030_MODULE_MADC 0x07 +#define TWL4030_MODULE_INTERRUPTS 0x08 +#define TWL4030_MODULE_LED 0x09 +#define TWL4030_MODULE_MAIN_CHARGE 0x0A +#define TWL4030_MODULE_PRECHARGE 0x0B +#define TWL4030_MODULE_PWM0 0x0C +#define TWL4030_MODULE_PWM1 0x0D +#define TWL4030_MODULE_PWMA 0x0E +#define TWL4030_MODULE_PWMB 0x0F + +#define TWL5031_MODULE_ACCESSORY 0x10 +#define TWL5031_MODULE_INTERRUPTS 0x11 + +/* Slave 3 (i2c address 0x4b) */ +#define TWL4030_MODULE_BACKUP 0x12 +#define TWL4030_MODULE_INT 0x13 +#define TWL4030_MODULE_PM_MASTER 0x14 +#define TWL4030_MODULE_PM_RECEIVER 0x15 +#define TWL4030_MODULE_RTC 0x16 +#define TWL4030_MODULE_SECURED_REG 0x17 + +/* + * Read and write single 8-bit registers + */ +int twl4030_i2c_write_u8(u8 mod_no, u8 val, u8 reg); +int twl4030_i2c_read_u8(u8 mod_no, u8 *val, u8 reg); + +/* + * Read and write several 8-bit registers at once. + * + * IMPORTANT: For twl4030_i2c_write(), allocate num_bytes + 1 + * for the value, and populate your data starting at offset 1. + */ +int twl4030_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); +int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); + +/*----------------------------------------------------------------------*/ + +/* + * NOTE: at up to 1024 registers, this is a big chip. + * + * Avoid putting register declarations in this file, instead of into + * a driver-private file, unless some of the registers in a block + * need to be shared with other drivers. One example is blocks that + * have Secondary IRQ Handler (SIH) registers. 
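(Illustrative sketch only; this helper is hypothetical and not part of the patch.) The "num_bytes + 1" rule documented above means callers of the multi-byte write leave offset 0 of their buffer free, since that byte is used by twl4030_i2c_write() itself, and place their data from offset 1 onward:

static int example_write_two(u8 mod_no, u8 reg, u8 a, u8 b)
{
	u8 buf[2 + 1];	/* num_bytes + 1: buf[0] is used by twl4030_i2c_write() */

	buf[1] = a;	/* written to reg */
	buf[2] = b;	/* written to reg + 1 */
	return twl4030_i2c_write(mod_no, buf, reg, 2);
}

The RTC alarm code earlier in this series follows the same pattern: alarm_data[] is sized ALL_TIME_REGS + 1 and filled starting at index 1.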
+ */ + +#define TWL4030_SIH_CTRL_EXCLEN_MASK BIT(0) +#define TWL4030_SIH_CTRL_PENDDIS_MASK BIT(1) +#define TWL4030_SIH_CTRL_COR_MASK BIT(2) + +/*----------------------------------------------------------------------*/ + +/* + * GPIO Block Register offsets (use TWL4030_MODULE_GPIO) + */ + +#define REG_GPIODATAIN1 0x0 +#define REG_GPIODATAIN2 0x1 +#define REG_GPIODATAIN3 0x2 +#define REG_GPIODATADIR1 0x3 +#define REG_GPIODATADIR2 0x4 +#define REG_GPIODATADIR3 0x5 +#define REG_GPIODATAOUT1 0x6 +#define REG_GPIODATAOUT2 0x7 +#define REG_GPIODATAOUT3 0x8 +#define REG_CLEARGPIODATAOUT1 0x9 +#define REG_CLEARGPIODATAOUT2 0xA +#define REG_CLEARGPIODATAOUT3 0xB +#define REG_SETGPIODATAOUT1 0xC +#define REG_SETGPIODATAOUT2 0xD +#define REG_SETGPIODATAOUT3 0xE +#define REG_GPIO_DEBEN1 0xF +#define REG_GPIO_DEBEN2 0x10 +#define REG_GPIO_DEBEN3 0x11 +#define REG_GPIO_CTRL 0x12 +#define REG_GPIOPUPDCTR1 0x13 +#define REG_GPIOPUPDCTR2 0x14 +#define REG_GPIOPUPDCTR3 0x15 +#define REG_GPIOPUPDCTR4 0x16 +#define REG_GPIOPUPDCTR5 0x17 +#define REG_GPIO_ISR1A 0x19 +#define REG_GPIO_ISR2A 0x1A +#define REG_GPIO_ISR3A 0x1B +#define REG_GPIO_IMR1A 0x1C +#define REG_GPIO_IMR2A 0x1D +#define REG_GPIO_IMR3A 0x1E +#define REG_GPIO_ISR1B 0x1F +#define REG_GPIO_ISR2B 0x20 +#define REG_GPIO_ISR3B 0x21 +#define REG_GPIO_IMR1B 0x22 +#define REG_GPIO_IMR2B 0x23 +#define REG_GPIO_IMR3B 0x24 +#define REG_GPIO_EDR1 0x28 +#define REG_GPIO_EDR2 0x29 +#define REG_GPIO_EDR3 0x2A +#define REG_GPIO_EDR4 0x2B +#define REG_GPIO_EDR5 0x2C +#define REG_GPIO_SIH_CTRL 0x2D + +/* Up to 18 signals are available as GPIOs, when their + * pins are not assigned to another use (such as ULPI/USB). + */ +#define TWL4030_GPIO_MAX 18 + +/*----------------------------------------------------------------------*/ + +/* + * Keypad register offsets (use TWL4030_MODULE_KEYPAD) + * ... SIH/interrupt only + */ + +#define TWL4030_KEYPAD_KEYP_ISR1 0x11 +#define TWL4030_KEYPAD_KEYP_IMR1 0x12 +#define TWL4030_KEYPAD_KEYP_ISR2 0x13 +#define TWL4030_KEYPAD_KEYP_IMR2 0x14 +#define TWL4030_KEYPAD_KEYP_SIR 0x15 /* test register */ +#define TWL4030_KEYPAD_KEYP_EDR 0x16 +#define TWL4030_KEYPAD_KEYP_SIH_CTRL 0x17 + +/*----------------------------------------------------------------------*/ + +/* + * Multichannel ADC register offsets (use TWL4030_MODULE_MADC) + * ... 
SIH/interrupt only + */ + +#define TWL4030_MADC_ISR1 0x61 +#define TWL4030_MADC_IMR1 0x62 +#define TWL4030_MADC_ISR2 0x63 +#define TWL4030_MADC_IMR2 0x64 +#define TWL4030_MADC_SIR 0x65 /* test register */ +#define TWL4030_MADC_EDR 0x66 +#define TWL4030_MADC_SIH_CTRL 0x67 + +/*----------------------------------------------------------------------*/ + +/* + * Battery charger register offsets (use TWL4030_MODULE_INTERRUPTS) + */ + +#define TWL4030_INTERRUPTS_BCIISR1A 0x0 +#define TWL4030_INTERRUPTS_BCIISR2A 0x1 +#define TWL4030_INTERRUPTS_BCIIMR1A 0x2 +#define TWL4030_INTERRUPTS_BCIIMR2A 0x3 +#define TWL4030_INTERRUPTS_BCIISR1B 0x4 +#define TWL4030_INTERRUPTS_BCIISR2B 0x5 +#define TWL4030_INTERRUPTS_BCIIMR1B 0x6 +#define TWL4030_INTERRUPTS_BCIIMR2B 0x7 +#define TWL4030_INTERRUPTS_BCISIR1 0x8 /* test register */ +#define TWL4030_INTERRUPTS_BCISIR2 0x9 /* test register */ +#define TWL4030_INTERRUPTS_BCIEDR1 0xa +#define TWL4030_INTERRUPTS_BCIEDR2 0xb +#define TWL4030_INTERRUPTS_BCIEDR3 0xc +#define TWL4030_INTERRUPTS_BCISIHCTRL 0xd + +/*----------------------------------------------------------------------*/ + +/* + * Power Interrupt block register offsets (use TWL4030_MODULE_INT) + */ + +#define TWL4030_INT_PWR_ISR1 0x0 +#define TWL4030_INT_PWR_IMR1 0x1 +#define TWL4030_INT_PWR_ISR2 0x2 +#define TWL4030_INT_PWR_IMR2 0x3 +#define TWL4030_INT_PWR_SIR 0x4 /* test register */ +#define TWL4030_INT_PWR_EDR1 0x5 +#define TWL4030_INT_PWR_EDR2 0x6 +#define TWL4030_INT_PWR_SIH_CTRL 0x7 + +/*----------------------------------------------------------------------*/ + +/* + * Accessory Interrupts + */ +#define TWL5031_ACIIMR_LSB 0x05 +#define TWL5031_ACIIMR_MSB 0x06 +#define TWL5031_ACIIDR_LSB 0x07 +#define TWL5031_ACIIDR_MSB 0x08 +#define TWL5031_ACCISR1 0x0F +#define TWL5031_ACCIMR1 0x10 +#define TWL5031_ACCISR2 0x11 +#define TWL5031_ACCIMR2 0x12 +#define TWL5031_ACCSIR 0x13 +#define TWL5031_ACCEDR1 0x14 +#define TWL5031_ACCSIHCTRL 0x15 + +/*----------------------------------------------------------------------*/ + +/* + * Battery Charger Controller + */ + +#define TWL5031_INTERRUPTS_BCIISR1 0x0 +#define TWL5031_INTERRUPTS_BCIIMR1 0x1 +#define TWL5031_INTERRUPTS_BCIISR2 0x2 +#define TWL5031_INTERRUPTS_BCIIMR2 0x3 +#define TWL5031_INTERRUPTS_BCISIR 0x4 +#define TWL5031_INTERRUPTS_BCIEDR1 0x5 +#define TWL5031_INTERRUPTS_BCIEDR2 0x6 +#define TWL5031_INTERRUPTS_BCISIHCTRL 0x7 + +/*----------------------------------------------------------------------*/ + +/* Power bus message definitions */ + +/* The TWL4030/5030 splits its power-management resources (the various + * regulators, clock and reset lines) into 3 processor groups - P1, P2 and + * P3. These groups can then be configured to transition between sleep, wait-on + * and active states by sending messages to the power bus. 
See Section 5.4.2 + * Power Resources of TWL4030 TRM + */ + +/* Processor groups */ +#define DEV_GRP_NULL 0x0 +#define DEV_GRP_P1 0x1 /* P1: all OMAP devices */ +#define DEV_GRP_P2 0x2 /* P2: all Modem devices */ +#define DEV_GRP_P3 0x4 /* P3: all peripheral devices */ + +/* Resource groups */ +#define RES_GRP_RES 0x0 /* Reserved */ +#define RES_GRP_PP 0x1 /* Power providers */ +#define RES_GRP_RC 0x2 /* Reset and control */ +#define RES_GRP_PP_RC 0x3 +#define RES_GRP_PR 0x4 /* Power references */ +#define RES_GRP_PP_PR 0x5 +#define RES_GRP_RC_PR 0x6 +#define RES_GRP_ALL 0x7 /* All resource groups */ + +#define RES_TYPE2_R0 0x0 + +#define RES_TYPE_ALL 0x7 + +/* Resource states */ +#define RES_STATE_WRST 0xF +#define RES_STATE_ACTIVE 0xE +#define RES_STATE_SLEEP 0x8 +#define RES_STATE_OFF 0x0 + +/* Power resources */ + +/* Power providers */ +#define RES_VAUX1 1 +#define RES_VAUX2 2 +#define RES_VAUX3 3 +#define RES_VAUX4 4 +#define RES_VMMC1 5 +#define RES_VMMC2 6 +#define RES_VPLL1 7 +#define RES_VPLL2 8 +#define RES_VSIM 9 +#define RES_VDAC 10 +#define RES_VINTANA1 11 +#define RES_VINTANA2 12 +#define RES_VINTDIG 13 +#define RES_VIO 14 +#define RES_VDD1 15 +#define RES_VDD2 16 +#define RES_VUSB_1V5 17 +#define RES_VUSB_1V8 18 +#define RES_VUSB_3V1 19 +#define RES_VUSBCP 20 +#define RES_REGEN 21 +/* Reset and control */ +#define RES_NRES_PWRON 22 +#define RES_CLKEN 23 +#define RES_SYSEN 24 +#define RES_HFCLKOUT 25 +#define RES_32KCLKOUT 26 +#define RES_RESET 27 +/* Power Reference */ +#define RES_Main_Ref 28 + +#define TOTAL_RESOURCES 28 +/* + * Power Bus Message Format ... these can be sent individually by Linux, + * but are usually part of downloaded scripts that are run when various + * power events are triggered. + * + * Broadcast Message (16 Bits): + * DEV_GRP[15:13] MT[12] RES_GRP[11:9] RES_TYPE2[8:7] RES_TYPE[6:4] + * RES_STATE[3:0] + * + * Singular Message (16 Bits): + * DEV_GRP[15:13] MT[12] RES_ID[11:4] RES_STATE[3:0] + */ + +#define MSG_BROADCAST(devgrp, grp, type, type2, state) \ + ( (devgrp) << 13 | 1 << 12 | (grp) << 9 | (type2) << 7 \ + | (type) << 4 | (state)) + +#define MSG_SINGULAR(devgrp, id, state) \ + ((devgrp) << 13 | 0 << 12 | (id) << 4 | (state)) + +/*----------------------------------------------------------------------*/ + +struct twl4030_clock_init_data { + bool ck32k_lowpwr_enable; +}; + +struct twl4030_bci_platform_data { + int *battery_tmp_tbl; + unsigned int tblsize; +}; + +/* TWL4030_GPIO_MAX (18) GPIOs, with interrupts */ +struct twl4030_gpio_platform_data { + int gpio_base; + unsigned irq_base, irq_end; + + /* package the two LED signals as output-only GPIOs? */ + bool use_leds; + + /* gpio-n should control VMMC(n+1) if BIT(n) in mmc_cd is set */ + u8 mmc_cd; + + /* if BIT(N) is set, or VMMC(n+1) is linked, debounce GPIO-N */ + u32 debounce; + + /* For gpio-N, bit (1 << N) in "pullups" is set if that pullup + * should be enabled. Else, if that bit is set in "pulldowns", + * that pulldown is enabled. Don't waste power by letting any + * digital inputs float... + */ + u32 pullups; + u32 pulldowns; + + int (*setup)(struct device *dev, + unsigned gpio, unsigned ngpio); + int (*teardown)(struct device *dev, + unsigned gpio, unsigned ngpio); +}; + +struct twl4030_madc_platform_data { + int irq_line; +}; + +/* Boards have uniqe mappings of {row, col} --> keycode. + * Column and row are 8 bits each, but range only from 0..7. + * a PERSISTENT_KEY is "always on" and never reported. 
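(Illustrative sketch; these board values are hypothetical and not taken from any machine file in this patch.) The bitmask fields of twl4030_gpio_platform_data described above might be filled in as follows:

static struct twl4030_gpio_platform_data example_gpio_data = {
	.gpio_base	= 192,			/* first TWL GPIO number on this board */
	.use_leds	= true,
	.mmc_cd		= BIT(0),		/* GPIO-0 gates VMMC1 (card detect) */
	.debounce	= BIT(0) | BIT(2),	/* debounce GPIO-0 and GPIO-2 */
	.pullups	= BIT(1),		/* enable the pullup on GPIO-1 */
	.pulldowns	= BIT(2) | BIT(7),	/* enable pulldowns on GPIO-2 and GPIO-7 */
};

Per the comment above, a given bit should be set in at most one of pullups and pulldowns; pullups is checked first.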
+ */ +#define PERSISTENT_KEY(r, c) KEY((r), (c), KEY_RESERVED) + +struct twl4030_keypad_data { + const struct matrix_keymap_data *keymap_data; + unsigned rows; + unsigned cols; + bool rep; +}; + +enum twl4030_usb_mode { + T2_USB_MODE_ULPI = 1, + T2_USB_MODE_CEA2011_3PIN = 2, +}; + +struct twl4030_usb_data { + enum twl4030_usb_mode usb_mode; +}; + +struct twl4030_ins { + u16 pmb_message; + u8 delay; +}; + +struct twl4030_script { + struct twl4030_ins *script; + unsigned size; + u8 flags; +#define TWL4030_WRST_SCRIPT (1<<0) +#define TWL4030_WAKEUP12_SCRIPT (1<<1) +#define TWL4030_WAKEUP3_SCRIPT (1<<2) +#define TWL4030_SLEEP_SCRIPT (1<<3) +}; + +struct twl4030_resconfig { + u8 resource; + u8 devgroup; /* Processor group that Power resource belongs to */ + u8 type; /* Power resource addressed, 6 / broadcast message */ + u8 type2; /* Power resource addressed, 3 / broadcast message */ + u8 remap_off; /* off state remapping */ + u8 remap_sleep; /* sleep state remapping */ +}; + +struct twl4030_power_data { + struct twl4030_script **scripts; + unsigned num; + struct twl4030_resconfig *resource_config; +#define TWL4030_RESCONFIG_UNDEF ((u8)-1) +}; + +extern void twl4030_power_init(struct twl4030_power_data *triton2_scripts); + +struct twl4030_codec_audio_data { + unsigned int audio_mclk; + unsigned int ramp_delay_value; + unsigned int hs_extmute:1; + void (*set_hs_extmute)(int mute); +}; + +struct twl4030_codec_vibra_data { + unsigned int audio_mclk; + unsigned int coexist; +}; + +struct twl4030_codec_data { + unsigned int audio_mclk; + struct twl4030_codec_audio_data *audio; + struct twl4030_codec_vibra_data *vibra; +}; + +struct twl4030_platform_data { + unsigned irq_base, irq_end; + struct twl4030_clock_init_data *clock; + struct twl4030_bci_platform_data *bci; + struct twl4030_gpio_platform_data *gpio; + struct twl4030_madc_platform_data *madc; + struct twl4030_keypad_data *keypad; + struct twl4030_usb_data *usb; + struct twl4030_power_data *power; + struct twl4030_codec_data *codec; + + /* LDO regulators */ + struct regulator_init_data *vdac; + struct regulator_init_data *vpll1; + struct regulator_init_data *vpll2; + struct regulator_init_data *vmmc1; + struct regulator_init_data *vmmc2; + struct regulator_init_data *vsim; + struct regulator_init_data *vaux1; + struct regulator_init_data *vaux2; + struct regulator_init_data *vaux3; + struct regulator_init_data *vaux4; + struct regulator_init_data *vio; + struct regulator_init_data *vdd1; + struct regulator_init_data *vdd2; + struct regulator_init_data *vintana1; + struct regulator_init_data *vintana2; + struct regulator_init_data *vintdig; +}; + +/*----------------------------------------------------------------------*/ + +int twl4030_sih_setup(int module); + +/* Offsets to Power Registers */ +#define TWL4030_VDAC_DEV_GRP 0x3B +#define TWL4030_VDAC_DEDICATED 0x3E +#define TWL4030_VAUX1_DEV_GRP 0x17 +#define TWL4030_VAUX1_DEDICATED 0x1A +#define TWL4030_VAUX2_DEV_GRP 0x1B +#define TWL4030_VAUX2_DEDICATED 0x1E +#define TWL4030_VAUX3_DEV_GRP 0x1F +#define TWL4030_VAUX3_DEDICATED 0x22 + +#if defined(CONFIG_TWL4030_BCI_BATTERY) || \ + defined(CONFIG_TWL4030_BCI_BATTERY_MODULE) + extern int twl4030charger_usb_en(int enable); +#else + static inline int twl4030charger_usb_en(int enable) { return 0; } +#endif + +/*----------------------------------------------------------------------*/ + +/* Linux-specific regulator identifiers ... for now, we only support + * the LDOs, and leave the three buck converters alone. 
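To make the message format above concrete: MSG_SINGULAR(DEV_GRP_P1, RES_VDD1, RES_STATE_OFF) evaluates to (0x1 << 13) | (15 << 4) | 0x0 = 0x20f0. Below is a minimal sketch (hypothetical script contents and delay values, not taken from any real board file) of how such messages are wrapped into a script and handed to twl4030_power_init() through the .power field of twl4030_platform_data:

static struct twl4030_ins example_sleep_seq[] = {
	/* broadcast: put all resource groups/types into SLEEP; 2 is the delay field */
	{ MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, RES_TYPE_ALL,
			RES_TYPE2_R0, RES_STATE_SLEEP), 2 },
};

static struct twl4030_script example_sleep_script = {
	.script	= example_sleep_seq,
	.size	= ARRAY_SIZE(example_sleep_seq),
	.flags	= TWL4030_SLEEP_SCRIPT,
};

static struct twl4030_script *example_scripts[] = {
	&example_sleep_script,
};

static struct twl4030_power_data example_power_data = {
	.scripts	= example_scripts,
	.num		= ARRAY_SIZE(example_scripts),
};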
VDD1 and VDD2 + * need to tie into hardware based voltage scaling (cpufreq etc), while + * VIO is generally fixed. + */ + +/* EXTERNAL dc-to-dc buck converters */ +#define TWL4030_REG_VDD1 0 +#define TWL4030_REG_VDD2 1 +#define TWL4030_REG_VIO 2 + +/* EXTERNAL LDOs */ +#define TWL4030_REG_VDAC 3 +#define TWL4030_REG_VPLL1 4 +#define TWL4030_REG_VPLL2 5 /* not on all chips */ +#define TWL4030_REG_VMMC1 6 +#define TWL4030_REG_VMMC2 7 /* not on all chips */ +#define TWL4030_REG_VSIM 8 /* not on all chips */ +#define TWL4030_REG_VAUX1 9 /* not on all chips */ +#define TWL4030_REG_VAUX2_4030 10 /* (twl4030-specific) */ +#define TWL4030_REG_VAUX2 11 /* (twl5030 and newer) */ +#define TWL4030_REG_VAUX3 12 /* not on all chips */ +#define TWL4030_REG_VAUX4 13 /* not on all chips */ + +/* INTERNAL LDOs */ +#define TWL4030_REG_VINTANA1 14 +#define TWL4030_REG_VINTANA2 15 +#define TWL4030_REG_VINTDIG 16 +#define TWL4030_REG_VUSB1V5 17 +#define TWL4030_REG_VUSB1V8 18 +#define TWL4030_REG_VUSB3V1 19 + +#endif /* End of __TWL4030_H */ diff --git a/include/linux/i2c/twl4030.h b/include/linux/i2c/twl4030.h deleted file mode 100644 index a50bcf8a4048..000000000000 --- a/include/linux/i2c/twl4030.h +++ /dev/null @@ -1,550 +0,0 @@ -/* - * twl4030.h - header for TWL4030 PM and audio CODEC device - * - * Copyright (C) 2005-2006 Texas Instruments, Inc. - * - * Based on tlv320aic23.c: - * Copyright (c) by Kai Svahn - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#ifndef __TWL4030_H_ -#define __TWL4030_H_ - -#include -#include - -/* - * Using the twl4030 core we address registers using a pair - * { module id, relative register offset } - * which that core then maps to the relevant - * { i2c slave, absolute register address } - * - * The module IDs are meaningful only to the twl4030 core code, - * which uses them as array indices to look up the first register - * address each module uses within a given i2c slave. 
- */ - -/* Slave 0 (i2c address 0x48) */ -#define TWL4030_MODULE_USB 0x00 - -/* Slave 1 (i2c address 0x49) */ -#define TWL4030_MODULE_AUDIO_VOICE 0x01 -#define TWL4030_MODULE_GPIO 0x02 -#define TWL4030_MODULE_INTBR 0x03 -#define TWL4030_MODULE_PIH 0x04 -#define TWL4030_MODULE_TEST 0x05 - -/* Slave 2 (i2c address 0x4a) */ -#define TWL4030_MODULE_KEYPAD 0x06 -#define TWL4030_MODULE_MADC 0x07 -#define TWL4030_MODULE_INTERRUPTS 0x08 -#define TWL4030_MODULE_LED 0x09 -#define TWL4030_MODULE_MAIN_CHARGE 0x0A -#define TWL4030_MODULE_PRECHARGE 0x0B -#define TWL4030_MODULE_PWM0 0x0C -#define TWL4030_MODULE_PWM1 0x0D -#define TWL4030_MODULE_PWMA 0x0E -#define TWL4030_MODULE_PWMB 0x0F - -#define TWL5031_MODULE_ACCESSORY 0x10 -#define TWL5031_MODULE_INTERRUPTS 0x11 - -/* Slave 3 (i2c address 0x4b) */ -#define TWL4030_MODULE_BACKUP 0x12 -#define TWL4030_MODULE_INT 0x13 -#define TWL4030_MODULE_PM_MASTER 0x14 -#define TWL4030_MODULE_PM_RECEIVER 0x15 -#define TWL4030_MODULE_RTC 0x16 -#define TWL4030_MODULE_SECURED_REG 0x17 - -/* - * Read and write single 8-bit registers - */ -int twl4030_i2c_write_u8(u8 mod_no, u8 val, u8 reg); -int twl4030_i2c_read_u8(u8 mod_no, u8 *val, u8 reg); - -/* - * Read and write several 8-bit registers at once. - * - * IMPORTANT: For twl4030_i2c_write(), allocate num_bytes + 1 - * for the value, and populate your data starting at offset 1. - */ -int twl4030_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); -int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); - -/*----------------------------------------------------------------------*/ - -/* - * NOTE: at up to 1024 registers, this is a big chip. - * - * Avoid putting register declarations in this file, instead of into - * a driver-private file, unless some of the registers in a block - * need to be shared with other drivers. One example is blocks that - * have Secondary IRQ Handler (SIH) registers. 
- */ - -#define TWL4030_SIH_CTRL_EXCLEN_MASK BIT(0) -#define TWL4030_SIH_CTRL_PENDDIS_MASK BIT(1) -#define TWL4030_SIH_CTRL_COR_MASK BIT(2) - -/*----------------------------------------------------------------------*/ - -/* - * GPIO Block Register offsets (use TWL4030_MODULE_GPIO) - */ - -#define REG_GPIODATAIN1 0x0 -#define REG_GPIODATAIN2 0x1 -#define REG_GPIODATAIN3 0x2 -#define REG_GPIODATADIR1 0x3 -#define REG_GPIODATADIR2 0x4 -#define REG_GPIODATADIR3 0x5 -#define REG_GPIODATAOUT1 0x6 -#define REG_GPIODATAOUT2 0x7 -#define REG_GPIODATAOUT3 0x8 -#define REG_CLEARGPIODATAOUT1 0x9 -#define REG_CLEARGPIODATAOUT2 0xA -#define REG_CLEARGPIODATAOUT3 0xB -#define REG_SETGPIODATAOUT1 0xC -#define REG_SETGPIODATAOUT2 0xD -#define REG_SETGPIODATAOUT3 0xE -#define REG_GPIO_DEBEN1 0xF -#define REG_GPIO_DEBEN2 0x10 -#define REG_GPIO_DEBEN3 0x11 -#define REG_GPIO_CTRL 0x12 -#define REG_GPIOPUPDCTR1 0x13 -#define REG_GPIOPUPDCTR2 0x14 -#define REG_GPIOPUPDCTR3 0x15 -#define REG_GPIOPUPDCTR4 0x16 -#define REG_GPIOPUPDCTR5 0x17 -#define REG_GPIO_ISR1A 0x19 -#define REG_GPIO_ISR2A 0x1A -#define REG_GPIO_ISR3A 0x1B -#define REG_GPIO_IMR1A 0x1C -#define REG_GPIO_IMR2A 0x1D -#define REG_GPIO_IMR3A 0x1E -#define REG_GPIO_ISR1B 0x1F -#define REG_GPIO_ISR2B 0x20 -#define REG_GPIO_ISR3B 0x21 -#define REG_GPIO_IMR1B 0x22 -#define REG_GPIO_IMR2B 0x23 -#define REG_GPIO_IMR3B 0x24 -#define REG_GPIO_EDR1 0x28 -#define REG_GPIO_EDR2 0x29 -#define REG_GPIO_EDR3 0x2A -#define REG_GPIO_EDR4 0x2B -#define REG_GPIO_EDR5 0x2C -#define REG_GPIO_SIH_CTRL 0x2D - -/* Up to 18 signals are available as GPIOs, when their - * pins are not assigned to another use (such as ULPI/USB). - */ -#define TWL4030_GPIO_MAX 18 - -/*----------------------------------------------------------------------*/ - -/* - * Keypad register offsets (use TWL4030_MODULE_KEYPAD) - * ... SIH/interrupt only - */ - -#define TWL4030_KEYPAD_KEYP_ISR1 0x11 -#define TWL4030_KEYPAD_KEYP_IMR1 0x12 -#define TWL4030_KEYPAD_KEYP_ISR2 0x13 -#define TWL4030_KEYPAD_KEYP_IMR2 0x14 -#define TWL4030_KEYPAD_KEYP_SIR 0x15 /* test register */ -#define TWL4030_KEYPAD_KEYP_EDR 0x16 -#define TWL4030_KEYPAD_KEYP_SIH_CTRL 0x17 - -/*----------------------------------------------------------------------*/ - -/* - * Multichannel ADC register offsets (use TWL4030_MODULE_MADC) - * ... 
SIH/interrupt only - */ - -#define TWL4030_MADC_ISR1 0x61 -#define TWL4030_MADC_IMR1 0x62 -#define TWL4030_MADC_ISR2 0x63 -#define TWL4030_MADC_IMR2 0x64 -#define TWL4030_MADC_SIR 0x65 /* test register */ -#define TWL4030_MADC_EDR 0x66 -#define TWL4030_MADC_SIH_CTRL 0x67 - -/*----------------------------------------------------------------------*/ - -/* - * Battery charger register offsets (use TWL4030_MODULE_INTERRUPTS) - */ - -#define TWL4030_INTERRUPTS_BCIISR1A 0x0 -#define TWL4030_INTERRUPTS_BCIISR2A 0x1 -#define TWL4030_INTERRUPTS_BCIIMR1A 0x2 -#define TWL4030_INTERRUPTS_BCIIMR2A 0x3 -#define TWL4030_INTERRUPTS_BCIISR1B 0x4 -#define TWL4030_INTERRUPTS_BCIISR2B 0x5 -#define TWL4030_INTERRUPTS_BCIIMR1B 0x6 -#define TWL4030_INTERRUPTS_BCIIMR2B 0x7 -#define TWL4030_INTERRUPTS_BCISIR1 0x8 /* test register */ -#define TWL4030_INTERRUPTS_BCISIR2 0x9 /* test register */ -#define TWL4030_INTERRUPTS_BCIEDR1 0xa -#define TWL4030_INTERRUPTS_BCIEDR2 0xb -#define TWL4030_INTERRUPTS_BCIEDR3 0xc -#define TWL4030_INTERRUPTS_BCISIHCTRL 0xd - -/*----------------------------------------------------------------------*/ - -/* - * Power Interrupt block register offsets (use TWL4030_MODULE_INT) - */ - -#define TWL4030_INT_PWR_ISR1 0x0 -#define TWL4030_INT_PWR_IMR1 0x1 -#define TWL4030_INT_PWR_ISR2 0x2 -#define TWL4030_INT_PWR_IMR2 0x3 -#define TWL4030_INT_PWR_SIR 0x4 /* test register */ -#define TWL4030_INT_PWR_EDR1 0x5 -#define TWL4030_INT_PWR_EDR2 0x6 -#define TWL4030_INT_PWR_SIH_CTRL 0x7 - -/*----------------------------------------------------------------------*/ - -/* - * Accessory Interrupts - */ -#define TWL5031_ACIIMR_LSB 0x05 -#define TWL5031_ACIIMR_MSB 0x06 -#define TWL5031_ACIIDR_LSB 0x07 -#define TWL5031_ACIIDR_MSB 0x08 -#define TWL5031_ACCISR1 0x0F -#define TWL5031_ACCIMR1 0x10 -#define TWL5031_ACCISR2 0x11 -#define TWL5031_ACCIMR2 0x12 -#define TWL5031_ACCSIR 0x13 -#define TWL5031_ACCEDR1 0x14 -#define TWL5031_ACCSIHCTRL 0x15 - -/*----------------------------------------------------------------------*/ - -/* - * Battery Charger Controller - */ - -#define TWL5031_INTERRUPTS_BCIISR1 0x0 -#define TWL5031_INTERRUPTS_BCIIMR1 0x1 -#define TWL5031_INTERRUPTS_BCIISR2 0x2 -#define TWL5031_INTERRUPTS_BCIIMR2 0x3 -#define TWL5031_INTERRUPTS_BCISIR 0x4 -#define TWL5031_INTERRUPTS_BCIEDR1 0x5 -#define TWL5031_INTERRUPTS_BCIEDR2 0x6 -#define TWL5031_INTERRUPTS_BCISIHCTRL 0x7 - -/*----------------------------------------------------------------------*/ - -/* Power bus message definitions */ - -/* The TWL4030/5030 splits its power-management resources (the various - * regulators, clock and reset lines) into 3 processor groups - P1, P2 and - * P3. These groups can then be configured to transition between sleep, wait-on - * and active states by sending messages to the power bus. 
See Section 5.4.2 - * Power Resources of TWL4030 TRM - */ - -/* Processor groups */ -#define DEV_GRP_NULL 0x0 -#define DEV_GRP_P1 0x1 /* P1: all OMAP devices */ -#define DEV_GRP_P2 0x2 /* P2: all Modem devices */ -#define DEV_GRP_P3 0x4 /* P3: all peripheral devices */ - -/* Resource groups */ -#define RES_GRP_RES 0x0 /* Reserved */ -#define RES_GRP_PP 0x1 /* Power providers */ -#define RES_GRP_RC 0x2 /* Reset and control */ -#define RES_GRP_PP_RC 0x3 -#define RES_GRP_PR 0x4 /* Power references */ -#define RES_GRP_PP_PR 0x5 -#define RES_GRP_RC_PR 0x6 -#define RES_GRP_ALL 0x7 /* All resource groups */ - -#define RES_TYPE2_R0 0x0 - -#define RES_TYPE_ALL 0x7 - -/* Resource states */ -#define RES_STATE_WRST 0xF -#define RES_STATE_ACTIVE 0xE -#define RES_STATE_SLEEP 0x8 -#define RES_STATE_OFF 0x0 - -/* Power resources */ - -/* Power providers */ -#define RES_VAUX1 1 -#define RES_VAUX2 2 -#define RES_VAUX3 3 -#define RES_VAUX4 4 -#define RES_VMMC1 5 -#define RES_VMMC2 6 -#define RES_VPLL1 7 -#define RES_VPLL2 8 -#define RES_VSIM 9 -#define RES_VDAC 10 -#define RES_VINTANA1 11 -#define RES_VINTANA2 12 -#define RES_VINTDIG 13 -#define RES_VIO 14 -#define RES_VDD1 15 -#define RES_VDD2 16 -#define RES_VUSB_1V5 17 -#define RES_VUSB_1V8 18 -#define RES_VUSB_3V1 19 -#define RES_VUSBCP 20 -#define RES_REGEN 21 -/* Reset and control */ -#define RES_NRES_PWRON 22 -#define RES_CLKEN 23 -#define RES_SYSEN 24 -#define RES_HFCLKOUT 25 -#define RES_32KCLKOUT 26 -#define RES_RESET 27 -/* Power Reference */ -#define RES_Main_Ref 28 - -#define TOTAL_RESOURCES 28 -/* - * Power Bus Message Format ... these can be sent individually by Linux, - * but are usually part of downloaded scripts that are run when various - * power events are triggered. - * - * Broadcast Message (16 Bits): - * DEV_GRP[15:13] MT[12] RES_GRP[11:9] RES_TYPE2[8:7] RES_TYPE[6:4] - * RES_STATE[3:0] - * - * Singular Message (16 Bits): - * DEV_GRP[15:13] MT[12] RES_ID[11:4] RES_STATE[3:0] - */ - -#define MSG_BROADCAST(devgrp, grp, type, type2, state) \ - ( (devgrp) << 13 | 1 << 12 | (grp) << 9 | (type2) << 7 \ - | (type) << 4 | (state)) - -#define MSG_SINGULAR(devgrp, id, state) \ - ((devgrp) << 13 | 0 << 12 | (id) << 4 | (state)) - -/*----------------------------------------------------------------------*/ - -struct twl4030_clock_init_data { - bool ck32k_lowpwr_enable; -}; - -struct twl4030_bci_platform_data { - int *battery_tmp_tbl; - unsigned int tblsize; -}; - -/* TWL4030_GPIO_MAX (18) GPIOs, with interrupts */ -struct twl4030_gpio_platform_data { - int gpio_base; - unsigned irq_base, irq_end; - - /* package the two LED signals as output-only GPIOs? */ - bool use_leds; - - /* gpio-n should control VMMC(n+1) if BIT(n) in mmc_cd is set */ - u8 mmc_cd; - - /* if BIT(N) is set, or VMMC(n+1) is linked, debounce GPIO-N */ - u32 debounce; - - /* For gpio-N, bit (1 << N) in "pullups" is set if that pullup - * should be enabled. Else, if that bit is set in "pulldowns", - * that pulldown is enabled. Don't waste power by letting any - * digital inputs float... - */ - u32 pullups; - u32 pulldowns; - - int (*setup)(struct device *dev, - unsigned gpio, unsigned ngpio); - int (*teardown)(struct device *dev, - unsigned gpio, unsigned ngpio); -}; - -struct twl4030_madc_platform_data { - int irq_line; -}; - -/* Boards have uniqe mappings of {row, col} --> keycode. - * Column and row are 8 bits each, but range only from 0..7. - * a PERSISTENT_KEY is "always on" and never reported. 
- */ -#define PERSISTENT_KEY(r, c) KEY((r), (c), KEY_RESERVED) - -struct twl4030_keypad_data { - const struct matrix_keymap_data *keymap_data; - unsigned rows; - unsigned cols; - bool rep; -}; - -enum twl4030_usb_mode { - T2_USB_MODE_ULPI = 1, - T2_USB_MODE_CEA2011_3PIN = 2, -}; - -struct twl4030_usb_data { - enum twl4030_usb_mode usb_mode; -}; - -struct twl4030_ins { - u16 pmb_message; - u8 delay; -}; - -struct twl4030_script { - struct twl4030_ins *script; - unsigned size; - u8 flags; -#define TWL4030_WRST_SCRIPT (1<<0) -#define TWL4030_WAKEUP12_SCRIPT (1<<1) -#define TWL4030_WAKEUP3_SCRIPT (1<<2) -#define TWL4030_SLEEP_SCRIPT (1<<3) -}; - -struct twl4030_resconfig { - u8 resource; - u8 devgroup; /* Processor group that Power resource belongs to */ - u8 type; /* Power resource addressed, 6 / broadcast message */ - u8 type2; /* Power resource addressed, 3 / broadcast message */ - u8 remap_off; /* off state remapping */ - u8 remap_sleep; /* sleep state remapping */ -}; - -struct twl4030_power_data { - struct twl4030_script **scripts; - unsigned num; - struct twl4030_resconfig *resource_config; -#define TWL4030_RESCONFIG_UNDEF ((u8)-1) -}; - -extern void twl4030_power_init(struct twl4030_power_data *triton2_scripts); - -struct twl4030_codec_audio_data { - unsigned int audio_mclk; - unsigned int ramp_delay_value; - unsigned int hs_extmute:1; - void (*set_hs_extmute)(int mute); -}; - -struct twl4030_codec_vibra_data { - unsigned int audio_mclk; - unsigned int coexist; -}; - -struct twl4030_codec_data { - unsigned int audio_mclk; - struct twl4030_codec_audio_data *audio; - struct twl4030_codec_vibra_data *vibra; -}; - -struct twl4030_platform_data { - unsigned irq_base, irq_end; - struct twl4030_clock_init_data *clock; - struct twl4030_bci_platform_data *bci; - struct twl4030_gpio_platform_data *gpio; - struct twl4030_madc_platform_data *madc; - struct twl4030_keypad_data *keypad; - struct twl4030_usb_data *usb; - struct twl4030_power_data *power; - struct twl4030_codec_data *codec; - - /* LDO regulators */ - struct regulator_init_data *vdac; - struct regulator_init_data *vpll1; - struct regulator_init_data *vpll2; - struct regulator_init_data *vmmc1; - struct regulator_init_data *vmmc2; - struct regulator_init_data *vsim; - struct regulator_init_data *vaux1; - struct regulator_init_data *vaux2; - struct regulator_init_data *vaux3; - struct regulator_init_data *vaux4; - struct regulator_init_data *vio; - struct regulator_init_data *vdd1; - struct regulator_init_data *vdd2; - struct regulator_init_data *vintana1; - struct regulator_init_data *vintana2; - struct regulator_init_data *vintdig; -}; - -/*----------------------------------------------------------------------*/ - -int twl4030_sih_setup(int module); - -/* Offsets to Power Registers */ -#define TWL4030_VDAC_DEV_GRP 0x3B -#define TWL4030_VDAC_DEDICATED 0x3E -#define TWL4030_VAUX1_DEV_GRP 0x17 -#define TWL4030_VAUX1_DEDICATED 0x1A -#define TWL4030_VAUX2_DEV_GRP 0x1B -#define TWL4030_VAUX2_DEDICATED 0x1E -#define TWL4030_VAUX3_DEV_GRP 0x1F -#define TWL4030_VAUX3_DEDICATED 0x22 - -#if defined(CONFIG_TWL4030_BCI_BATTERY) || \ - defined(CONFIG_TWL4030_BCI_BATTERY_MODULE) - extern int twl4030charger_usb_en(int enable); -#else - static inline int twl4030charger_usb_en(int enable) { return 0; } -#endif - -/*----------------------------------------------------------------------*/ - -/* Linux-specific regulator identifiers ... for now, we only support - * the LDOs, and leave the three buck converters alone. 
VDD1 and VDD2 - * need to tie into hardware based voltage scaling (cpufreq etc), while - * VIO is generally fixed. - */ - -/* EXTERNAL dc-to-dc buck converters */ -#define TWL4030_REG_VDD1 0 -#define TWL4030_REG_VDD2 1 -#define TWL4030_REG_VIO 2 - -/* EXTERNAL LDOs */ -#define TWL4030_REG_VDAC 3 -#define TWL4030_REG_VPLL1 4 -#define TWL4030_REG_VPLL2 5 /* not on all chips */ -#define TWL4030_REG_VMMC1 6 -#define TWL4030_REG_VMMC2 7 /* not on all chips */ -#define TWL4030_REG_VSIM 8 /* not on all chips */ -#define TWL4030_REG_VAUX1 9 /* not on all chips */ -#define TWL4030_REG_VAUX2_4030 10 /* (twl4030-specific) */ -#define TWL4030_REG_VAUX2 11 /* (twl5030 and newer) */ -#define TWL4030_REG_VAUX3 12 /* not on all chips */ -#define TWL4030_REG_VAUX4 13 /* not on all chips */ - -/* INTERNAL LDOs */ -#define TWL4030_REG_VINTANA1 14 -#define TWL4030_REG_VINTANA2 15 -#define TWL4030_REG_VINTDIG 16 -#define TWL4030_REG_VUSB1V5 17 -#define TWL4030_REG_VUSB1V8 18 -#define TWL4030_REG_VUSB3V1 19 - -#endif /* End of __TWL4030_H */ diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c index 5f1681f6ca76..c3a6ceb542cb 100644 --- a/sound/soc/codecs/twl4030.c +++ b/sound/soc/codecs/twl4030.c @@ -26,7 +26,7 @@ #include #include #include -#include +#include #include #include #include -- cgit v1.2.3 From fc7b92fca4e546184557f1c53f84ad57c66b7695 Mon Sep 17 00:00:00 2001 From: Balaji T K Date: Sun, 13 Dec 2009 21:23:33 +0100 Subject: mfd: Rename all twl4030_i2c* This patch renames function names like twl4030_i2c_write_u8, twl4030_i2c_read_u8 to twl_i2c_write_u8, twl_i2c_read_u8 and also common variable in twl-core.c Signed-off-by: Rajendra Nayak Signed-off-by: Balaji T K Signed-off-by: Santosh Shilimkar Acked-by: Kevin Hilman Signed-off-by: Samuel Ortiz --- drivers/gpio/twl4030-gpio.c | 18 ++--- drivers/input/keyboard/twl4030_keypad.c | 4 +- drivers/input/misc/twl4030-pwrbutton.c | 2 +- drivers/mfd/twl-core.c | 136 +++++++++++++++++--------------- drivers/mfd/twl4030-irq.c | 18 ++--- drivers/mfd/twl4030-power.c | 68 ++++++++-------- drivers/regulator/twl-regulator.c | 8 +- drivers/rtc/rtc-twl.c | 14 ++-- drivers/usb/otg/twl4030-usb.c | 36 ++++----- drivers/video/omap/lcd_2430sdp.c | 2 +- drivers/watchdog/twl4030_wdt.c | 2 +- include/linux/i2c/twl.h | 31 ++++++-- sound/soc/codecs/twl4030.c | 8 +- 13 files changed, 187 insertions(+), 160 deletions(-) (limited to 'include') diff --git a/drivers/gpio/twl4030-gpio.c b/drivers/gpio/twl4030-gpio.c index a320d7bfe67c..7fe881e2bdfb 100644 --- a/drivers/gpio/twl4030-gpio.c +++ b/drivers/gpio/twl4030-gpio.c @@ -80,7 +80,7 @@ static unsigned int gpio_usage_count; */ static inline int gpio_twl4030_write(u8 address, u8 data) { - return twl4030_i2c_write_u8(TWL4030_MODULE_GPIO, data, address); + return twl_i2c_write_u8(TWL4030_MODULE_GPIO, data, address); } /*----------------------------------------------------------------------*/ @@ -117,7 +117,7 @@ static inline int gpio_twl4030_read(u8 address) u8 data; int ret = 0; - ret = twl4030_i2c_read_u8(TWL4030_MODULE_GPIO, &data, address); + ret = twl_i2c_read_u8(TWL4030_MODULE_GPIO, &data, address); return (ret < 0) ? 
ret : data; } @@ -142,7 +142,7 @@ static void twl4030_led_set_value(int led, int value) cached_leden &= ~mask; else cached_leden |= mask; - status = twl4030_i2c_write_u8(TWL4030_MODULE_LED, cached_leden, + status = twl_i2c_write_u8(TWL4030_MODULE_LED, cached_leden, TWL4030_LED_LEDEN); mutex_unlock(&gpio_lock); } @@ -223,23 +223,23 @@ static int twl_request(struct gpio_chip *chip, unsigned offset) } /* initialize PWM to always-drive */ - status = twl4030_i2c_write_u8(module, 0x7f, + status = twl_i2c_write_u8(module, 0x7f, TWL4030_PWMx_PWMxOFF); if (status < 0) goto done; - status = twl4030_i2c_write_u8(module, 0x7f, + status = twl_i2c_write_u8(module, 0x7f, TWL4030_PWMx_PWMxON); if (status < 0) goto done; /* init LED to not-driven (high) */ module = TWL4030_MODULE_LED; - status = twl4030_i2c_read_u8(module, &cached_leden, + status = twl_i2c_read_u8(module, &cached_leden, TWL4030_LED_LEDEN); if (status < 0) goto done; cached_leden &= ~ledclr_mask; - status = twl4030_i2c_write_u8(module, cached_leden, + status = twl_i2c_write_u8(module, cached_leden, TWL4030_LED_LEDEN); if (status < 0) goto done; @@ -370,7 +370,7 @@ static int __devinit gpio_twl4030_pulls(u32 ups, u32 downs) message[i] = bit_mask; } - return twl4030_i2c_write(TWL4030_MODULE_GPIO, message, + return twl_i2c_write(TWL4030_MODULE_GPIO, message, REG_GPIOPUPDCTR1, 5); } @@ -387,7 +387,7 @@ static int __devinit gpio_twl4030_debounce(u32 debounce, u8 mmc_cd) debounce >>= 8; message[3] = (debounce & 0x03); - return twl4030_i2c_write(TWL4030_MODULE_GPIO, message, + return twl_i2c_write(TWL4030_MODULE_GPIO, message, REG_GPIO_DEBEN1, 3); } diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c index 1d1536a2fe46..eeaa7acb9cfc 100644 --- a/drivers/input/keyboard/twl4030_keypad.c +++ b/drivers/input/keyboard/twl4030_keypad.c @@ -133,7 +133,7 @@ struct twl4030_keypad { static int twl4030_kpread(struct twl4030_keypad *kp, u8 *data, u32 reg, u8 num_bytes) { - int ret = twl4030_i2c_read(TWL4030_MODULE_KEYPAD, data, reg, num_bytes); + int ret = twl_i2c_read(TWL4030_MODULE_KEYPAD, data, reg, num_bytes); if (ret < 0) dev_warn(kp->dbg_dev, @@ -145,7 +145,7 @@ static int twl4030_kpread(struct twl4030_keypad *kp, static int twl4030_kpwrite_u8(struct twl4030_keypad *kp, u8 data, u32 reg) { - int ret = twl4030_i2c_write_u8(TWL4030_MODULE_KEYPAD, data, reg); + int ret = twl_i2c_write_u8(TWL4030_MODULE_KEYPAD, data, reg); if (ret < 0) dev_warn(kp->dbg_dev, diff --git a/drivers/input/misc/twl4030-pwrbutton.c b/drivers/input/misc/twl4030-pwrbutton.c index a73b889fff79..bdde5c889035 100644 --- a/drivers/input/misc/twl4030-pwrbutton.c +++ b/drivers/input/misc/twl4030-pwrbutton.c @@ -49,7 +49,7 @@ static irqreturn_t powerbutton_irq(int irq, void *_pwr) local_irq_enable(); #endif - err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &value, + err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &value, STS_HW_CONDITIONS); if (!err) { input_report_key(pwr, KEY_POWER, value & PWR_PWRON_IRQ); diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c index 44714f5cf495..9021f44de2a4 100644 --- a/drivers/mfd/twl-core.c +++ b/drivers/mfd/twl-core.c @@ -1,5 +1,6 @@ /* - * twl4030_core.c - driver for TWL4030/TPS659x0 PM and audio CODEC devices + * twl_core.c - driver for TWL4030/TWL5030/TWL60X0/TPS659x0 PM + * and audio CODEC devices * * Copyright (C) 2005-2006 Texas Instruments, Inc. * @@ -55,7 +56,7 @@ * (and associated registers). 
*/ -#define DRIVER_NAME "twl4030" +#define DRIVER_NAME "twl" #if defined(CONFIG_TWL4030_BCI_BATTERY) || \ defined(CONFIG_TWL4030_BCI_BATTERY_MODULE) @@ -125,7 +126,7 @@ /* Last - for index max*/ #define TWL4030_MODULE_LAST TWL4030_MODULE_SECURED_REG -#define TWL4030_NUM_SLAVES 4 +#define TWL_NUM_SLAVES 4 #if defined(CONFIG_INPUT_TWL4030_PWRBUTTON) \ || defined(CONFIG_INPUT_TWL4030_PWBUTTON_MODULE) @@ -134,6 +135,13 @@ #define twl_has_pwrbutton() false #endif +#define SUB_CHIP_ID0 0 +#define SUB_CHIP_ID1 1 +#define SUB_CHIP_ID2 2 +#define SUB_CHIP_ID3 3 + +#define TWL_MODULE_LAST TWL4030_MODULE_LAST + /* Base Address defns for twl4030_map[] */ /* subchip/slave 0 - USB ID */ @@ -201,7 +209,7 @@ static bool inuse; /* Structure for each TWL4030 Slave */ -struct twl4030_client { +struct twl_client { struct i2c_client *client; u8 address; @@ -212,16 +220,16 @@ struct twl4030_client { struct mutex xfer_lock; }; -static struct twl4030_client twl4030_modules[TWL4030_NUM_SLAVES]; +static struct twl_client twl_modules[TWL_NUM_SLAVES]; /* mapping the module id to slave id and base address */ -struct twl4030mapping { +struct twl_mapping { unsigned char sid; /* Slave ID */ unsigned char base; /* base address */ }; -static struct twl4030mapping twl4030_map[TWL4030_MODULE_LAST + 1] = { +static struct twl_mapping twl4030_map[TWL4030_MODULE_LAST + 1] = { /* * NOTE: don't change this table without updating the * defines for TWL4030_MODULE_* @@ -262,7 +270,7 @@ static struct twl4030mapping twl4030_map[TWL4030_MODULE_LAST + 1] = { /* Exported Functions */ /** - * twl4030_i2c_write - Writes a n bit register in TWL4030 + * twl_i2c_write - Writes a n bit register in TWL4030/TWL5030/TWL60X0 * @mod_no: module number * @value: an array of num_bytes+1 containing data to write * @reg: register address (just offset will do) @@ -273,19 +281,19 @@ static struct twl4030mapping twl4030_map[TWL4030_MODULE_LAST + 1] = { * * Returns the result of operation - 0 is success */ -int twl4030_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes) +int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes) { int ret; int sid; - struct twl4030_client *twl; + struct twl_client *twl; struct i2c_msg *msg; - if (unlikely(mod_no > TWL4030_MODULE_LAST)) { + if (unlikely(mod_no > TWL_MODULE_LAST)) { pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no); return -EPERM; } sid = twl4030_map[mod_no].sid; - twl = &twl4030_modules[sid]; + twl = &twl_modules[sid]; if (unlikely(!inuse)) { pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid); @@ -318,10 +326,10 @@ int twl4030_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes) return 0; } } -EXPORT_SYMBOL(twl4030_i2c_write); +EXPORT_SYMBOL(twl_i2c_write); /** - * twl4030_i2c_read - Reads a n bit register in TWL4030 + * twl_i2c_read - Reads a n bit register in TWL4030/TWL5030/TWL60X0 * @mod_no: module number * @value: an array of num_bytes containing data to be read * @reg: register address (just offset will do) @@ -329,20 +337,20 @@ EXPORT_SYMBOL(twl4030_i2c_write); * * Returns result of operation - num_bytes is success else failure. 
*/ -int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes) +int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes) { int ret; u8 val; int sid; - struct twl4030_client *twl; + struct twl_client *twl; struct i2c_msg *msg; - if (unlikely(mod_no > TWL4030_MODULE_LAST)) { + if (unlikely(mod_no > TWL_MODULE_LAST)) { pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no); return -EPERM; } sid = twl4030_map[mod_no].sid; - twl = &twl4030_modules[sid]; + twl = &twl_modules[sid]; if (unlikely(!inuse)) { pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid); @@ -377,40 +385,40 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes) return 0; } } -EXPORT_SYMBOL(twl4030_i2c_read); +EXPORT_SYMBOL(twl_i2c_read); /** - * twl4030_i2c_write_u8 - Writes a 8 bit register in TWL4030 + * twl_i2c_write_u8 - Writes a 8 bit register in TWL4030/TWL5030/TWL60X0 * @mod_no: module number * @value: the value to be written 8 bit * @reg: register address (just offset will do) * * Returns result of operation - 0 is success */ -int twl4030_i2c_write_u8(u8 mod_no, u8 value, u8 reg) +int twl_i2c_write_u8(u8 mod_no, u8 value, u8 reg) { /* 2 bytes offset 1 contains the data offset 0 is used by i2c_write */ u8 temp_buffer[2] = { 0 }; /* offset 1 contains the data */ temp_buffer[1] = value; - return twl4030_i2c_write(mod_no, temp_buffer, reg, 1); + return twl_i2c_write(mod_no, temp_buffer, reg, 1); } -EXPORT_SYMBOL(twl4030_i2c_write_u8); +EXPORT_SYMBOL(twl_i2c_write_u8); /** - * twl4030_i2c_read_u8 - Reads a 8 bit register from TWL4030 + * twl_i2c_read_u8 - Reads a 8 bit register from TWL4030/TWL5030/TWL60X0 * @mod_no: module number * @value: the value read 8 bit * @reg: register address (just offset will do) * * Returns result of operation - 0 is success */ -int twl4030_i2c_read_u8(u8 mod_no, u8 *value, u8 reg) +int twl_i2c_read_u8(u8 mod_no, u8 *value, u8 reg) { - return twl4030_i2c_read(mod_no, value, reg, 1); + return twl_i2c_read(mod_no, value, reg, 1); } -EXPORT_SYMBOL(twl4030_i2c_read_u8); +EXPORT_SYMBOL(twl_i2c_read_u8); /*----------------------------------------------------------------------*/ @@ -420,7 +428,7 @@ add_numbered_child(unsigned chip, const char *name, int num, bool can_wakeup, int irq0, int irq1) { struct platform_device *pdev; - struct twl4030_client *twl = &twl4030_modules[chip]; + struct twl_client *twl = &twl_modules[chip]; int status; pdev = platform_device_alloc(name, num); @@ -515,23 +523,24 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features) pdata->bci, sizeof(*pdata->bci), false, /* irq0 = CHG_PRES, irq1 = BCI */ - pdata->irq_base + 8 + 1, pdata->irq_base + 2); + pdata->irq_base + BCI_PRES_INTR_OFFSET, + pdata->irq_base + BCI_INTR_OFFSET); if (IS_ERR(child)) return PTR_ERR(child); } if (twl_has_gpio() && pdata->gpio) { - child = add_child(1, "twl4030_gpio", + child = add_child(SUB_CHIP_ID1, "twl4030_gpio", pdata->gpio, sizeof(*pdata->gpio), - false, pdata->irq_base + 0, 0); + false, pdata->irq_base + GPIO_INTR_OFFSET, 0); if (IS_ERR(child)) return PTR_ERR(child); } if (twl_has_keypad() && pdata->keypad) { - child = add_child(2, "twl4030_keypad", + child = add_child(SUB_CHIP_ID2, "twl4030_keypad", pdata->keypad, sizeof(*pdata->keypad), - true, pdata->irq_base + 1, 0); + true, pdata->irq_base + KEYPAD_INTR_OFFSET, 0); if (IS_ERR(child)) return PTR_ERR(child); } @@ -539,7 +548,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features) if (twl_has_madc() && pdata->madc) { child = add_child(2, 
"twl4030_madc", pdata->madc, sizeof(*pdata->madc), - true, pdata->irq_base + 3, 0); + true, pdata->irq_base + MADC_INTR_OFFSET, 0); if (IS_ERR(child)) return PTR_ERR(child); } @@ -554,7 +563,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features) */ child = add_child(3, "twl4030_rtc", NULL, 0, - true, pdata->irq_base + 8 + 3, 0); + true, pdata->irq_base + RTC_INTR_OFFSET, 0); if (IS_ERR(child)) return PTR_ERR(child); } @@ -604,7 +613,8 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features) pdata->usb, sizeof(*pdata->usb), true, /* irq0 = USB_PRES, irq1 = USB */ - pdata->irq_base + 8 + 2, pdata->irq_base + 4); + pdata->irq_base + USB_PRES_INTR_OFFSET, + pdata->irq_base + USB_INTR_OFFSET); if (IS_ERR(child)) return PTR_ERR(child); @@ -724,7 +734,7 @@ static inline int __init protect_pm_master(void) { int e = 0; - e = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, KEY_LOCK, + e = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, KEY_LOCK, R_PROTECT_KEY); return e; } @@ -733,9 +743,9 @@ static inline int __init unprotect_pm_master(void) { int e = 0; - e |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, KEY_UNLOCK1, + e |= twl_i2c_write_u8(TWL_MODULE_PM_MASTER, KEY_UNLOCK1, R_PROTECT_KEY); - e |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, KEY_UNLOCK2, + e |= twl_i2c_write_u8(TWL_MODULE_PM_MASTER, KEY_UNLOCK2, R_PROTECT_KEY); return e; } @@ -755,7 +765,7 @@ static void clocks_init(struct device *dev, osc = clk_get(dev, "osc_sys_ck"); if (IS_ERR(osc)) { - printk(KERN_WARNING "Skipping twl4030 internal clock init and " + printk(KERN_WARNING "Skipping twl internal clock init and " "using bootloader value (unknown osc rate)\n"); return; } @@ -769,7 +779,7 @@ static void clocks_init(struct device *dev, */ osc = ERR_PTR(-EIO); - printk(KERN_WARNING "Skipping twl4030 internal clock init and " + printk(KERN_WARNING "Skipping twl internal clock init and " "using bootloader value (unknown osc rate)\n"); return; @@ -793,7 +803,7 @@ static void clocks_init(struct device *dev, e |= unprotect_pm_master(); /* effect->MADC+USB ck en */ - e |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, ctrl, R_CFG_BOOT); + e |= twl_i2c_write_u8(TWL_MODULE_PM_MASTER, ctrl, R_CFG_BOOT); e |= protect_pm_master(); if (e < 0) @@ -806,7 +816,7 @@ int twl_init_irq(int irq_num, unsigned irq_base, unsigned irq_end); int twl_exit_irq(void); int twl_init_chip_irq(const char *chip); -static int twl4030_remove(struct i2c_client *client) +static int twl_remove(struct i2c_client *client) { unsigned i; int status; @@ -815,12 +825,12 @@ static int twl4030_remove(struct i2c_client *client) if (status < 0) return status; - for (i = 0; i < TWL4030_NUM_SLAVES; i++) { - struct twl4030_client *twl = &twl4030_modules[i]; + for (i = 0; i < TWL_NUM_SLAVES; i++) { + struct twl_client *twl = &twl_modules[i]; if (twl->client && twl->client != client) i2c_unregister_device(twl->client); - twl4030_modules[i].client = NULL; + twl_modules[i].client = NULL; } inuse = false; return 0; @@ -828,7 +838,7 @@ static int twl4030_remove(struct i2c_client *client) /* NOTE: this driver only handles a single twl4030/tps659x0 chip */ static int __init -twl4030_probe(struct i2c_client *client, const struct i2c_device_id *id) +twl_probe(struct i2c_client *client, const struct i2c_device_id *id) { int status; unsigned i; @@ -849,8 +859,8 @@ twl4030_probe(struct i2c_client *client, const struct i2c_device_id *id) return -EBUSY; } - for (i = 0; i < TWL4030_NUM_SLAVES; i++) { - struct twl4030_client *twl = &twl4030_modules[i]; + for (i = 
0; i < TWL_NUM_SLAVES; i++) { + struct twl_client *twl = &twl_modules[i]; twl->address = client->addr + i; if (i == 0) @@ -889,11 +899,11 @@ twl4030_probe(struct i2c_client *client, const struct i2c_device_id *id) status = add_children(pdata, id->driver_data); fail: if (status < 0) - twl4030_remove(client); + twl_remove(client); return status; } -static const struct i2c_device_id twl4030_ids[] = { +static const struct i2c_device_id twl_ids[] = { { "twl4030", TWL4030_VAUX2 }, /* "Triton 2" */ { "twl5030", 0 }, /* T2 updated */ { "twl5031", TWL5031 }, /* TWL5030 updated */ @@ -902,28 +912,28 @@ static const struct i2c_device_id twl4030_ids[] = { { "tps65920", TPS_SUBSET }, /* fewer LDOs; no codec or charger */ { /* end of list */ }, }; -MODULE_DEVICE_TABLE(i2c, twl4030_ids); +MODULE_DEVICE_TABLE(i2c, twl_ids); /* One Client Driver , 4 Clients */ -static struct i2c_driver twl4030_driver = { +static struct i2c_driver twl_driver = { .driver.name = DRIVER_NAME, - .id_table = twl4030_ids, - .probe = twl4030_probe, - .remove = twl4030_remove, + .id_table = twl_ids, + .probe = twl_probe, + .remove = twl_remove, }; -static int __init twl4030_init(void) +static int __init twl_init(void) { - return i2c_add_driver(&twl4030_driver); + return i2c_add_driver(&twl_driver); } -subsys_initcall(twl4030_init); +subsys_initcall(twl_init); -static void __exit twl4030_exit(void) +static void __exit twl_exit(void) { - i2c_del_driver(&twl4030_driver); + i2c_del_driver(&twl_driver); } -module_exit(twl4030_exit); +module_exit(twl_exit); MODULE_AUTHOR("Texas Instruments, Inc."); -MODULE_DESCRIPTION("I2C Core interface for TWL4030"); +MODULE_DESCRIPTION("I2C Core interface for TWL"); MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c index c4528db549c6..5a62cf916987 100644 --- a/drivers/mfd/twl4030-irq.c +++ b/drivers/mfd/twl4030-irq.c @@ -296,7 +296,7 @@ static int twl4030_irq_thread(void *data) /* Wait for IRQ, then read PIH irq status (also blocking) */ wait_for_completion_interruptible(&irq_event); - ret = twl4030_i2c_read_u8(TWL4030_MODULE_PIH, &pih_isr, + ret = twl_i2c_read_u8(TWL4030_MODULE_PIH, &pih_isr, REG_PIH_ISR_P1); if (ret) { pr_warning("twl4030: I2C error %d reading PIH ISR\n", @@ -396,7 +396,7 @@ static int twl4030_init_sih_modules(unsigned line) if (sih->irq_lines <= line) continue; - status = twl4030_i2c_write(sih->module, buf, + status = twl_i2c_write(sih->module, buf, sih->mask[line].imr_offset, sih->bytes_ixr); if (status < 0) pr_err("twl4030: err %d initializing %s %s\n", @@ -410,7 +410,7 @@ static int twl4030_init_sih_modules(unsigned line) * And for PWR_INT it's not documented... */ if (sih->set_cor) { - status = twl4030_i2c_write_u8(sih->module, + status = twl_i2c_write_u8(sih->module, TWL4030_SIH_CTRL_COR_MASK, sih->control_offset); if (status < 0) @@ -438,14 +438,14 @@ static int twl4030_init_sih_modules(unsigned line) * uncommon with PWR_INT.PWRON. */ for (j = 0; j < 2; j++) { - status = twl4030_i2c_read(sih->module, rxbuf, + status = twl_i2c_read(sih->module, rxbuf, sih->mask[line].isr_offset, sih->bytes_ixr); if (status < 0) pr_err("twl4030: err %d initializing %s %s\n", status, sih->name, "ISR"); if (!sih->set_cor) - status = twl4030_i2c_write(sih->module, buf, + status = twl_i2c_write(sih->module, buf, sih->mask[line].isr_offset, sih->bytes_ixr); /* else COR=1 means read sufficed. @@ -514,7 +514,7 @@ static void twl4030_sih_do_mask(struct work_struct *work) return; /* write the whole mask ... 
simpler than subsetting it */ - status = twl4030_i2c_write(sih->module, imr.bytes, + status = twl_i2c_write(sih->module, imr.bytes, sih->mask[irq_line].imr_offset, sih->bytes_ixr); if (status) pr_err("twl4030: %s, %s --> %d\n", __func__, @@ -545,7 +545,7 @@ static void twl4030_sih_do_edge(struct work_struct *work) * any processor on the other IRQ line, EDR registers are * shared. */ - status = twl4030_i2c_read(sih->module, bytes + 1, + status = twl_i2c_read(sih->module, bytes + 1, sih->edr_offset, sih->bytes_edr); if (status) { pr_err("twl4030: %s, %s --> %d\n", __func__, @@ -579,7 +579,7 @@ static void twl4030_sih_do_edge(struct work_struct *work) } /* Write */ - status = twl4030_i2c_write(sih->module, bytes, + status = twl_i2c_write(sih->module, bytes, sih->edr_offset, sih->bytes_edr); if (status) pr_err("twl4030: %s, %s --> %d\n", __func__, @@ -664,7 +664,7 @@ static inline int sih_read_isr(const struct sih *sih) /* FIXME need retry-on-error ... */ isr.word = 0; - status = twl4030_i2c_read(sih->module, isr.bytes, + status = twl_i2c_read(sih->module, isr.bytes, sih->mask[irq_line].isr_offset, sih->bytes_ixr); return (status < 0) ? status : le32_to_cpu(isr.word); diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c index 424b255d6f92..0815292fdafc 100644 --- a/drivers/mfd/twl4030-power.c +++ b/drivers/mfd/twl4030-power.c @@ -131,11 +131,11 @@ static int __init twl4030_write_script_byte(u8 address, u8 byte) { int err; - err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address, + err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address, R_MEMORY_ADDRESS); if (err) goto out; - err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, byte, + err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, byte, R_MEMORY_DATA); out: return err; @@ -192,18 +192,18 @@ static int __init twl4030_config_wakeup3_sequence(u8 address) u8 data; /* Set SLEEP to ACTIVE SEQ address for P3 */ - err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address, + err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address, R_SEQ_ADD_S2A3); if (err) goto out; /* P3 LVL_WAKEUP should be on LEVEL */ - err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data, + err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data, R_P3_SW_EVENTS); if (err) goto out; data |= LVL_WAKEUP; - err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data, + err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data, R_P3_SW_EVENTS); out: if (err) @@ -217,42 +217,42 @@ static int __init twl4030_config_wakeup12_sequence(u8 address) u8 data; /* Set SLEEP to ACTIVE SEQ address for P1 and P2 */ - err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address, + err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address, R_SEQ_ADD_S2A12); if (err) goto out; /* P1/P2 LVL_WAKEUP should be on LEVEL */ - err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data, + err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data, R_P1_SW_EVENTS); if (err) goto out; data |= LVL_WAKEUP; - err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data, + err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data, R_P1_SW_EVENTS); if (err) goto out; - err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data, + err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data, R_P2_SW_EVENTS); if (err) goto out; data |= LVL_WAKEUP; - err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data, + err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data, R_P2_SW_EVENTS); if (err) goto out; if (machine_is_omap_3430sdp() || machine_is_omap_ldp()) { /* Disabling AC charger effect on sleep-active transitions 
*/ - err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data, + err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data, R_CFG_P1_TRANSITION); if (err) goto out; data &= ~(1<<1); - err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data , + err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data , R_CFG_P1_TRANSITION); if (err) goto out; @@ -270,7 +270,7 @@ static int __init twl4030_config_sleep_sequence(u8 address) int err; /* Set ACTIVE to SLEEP SEQ address in T2 memory*/ - err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address, + err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address, R_SEQ_ADD_A2S); if (err) @@ -285,41 +285,41 @@ static int __init twl4030_config_warmreset_sequence(u8 address) u8 rd_data; /* Set WARM RESET SEQ address for P1 */ - err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address, + err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address, R_SEQ_ADD_WARM); if (err) goto out; /* P1/P2/P3 enable WARMRESET */ - err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data, + err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data, R_P1_SW_EVENTS); if (err) goto out; rd_data |= ENABLE_WARMRESET; - err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data, + err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data, R_P1_SW_EVENTS); if (err) goto out; - err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data, + err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data, R_P2_SW_EVENTS); if (err) goto out; rd_data |= ENABLE_WARMRESET; - err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data, + err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data, R_P2_SW_EVENTS); if (err) goto out; - err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data, + err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data, R_P3_SW_EVENTS); if (err) goto out; rd_data |= ENABLE_WARMRESET; - err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data, + err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data, R_P3_SW_EVENTS); out: if (err) @@ -344,8 +344,8 @@ static int __init twl4030_configure_resource(struct twl4030_resconfig *rconfig) rconfig_addr = res_config_addrs[rconfig->resource]; /* Set resource group */ - err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &grp, - rconfig_addr + DEV_GRP_OFFSET); + err = twl_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &grp, + rconfig_addr + DEV_GRP_OFFSET); if (err) { pr_err("TWL4030 Resource %d group could not be read\n", rconfig->resource); @@ -355,8 +355,8 @@ static int __init twl4030_configure_resource(struct twl4030_resconfig *rconfig) if (rconfig->devgroup != TWL4030_RESCONFIG_UNDEF) { grp &= ~DEV_GRP_MASK; grp |= rconfig->devgroup << DEV_GRP_SHIFT; - err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, - grp, rconfig_addr + DEV_GRP_OFFSET); + err = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, + grp, rconfig_addr + DEV_GRP_OFFSET); if (err < 0) { pr_err("TWL4030 failed to program devgroup\n"); return err; @@ -364,7 +364,7 @@ static int __init twl4030_configure_resource(struct twl4030_resconfig *rconfig) } /* Set resource types */ - err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &type, + err = twl_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &type, rconfig_addr + TYPE_OFFSET); if (err < 0) { pr_err("TWL4030 Resource %d type could not be read\n", @@ -382,7 +382,7 @@ static int __init twl4030_configure_resource(struct twl4030_resconfig *rconfig) type |= rconfig->type2 << TYPE2_SHIFT; } - err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, + err = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, type, rconfig_addr + 
TYPE_OFFSET); if (err < 0) { pr_err("TWL4030 failed to program resource type\n"); @@ -390,8 +390,8 @@ static int __init twl4030_configure_resource(struct twl4030_resconfig *rconfig) } /* Set remap states */ - err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &remap, - rconfig_addr + REMAP_OFFSET); + err = twl_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &remap, + rconfig_addr + REMAP_OFFSET); if (err < 0) { pr_err("TWL4030 Resource %d remap could not be read\n", rconfig->resource); @@ -408,9 +408,9 @@ static int __init twl4030_configure_resource(struct twl4030_resconfig *rconfig) remap |= rconfig->remap_off << SLEEP_STATE_SHIFT; } - err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, - remap, - rconfig_addr + REMAP_OFFSET); + err = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, + remap, + rconfig_addr + REMAP_OFFSET); if (err < 0) { pr_err("TWL4030 failed to program remap\n"); return err; @@ -468,12 +468,12 @@ void __init twl4030_power_init(struct twl4030_power_data *twl4030_scripts) struct twl4030_resconfig *resconfig; u8 address = twl4030_start_script_address; - err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, R_KEY_1, + err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, R_KEY_1, R_PROTECT_KEY); if (err) goto unlock; - err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, R_KEY_2, + err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, R_KEY_2, R_PROTECT_KEY); if (err) goto unlock; @@ -496,7 +496,7 @@ void __init twl4030_power_init(struct twl4030_power_data *twl4030_scripts) } } - err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0, R_PROTECT_KEY); + err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0, R_PROTECT_KEY); if (err) pr_err("TWL4030 Unable to relock registers\n"); return; diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c index c8a6e583d773..8cc46e99ccca 100644 --- a/drivers/regulator/twl-regulator.c +++ b/drivers/regulator/twl-regulator.c @@ -64,7 +64,7 @@ twl4030reg_read(struct twlreg_info *info, unsigned offset) u8 value; int status; - status = twl4030_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, + status = twl_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &value, info->base + offset); return (status < 0) ? 
status : value; } @@ -72,7 +72,7 @@ twl4030reg_read(struct twlreg_info *info, unsigned offset) static inline int twl4030reg_write(struct twlreg_info *info, unsigned offset, u8 value) { - return twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, + return twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, value, info->base + offset); } @@ -171,12 +171,12 @@ static int twl4030reg_set_mode(struct regulator_dev *rdev, unsigned mode) if (!(status & (P3_GRP | P2_GRP | P1_GRP))) return -EACCES; - status = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, + status = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, message >> 8, 0x15 /* PB_WORD_MSB */ ); if (status >= 0) return status; - return twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, + return twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, message, 0x16 /* PB_WORD_LSB */ ); } diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c index 93565be12fae..6119712cc8de 100644 --- a/drivers/rtc/rtc-twl.c +++ b/drivers/rtc/rtc-twl.c @@ -92,7 +92,7 @@ static int twl4030_rtc_read_u8(u8 *data, u8 reg) { int ret; - ret = twl4030_i2c_read_u8(TWL4030_MODULE_RTC, data, reg); + ret = twl_i2c_read_u8(TWL4030_MODULE_RTC, data, reg); if (ret < 0) pr_err("twl4030_rtc: Could not read TWL4030" "register %X - error %d\n", reg, ret); @@ -106,7 +106,7 @@ static int twl4030_rtc_write_u8(u8 data, u8 reg) { int ret; - ret = twl4030_i2c_write_u8(TWL4030_MODULE_RTC, data, reg); + ret = twl_i2c_write_u8(TWL4030_MODULE_RTC, data, reg); if (ret < 0) pr_err("twl4030_rtc: Could not write TWL4030" "register %X - error %d\n", reg, ret); @@ -201,7 +201,7 @@ static int twl4030_rtc_read_time(struct device *dev, struct rtc_time *tm) if (ret < 0) return ret; - ret = twl4030_i2c_read(TWL4030_MODULE_RTC, rtc_data, + ret = twl_i2c_read(TWL4030_MODULE_RTC, rtc_data, REG_SECONDS_REG, ALL_TIME_REGS); if (ret < 0) { @@ -243,7 +243,7 @@ static int twl4030_rtc_set_time(struct device *dev, struct rtc_time *tm) goto out; /* update all the time registers in one shot */ - ret = twl4030_i2c_write(TWL4030_MODULE_RTC, rtc_data, + ret = twl_i2c_write(TWL4030_MODULE_RTC, rtc_data, REG_SECONDS_REG, ALL_TIME_REGS); if (ret < 0) { dev_err(dev, "rtc_set_time error %d\n", ret); @@ -266,7 +266,7 @@ static int twl4030_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm) unsigned char rtc_data[ALL_TIME_REGS + 1]; int ret; - ret = twl4030_i2c_read(TWL4030_MODULE_RTC, rtc_data, + ret = twl_i2c_read(TWL4030_MODULE_RTC, rtc_data, REG_ALARM_SECONDS_REG, ALL_TIME_REGS); if (ret < 0) { dev_err(dev, "rtc_read_alarm error %d\n", ret); @@ -305,7 +305,7 @@ static int twl4030_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) alarm_data[6] = bin2bcd(alm->time.tm_year - 100); /* update all the alarm registers in one shot */ - ret = twl4030_i2c_write(TWL4030_MODULE_RTC, alarm_data, + ret = twl_i2c_write(TWL4030_MODULE_RTC, alarm_data, REG_ALARM_SECONDS_REG, ALL_TIME_REGS); if (ret) { dev_err(dev, "rtc_set_alarm error %d\n", ret); @@ -363,7 +363,7 @@ static irqreturn_t twl4030_rtc_interrupt(int irq, void *rtc) * risk wrongly clearing status for some other IRQ (losing * the interrupt). Be smarter about handling RTC_UF ... 
*/ - res = twl4030_i2c_read_u8(TWL4030_MODULE_INT, + res = twl_i2c_read_u8(TWL4030_MODULE_INT, &rd_reg, TWL4030_INT_PWR_ISR1); if (res) goto out; diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c index 3acbdb82bcf9..2be9f2fa41f9 100644 --- a/drivers/usb/otg/twl4030-usb.c +++ b/drivers/usb/otg/twl4030-usb.c @@ -276,16 +276,16 @@ static int twl4030_i2c_write_u8_verify(struct twl4030_usb *twl, { u8 check; - if ((twl4030_i2c_write_u8(module, data, address) >= 0) && - (twl4030_i2c_read_u8(module, &check, address) >= 0) && + if ((twl_i2c_write_u8(module, data, address) >= 0) && + (twl_i2c_read_u8(module, &check, address) >= 0) && (check == data)) return 0; dev_dbg(twl->dev, "Write%d[%d,0x%x] wrote %02x but read %02x\n", 1, module, address, check, data); /* Failed once: Try again */ - if ((twl4030_i2c_write_u8(module, data, address) >= 0) && - (twl4030_i2c_read_u8(module, &check, address) >= 0) && + if ((twl_i2c_write_u8(module, data, address) >= 0) && + (twl_i2c_read_u8(module, &check, address) >= 0) && (check == data)) return 0; dev_dbg(twl->dev, "Write%d[%d,0x%x] wrote %02x but read %02x\n", @@ -303,7 +303,7 @@ static inline int twl4030_usb_write(struct twl4030_usb *twl, { int ret = 0; - ret = twl4030_i2c_write_u8(TWL4030_MODULE_USB, data, address); + ret = twl_i2c_write_u8(TWL4030_MODULE_USB, data, address); if (ret < 0) dev_dbg(twl->dev, "TWL4030:USB:Write[0x%x] Error %d\n", address, ret); @@ -315,7 +315,7 @@ static inline int twl4030_readb(struct twl4030_usb *twl, u8 module, u8 address) u8 data; int ret = 0; - ret = twl4030_i2c_read_u8(module, &data, address); + ret = twl_i2c_read_u8(module, &data, address); if (ret >= 0) ret = data; else @@ -462,7 +462,7 @@ static void twl4030_phy_power(struct twl4030_usb *twl, int on) * SLEEP. We work around this by clearing the bit after usv3v1 * is re-activated. This ensures that VUSB3V1 is really active. 
*/ - twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, + twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2); regulator_enable(twl->usb1v5); pwr &= ~PHY_PWR_PHYPWD; @@ -505,44 +505,44 @@ static void twl4030_phy_resume(struct twl4030_usb *twl) static int twl4030_usb_ldo_init(struct twl4030_usb *twl) { /* Enable writing to power configuration registers */ - twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0xC0, PROTECT_KEY); - twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0x0C, PROTECT_KEY); + twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0xC0, PROTECT_KEY); + twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0x0C, PROTECT_KEY); /* put VUSB3V1 LDO in active state */ - twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2); + twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2); /* input to VUSB3V1 LDO is from VBAT, not VBUS */ - twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0x14, VUSB_DEDICATED1); + twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0x14, VUSB_DEDICATED1); /* Initialize 3.1V regulator */ - twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB3V1_DEV_GRP); + twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB3V1_DEV_GRP); twl->usb3v1 = regulator_get(twl->dev, "usb3v1"); if (IS_ERR(twl->usb3v1)) return -ENODEV; - twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB3V1_TYPE); + twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB3V1_TYPE); /* Initialize 1.5V regulator */ - twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V5_DEV_GRP); + twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V5_DEV_GRP); twl->usb1v5 = regulator_get(twl->dev, "usb1v5"); if (IS_ERR(twl->usb1v5)) goto fail1; - twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V5_TYPE); + twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V5_TYPE); /* Initialize 1.8V regulator */ - twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V8_DEV_GRP); + twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V8_DEV_GRP); twl->usb1v8 = regulator_get(twl->dev, "usb1v8"); if (IS_ERR(twl->usb1v8)) goto fail2; - twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V8_TYPE); + twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V8_TYPE); /* disable access to power configuration registers */ - twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0, PROTECT_KEY); + twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0, PROTECT_KEY); return 0; diff --git a/drivers/video/omap/lcd_2430sdp.c b/drivers/video/omap/lcd_2430sdp.c index 3764a36d9142..e3eccc9af78e 100644 --- a/drivers/video/omap/lcd_2430sdp.c +++ b/drivers/video/omap/lcd_2430sdp.c @@ -52,7 +52,7 @@ static unsigned enable_gpio; #define TWL4030_VPLL2_DEV_GRP 0x33 #define TWL4030_VPLL2_DEDICATED 0x36 -#define t2_out(c, r, v) twl4030_i2c_write_u8(c, r, v) +#define t2_out(c, r, v) twl_i2c_write_u8(c, r, v) static int sdp2430_panel_init(struct lcd_panel *panel, diff --git a/drivers/watchdog/twl4030_wdt.c b/drivers/watchdog/twl4030_wdt.c index 20968b2aadef..8162a40d1522 100644 --- a/drivers/watchdog/twl4030_wdt.c +++ b/drivers/watchdog/twl4030_wdt.c @@ -48,7 +48,7 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started " static int twl4030_wdt_write(unsigned char val) { - return twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, val, + return twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, val, TWL4030_WATCHDOG_CFG_REG_OFFS); } diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h index a50bcf8a4048..0f812f5aa723 100644 --- a/include/linux/i2c/twl.h +++ b/include/linux/i2c/twl.h @@ -22,8 +22,8 @@ * */ -#ifndef 
__TWL4030_H_ -#define __TWL4030_H_ +#ifndef __TWL_H_ +#define __TWL_H_ #include #include @@ -72,20 +72,37 @@ #define TWL4030_MODULE_RTC 0x16 #define TWL4030_MODULE_SECURED_REG 0x17 +#define TWL_MODULE_USB TWL4030_MODULE_USB +#define TWL_MODULE_AUDIO_VOICE TWL4030_MODULE_AUDIO_VOICE +#define TWL_MODULE_PIH TWL4030_MODULE_PIH +#define TWL_MODULE_MADC TWL4030_MODULE_MADC +#define TWL_MODULE_MAIN_CHARGE TWL4030_MODULE_MAIN_CHARGE +#define TWL_MODULE_PM_MASTER TWL4030_MODULE_PM_MASTER +#define TWL_MODULE_PM_RECEIVER TWL4030_MODULE_PM_RECEIVER +#define TWL_MODULE_RTC TWL4030_MODULE_RTC + +#define GPIO_INTR_OFFSET 0 +#define KEYPAD_INTR_OFFSET 1 +#define BCI_INTR_OFFSET 2 +#define MADC_INTR_OFFSET 3 +#define USB_INTR_OFFSET 4 +#define BCI_PRES_INTR_OFFSET 9 +#define USB_PRES_INTR_OFFSET 10 +#define RTC_INTR_OFFSET 11 /* * Read and write single 8-bit registers */ -int twl4030_i2c_write_u8(u8 mod_no, u8 val, u8 reg); -int twl4030_i2c_read_u8(u8 mod_no, u8 *val, u8 reg); +int twl_i2c_write_u8(u8 mod_no, u8 val, u8 reg); +int twl_i2c_read_u8(u8 mod_no, u8 *val, u8 reg); /* * Read and write several 8-bit registers at once. * - * IMPORTANT: For twl4030_i2c_write(), allocate num_bytes + 1 + * IMPORTANT: For twl_i2c_write(), allocate num_bytes + 1 * for the value, and populate your data starting at offset 1. */ -int twl4030_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); -int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); +int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); +int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); /*----------------------------------------------------------------------*/ diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c index c3a6ceb542cb..2a27f7b56726 100644 --- a/sound/soc/codecs/twl4030.c +++ b/sound/soc/codecs/twl4030.c @@ -175,7 +175,7 @@ static int twl4030_write(struct snd_soc_codec *codec, { twl4030_write_reg_cache(codec, reg, value); if (likely(reg < TWL4030_REG_SW_SHADOW)) - return twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, value, + return twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, value, reg); else return 0; @@ -261,7 +261,7 @@ static void twl4030_power_up(struct snd_soc_codec *codec) do { /* this takes a little while, so don't slam i2c */ udelay(2000); - twl4030_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &byte, + twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &byte, TWL4030_REG_ANAMICL); } while ((i++ < 100) && ((byte & TWL4030_CNCL_OFFSET_START) == @@ -542,7 +542,7 @@ static int pin_name##pga_event(struct snd_soc_dapm_widget *w, \ break; \ case SND_SOC_DAPM_POST_PMD: \ reg_val = twl4030_read_reg_cache(w->codec, reg); \ - twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, \ + twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, \ reg_val & (~mask), \ reg); \ break; \ @@ -679,7 +679,7 @@ static void headset_ramp(struct snd_soc_codec *codec, int ramp) mdelay((ramp_base[(hs_pop & TWL4030_RAMP_DELAY) >> 2] / twl4030->sysclk) + 1); /* Bypass the reg_cache to mute the headset */ - twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, + twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, hs_gain & (~0x0f), TWL4030_REG_HS_GAIN_SET); -- cgit v1.2.3 From e8deb28ca8e221de0239eafb3c3d431d8854278e Mon Sep 17 00:00:00 2001 From: Balaji T K Date: Mon, 14 Dec 2009 00:25:31 +0100 Subject: mfd: Add support for twl6030 irq framework This patch adds support for phoenix interrupt framework. New iInterrupt status register A, B, C are introduced in Phoenix and are cleared on write. 
Due to the differences in interrupt handling with respect to TWL4030, twl6030-irq.c is created for TWL6030 PMIC Signed-off-by: Rajendra Nayak Signed-off-by: Balaji T K Signed-off-by: Santosh Shilimkar Reviewed-by: Tony Lindgren Signed-off-by: Samuel Ortiz --- arch/arm/plat-omap/include/plat/irqs.h | 16 +- drivers/mfd/Kconfig | 4 +- drivers/mfd/Makefile | 2 +- drivers/mfd/twl-core.c | 120 +++++++++++-- drivers/mfd/twl4030-irq.c | 6 +- drivers/mfd/twl6030-irq.c | 299 +++++++++++++++++++++++++++++++++ include/linux/i2c/twl.h | 64 +++++++ 7 files changed, 490 insertions(+), 21 deletions(-) create mode 100644 drivers/mfd/twl6030-irq.c (limited to 'include') diff --git a/arch/arm/plat-omap/include/plat/irqs.h b/arch/arm/plat-omap/include/plat/irqs.h index ce5dd2d1dc21..97d6c50c3dcb 100644 --- a/arch/arm/plat-omap/include/plat/irqs.h +++ b/arch/arm/plat-omap/include/plat/irqs.h @@ -472,8 +472,22 @@ #endif #define TWL4030_GPIO_IRQ_END (TWL4030_GPIO_IRQ_BASE + TWL4030_GPIO_NR_IRQS) +#define TWL6030_IRQ_BASE (OMAP_FPGA_IRQ_END) +#ifdef CONFIG_TWL4030_CORE +#define TWL6030_BASE_NR_IRQS 20 +#else +#define TWL6030_BASE_NR_IRQS 0 +#endif +#define TWL6030_IRQ_END (TWL6030_IRQ_BASE + TWL6030_BASE_NR_IRQS) + /* Total number of interrupts depends on the enabled blocks above */ -#define NR_IRQS TWL4030_GPIO_IRQ_END +#if (TWL4030_GPIO_IRQ_END > TWL6030_IRQ_END) +#define TWL_IRQ_END TWL4030_GPIO_IRQ_END +#else +#define TWL_IRQ_END TWL6030_IRQ_END +#endif + +#define NR_IRQS TWL_IRQ_END #define OMAP_IRQ_BIT(irq) (1 << ((irq) % 32)) diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index b23343cdc196..87829789243e 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -103,10 +103,10 @@ config MENELAUS cell phones and PDAs. config TWL4030_CORE - bool "Texas Instruments TWL4030/TPS659x0 Support" + bool "Texas Instruments TWL4030/TWL5030/TWL6030/TPS659x0 Support" depends on I2C=y && GENERIC_HARDIRQS help - Say yes here if you have TWL4030 family chip on your board. + Say yes here if you have TWL4030 / TWL6030 family chip on your board. This core driver provides register access and IRQ handling facilities, and registers devices for the various functions so that function-specific drivers can bind to them. 
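Before the driver changes that follow, a minimal illustrative sketch (not part of the patch) of what the clear-on-write handling described above amounts to. REG_INT_STS_A and TWL_MODULE_PIH are the names this patch introduces further down, and the buffer layout follows the twl_i2c_write() rule from twl.h: allocate num_bytes + 1 and place the data starting at offset 1.

    #include <linux/i2c/twl.h>

    /* sketch only: read the three status registers and acknowledge them */
    static int twl6030_ack_int_status(void)
    {
            u8 buf[4];      /* byte 0 is reserved for the register address */
            int ret;

            /* burst-read INT_STS_A, INT_STS_B and INT_STS_C in one shot */
            ret = twl_i2c_read(TWL_MODULE_PIH, buf + 1, REG_INT_STS_A, 3);
            if (ret < 0)
                    return ret;

            /* the registers are clear-on-write: writing the same bits back
             * acknowledges exactly the interrupts that were just observed */
            return twl_i2c_write(TWL_MODULE_PIH, buf, REG_INT_STS_A, 3);
    }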
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index f4d14b7589bf..ca2f2c4ff05e 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -26,7 +26,7 @@ obj-$(CONFIG_MFD_WM8350_I2C) += wm8350-i2c.o obj-$(CONFIG_TPS65010) += tps65010.o obj-$(CONFIG_MENELAUS) += menelaus.o -obj-$(CONFIG_TWL4030_CORE) += twl-core.o twl4030-irq.o +obj-$(CONFIG_TWL4030_CORE) += twl-core.o twl4030-irq.o twl6030-irq.o obj-$(CONFIG_TWL4030_POWER) += twl4030-power.o obj-$(CONFIG_TWL4030_CODEC) += twl4030-codec.o diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c index 79946fe800af..c48a6138c575 100644 --- a/drivers/mfd/twl-core.c +++ b/drivers/mfd/twl-core.c @@ -181,6 +181,30 @@ /* Triton Core internal information (END) */ +/* subchip/slave 0 0x48 - POWER */ +#define TWL6030_BASEADD_RTC 0x0000 +#define TWL6030_BASEADD_MEM 0x0017 +#define TWL6030_BASEADD_PM_MASTER 0x001F +#define TWL6030_BASEADD_PM_SLAVE_MISC 0x0030 /* PM_RECEIVER */ +#define TWL6030_BASEADD_PM_MISC 0x00E2 +#define TWL6030_BASEADD_PM_PUPD 0x00F0 + +/* subchip/slave 1 0x49 - FEATURE */ +#define TWL6030_BASEADD_USB 0x0000 +#define TWL6030_BASEADD_GPADC_CTRL 0x002E +#define TWL6030_BASEADD_AUX 0x0090 +#define TWL6030_BASEADD_PWM 0x00BA +#define TWL6030_BASEADD_GASGAUGE 0x00C0 +#define TWL6030_BASEADD_PIH 0x00D0 +#define TWL6030_BASEADD_CHARGER 0x00E0 + +/* subchip/slave 2 0x4A - DFT */ +#define TWL6030_BASEADD_DIEID 0x00C0 + +/* subchip/slave 3 0x4B - AUDIO */ +#define TWL6030_BASEADD_AUDIO 0x0000 +#define TWL6030_BASEADD_RSV 0x0000 + /* Few power values */ #define R_CFG_BOOT 0x05 #define R_PROTECT_KEY 0x0E @@ -202,13 +226,21 @@ #define TWL4030_VAUX2 BIT(0) /* pre-5030 voltage ranges */ #define TPS_SUBSET BIT(1) /* tps659[23]0 have fewer LDOs */ #define TWL5031 BIT(2) /* twl5031 has different registers */ +#define TWL6030_CLASS BIT(3) /* TWL6030 class */ /*----------------------------------------------------------------------*/ /* is driver active, bound to a chip? */ static bool inuse; -/* Structure for each TWL4030 Slave */ +static unsigned int twl_id; +unsigned int twl_rev(void) +{ + return twl_id; +} +EXPORT_SYMBOL(twl_rev); + +/* Structure for each TWL4030/TWL6030 Slave */ struct twl_client { struct i2c_client *client; u8 address; @@ -228,11 +260,12 @@ struct twl_mapping { unsigned char sid; /* Slave ID */ unsigned char base; /* base address */ }; +struct twl_mapping *twl_map; static struct twl_mapping twl4030_map[TWL4030_MODULE_LAST + 1] = { /* * NOTE: don't change this table without updating the - * defines for TWL4030_MODULE_* + * defines for TWL4030_MODULE_* * so they continue to match the order in this table. */ @@ -265,6 +298,40 @@ static struct twl_mapping twl4030_map[TWL4030_MODULE_LAST + 1] = { { 3, TWL4030_BASEADD_SECURED_REG }, }; +static struct twl_mapping twl6030_map[] = { + /* + * NOTE: don't change this table without updating the + * defines for TWL4030_MODULE_* + * so they continue to match the order in this table. 
+ */ + { SUB_CHIP_ID1, TWL6030_BASEADD_USB }, + { SUB_CHIP_ID3, TWL6030_BASEADD_AUDIO }, + { SUB_CHIP_ID2, TWL6030_BASEADD_DIEID }, + { SUB_CHIP_ID2, TWL6030_BASEADD_RSV }, + { SUB_CHIP_ID1, TWL6030_BASEADD_PIH }, + + { SUB_CHIP_ID2, TWL6030_BASEADD_RSV }, + { SUB_CHIP_ID2, TWL6030_BASEADD_RSV }, + { SUB_CHIP_ID1, TWL6030_BASEADD_GPADC_CTRL }, + { SUB_CHIP_ID2, TWL6030_BASEADD_RSV }, + { SUB_CHIP_ID2, TWL6030_BASEADD_RSV }, + + { SUB_CHIP_ID1, TWL6030_BASEADD_CHARGER }, + { SUB_CHIP_ID1, TWL6030_BASEADD_GASGAUGE }, + { SUB_CHIP_ID1, TWL6030_BASEADD_PWM }, + { SUB_CHIP_ID2, TWL6030_BASEADD_RSV }, + { SUB_CHIP_ID2, TWL6030_BASEADD_RSV }, + + { SUB_CHIP_ID2, TWL6030_BASEADD_RSV }, + { SUB_CHIP_ID2, TWL6030_BASEADD_RSV }, + { SUB_CHIP_ID2, TWL6030_BASEADD_RSV }, + { SUB_CHIP_ID0, TWL6030_BASEADD_PM_MASTER }, + { SUB_CHIP_ID0, TWL6030_BASEADD_PM_SLAVE_MISC }, + + { SUB_CHIP_ID0, TWL6030_BASEADD_RTC }, + { SUB_CHIP_ID0, TWL6030_BASEADD_MEM }, +}; + /*----------------------------------------------------------------------*/ /* Exported Functions */ @@ -292,7 +359,7 @@ int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes) pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no); return -EPERM; } - sid = twl4030_map[mod_no].sid; + sid = twl_map[mod_no].sid; twl = &twl_modules[sid]; if (unlikely(!inuse)) { @@ -310,7 +377,7 @@ int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes) msg->flags = 0; msg->buf = value; /* over write the first byte of buffer with the register address */ - *value = twl4030_map[mod_no].base + reg; + *value = twl_map[mod_no].base + reg; ret = i2c_transfer(twl->client->adapter, twl->xfer_msg, 1); mutex_unlock(&twl->xfer_lock); @@ -349,7 +416,7 @@ int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes) pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no); return -EPERM; } - sid = twl4030_map[mod_no].sid; + sid = twl_map[mod_no].sid; twl = &twl_modules[sid]; if (unlikely(!inuse)) { @@ -362,7 +429,7 @@ int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes) msg->addr = twl->address; msg->len = 1; msg->flags = 0; /* Read the register value */ - val = twl4030_map[mod_no].base + reg; + val = twl_map[mod_no].base + reg; msg->buf = &val; /* [MSG2] fill the data rx buffer */ msg = &twl->xfer_msg[1]; @@ -486,6 +553,7 @@ add_regulator_linked(int num, struct regulator_init_data *pdata, struct regulator_consumer_supply *consumers, unsigned num_consumers) { + unsigned sub_chip_id; /* regulator framework demands init_data ... */ if (!pdata) return NULL; @@ -496,7 +564,8 @@ add_regulator_linked(int num, struct regulator_init_data *pdata, } /* NOTE: we currently ignore regulator IRQs, e.g. for short circuits */ - return add_numbered_child(3, "twl_reg", num, + sub_chip_id = twl_map[TWL_MODULE_PM_MASTER].sid; + return add_numbered_child(sub_chip_id, "twl_reg", num, pdata, sizeof(*pdata), false, 0, 0); } @@ -516,6 +585,7 @@ static int add_children(struct twl4030_platform_data *pdata, unsigned long features) { struct device *child; + unsigned sub_chip_id; if (twl_has_bci() && pdata->bci && !(features & (TPS_SUBSET | TWL5031))) { @@ -561,7 +631,8 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features) * Eventually, Linux might become more aware of such * HW security concerns, and "least privilege". 
*/ - child = add_child(3, "twl_rtc", + sub_chip_id = twl_map[TWL_MODULE_RTC].sid; + child = add_child(sub_chip_id, "twl_rtc", NULL, 0, true, pdata->irq_base + RTC_INTR_OFFSET, 0); if (IS_ERR(child)) @@ -812,16 +883,22 @@ static void clocks_init(struct device *dev, /*----------------------------------------------------------------------*/ -int twl_init_irq(int irq_num, unsigned irq_base, unsigned irq_end); -int twl_exit_irq(void); -int twl_init_chip_irq(const char *chip); +int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end); +int twl4030_exit_irq(void); +int twl4030_init_chip_irq(const char *chip); +int twl6030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end); +int twl6030_exit_irq(void); static int twl_remove(struct i2c_client *client) { unsigned i; int status; - status = twl_exit_irq(); + if (twl_class_is_4030()) + status = twl4030_exit_irq(); + else + status = twl6030_exit_irq(); + if (status < 0) return status; @@ -878,6 +955,13 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id) mutex_init(&twl->xfer_lock); } inuse = true; + if ((id->driver_data) & TWL6030_CLASS) { + twl_id = TWL6030_CLASS_ID; + twl_map = &twl6030_map[0]; + } else { + twl_id = TWL4030_CLASS_ID; + twl_map = &twl4030_map[0]; + } /* setup clock framework */ clocks_init(&client->dev, pdata->clock); @@ -890,8 +974,15 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id) if (client->irq && pdata->irq_base && pdata->irq_end > pdata->irq_base) { - twl_init_chip_irq(id->name); - status = twl_init_irq(client->irq, pdata->irq_base, pdata->irq_end); + if (twl_class_is_4030()) { + twl4030_init_chip_irq(id->name); + status = twl4030_init_irq(client->irq, pdata->irq_base, + pdata->irq_end); + } else { + status = twl6030_init_irq(client->irq, pdata->irq_base, + pdata->irq_end); + } + if (status < 0) goto fail; } @@ -910,6 +1001,7 @@ static const struct i2c_device_id twl_ids[] = { { "tps65950", 0 }, /* catalog version of twl5030 */ { "tps65930", TPS_SUBSET }, /* fewer LDOs and DACs; no charger */ { "tps65920", TPS_SUBSET }, /* fewer LDOs; no codec or charger */ + { "twl6030", TWL6030_CLASS }, /* "Phoenix power chip" */ { /* end of list */ }, }; MODULE_DEVICE_TABLE(i2c, twl_ids); diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c index 5a62cf916987..20d29bafc9f5 100644 --- a/drivers/mfd/twl4030-irq.c +++ b/drivers/mfd/twl4030-irq.c @@ -778,7 +778,7 @@ int twl4030_sih_setup(int module) /* FIXME pass in which interrupt line we'll use ... */ #define twl_irq_line 0 -int twl_init_irq(int irq_num, unsigned irq_base, unsigned irq_end) +int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end) { static struct irq_chip twl4030_irq_chip; @@ -858,7 +858,7 @@ fail: return status; } -int twl_exit_irq(void) +int twl4030_exit_irq(void) { /* FIXME undo twl_init_irq() */ if (twl4030_irq_base) { @@ -868,7 +868,7 @@ int twl_exit_irq(void) return 0; } -int twl_init_chip_irq(const char *chip) +int twl4030_init_chip_irq(const char *chip) { if (!strcmp(chip, "twl5031")) { sih_modules = sih_modules_twl5031; diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c new file mode 100644 index 000000000000..10bf228ad626 --- /dev/null +++ b/drivers/mfd/twl6030-irq.c @@ -0,0 +1,299 @@ +/* + * twl6030-irq.c - TWL6030 irq support + * + * Copyright (C) 2005-2009 Texas Instruments, Inc. + * + * Modifications to defer interrupt handling to a kernel thread: + * Copyright (C) 2006 MontaVista Software, Inc. 
+ * + * Based on tlv320aic23.c: + * Copyright (c) by Kai Svahn + * + * Code cleanup and modifications to IRQ handler. + * by syed khasim + * + * TWL6030 specific code and IRQ handling changes by + * Jagadeesh Bhaskar Pakaravoor + * Balaji T K + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include + +/* + * TWL6030 (unlike its predecessors, which had two level interrupt handling) + * three interrupt registers INT_STS_A, INT_STS_B and INT_STS_C. + * It exposes status bits saying who has raised an interrupt. There are + * three mask registers that corresponds to these status registers, that + * enables/disables these interrupts. + * + * We set up IRQs starting at a platform-specified base. An interrupt map table, + * specifies mapping between interrupt number and the associated module. + * + */ + +static int twl6030_interrupt_mapping[24] = { + PWR_INTR_OFFSET, /* Bit 0 PWRON */ + PWR_INTR_OFFSET, /* Bit 1 RPWRON */ + PWR_INTR_OFFSET, /* Bit 2 BAT_VLOW */ + RTC_INTR_OFFSET, /* Bit 3 RTC_ALARM */ + RTC_INTR_OFFSET, /* Bit 4 RTC_PERIOD */ + HOTDIE_INTR_OFFSET, /* Bit 5 HOT_DIE */ + SMPSLDO_INTR_OFFSET, /* Bit 6 VXXX_SHORT */ + SMPSLDO_INTR_OFFSET, /* Bit 7 VMMC_SHORT */ + + SMPSLDO_INTR_OFFSET, /* Bit 8 VUSIM_SHORT */ + BATDETECT_INTR_OFFSET, /* Bit 9 BAT */ + SIMDETECT_INTR_OFFSET, /* Bit 10 SIM */ + MMCDETECT_INTR_OFFSET, /* Bit 11 MMC */ + RSV_INTR_OFFSET, /* Bit 12 Reserved */ + MADC_INTR_OFFSET, /* Bit 13 GPADC_RT_EOC */ + MADC_INTR_OFFSET, /* Bit 14 GPADC_SW_EOC */ + GASGAUGE_INTR_OFFSET, /* Bit 15 CC_AUTOCAL */ + + USBOTG_INTR_OFFSET, /* Bit 16 ID_WKUP */ + USBOTG_INTR_OFFSET, /* Bit 17 VBUS_WKUP */ + USBOTG_INTR_OFFSET, /* Bit 18 ID */ + USBOTG_INTR_OFFSET, /* Bit 19 VBUS */ + CHARGER_INTR_OFFSET, /* Bit 20 CHRG_CTRL */ + CHARGER_INTR_OFFSET, /* Bit 21 EXT_CHRG */ + CHARGER_INTR_OFFSET, /* Bit 22 INT_CHRG */ + RSV_INTR_OFFSET, /* Bit 23 Reserved */ +}; +/*----------------------------------------------------------------------*/ + +static unsigned twl6030_irq_base; + +static struct completion irq_event; + +/* + * This thread processes interrupts reported by the Primary Interrupt Handler. 
+ */ +static int twl6030_irq_thread(void *data) +{ + long irq = (long)data; + static unsigned i2c_errors; + static const unsigned max_i2c_errors = 100; + int ret; + + current->flags |= PF_NOFREEZE; + + while (!kthread_should_stop()) { + int i; + union { + u8 bytes[4]; + u32 int_sts; + } sts; + + /* Wait for IRQ, then read PIH irq status (also blocking) */ + wait_for_completion_interruptible(&irq_event); + + /* read INT_STS_A, B and C in one shot using a burst read */ + ret = twl_i2c_read(TWL_MODULE_PIH, sts.bytes, + REG_INT_STS_A, 3); + if (ret) { + pr_warning("twl6030: I2C error %d reading PIH ISR\n", + ret); + if (++i2c_errors >= max_i2c_errors) { + printk(KERN_ERR "Maximum I2C error count" + " exceeded. Terminating %s.\n", + __func__); + break; + } + complete(&irq_event); + continue; + } + + + + sts.bytes[3] = 0; /* Only 24 bits are valid*/ + + for (i = 0; sts.int_sts; sts.int_sts >>= 1, i++) { + local_irq_disable(); + if (sts.int_sts & 0x1) { + int module_irq = twl6030_irq_base + + twl6030_interrupt_mapping[i]; + struct irq_desc *d = irq_to_desc(module_irq); + + if (!d) { + pr_err("twl6030: Invalid SIH IRQ: %d\n", + module_irq); + return -EINVAL; + } + + /* These can't be masked ... always warn + * if we get any surprises. + */ + if (d->status & IRQ_DISABLED) + note_interrupt(module_irq, d, + IRQ_NONE); + else + d->handle_irq(module_irq, d); + + } + local_irq_enable(); + } + ret = twl_i2c_write(TWL_MODULE_PIH, sts.bytes, + REG_INT_STS_A, 3); /* clear INT_STS_A */ + if (ret) + pr_warning("twl6030: I2C error in clearing PIH ISR\n"); + + enable_irq(irq); + } + + return 0; +} + +/* + * handle_twl6030_int() is the desc->handle method for the twl6030 interrupt. + * This is a chained interrupt, so there is no desc->action method for it. + * Now we need to query the interrupt controller in the twl6030 to determine + * which module is generating the interrupt request. However, we can't do i2c + * transactions in interrupt context, so we must defer that work to a kernel + * thread. All we do here is acknowledge and mask the interrupt and wakeup + * the kernel thread. + */ +static irqreturn_t handle_twl6030_pih(int irq, void *devid) +{ + disable_irq_nosync(irq); + complete(devid); + return IRQ_HANDLED; +} + +/*----------------------------------------------------------------------*/ + +static inline void activate_irq(int irq) +{ +#ifdef CONFIG_ARM + /* ARM requires an extra step to clear IRQ_NOREQUEST, which it + * sets on behalf of every irq_chip. Also sets IRQ_NOPROBE. 
+ */ + set_irq_flags(irq, IRQF_VALID); +#else + /* same effect on other architectures */ + set_irq_noprobe(irq); +#endif +} + +/*----------------------------------------------------------------------*/ + +static unsigned twl6030_irq_next; + +/*----------------------------------------------------------------------*/ +int twl6030_interrupt_unmask(u8 bit_mask, u8 offset) +{ + int ret; + u8 unmask_value; + ret = twl_i2c_read_u8(TWL_MODULE_PIH, &unmask_value, + REG_INT_STS_A + offset); + unmask_value &= (~(bit_mask)); + ret |= twl_i2c_write_u8(TWL_MODULE_PIH, unmask_value, + REG_INT_STS_A + offset); /* unmask INT_MSK_A/B/C */ + return ret; +} +EXPORT_SYMBOL(twl6030_interrupt_unmask); + +int twl6030_interrupt_mask(u8 bit_mask, u8 offset) +{ + int ret; + u8 mask_value; + ret = twl_i2c_read_u8(TWL_MODULE_PIH, &mask_value, + REG_INT_STS_A + offset); + mask_value |= (bit_mask); + ret |= twl_i2c_write_u8(TWL_MODULE_PIH, mask_value, + REG_INT_STS_A + offset); /* mask INT_MSK_A/B/C */ + return ret; +} +EXPORT_SYMBOL(twl6030_interrupt_mask); + +int twl6030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end) +{ + + int status = 0; + int i; + struct task_struct *task; + int ret; + u8 mask[4]; + + static struct irq_chip twl6030_irq_chip; + mask[1] = 0xFF; + mask[2] = 0xFF; + mask[3] = 0xFF; + ret = twl_i2c_write(TWL_MODULE_PIH, &mask[0], + REG_INT_MSK_LINE_A, 3); /* MASK ALL INT LINES */ + ret = twl_i2c_write(TWL_MODULE_PIH, &mask[0], + REG_INT_MSK_STS_A, 3); /* MASK ALL INT STS */ + ret = twl_i2c_write(TWL_MODULE_PIH, &mask[0], + REG_INT_STS_A, 3); /* clear INT_STS_A,B,C */ + + twl6030_irq_base = irq_base; + + /* install an irq handler for each of the modules; + * clone dummy irq_chip since PIH can't *do* anything + */ + twl6030_irq_chip = dummy_irq_chip; + twl6030_irq_chip.name = "twl6030"; + twl6030_irq_chip.set_type = NULL; + + for (i = irq_base; i < irq_end; i++) { + set_irq_chip_and_handler(i, &twl6030_irq_chip, + handle_simple_irq); + activate_irq(i); + } + + twl6030_irq_next = i; + pr_info("twl6030: %s (irq %d) chaining IRQs %d..%d\n", "PIH", + irq_num, irq_base, twl6030_irq_next - 1); + + /* install an irq handler to demultiplex the TWL6030 interrupt */ + init_completion(&irq_event); + task = kthread_run(twl6030_irq_thread, (void *)irq_num, "twl6030-irq"); + if (IS_ERR(task)) { + pr_err("twl6030: could not create irq %d thread!\n", irq_num); + status = PTR_ERR(task); + goto fail_kthread; + } + + status = request_irq(irq_num, handle_twl6030_pih, IRQF_DISABLED, + "TWL6030-PIH", &irq_event); + if (status < 0) { + pr_err("twl6030: could not claim irq%d: %d\n", irq_num, status); + goto fail_irq; + } + return status; +fail_irq: + free_irq(irq_num, &irq_event); + +fail_kthread: + for (i = irq_base; i < irq_end; i++) + set_irq_chip_and_handler(i, NULL, NULL); + return status; +} + +int twl6030_exit_irq(void) +{ + + if (twl6030_irq_base) { + pr_err("twl6030: can't yet clean up IRQs?\n"); + return -ENOSYS; + } + return 0; +} + diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h index 0f812f5aa723..8e7405d9c624 100644 --- a/include/linux/i2c/twl.h +++ b/include/linux/i2c/twl.h @@ -89,6 +89,67 @@ #define BCI_PRES_INTR_OFFSET 9 #define USB_PRES_INTR_OFFSET 10 #define RTC_INTR_OFFSET 11 + +/* + * Offset from TWL6030_IRQ_BASE / pdata->irq_base + */ +#define PWR_INTR_OFFSET 0 +#define HOTDIE_INTR_OFFSET 12 +#define SMPSLDO_INTR_OFFSET 13 +#define BATDETECT_INTR_OFFSET 14 +#define SIMDETECT_INTR_OFFSET 15 +#define MMCDETECT_INTR_OFFSET 16 +#define GASGAUGE_INTR_OFFSET 17 +#define 
USBOTG_INTR_OFFSET 4 +#define CHARGER_INTR_OFFSET 2 +#define RSV_INTR_OFFSET 0 + +/* INT register offsets */ +#define REG_INT_STS_A 0x00 +#define REG_INT_STS_B 0x01 +#define REG_INT_STS_C 0x02 + +#define REG_INT_MSK_LINE_A 0x03 +#define REG_INT_MSK_LINE_B 0x04 +#define REG_INT_MSK_LINE_C 0x05 + +#define REG_INT_MSK_STS_A 0x06 +#define REG_INT_MSK_STS_B 0x07 +#define REG_INT_MSK_STS_C 0x08 + +/* MASK INT REG GROUP A */ +#define TWL6030_PWR_INT_MASK 0x07 +#define TWL6030_RTC_INT_MASK 0x18 +#define TWL6030_HOTDIE_INT_MASK 0x20 +#define TWL6030_SMPSLDOA_INT_MASK 0xC0 + +/* MASK INT REG GROUP B */ +#define TWL6030_SMPSLDOB_INT_MASK 0x01 +#define TWL6030_BATDETECT_INT_MASK 0x02 +#define TWL6030_SIMDETECT_INT_MASK 0x04 +#define TWL6030_MMCDETECT_INT_MASK 0x08 +#define TWL6030_GPADC_INT_MASK 0x60 +#define TWL6030_GASGAUGE_INT_MASK 0x80 + +/* MASK INT REG GROUP C */ +#define TWL6030_USBOTG_INT_MASK 0x0F +#define TWL6030_CHARGER_CTRL_INT_MASK 0x10 +#define TWL6030_CHARGER_FAULT_INT_MASK 0x60 + + +#define TWL4030_CLASS_ID 0x4030 +#define TWL6030_CLASS_ID 0x6030 +unsigned int twl_rev(void); +#define GET_TWL_REV (twl_rev()) +#define TWL_CLASS_IS(class, id) \ +static inline int twl_class_is_ ##class(void) \ +{ \ + return ((id) == (GET_TWL_REV)) ? 1 : 0; \ +} + +TWL_CLASS_IS(4030, TWL4030_CLASS_ID) +TWL_CLASS_IS(6030, TWL6030_CLASS_ID) + /* * Read and write single 8-bit registers */ @@ -104,6 +165,9 @@ int twl_i2c_read_u8(u8 mod_no, u8 *val, u8 reg); int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); +int twl6030_interrupt_unmask(u8 bit_mask, u8 offset); +int twl6030_interrupt_mask(u8 bit_mask, u8 offset); + /*----------------------------------------------------------------------*/ /* -- cgit v1.2.3 From 441a450554dada1c59fc06fdf068cb0eeba53c6d Mon Sep 17 00:00:00 2001 From: Rajendra Nayak Date: Sun, 13 Dec 2009 22:19:23 +0100 Subject: regulator: Add support for twl6030 regulators This patch updates the regulator driver to add support for TWL6030 PMIC specific LDO regulators. SMPS resources are not yet supported for TWL6030 and also .set_mode and .get_status for LDO's are yet to be implemented for TWL6030. Signed-off-by: Rajendra Nayak Signed-off-by: Balaji T K Acked-by: Mark Brown Reviewed-by: Tony Lindgren Signed-off-by: Samuel Ortiz --- drivers/regulator/Kconfig | 2 +- drivers/regulator/twl-regulator.c | 116 +++++++++++++++++++++++++++++++------- include/linux/i2c/twl.h | 34 +++++++++++ 3 files changed, 130 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index bcbb161bde0b..7cfdd65bebb4 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig @@ -70,7 +70,7 @@ config REGULATOR_MAX1586 for PXA27x chips to control VCC_CORE and VCC_USIM voltages. config REGULATOR_TWL4030 - bool "TI TWL4030/TWL5030/TPS695x0 PMIC" + bool "TI TWL4030/TWL5030/TWL6030/TPS695x0 PMIC" depends on TWL4030_CORE help This driver supports the voltage regulators provided by diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c index 8e1b68a20ef0..7ea1c3a31081 100644 --- a/drivers/regulator/twl-regulator.c +++ b/drivers/regulator/twl-regulator.c @@ -52,27 +52,38 @@ struct twlreg_info { * The first three registers of all power resource banks help hardware to * manage the various resource groups. 
*/ +/* Common offset in TWL4030/6030 */ #define VREG_GRP 0 +/* TWL4030 register offsets */ #define VREG_TYPE 1 #define VREG_REMAP 2 #define VREG_DEDICATED 3 /* LDO control */ - +/* TWL6030 register offsets */ +#define VREG_TRANS 1 +#define VREG_STATE 2 +#define VREG_VOLTAGE 3 +/* TWL6030 Misc register offsets */ +#define VREG_BC_ALL 1 +#define VREG_BC_REF 2 +#define VREG_BC_PROC 3 +#define VREG_BC_CLK_RST 4 static inline int -twlreg_read(struct twlreg_info *info, unsigned offset) +twlreg_read(struct twlreg_info *info, unsigned slave_subgp, unsigned offset) { u8 value; int status; - status = twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, + status = twl_i2c_read_u8(slave_subgp, &value, info->base + offset); return (status < 0) ? status : value; } static inline int -twlreg_write(struct twlreg_info *info, unsigned offset, u8 value) +twlreg_write(struct twlreg_info *info, unsigned slave_subgp, unsigned offset, + u8 value) { - return twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER, + return twl_i2c_write_u8(slave_subgp, value, info->base + offset); } @@ -82,17 +93,22 @@ twlreg_write(struct twlreg_info *info, unsigned offset, u8 value) static int twlreg_grp(struct regulator_dev *rdev) { - return twlreg_read(rdev_get_drvdata(rdev), VREG_GRP); + return twlreg_read(rdev_get_drvdata(rdev), TWL_MODULE_PM_RECEIVER, + VREG_GRP); } /* * Enable/disable regulators by joining/leaving the P1 (processor) group. * We assume nobody else is updating the DEV_GRP registers. */ - -#define P3_GRP BIT(7) /* "peripherals" */ -#define P2_GRP BIT(6) /* secondary processor, modem, etc */ -#define P1_GRP BIT(5) /* CPU/Linux */ +/* definition for 4030 family */ +#define P3_GRP_4030 BIT(7) /* "peripherals" */ +#define P2_GRP_4030 BIT(6) /* secondary processor, modem, etc */ +#define P1_GRP_4030 BIT(5) /* CPU/Linux */ +/* definition for 6030 family */ +#define P3_GRP_6030 BIT(2) /* secondary processor, modem, etc */ +#define P2_GRP_6030 BIT(1) /* "peripherals" */ +#define P1_GRP_6030 BIT(0) /* CPU/Linux */ static int twlreg_is_enabled(struct regulator_dev *rdev) { @@ -101,7 +117,11 @@ static int twlreg_is_enabled(struct regulator_dev *rdev) if (state < 0) return state; - return (state & P1_GRP) != 0; + if (twl_class_is_4030()) + state &= P1_GRP_4030; + else + state &= P1_GRP_6030; + return state; } static int twlreg_enable(struct regulator_dev *rdev) @@ -109,12 +129,16 @@ static int twlreg_enable(struct regulator_dev *rdev) struct twlreg_info *info = rdev_get_drvdata(rdev); int grp; - grp = twlreg_read(info, VREG_GRP); + grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP); if (grp < 0) return grp; - grp |= P1_GRP; - return twlreg_write(info, VREG_GRP, grp); + if (twl_class_is_4030()) + grp |= P1_GRP_4030; + else + grp |= P1_GRP_6030; + + return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp); } static int twlreg_disable(struct regulator_dev *rdev) @@ -122,18 +146,25 @@ static int twlreg_disable(struct regulator_dev *rdev) struct twlreg_info *info = rdev_get_drvdata(rdev); int grp; - grp = twlreg_read(info, VREG_GRP); + grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP); if (grp < 0) return grp; - grp &= ~P1_GRP; - return twlreg_write(info, VREG_GRP, grp); + if (twl_class_is_4030()) + grp &= ~P1_GRP_4030; + else + grp &= ~P1_GRP_6030; + + return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp); } static int twlreg_get_status(struct regulator_dev *rdev) { int state = twlreg_grp(rdev); + if (twl_class_is_6030()) + return 0; /* FIXME return for 6030 regulator */ + if (state < 0) return state; state &= 0x0f; @@ 
-152,6 +183,9 @@ static int twlreg_set_mode(struct regulator_dev *rdev, unsigned mode) unsigned message; int status; + if (twl_class_is_6030()) + return 0; /* FIXME return for 6030 regulator */ + /* We can only set the mode through state machine commands... */ switch (mode) { case REGULATOR_MODE_NORMAL: @@ -168,7 +202,7 @@ static int twlreg_set_mode(struct regulator_dev *rdev, unsigned mode) status = twlreg_grp(rdev); if (status < 0) return status; - if (!(status & (P3_GRP | P2_GRP | P1_GRP))) + if (!(status & (P3_GRP_4030 | P2_GRP_4030 | P1_GRP_4030))) return -EACCES; status = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, @@ -260,7 +294,29 @@ static const u16 VSIM_VSEL_table[] = { static const u16 VDAC_VSEL_table[] = { 1200, 1300, 1800, 1800, }; - +static const u16 VAUX1_6030_VSEL_table[] = { + 1000, 1300, 1800, 2500, + 2800, 2900, 3000, 3000, +}; +static const u16 VAUX2_6030_VSEL_table[] = { + 1200, 1800, 2500, 2750, + 2800, 2800, 2800, 2800, +}; +static const u16 VAUX3_6030_VSEL_table[] = { + 1000, 1200, 1300, 1800, + 2500, 2800, 3000, 3000, +}; +static const u16 VMMC_VSEL_table[] = { + 1200, 1800, 2800, 2900, + 3000, 3000, 3000, 3000, +}; +static const u16 VPP_VSEL_table[] = { + 1800, 1900, 2000, 2100, + 2200, 2300, 2400, 2500, +}; +static const u16 VUSIM_VSEL_table[] = { + 1200, 1800, 2500, 2900, +}; static int twlldo_list_voltage(struct regulator_dev *rdev, unsigned index) { @@ -288,7 +344,8 @@ twlldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV) /* use the first in-range value */ if (min_uV <= uV && uV <= max_uV) - return twlreg_write(info, VREG_DEDICATED, vsel); + return twlreg_write(info, TWL_MODULE_PM_RECEIVER, + VREG_VOLTAGE, vsel); } return -EDOM; @@ -297,7 +354,8 @@ twlldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV) static int twlldo_get_voltage(struct regulator_dev *rdev) { struct twlreg_info *info = rdev_get_drvdata(rdev); - int vsel = twlreg_read(info, VREG_DEDICATED); + int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER, + VREG_VOLTAGE); if (vsel < 0) return vsel; @@ -360,6 +418,10 @@ static struct regulator_ops twlfixed_ops = { TWL_ADJUSTABLE_LDO(label, offset, num, TWL4030) #define TWL4030_FIXED_LDO(label, offset, mVolts, num) \ TWL_FIXED_LDO(label, offset, mVolts, num, TWL4030) +#define TWL6030_ADJUSTABLE_LDO(label, offset, num) \ + TWL_ADJUSTABLE_LDO(label, offset, num, TWL6030) +#define TWL6030_FIXED_LDO(label, offset, mVolts, num) \ + TWL_FIXED_LDO(label, offset, mVolts, num, TWL6030) #define TWL_ADJUSTABLE_LDO(label, offset, num, family) { \ .base = offset, \ @@ -420,6 +482,18 @@ static struct twlreg_info twl_regs[] = { TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18), TWL4030_FIXED_LDO(VUSB3V1, 0x77, 3100, 19), /* VUSBCP is managed *only* by the USB subchip */ + + /* 6030 REG with base as PMC Slave Misc : 0x0030 */ + TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1), + TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 2), + TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 3), + TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 4), + TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 5), + TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 7), + TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15), + TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16), + TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17), + TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18) }; static int twlreg_probe(struct platform_device *pdev) diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h index 8e7405d9c624..7679e87df177 100644 --- a/include/linux/i2c/twl.h +++ b/include/linux/i2c/twl.h @@ -427,6 +427,12 @@ int twl6030_interrupt_mask(u8 bit_mask, u8 offset); #define 
MSG_SINGULAR(devgrp, id, state) \ ((devgrp) << 13 | 0 << 12 | (id) << 4 | (state)) +#define MSG_BROADCAST_ALL(devgrp, state) \ + ((devgrp) << 5 | (state)) + +#define MSG_BROADCAST_REF MSG_BROADCAST_ALL +#define MSG_BROADCAST_PROV MSG_BROADCAST_ALL +#define MSG_BROADCAST__CLK_RST MSG_BROADCAST_ALL /*----------------------------------------------------------------------*/ struct twl4030_clock_init_data { @@ -602,6 +608,7 @@ int twl4030_sih_setup(int module); * VIO is generally fixed. */ +/* TWL4030 SMPS/LDO's */ /* EXTERNAL dc-to-dc buck converters */ #define TWL4030_REG_VDD1 0 #define TWL4030_REG_VDD2 1 @@ -628,4 +635,31 @@ int twl4030_sih_setup(int module); #define TWL4030_REG_VUSB1V8 18 #define TWL4030_REG_VUSB3V1 19 +/* TWL6030 SMPS/LDO's */ +/* EXTERNAL dc-to-dc buck convertor contollable via SR */ +#define TWL6030_REG_VDD1 30 +#define TWL6030_REG_VDD2 31 +#define TWL6030_REG_VDD3 32 + +/* Non SR compliant dc-to-dc buck convertors */ +#define TWL6030_REG_VMEM 33 +#define TWL6030_REG_V2V1 34 +#define TWL6030_REG_V1V29 35 +#define TWL6030_REG_V1V8 36 + +/* EXTERNAL LDOs */ +#define TWL6030_REG_VAUX1_6030 37 +#define TWL6030_REG_VAUX2_6030 38 +#define TWL6030_REG_VAUX3_6030 39 +#define TWL6030_REG_VMMC 40 +#define TWL6030_REG_VPP 41 +#define TWL6030_REG_VUSIM 42 +#define TWL6030_REG_VANA 43 +#define TWL6030_REG_VCXIO 44 +#define TWL6030_REG_VDAC 45 +#define TWL6030_REG_VUSB 46 + +/* INTERNAL LDOs */ +#define TWL6030_REG_VRTC 47 + #endif /* End of __TWL4030_H */ -- cgit v1.2.3 From 9da66539281b5e15afc4a4739014c8923059d894 Mon Sep 17 00:00:00 2001 From: Rajendra Nayak Date: Sun, 13 Dec 2009 22:29:47 +0100 Subject: mfd: Add twl6030 regulator subdevices This patch adds initial support for creating twl6030 PMIC specific voltage regulators in the twl mfd driver. Board specific regulator configurations will have to be passed from respective board files. 
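As a purely hypothetical board-file fragment (the voltage limits and variable names are invented for illustration), the board-specific configuration mentioned above could be handed over through the new TWL6030 members of twl4030_platform_data added below:

    #include <linux/regulator/machine.h>
    #include <linux/i2c/twl.h>

    /* placeholder constraints -- a real board uses the limits of its parts */
    static struct regulator_init_data board_vmmc = {
            .constraints = {
                    .min_uV         = 1200000,
                    .max_uV         = 3000000,
                    .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
                                    | REGULATOR_CHANGE_STATUS,
            },
    };

    static struct twl4030_platform_data board_twl_data = {
            /* irq_base/irq_end and the other members as on any TWL board */
            .vmmc = &board_vmmc,
            /* vpp, vusim, vana, vcxio, vdac, vusb and vaux1..3 are
             * wired up the same way */
    };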
Signed-off-by: Rajendra Nayak Signed-off-by: Balaji T K Acked-by: Mark Brown Reviewed-by: Tony Lindgren Signed-off-by: Samuel Ortiz --- drivers/mfd/twl-core.c | 51 ++++++++++++++++++++++++++++++++++++++++++++++--- include/linux/i2c/twl.h | 16 ++++++++++++---- 2 files changed, 60 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c index c48a6138c575..2a7606534196 100644 --- a/drivers/mfd/twl-core.c +++ b/drivers/mfd/twl-core.c @@ -639,7 +639,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features) return PTR_ERR(child); } - if (twl_has_usb() && pdata->usb) { + if (twl_has_usb() && pdata->usb && twl_class_is_4030()) { static struct regulator_consumer_supply usb1v5 = { .supply = "usb1v5", @@ -719,7 +719,8 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features) return PTR_ERR(child); } - if (twl_has_regulator()) { + /* twl4030 regulators */ + if (twl_has_regulator() && twl_class_is_4030()) { child = add_regulator(TWL4030_REG_VPLL1, pdata->vpll1); if (IS_ERR(child)) return PTR_ERR(child); @@ -765,7 +766,8 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features) } /* maybe add LDOs that are omitted on cost-reduced parts */ - if (twl_has_regulator() && !(features & TPS_SUBSET)) { + if (twl_has_regulator() && !(features & TPS_SUBSET) + && twl_class_is_4030()) { child = add_regulator(TWL4030_REG_VPLL2, pdata->vpll2); if (IS_ERR(child)) return PTR_ERR(child); @@ -791,6 +793,49 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features) return PTR_ERR(child); } + /* twl6030 regulators */ + if (twl_has_regulator() && twl_class_is_6030()) { + child = add_regulator(TWL6030_REG_VMMC, pdata->vmmc); + if (IS_ERR(child)) + return PTR_ERR(child); + + child = add_regulator(TWL6030_REG_VPP, pdata->vpp); + if (IS_ERR(child)) + return PTR_ERR(child); + + child = add_regulator(TWL6030_REG_VUSIM, pdata->vusim); + if (IS_ERR(child)) + return PTR_ERR(child); + + child = add_regulator(TWL6030_REG_VANA, pdata->vana); + if (IS_ERR(child)) + return PTR_ERR(child); + + child = add_regulator(TWL6030_REG_VCXIO, pdata->vcxio); + if (IS_ERR(child)) + return PTR_ERR(child); + + child = add_regulator(TWL6030_REG_VDAC, pdata->vdac); + if (IS_ERR(child)) + return PTR_ERR(child); + + child = add_regulator(TWL6030_REG_VUSB, pdata->vusb); + if (IS_ERR(child)) + return PTR_ERR(child); + + child = add_regulator(TWL6030_REG_VAUX1_6030, pdata->vaux1); + if (IS_ERR(child)) + return PTR_ERR(child); + + child = add_regulator(TWL6030_REG_VAUX2_6030, pdata->vaux2); + if (IS_ERR(child)) + return PTR_ERR(child); + + child = add_regulator(TWL6030_REG_VAUX3_6030, pdata->vaux3); + if (IS_ERR(child)) + return PTR_ERR(child); + } + return 0; } diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h index 7679e87df177..bf1c5be1f5b6 100644 --- a/include/linux/i2c/twl.h +++ b/include/linux/i2c/twl.h @@ -560,16 +560,17 @@ struct twl4030_platform_data { struct twl4030_power_data *power; struct twl4030_codec_data *codec; - /* LDO regulators */ + /* Common LDO regulators for TWL4030/TWL6030 */ struct regulator_init_data *vdac; + struct regulator_init_data *vaux1; + struct regulator_init_data *vaux2; + struct regulator_init_data *vaux3; + /* TWL4030 LDO regulators */ struct regulator_init_data *vpll1; struct regulator_init_data *vpll2; struct regulator_init_data *vmmc1; struct regulator_init_data *vmmc2; struct regulator_init_data *vsim; - struct regulator_init_data *vaux1; - struct regulator_init_data 
*vaux2; - struct regulator_init_data *vaux3; struct regulator_init_data *vaux4; struct regulator_init_data *vio; struct regulator_init_data *vdd1; @@ -577,6 +578,13 @@ struct twl4030_platform_data { struct regulator_init_data *vintana1; struct regulator_init_data *vintana2; struct regulator_init_data *vintdig; + /* TWL6030 LDO regulators */ + struct regulator_init_data *vmmc; + struct regulator_init_data *vpp; + struct regulator_init_data *vusim; + struct regulator_init_data *vana; + struct regulator_init_data *vcxio; + struct regulator_init_data *vusb; }; /*----------------------------------------------------------------------*/ -- cgit v1.2.3 From 7820f9e1dddcfebae2698fb2a245d04ce3aa6e74 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 14 Dec 2009 12:49:47 +1100 Subject: md: remove sparse warning:symbol XXX was not declared. Signed-off-by: NeilBrown --- drivers/md/raid6algos.c | 19 ------------------- include/linux/raid/pq.h | 19 +++++++++++++++++++ 2 files changed, 19 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/drivers/md/raid6algos.c b/drivers/md/raid6algos.c index 866215ac7f25..8ce102cc2e7b 100644 --- a/drivers/md/raid6algos.c +++ b/drivers/md/raid6algos.c @@ -31,25 +31,6 @@ EXPORT_SYMBOL(raid6_empty_zero_page); struct raid6_calls raid6_call; EXPORT_SYMBOL_GPL(raid6_call); -/* Various routine sets */ -extern const struct raid6_calls raid6_intx1; -extern const struct raid6_calls raid6_intx2; -extern const struct raid6_calls raid6_intx4; -extern const struct raid6_calls raid6_intx8; -extern const struct raid6_calls raid6_intx16; -extern const struct raid6_calls raid6_intx32; -extern const struct raid6_calls raid6_mmxx1; -extern const struct raid6_calls raid6_mmxx2; -extern const struct raid6_calls raid6_sse1x1; -extern const struct raid6_calls raid6_sse1x2; -extern const struct raid6_calls raid6_sse2x1; -extern const struct raid6_calls raid6_sse2x2; -extern const struct raid6_calls raid6_sse2x4; -extern const struct raid6_calls raid6_altivec1; -extern const struct raid6_calls raid6_altivec2; -extern const struct raid6_calls raid6_altivec4; -extern const struct raid6_calls raid6_altivec8; - const struct raid6_calls * const raid6_algos[] = { &raid6_intx1, &raid6_intx2, diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h index d92480f8285c..1cbbd2c11aa9 100644 --- a/include/linux/raid/pq.h +++ b/include/linux/raid/pq.h @@ -78,6 +78,25 @@ struct raid6_calls { /* Selected algorithm */ extern struct raid6_calls raid6_call; +/* Various routine sets */ +extern const struct raid6_calls raid6_intx1; +extern const struct raid6_calls raid6_intx2; +extern const struct raid6_calls raid6_intx4; +extern const struct raid6_calls raid6_intx8; +extern const struct raid6_calls raid6_intx16; +extern const struct raid6_calls raid6_intx32; +extern const struct raid6_calls raid6_mmxx1; +extern const struct raid6_calls raid6_mmxx2; +extern const struct raid6_calls raid6_sse1x1; +extern const struct raid6_calls raid6_sse1x2; +extern const struct raid6_calls raid6_sse2x1; +extern const struct raid6_calls raid6_sse2x2; +extern const struct raid6_calls raid6_sse2x4; +extern const struct raid6_calls raid6_altivec1; +extern const struct raid6_calls raid6_altivec2; +extern const struct raid6_calls raid6_altivec4; +extern const struct raid6_calls raid6_altivec8; + /* Algorithm list */ extern const struct raid6_calls * const raid6_algos[]; int raid6_select_algo(void); -- cgit v1.2.3 From c7cd606f60e7679c7f9eee7010f02a6f000209c1 Mon Sep 17 00:00:00 2001 From: Oliver Hartkopp Date: 
Sat, 12 Dec 2009 04:13:21 +0000 Subject: can: Fix data length code handling in rx path A valid CAN dataframe can have a data length code (DLC) of 0 .. 8 data bytes. When reading the CAN controllers register the 4-bit value may contain values from 0 .. 15 which may exceed the reserved space in the socket buffer! The ISO 11898-1 Chapter 8.4.2.3 (DLC field) says that register values > 8 should be reduced to 8 without any error reporting or frame drop. This patch introduces a new helper macro to cast a given 4-bit data length code (dlc) to __u8 and ensure the DLC value to be max. 8 bytes. The different handlings in the rx path of the CAN netdevice drivers are fixed. Signed-off-by: Oliver Hartkopp Signed-off-by: Wolfgang Grandegger Signed-off-by: David S. Miller --- drivers/net/can/at91_can.c | 2 +- drivers/net/can/bfin_can.c | 2 +- drivers/net/can/mcp251x.c | 13 +++---------- drivers/net/can/mscan/mscan.c | 3 ++- drivers/net/can/sja1000/sja1000.c | 18 ++++++++---------- drivers/net/can/ti_hecc.c | 2 +- drivers/net/can/usb/ems_usb.c | 2 +- include/linux/can/dev.h | 9 +++++++++ 8 files changed, 26 insertions(+), 25 deletions(-) (limited to 'include') diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index cbe3fce53e3b..d0ec17878ffc 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -474,7 +474,7 @@ static void at91_read_mb(struct net_device *dev, unsigned int mb, reg_msr = at91_read(priv, AT91_MSR(mb)); if (reg_msr & AT91_MSR_MRTR) cf->can_id |= CAN_RTR_FLAG; - cf->can_dlc = min_t(__u8, (reg_msr >> 16) & 0xf, 8); + cf->can_dlc = get_can_dlc((reg_msr >> 16) & 0xf); *(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb)); *(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb)); diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c index c7fc1de28173..0ec1524523cc 100644 --- a/drivers/net/can/bfin_can.c +++ b/drivers/net/can/bfin_can.c @@ -392,7 +392,7 @@ static void bfin_can_rx(struct net_device *dev, u16 isrc) cf->can_id |= CAN_RTR_FLAG; /* get data length code */ - cf->can_dlc = bfin_read16(®->chl[obj].dlc); + cf->can_dlc = get_can_dlc(bfin_read16(®->chl[obj].dlc) & 0xF); /* get payload */ for (i = 0; i < 8; i += 2) { diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c index 78b1b69b2921..9c5a1537939c 100644 --- a/drivers/net/can/mcp251x.c +++ b/drivers/net/can/mcp251x.c @@ -403,9 +403,8 @@ static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf, for (i = 1; i < RXBDAT_OFF; i++) buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i); - len = buf[RXBDLC_OFF] & RXBDLC_LEN_MASK; - if (len > 8) - len = 8; + + len = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK); for (; i < (RXBDAT_OFF + len); i++) buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i); } else { @@ -455,13 +454,7 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx) (buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT); } /* Data length */ - frame->can_dlc = buf[RXBDLC_OFF] & RXBDLC_LEN_MASK; - if (frame->can_dlc > 8) { - dev_warn(&spi->dev, "invalid frame recevied\n"); - priv->net->stats.rx_errors++; - dev_kfree_skb(skb); - return; - } + frame->can_dlc = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK); memcpy(frame->data, buf + RXBDAT_OFF, frame->can_dlc); priv->net->stats.rx_packets++; diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c index bb06dfb58f25..07346f880ca6 100644 --- a/drivers/net/can/mscan/mscan.c +++ b/drivers/net/can/mscan/mscan.c @@ -297,7 +297,8 @@ static void mscan_get_rx_frame(struct net_device *dev, struct can_frame 
*frame) frame->can_id |= can_id >> 1; if (can_id & 1) frame->can_id |= CAN_RTR_FLAG; - frame->can_dlc = in_8(®s->rx.dlr) & 0xf; + + frame->can_dlc = get_can_dlc(in_8(®s->rx.dlr) & 0xf); if (!(frame->can_id & CAN_RTR_FLAG)) { void __iomem *data = ®s->rx.dsr1_0; diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index b4ba88a31075..542a4f7255b4 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -293,15 +293,14 @@ static void sja1000_rx(struct net_device *dev) uint8_t fi; uint8_t dreg; canid_t id; - uint8_t dlc; int i; + /* create zero'ed CAN frame buffer */ skb = alloc_can_skb(dev, &cf); if (skb == NULL) return; fi = priv->read_reg(priv, REG_FI); - dlc = fi & 0x0F; if (fi & FI_FF) { /* extended frame format (EFF) */ @@ -318,16 +317,15 @@ static void sja1000_rx(struct net_device *dev) | (priv->read_reg(priv, REG_ID2) >> 5); } - if (fi & FI_RTR) + if (fi & FI_RTR) { id |= CAN_RTR_FLAG; + } else { + cf->can_dlc = get_can_dlc(fi & 0x0F); + for (i = 0; i < cf->can_dlc; i++) + cf->data[i] = priv->read_reg(priv, dreg++); + } cf->can_id = id; - cf->can_dlc = dlc; - for (i = 0; i < dlc; i++) - cf->data[i] = priv->read_reg(priv, dreg++); - - while (i < 8) - cf->data[i++] = 0; /* release receive buffer */ priv->write_reg(priv, REG_CMR, CMD_RRB); @@ -335,7 +333,7 @@ static void sja1000_rx(struct net_device *dev) netif_rx(skb); stats->rx_packets++; - stats->rx_bytes += dlc; + stats->rx_bytes += cf->can_dlc; } static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index 07e8016b17ec..5c993c2da528 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -552,7 +552,7 @@ static int ti_hecc_rx_pkt(struct ti_hecc_priv *priv, int mbxno) data = hecc_read_mbx(priv, mbxno, HECC_CANMCF); if (data & HECC_CANMCF_RTR) cf->can_id |= CAN_RTR_FLAG; - cf->can_dlc = data & 0xF; + cf->can_dlc = get_can_dlc(data & 0xF); data = hecc_read_mbx(priv, mbxno, HECC_CANMDL); *(u32 *)(cf->data) = cpu_to_be32(data); if (cf->can_dlc > 4) { diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 591eb0eb1c2b..efbb05c71bf4 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -316,7 +316,7 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg) return; cf->can_id = le32_to_cpu(msg->msg.can_msg.id); - cf->can_dlc = min_t(u8, msg->msg.can_msg.length, 8); + cf->can_dlc = get_can_dlc(msg->msg.can_msg.length & 0xF); if (msg->type == CPC_MSG_TYPE_EXT_CAN_FRAME || msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME) diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 1ed2a5cc03f5..3db7767d2a17 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -51,6 +51,15 @@ struct can_priv { struct sk_buff **echo_skb; }; +/* + * get_can_dlc(value) - helper macro to cast a given data length code (dlc) + * to __u8 and ensure the dlc value to be max. 8 bytes. 
+ * + * To be used in the CAN netdriver receive path to ensure conformance with + * ISO 11898-1 Chapter 8.4.2.3 (DLC field) + */ +#define get_can_dlc(i) (min_t(__u8, (i), 8)) + struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max); void free_candev(struct net_device *dev); -- cgit v1.2.3 From 5185fb069972b653dd7177292e7510ff99d9e8aa Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 14 Dec 2009 14:03:27 +0000 Subject: FRV: Fix no-hardware-breakpoint case If there is no hardware breakpoint support, modify_user_hw_breakpoint() tries to return a NULL pointer through as an 'int' return value: In file included from kernel/exit.c:53: include/linux/hw_breakpoint.h: In function 'modify_user_hw_breakpoint': include/linux/hw_breakpoint.h:96: warning: return makes integer from pointer without a cast Return 0 instead. Signed-off-by: David Howells Signed-off-by: Linus Torvalds --- include/linux/hw_breakpoint.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h index 69f07a9f1277..e268388146e8 100644 --- a/include/linux/hw_breakpoint.h +++ b/include/linux/hw_breakpoint.h @@ -93,7 +93,7 @@ register_user_hw_breakpoint(struct perf_event_attr *attr, struct task_struct *tsk) { return NULL; } static inline int modify_user_hw_breakpoint(struct perf_event *bp, - struct perf_event_attr *attr) { return NULL; } + struct perf_event_attr *attr) { return 0; } static inline struct perf_event * register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr, perf_overflow_handler_t triggered, -- cgit v1.2.3 From 491424c0f46c282a854b88830212bdb0763e93dc Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 14 Dec 2009 14:13:44 +0000 Subject: PCI: Global variable decls must match the defs in section attributes Global variable declarations must match the definitions in section attributes as the compiler is at liberty to vary the method it uses to access a variable, depending on the section it is in. When building the FRV arch, I now see: drivers/built-in.o: In function `pci_apply_final_quirks': drivers/pci/quirks.c:2606: relocation truncated to fit: R_FRV_GPREL12 against symbol `pci_dfl_cache_line_size' defined in .devinit.data section in drivers/built-in.o drivers/pci/quirks.c:2623: relocation truncated to fit: R_FRV_GPREL12 against symbol `pci_dfl_cache_line_size' defined in .devinit.data section in drivers/built-in.o drivers/pci/quirks.c:2630: relocation truncated to fit: R_FRV_GPREL12 against symbol `pci_dfl_cache_line_size' defined in .devinit.data section in drivers/built-in.o because the declaration of pci_dfl_cache_line_size in linux/pci.h does not match the definition in drivers/pci/pci.c. 
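As an illustration of the rule this commit relies on (not part of the patch), here is a minimal sketch using a plain GCC section attribute in place of the kernel's __devinitdata helper; the variable name and initial value are made up:

/* definition in a .c file: the variable is placed in a named section */
unsigned char dfl_cache_line_size __attribute__((__section__(".devinit.data"))) = 8;

/* matching declaration in the header: it must repeat the section attribute,
 * otherwise the compiler may assume the default small-data section and emit
 * a short GP-relative access (R_FRV_GPREL12 on FRV) that cannot reach the
 * variable once the linker places it in .devinit.data */
extern unsigned char dfl_cache_line_size __attribute__((__section__(".devinit.data")));
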
Signed-off-by: David Howells Signed-off-by: Linus Torvalds --- include/linux/pci.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/pci.h b/include/linux/pci.h index 04771b9c3316..bf1e67080849 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1255,7 +1255,7 @@ extern int pci_pci_problems; extern unsigned long pci_cardbus_io_size; extern unsigned long pci_cardbus_mem_size; -extern u8 pci_dfl_cache_line_size; +extern u8 __devinitdata pci_dfl_cache_line_size; extern u8 pci_cache_line_size; extern unsigned long pci_hotplug_io_size; -- cgit v1.2.3 From d24deb2580823ab0b8425790c6f5d18e2ff749d8 Mon Sep 17 00:00:00 2001 From: Gertjan van Wingerde Date: Fri, 4 Dec 2009 23:46:54 +0100 Subject: mac80211: Add define for TX headroom reserved by mac80211 itself. Add a definition of the amount of TX headroom reserved by mac80211 itself for its own purposes. Also add BUILD_BUG_ON to validate the value. This define can then be used by drivers to request additional TX headroom in the most efficient manner. Signed-off-by: Gertjan van Wingerde Acked-by: Johannes Berg Signed-off-by: John W. Linville --- include/net/mac80211.h | 6 ++++++ net/mac80211/main.c | 2 ++ 2 files changed, 8 insertions(+) (limited to 'include') diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 2aff4906b2ae..538d6b761887 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1737,6 +1737,12 @@ static inline void ieee80211_rx_ni(struct ieee80211_hw *hw, local_bh_enable(); } +/* + * The TX headroom reserved by mac80211 for its own tx_status functions. + * This is enough for the radiotap header. + */ +#define IEEE80211_TX_STATUS_HEADROOM 13 + /** * ieee80211_tx_status - transmit status callback * diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 8116d1a96a4a..0d2d94881f1f 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -515,6 +515,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) * and we need some headroom for passing the frame to monitor * interfaces, but never both at the same time. */ + BUILD_BUG_ON(IEEE80211_TX_STATUS_HEADROOM != + sizeof(struct ieee80211_tx_status_rtap_hdr)); local->tx_headroom = max_t(unsigned int , local->hw.extra_tx_headroom, sizeof(struct ieee80211_tx_status_rtap_hdr)); -- cgit v1.2.3 From 310ec79210d754afe51e2e4a983e846b60179abd Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Mon, 14 Dec 2009 21:17:23 +0100 Subject: i2c: Drop the kind parameter from detect callbacks The "kind" parameter always has value -1, and nobody is using it any longer, so we can remove it. 
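To illustrate the new prototype (a hypothetical "foo" driver, not taken from the patch), a detect callback after this change receives only the client and the board_info to fill in; the register offset and ID value below are invented:

/* Return 0 if detection is successful, -ENODEV otherwise */
static int foo_detect(struct i2c_client *client, struct i2c_board_info *info)
{
	struct i2c_adapter *adapter = client->adapter;

	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
		return -ENODEV;

	/* probe a made-up manufacturer ID register at offset 0xFE */
	if (i2c_smbus_read_byte_data(client, 0xFE) != 0x55)
		return -ENODEV;

	strlcpy(info->type, "foo", I2C_NAME_SIZE);
	return 0;
}
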
Signed-off-by: Jean Delvare Tested-by: Wolfram Sang --- drivers/hwmon/adm1021.c | 4 ++-- drivers/hwmon/adm1025.c | 4 ++-- drivers/hwmon/adm1026.c | 4 ++-- drivers/hwmon/adm1029.c | 4 ++-- drivers/hwmon/adm1031.c | 4 ++-- drivers/hwmon/adm9240.c | 4 ++-- drivers/hwmon/ads7828.c | 4 ++-- drivers/hwmon/adt7462.c | 4 ++-- drivers/hwmon/adt7470.c | 4 ++-- drivers/hwmon/adt7473.c | 4 ++-- drivers/hwmon/adt7475.c | 2 +- drivers/hwmon/asb100.c | 4 ++-- drivers/hwmon/atxp1.c | 5 ++--- drivers/hwmon/dme1737.c | 2 +- drivers/hwmon/ds1621.c | 2 +- drivers/hwmon/f75375s.c | 4 ++-- drivers/hwmon/fschmd.c | 4 ++-- drivers/hwmon/gl518sm.c | 6 ++---- drivers/hwmon/gl520sm.c | 6 ++---- drivers/hwmon/lm63.c | 5 ++--- drivers/hwmon/lm73.c | 2 +- drivers/hwmon/lm75.c | 2 +- drivers/hwmon/lm77.c | 5 ++--- drivers/hwmon/lm78.c | 4 ++-- drivers/hwmon/lm80.c | 6 ++---- drivers/hwmon/lm83.c | 4 ++-- drivers/hwmon/lm85.c | 6 ++---- drivers/hwmon/lm87.c | 4 ++-- drivers/hwmon/lm90.c | 5 ++--- drivers/hwmon/lm92.c | 2 +- drivers/hwmon/lm93.c | 3 +-- drivers/hwmon/lm95241.c | 2 +- drivers/hwmon/max1619.c | 4 ++-- drivers/hwmon/max6650.c | 4 ++-- drivers/hwmon/pcf8591.c | 2 +- drivers/hwmon/smsc47m192.c | 4 ++-- drivers/hwmon/thmc50.c | 4 ++-- drivers/hwmon/tmp401.c | 4 ++-- drivers/hwmon/tmp421.c | 2 +- drivers/hwmon/w83781d.c | 3 +-- drivers/hwmon/w83791d.c | 4 ++-- drivers/hwmon/w83792d.c | 4 ++-- drivers/hwmon/w83793.c | 4 ++-- drivers/hwmon/w83l785ts.c | 4 ++-- drivers/hwmon/w83l786ng.c | 5 ++--- drivers/i2c/i2c-core.c | 2 +- drivers/misc/eeprom/eeprom.c | 3 +-- drivers/misc/ics932s401.c | 4 ++-- include/linux/i2c.h | 2 +- 49 files changed, 84 insertions(+), 100 deletions(-) (limited to 'include') diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c index 33acf29531af..3ebcdd9c25a5 100644 --- a/drivers/hwmon/adm1021.c +++ b/drivers/hwmon/adm1021.c @@ -97,7 +97,7 @@ struct adm1021_data { static int adm1021_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int adm1021_detect(struct i2c_client *client, int kind, +static int adm1021_detect(struct i2c_client *client, struct i2c_board_info *info); static void adm1021_init_client(struct i2c_client *client); static int adm1021_remove(struct i2c_client *client); @@ -284,7 +284,7 @@ static const struct attribute_group adm1021_group = { }; /* Return 0 if detection is successful, -ENODEV otherwise */ -static int adm1021_detect(struct i2c_client *client, int kind, +static int adm1021_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c index db6ac2b04f6f..357c9ffa147e 100644 --- a/drivers/hwmon/adm1025.c +++ b/drivers/hwmon/adm1025.c @@ -111,7 +111,7 @@ static const int in_scale[6] = { 2500, 2250, 3300, 5000, 12000, 3300 }; static int adm1025_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int adm1025_detect(struct i2c_client *client, int kind, +static int adm1025_detect(struct i2c_client *client, struct i2c_board_info *info); static void adm1025_init_client(struct i2c_client *client); static int adm1025_remove(struct i2c_client *client); @@ -409,7 +409,7 @@ static const struct attribute_group adm1025_group_in4 = { }; /* Return 0 if detection is successful, -ENODEV otherwise */ -static int adm1025_detect(struct i2c_client *client, int kind, +static int adm1025_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git 
a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c index fb5363985e21..8deb17a402da 100644 --- a/drivers/hwmon/adm1026.c +++ b/drivers/hwmon/adm1026.c @@ -293,7 +293,7 @@ struct adm1026_data { static int adm1026_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int adm1026_detect(struct i2c_client *client, int kind, +static int adm1026_detect(struct i2c_client *client, struct i2c_board_info *info); static int adm1026_remove(struct i2c_client *client); static int adm1026_read_value(struct i2c_client *client, u8 reg); @@ -1650,7 +1650,7 @@ static const struct attribute_group adm1026_group_in8_9 = { }; /* Return 0 if detection is successful, -ENODEV otherwise */ -static int adm1026_detect(struct i2c_client *client, int kind, +static int adm1026_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c index ef91e2a4a567..9bc9dbcacdbd 100644 --- a/drivers/hwmon/adm1029.c +++ b/drivers/hwmon/adm1029.c @@ -117,7 +117,7 @@ static const u8 ADM1029_REG_FAN_DIV[] = { static int adm1029_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int adm1029_detect(struct i2c_client *client, int kind, +static int adm1029_detect(struct i2c_client *client, struct i2c_board_info *info); static int adm1029_remove(struct i2c_client *client); static struct adm1029_data *adm1029_update_device(struct device *dev); @@ -297,7 +297,7 @@ static const struct attribute_group adm1029_group = { */ /* Return 0 if detection is successful, -ENODEV otherwise */ -static int adm1029_detect(struct i2c_client *client, int kind, +static int adm1029_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c index 0e722175aae0..cebfbf6926da 100644 --- a/drivers/hwmon/adm1031.c +++ b/drivers/hwmon/adm1031.c @@ -102,7 +102,7 @@ struct adm1031_data { static int adm1031_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int adm1031_detect(struct i2c_client *client, int kind, +static int adm1031_detect(struct i2c_client *client, struct i2c_board_info *info); static void adm1031_init_client(struct i2c_client *client); static int adm1031_remove(struct i2c_client *client); @@ -813,7 +813,7 @@ static const struct attribute_group adm1031_group_opt = { }; /* Return 0 if detection is successful, -ENODEV otherwise */ -static int adm1031_detect(struct i2c_client *client, int kind, +static int adm1031_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c index 20e0481cc206..9316e074d690 100644 --- a/drivers/hwmon/adm9240.c +++ b/drivers/hwmon/adm9240.c @@ -132,7 +132,7 @@ static inline unsigned int AOUT_FROM_REG(u8 reg) static int adm9240_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int adm9240_detect(struct i2c_client *client, int kind, +static int adm9240_detect(struct i2c_client *client, struct i2c_board_info *info); static void adm9240_init_client(struct i2c_client *client); static int adm9240_remove(struct i2c_client *client); @@ -545,7 +545,7 @@ static const struct attribute_group adm9240_group = { /*** sensor chip detect and driver install ***/ /* Return 0 if detection is successful, -ENODEV otherwise */ -static int adm9240_detect(struct i2c_client *new_client, int kind, +static int 
adm9240_detect(struct i2c_client *new_client, struct i2c_board_info *info) { struct i2c_adapter *adapter = new_client->adapter; diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c index 451977bca7d6..b50893111532 100644 --- a/drivers/hwmon/ads7828.c +++ b/drivers/hwmon/ads7828.c @@ -72,7 +72,7 @@ struct ads7828_data { }; /* Function declaration - necessary due to function dependencies */ -static int ads7828_detect(struct i2c_client *client, int kind, +static int ads7828_detect(struct i2c_client *client, struct i2c_board_info *info); static int ads7828_probe(struct i2c_client *client, const struct i2c_device_id *id); @@ -187,7 +187,7 @@ static struct i2c_driver ads7828_driver = { }; /* Return 0 if detection is successful, -ENODEV otherwise */ -static int ads7828_detect(struct i2c_client *client, int kind, +static int ads7828_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c index f9c9562b6a94..30cf002c677f 100644 --- a/drivers/hwmon/adt7462.c +++ b/drivers/hwmon/adt7462.c @@ -237,7 +237,7 @@ struct adt7462_data { static int adt7462_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int adt7462_detect(struct i2c_client *client, int kind, +static int adt7462_detect(struct i2c_client *client, struct i2c_board_info *info); static int adt7462_remove(struct i2c_client *client); @@ -1902,7 +1902,7 @@ static struct attribute *adt7462_attr[] = }; /* Return 0 if detection is successful, -ENODEV otherwise */ -static int adt7462_detect(struct i2c_client *client, int kind, +static int adt7462_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c index 32b1750a6890..9ffe5c6e495d 100644 --- a/drivers/hwmon/adt7470.c +++ b/drivers/hwmon/adt7470.c @@ -177,7 +177,7 @@ struct adt7470_data { static int adt7470_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int adt7470_detect(struct i2c_client *client, int kind, +static int adt7470_detect(struct i2c_client *client, struct i2c_board_info *info); static int adt7470_remove(struct i2c_client *client); @@ -1225,7 +1225,7 @@ static struct attribute *adt7470_attr[] = }; /* Return 0 if detection is successful, -ENODEV otherwise */ -static int adt7470_detect(struct i2c_client *client, int kind, +static int adt7470_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/adt7473.c b/drivers/hwmon/adt7473.c index aea244db974e..78fadaa431e2 100644 --- a/drivers/hwmon/adt7473.c +++ b/drivers/hwmon/adt7473.c @@ -166,7 +166,7 @@ struct adt7473_data { static int adt7473_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int adt7473_detect(struct i2c_client *client, int kind, +static int adt7473_detect(struct i2c_client *client, struct i2c_board_info *info); static int adt7473_remove(struct i2c_client *client); @@ -1085,7 +1085,7 @@ static struct attribute *adt7473_attr[] = }; /* Return 0 if detection is successful, -ENODEV otherwise */ -static int adt7473_detect(struct i2c_client *client, int kind, +static int adt7473_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c index 99abfddedbc3..3c0571551a9f 100644 --- a/drivers/hwmon/adt7475.c +++ 
b/drivers/hwmon/adt7475.c @@ -1172,7 +1172,7 @@ static struct attribute_group in4_attr_group = { .attrs = in4_attrs }; static struct attribute_group in5_attr_group = { .attrs = in5_attrs }; static struct attribute_group vid_attr_group = { .attrs = vid_attrs }; -static int adt7475_detect(struct i2c_client *client, int kind, +static int adt7475_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c index 480f80ea1fa0..507e116d5456 100644 --- a/drivers/hwmon/asb100.c +++ b/drivers/hwmon/asb100.c @@ -209,7 +209,7 @@ static void asb100_write_value(struct i2c_client *client, u16 reg, u16 val); static int asb100_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int asb100_detect(struct i2c_client *client, int kind, +static int asb100_detect(struct i2c_client *client, struct i2c_board_info *info); static int asb100_remove(struct i2c_client *client); static struct asb100_data *asb100_update_device(struct device *dev); @@ -697,7 +697,7 @@ ERROR_SC_2: } /* Return 0 if detection is successful, -ENODEV otherwise */ -static int asb100_detect(struct i2c_client *client, int kind, +static int asb100_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/atxp1.c b/drivers/hwmon/atxp1.c index d6b490d3e36f..6b7459745b66 100644 --- a/drivers/hwmon/atxp1.c +++ b/drivers/hwmon/atxp1.c @@ -50,8 +50,7 @@ static int atxp1_probe(struct i2c_client *client, const struct i2c_device_id *id); static int atxp1_remove(struct i2c_client *client); static struct atxp1_data * atxp1_update_device(struct device *dev); -static int atxp1_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info); +static int atxp1_detect(struct i2c_client *client, struct i2c_board_info *info); static const struct i2c_device_id atxp1_id[] = { { "atxp1", atxp1 }, @@ -275,7 +274,7 @@ static const struct attribute_group atxp1_group = { /* Return 0 if detection is successful, -ENODEV otherwise */ -static int atxp1_detect(struct i2c_client *new_client, int kind, +static int atxp1_detect(struct i2c_client *new_client, struct i2c_board_info *info) { struct i2c_adapter *adapter = new_client->adapter; diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c index 4377bb0cc526..7024617c1194 100644 --- a/drivers/hwmon/dme1737.c +++ b/drivers/hwmon/dme1737.c @@ -2208,7 +2208,7 @@ exit: } /* Return 0 if detection is successful, -ENODEV otherwise */ -static int dme1737_i2c_detect(struct i2c_client *client, int kind, +static int dme1737_i2c_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c index 2a4c6a05b14f..5fde2f5139fd 100644 --- a/drivers/hwmon/ds1621.c +++ b/drivers/hwmon/ds1621.c @@ -224,7 +224,7 @@ static const struct attribute_group ds1621_group = { /* Return 0 if detection is successful, -ENODEV otherwise */ -static int ds1621_detect(struct i2c_client *client, int kind, +static int ds1621_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c index 40dfbcd3f3f2..2ffcf56b6f4e 100644 --- a/drivers/hwmon/f75375s.c +++ b/drivers/hwmon/f75375s.c @@ -113,7 +113,7 @@ struct f75375_data { s8 temp_max_hyst[2]; }; -static int f75375_detect(struct i2c_client *client, int kind, 
+static int f75375_detect(struct i2c_client *client, struct i2c_board_info *info); static int f75375_probe(struct i2c_client *client, const struct i2c_device_id *id); @@ -677,7 +677,7 @@ static int f75375_remove(struct i2c_client *client) } /* Return 0 if detection is successful, -ENODEV otherwise */ -static int f75375_detect(struct i2c_client *client, int kind, +static int f75375_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c index 281829cd1533..bce18e0f1d61 100644 --- a/drivers/hwmon/fschmd.c +++ b/drivers/hwmon/fschmd.c @@ -221,7 +221,7 @@ static const int FSCHMD_NO_TEMP_SENSORS[7] = { 3, 3, 4, 3, 5, 5, 11 }; static int fschmd_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int fschmd_detect(struct i2c_client *client, int kind, +static int fschmd_detect(struct i2c_client *client, struct i2c_board_info *info); static int fschmd_remove(struct i2c_client *client); static struct fschmd_data *fschmd_update_device(struct device *dev); @@ -1000,7 +1000,7 @@ static void fschmd_dmi_decode(const struct dmi_header *header, void *dummy) } } -static int fschmd_detect(struct i2c_client *client, int _kind, +static int fschmd_detect(struct i2c_client *client, struct i2c_board_info *info) { enum chips kind; diff --git a/drivers/hwmon/gl518sm.c b/drivers/hwmon/gl518sm.c index 1d69458aa0b6..34f83c6a3f45 100644 --- a/drivers/hwmon/gl518sm.c +++ b/drivers/hwmon/gl518sm.c @@ -139,8 +139,7 @@ struct gl518_data { static int gl518_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int gl518_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info); +static int gl518_detect(struct i2c_client *client, struct i2c_board_info *info); static void gl518_init_client(struct i2c_client *client); static int gl518_remove(struct i2c_client *client); static int gl518_read_value(struct i2c_client *client, u8 reg); @@ -484,8 +483,7 @@ static const struct attribute_group gl518_group_r80 = { */ /* Return 0 if detection is successful, -ENODEV otherwise */ -static int gl518_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info) +static int gl518_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; int rev; diff --git a/drivers/hwmon/gl520sm.c b/drivers/hwmon/gl520sm.c index 92b5720ceaff..d03ba692fc42 100644 --- a/drivers/hwmon/gl520sm.c +++ b/drivers/hwmon/gl520sm.c @@ -81,8 +81,7 @@ static const u8 GL520_REG_TEMP_MAX_HYST[] = { 0x06, 0x18 }; static int gl520_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int gl520_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info); +static int gl520_detect(struct i2c_client *client, struct i2c_board_info *info); static void gl520_init_client(struct i2c_client *client); static int gl520_remove(struct i2c_client *client); static int gl520_read_value(struct i2c_client *client, u8 reg); @@ -681,8 +680,7 @@ static const struct attribute_group gl520_group_opt = { */ /* Return 0 if detection is successful, -ENODEV otherwise */ -static int gl520_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info) +static int gl520_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c index 5da66ab04f74..26844fc4a66d 100644 --- a/drivers/hwmon/lm63.c +++ 
b/drivers/hwmon/lm63.c @@ -134,8 +134,7 @@ static int lm63_remove(struct i2c_client *client); static struct lm63_data *lm63_update_device(struct device *dev); -static int lm63_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info); +static int lm63_detect(struct i2c_client *client, struct i2c_board_info *info); static void lm63_init_client(struct i2c_client *client); /* @@ -423,7 +422,7 @@ static const struct attribute_group lm63_group_fan1 = { */ /* Return 0 if detection is successful, -ENODEV otherwise */ -static int lm63_detect(struct i2c_client *new_client, int kind, +static int lm63_detect(struct i2c_client *new_client, struct i2c_board_info *info) { struct i2c_adapter *adapter = new_client->adapter; diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c index 0bf8b2a8e9f0..e610da9bd80c 100644 --- a/drivers/hwmon/lm73.c +++ b/drivers/hwmon/lm73.c @@ -151,7 +151,7 @@ static const struct i2c_device_id lm73_ids[] = { MODULE_DEVICE_TABLE(i2c, lm73_ids); /* Return 0 if detection is successful, -ENODEV otherwise */ -static int lm73_detect(struct i2c_client *new_client, int kind, +static int lm73_detect(struct i2c_client *new_client, struct i2c_board_info *info) { struct i2c_adapter *adapter = new_client->adapter; diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c index e392548cccb8..8fd759d28ddf 100644 --- a/drivers/hwmon/lm75.c +++ b/drivers/hwmon/lm75.c @@ -234,7 +234,7 @@ static const struct i2c_device_id lm75_ids[] = { MODULE_DEVICE_TABLE(i2c, lm75_ids); /* Return 0 if detection is successful, -ENODEV otherwise */ -static int lm75_detect(struct i2c_client *new_client, int kind, +static int lm75_detect(struct i2c_client *new_client, struct i2c_board_info *info) { struct i2c_adapter *adapter = new_client->adapter; diff --git a/drivers/hwmon/lm77.c b/drivers/hwmon/lm77.c index ac067fd19482..6373ab2cd952 100644 --- a/drivers/hwmon/lm77.c +++ b/drivers/hwmon/lm77.c @@ -66,8 +66,7 @@ struct lm77_data { static int lm77_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int lm77_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info); +static int lm77_detect(struct i2c_client *client, struct i2c_board_info *info); static void lm77_init_client(struct i2c_client *client); static int lm77_remove(struct i2c_client *client); static u16 lm77_read_value(struct i2c_client *client, u8 reg); @@ -245,7 +244,7 @@ static const struct attribute_group lm77_group = { }; /* Return 0 if detection is successful, -ENODEV otherwise */ -static int lm77_detect(struct i2c_client *new_client, int kind, +static int lm77_detect(struct i2c_client *new_client, struct i2c_board_info *info) { struct i2c_adapter *adapter = new_client->adapter; diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c index 5978291cebb3..f58850a9d9e0 100644 --- a/drivers/hwmon/lm78.c +++ b/drivers/hwmon/lm78.c @@ -142,7 +142,7 @@ struct lm78_data { }; -static int lm78_i2c_detect(struct i2c_client *client, int kind, +static int lm78_i2c_detect(struct i2c_client *client, struct i2c_board_info *info); static int lm78_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id); @@ -558,7 +558,7 @@ static int lm78_alias_detect(struct i2c_client *client, u8 chipid) return 1; } -static int lm78_i2c_detect(struct i2c_client *client, int kind, +static int lm78_i2c_detect(struct i2c_client *client, struct i2c_board_info *info) { int i; diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c index bcffc1899403..e3222f3d4a5e 100644 --- a/drivers/hwmon/lm80.c +++ 
b/drivers/hwmon/lm80.c @@ -133,8 +133,7 @@ struct lm80_data { static int lm80_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int lm80_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info); +static int lm80_detect(struct i2c_client *client, struct i2c_board_info *info); static void lm80_init_client(struct i2c_client *client); static int lm80_remove(struct i2c_client *client); static struct lm80_data *lm80_update_device(struct device *dev); @@ -447,8 +446,7 @@ static const struct attribute_group lm80_group = { }; /* Return 0 if detection is successful, -ENODEV otherwise */ -static int lm80_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info) +static int lm80_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; int i, cur; diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c index 08b03e6ed0b7..bfb7477cb6b3 100644 --- a/drivers/hwmon/lm83.c +++ b/drivers/hwmon/lm83.c @@ -118,7 +118,7 @@ static const u8 LM83_REG_W_HIGH[] = { * Functions declaration */ -static int lm83_detect(struct i2c_client *new_client, int kind, +static int lm83_detect(struct i2c_client *new_client, struct i2c_board_info *info); static int lm83_probe(struct i2c_client *client, const struct i2c_device_id *id); @@ -291,7 +291,7 @@ static const struct attribute_group lm83_group_opt = { */ /* Return 0 if detection is successful, -ENODEV otherwise */ -static int lm83_detect(struct i2c_client *new_client, int kind, +static int lm83_detect(struct i2c_client *new_client, struct i2c_board_info *info) { struct i2c_adapter *adapter = new_client->adapter; diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c index d56da2e74708..f5fc45ac6fe5 100644 --- a/drivers/hwmon/lm85.c +++ b/drivers/hwmon/lm85.c @@ -323,8 +323,7 @@ struct lm85_data { struct lm85_zone zone[3]; }; -static int lm85_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info); +static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info); static int lm85_probe(struct i2c_client *client, const struct i2c_device_id *id); static int lm85_remove(struct i2c_client *client); @@ -1156,8 +1155,7 @@ static int lm85_is_fake(struct i2c_client *client) } /* Return 0 if detection is successful, -ENODEV otherwise */ -static int lm85_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info) +static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; int address = client->addr; diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c index 4929b1815eee..d545b8596b05 100644 --- a/drivers/hwmon/lm87.c +++ b/drivers/hwmon/lm87.c @@ -158,7 +158,7 @@ static u8 LM87_REG_TEMP_LOW[3] = { 0x3A, 0x38, 0x2C }; static int lm87_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int lm87_detect(struct i2c_client *new_client, int kind, +static int lm87_detect(struct i2c_client *new_client, struct i2c_board_info *info); static void lm87_init_client(struct i2c_client *client); static int lm87_remove(struct i2c_client *client); @@ -662,7 +662,7 @@ static const struct attribute_group lm87_group_opt = { }; /* Return 0 if detection is successful, -ENODEV otherwise */ -static int lm87_detect(struct i2c_client *new_client, int kind, +static int lm87_detect(struct i2c_client *new_client, struct i2c_board_info *info) { struct i2c_adapter *adapter = new_client->adapter; diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c index 
b7c905f50ed4..3acbacadac77 100644 --- a/drivers/hwmon/lm90.c +++ b/drivers/hwmon/lm90.c @@ -152,8 +152,7 @@ I2C_CLIENT_INSMOD_8(lm90, adm1032, lm99, lm86, max6657, adt7461, max6680, * Functions declaration */ -static int lm90_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info); +static int lm90_detect(struct i2c_client *client, struct i2c_board_info *info); static int lm90_probe(struct i2c_client *client, const struct i2c_device_id *id); static void lm90_init_client(struct i2c_client *client); @@ -656,7 +655,7 @@ static int lm90_read_reg(struct i2c_client* client, u8 reg, u8 *value) } /* Return 0 if detection is successful, -ENODEV otherwise */ -static int lm90_detect(struct i2c_client *new_client, int kind, +static int lm90_detect(struct i2c_client *new_client, struct i2c_board_info *info) { struct i2c_adapter *adapter = new_client->adapter; diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c index 47ac698709dc..da354222468d 100644 --- a/drivers/hwmon/lm92.c +++ b/drivers/hwmon/lm92.c @@ -319,7 +319,7 @@ static const struct attribute_group lm92_group = { }; /* Return 0 if detection is successful, -ENODEV otherwise */ -static int lm92_detect(struct i2c_client *new_client, int kind, +static int lm92_detect(struct i2c_client *new_client, struct i2c_board_info *info) { struct i2c_adapter *adapter = new_client->adapter; diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c index 124dd7cea54c..7b9c97b72eb8 100644 --- a/drivers/hwmon/lm93.c +++ b/drivers/hwmon/lm93.c @@ -2501,8 +2501,7 @@ static void lm93_init_client(struct i2c_client *client) } /* Return 0 if detection is successful, -ENODEV otherwise */ -static int lm93_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info) +static int lm93_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; int mfr, ver; diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c index 906b896cf1d0..05ede4137e22 100644 --- a/drivers/hwmon/lm95241.c +++ b/drivers/hwmon/lm95241.c @@ -310,7 +310,7 @@ static const struct attribute_group lm95241_group = { }; /* Return 0 if detection is successful, -ENODEV otherwise */ -static int lm95241_detect(struct i2c_client *new_client, int kind, +static int lm95241_detect(struct i2c_client *new_client, struct i2c_board_info *info) { struct i2c_adapter *adapter = new_client->adapter; diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c index 7fcf5ff89e7f..4baf94efd372 100644 --- a/drivers/hwmon/max1619.c +++ b/drivers/hwmon/max1619.c @@ -88,7 +88,7 @@ static int temp_to_reg(int val) static int max1619_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int max1619_detect(struct i2c_client *client, int kind, +static int max1619_detect(struct i2c_client *client, struct i2c_board_info *info); static void max1619_init_client(struct i2c_client *client); static int max1619_remove(struct i2c_client *client); @@ -226,7 +226,7 @@ static const struct attribute_group max1619_group = { */ /* Return 0 if detection is successful, -ENODEV otherwise */ -static int max1619_detect(struct i2c_client *client, int kind, +static int max1619_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c index 1da561e0cb37..fd5d1acfcc95 100644 --- a/drivers/hwmon/max6650.c +++ b/drivers/hwmon/max6650.c @@ -116,7 +116,7 @@ I2C_CLIENT_INSMOD_1(max6650); static int max6650_probe(struct i2c_client 
*client, const struct i2c_device_id *id); -static int max6650_detect(struct i2c_client *client, int kind, +static int max6650_detect(struct i2c_client *client, struct i2c_board_info *info); static int max6650_init_client(struct i2c_client *client); static int max6650_remove(struct i2c_client *client); @@ -528,7 +528,7 @@ static struct attribute_group max6650_attr_grp = { */ /* Return 0 if detection is successful, -ENODEV otherwise */ -static int max6650_detect(struct i2c_client *client, int kind, +static int max6650_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/pcf8591.c b/drivers/hwmon/pcf8591.c index 1d7ffebd679d..4355aada01f2 100644 --- a/drivers/hwmon/pcf8591.c +++ b/drivers/hwmon/pcf8591.c @@ -169,7 +169,7 @@ static const struct attribute_group pcf8591_attr_group_opt = { */ /* Return 0 if detection is successful, -ENODEV otherwise */ -static int pcf8591_detect(struct i2c_client *client, int kind, +static int pcf8591_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c index 4d88c045781c..1683fc76759f 100644 --- a/drivers/hwmon/smsc47m192.c +++ b/drivers/hwmon/smsc47m192.c @@ -115,7 +115,7 @@ struct smsc47m192_data { static int smsc47m192_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int smsc47m192_detect(struct i2c_client *client, int kind, +static int smsc47m192_detect(struct i2c_client *client, struct i2c_board_info *info); static int smsc47m192_remove(struct i2c_client *client); static struct smsc47m192_data *smsc47m192_update_device(struct device *dev); @@ -481,7 +481,7 @@ static void smsc47m192_init_client(struct i2c_client *client) } /* Return 0 if detection is successful, -ENODEV otherwise */ -static int smsc47m192_detect(struct i2c_client *client, int kind, +static int smsc47m192_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/thmc50.c b/drivers/hwmon/thmc50.c index 4b793849c738..02ac0d4323a4 100644 --- a/drivers/hwmon/thmc50.c +++ b/drivers/hwmon/thmc50.c @@ -84,7 +84,7 @@ struct thmc50_data { u8 alarms; }; -static int thmc50_detect(struct i2c_client *client, int kind, +static int thmc50_detect(struct i2c_client *client, struct i2c_board_info *info); static int thmc50_probe(struct i2c_client *client, const struct i2c_device_id *id); @@ -286,7 +286,7 @@ static const struct attribute_group temp3_group = { }; /* Return 0 if detection is successful, -ENODEV otherwise */ -static int thmc50_detect(struct i2c_client *client, int kind, +static int thmc50_detect(struct i2c_client *client, struct i2c_board_info *info) { unsigned company; diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c index ee9673467c4a..7cf1d541a5fa 100644 --- a/drivers/hwmon/tmp401.c +++ b/drivers/hwmon/tmp401.c @@ -98,7 +98,7 @@ static const u8 TMP411_TEMP_HIGHEST_LSB[2] = { 0x33, 0x37 }; static int tmp401_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int tmp401_detect(struct i2c_client *client, int kind, +static int tmp401_detect(struct i2c_client *client, struct i2c_board_info *info); static int tmp401_remove(struct i2c_client *client); static struct tmp401_data *tmp401_update_device(struct device *dev); @@ -488,7 +488,7 @@ static void tmp401_init_client(struct i2c_client *client) i2c_smbus_write_byte_data(client, TMP401_CONFIG_WRITE, 
config); } -static int tmp401_detect(struct i2c_client *client, int _kind, +static int tmp401_detect(struct i2c_client *client, struct i2c_board_info *info) { enum chips kind; diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c index bb5464a289ca..34eb34c548ff 100644 --- a/drivers/hwmon/tmp421.c +++ b/drivers/hwmon/tmp421.c @@ -223,7 +223,7 @@ static int tmp421_init_client(struct i2c_client *client) return 0; } -static int tmp421_detect(struct i2c_client *client, int _kind, +static int tmp421_detect(struct i2c_client *client, struct i2c_board_info *info) { enum chips kind; diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c index 7ab7967da0a0..44704d2dee63 100644 --- a/drivers/hwmon/w83781d.c +++ b/drivers/hwmon/w83781d.c @@ -1051,8 +1051,7 @@ w83781d_create_files(struct device *dev, int kind, int is_isa) /* Return 0 if detection is successful, -ENODEV otherwise */ static int -w83781d_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info) +w83781d_detect(struct i2c_client *client, struct i2c_board_info *info) { int val1, val2; struct w83781d_data *isa = w83781d_data_if_isa(); diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c index 0410bf12c521..b3e91b6e651a 100644 --- a/drivers/hwmon/w83791d.c +++ b/drivers/hwmon/w83791d.c @@ -326,7 +326,7 @@ struct w83791d_data { static int w83791d_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int w83791d_detect(struct i2c_client *client, int kind, +static int w83791d_detect(struct i2c_client *client, struct i2c_board_info *info); static int w83791d_remove(struct i2c_client *client); @@ -1259,7 +1259,7 @@ error_sc_0: /* Return 0 if detection is successful, -ENODEV otherwise */ -static int w83791d_detect(struct i2c_client *client, int kind, +static int w83791d_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c index 38978851333f..03b836cdafa6 100644 --- a/drivers/hwmon/w83792d.c +++ b/drivers/hwmon/w83792d.c @@ -302,7 +302,7 @@ struct w83792d_data { static int w83792d_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int w83792d_detect(struct i2c_client *client, int kind, +static int w83792d_detect(struct i2c_client *client, struct i2c_board_info *info); static int w83792d_remove(struct i2c_client *client); static struct w83792d_data *w83792d_update_device(struct device *dev); @@ -1263,7 +1263,7 @@ static const struct attribute_group w83792d_group = { /* Return 0 if detection is successful, -ENODEV otherwise */ static int -w83792d_detect(struct i2c_client *client, int kind, struct i2c_board_info *info) +w83792d_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; int val1, val2; diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c index 80a2191bf127..acf35e6e2ab9 100644 --- a/drivers/hwmon/w83793.c +++ b/drivers/hwmon/w83793.c @@ -230,7 +230,7 @@ static u8 w83793_read_value(struct i2c_client *client, u16 reg); static int w83793_write_value(struct i2c_client *client, u16 reg, u8 value); static int w83793_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int w83793_detect(struct i2c_client *client, int kind, +static int w83793_detect(struct i2c_client *client, struct i2c_board_info *info); static int w83793_remove(struct i2c_client *client); static void w83793_init_client(struct i2c_client *client); @@ -1161,7 +1161,7 @@ ERROR_SC_0: } /* Return 
0 if detection is successful, -ENODEV otherwise */ -static int w83793_detect(struct i2c_client *client, int kind, +static int w83793_detect(struct i2c_client *client, struct i2c_board_info *info) { u8 tmp, bank, chip_id; diff --git a/drivers/hwmon/w83l785ts.c b/drivers/hwmon/w83l785ts.c index 9b6c4c10fba7..ec6e4b7fb741 100644 --- a/drivers/hwmon/w83l785ts.c +++ b/drivers/hwmon/w83l785ts.c @@ -83,7 +83,7 @@ I2C_CLIENT_INSMOD_1(w83l785ts); static int w83l785ts_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int w83l785ts_detect(struct i2c_client *client, int kind, +static int w83l785ts_detect(struct i2c_client *client, struct i2c_board_info *info); static int w83l785ts_remove(struct i2c_client *client); static u8 w83l785ts_read_value(struct i2c_client *client, u8 reg, u8 defval); @@ -146,7 +146,7 @@ static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_temp, NULL, 1); */ /* Return 0 if detection is successful, -ENODEV otherwise */ -static int w83l785ts_detect(struct i2c_client *client, int kind, +static int w83l785ts_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c index 27da7d2b15fb..12a5fd67bee0 100644 --- a/drivers/hwmon/w83l786ng.c +++ b/drivers/hwmon/w83l786ng.c @@ -147,7 +147,7 @@ struct w83l786ng_data { static int w83l786ng_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int w83l786ng_detect(struct i2c_client *client, int kind, +static int w83l786ng_detect(struct i2c_client *client, struct i2c_board_info *info); static int w83l786ng_remove(struct i2c_client *client); static void w83l786ng_init_client(struct i2c_client *client); @@ -586,8 +586,7 @@ static const struct attribute_group w83l786ng_group = { }; static int -w83l786ng_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info) +w83l786ng_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; u16 man_id; diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 4f34823e86b1..c1047d644d8f 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -1184,7 +1184,7 @@ static int i2c_detect_address(struct i2c_client *temp_client, /* Finally call the custom detection function */ memset(&info, 0, sizeof(struct i2c_board_info)); info.addr = addr; - err = driver->detect(temp_client, -1, &info); + err = driver->detect(temp_client, &info); if (err) { /* -ENODEV is returned if the detection fails. We catch it here as this isn't an error. 
*/ diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c index 2c27193aeaa0..2c428f464539 100644 --- a/drivers/misc/eeprom/eeprom.c +++ b/drivers/misc/eeprom/eeprom.c @@ -135,8 +135,7 @@ static struct bin_attribute eeprom_attr = { }; /* Return 0 if detection is successful, -ENODEV otherwise */ -static int eeprom_detect(struct i2c_client *client, int kind, - struct i2c_board_info *info) +static int eeprom_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c index 4bb7a3af9ad9..75097ec43edd 100644 --- a/drivers/misc/ics932s401.c +++ b/drivers/misc/ics932s401.c @@ -106,7 +106,7 @@ struct ics932s401_data { static int ics932s401_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int ics932s401_detect(struct i2c_client *client, int kind, +static int ics932s401_detect(struct i2c_client *client, struct i2c_board_info *info); static int ics932s401_remove(struct i2c_client *client); @@ -413,7 +413,7 @@ static ssize_t show_spread(struct device *dev, } /* Return 0 if detection is successful, -ENODEV otherwise */ -static int ics932s401_detect(struct i2c_client *client, int kind, +static int ics932s401_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 419ab546b266..f6f2c080ba67 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -161,7 +161,7 @@ struct i2c_driver { const struct i2c_device_id *id_table; /* Device detection callback for automatic device creation */ - int (*detect)(struct i2c_client *, int kind, struct i2c_board_info *); + int (*detect)(struct i2c_client *, struct i2c_board_info *); const struct i2c_client_address_data *address_data; struct list_head clients; }; -- cgit v1.2.3 From c3813d6af177fab19e322f3114b1f64fbcf08d71 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Mon, 14 Dec 2009 21:17:25 +0100 Subject: i2c: Get rid of struct i2c_client_address_data Struct i2c_client_address_data only contains one field at this point, which makes its usefulness questionable. Get rid of it and pass simple address lists around instead. 
Signed-off-by: Jean Delvare Tested-by: Wolfram Sang --- Documentation/i2c/writing-clients | 2 +- drivers/hwmon/adm1021.c | 2 +- drivers/hwmon/adm1025.c | 2 +- drivers/hwmon/adm1026.c | 2 +- drivers/hwmon/adm1029.c | 2 +- drivers/hwmon/adm1031.c | 2 +- drivers/hwmon/adm9240.c | 2 +- drivers/hwmon/ads7828.c | 2 +- drivers/hwmon/adt7462.c | 2 +- drivers/hwmon/adt7470.c | 2 +- drivers/hwmon/adt7473.c | 2 +- drivers/hwmon/adt7475.c | 2 +- drivers/hwmon/asb100.c | 2 +- drivers/hwmon/atxp1.c | 2 +- drivers/hwmon/dme1737.c | 2 +- drivers/hwmon/ds1621.c | 2 +- drivers/hwmon/f75375s.c | 2 +- drivers/hwmon/fschmd.c | 2 +- drivers/hwmon/gl518sm.c | 2 +- drivers/hwmon/gl520sm.c | 2 +- drivers/hwmon/lm63.c | 2 +- drivers/hwmon/lm73.c | 2 +- drivers/hwmon/lm75.c | 2 +- drivers/hwmon/lm77.c | 2 +- drivers/hwmon/lm78.c | 2 +- drivers/hwmon/lm80.c | 2 +- drivers/hwmon/lm83.c | 2 +- drivers/hwmon/lm85.c | 2 +- drivers/hwmon/lm87.c | 2 +- drivers/hwmon/lm90.c | 2 +- drivers/hwmon/lm92.c | 2 +- drivers/hwmon/lm93.c | 2 +- drivers/hwmon/lm95241.c | 2 +- drivers/hwmon/max1619.c | 2 +- drivers/hwmon/max6650.c | 2 +- drivers/hwmon/pcf8591.c | 2 +- drivers/hwmon/smsc47m192.c | 2 +- drivers/hwmon/thmc50.c | 2 +- drivers/hwmon/tmp401.c | 2 +- drivers/hwmon/tmp421.c | 2 +- drivers/hwmon/w83781d.c | 2 +- drivers/hwmon/w83791d.c | 2 +- drivers/hwmon/w83792d.c | 2 +- drivers/hwmon/w83793.c | 2 +- drivers/hwmon/w83l785ts.c | 2 +- drivers/hwmon/w83l786ng.c | 2 +- drivers/i2c/i2c-core.c | 15 +++++++------ drivers/misc/eeprom/eeprom.c | 2 +- drivers/misc/ics932s401.c | 2 +- include/linux/i2c.h | 44 ++++++++++----------------------------- 50 files changed, 66 insertions(+), 89 deletions(-) (limited to 'include') diff --git a/Documentation/i2c/writing-clients b/Documentation/i2c/writing-clients index 7860aafb483d..0a74603eb671 100644 --- a/Documentation/i2c/writing-clients +++ b/Documentation/i2c/writing-clients @@ -44,7 +44,7 @@ static struct i2c_driver foo_driver = { /* if device autodetection is needed: */ .class = I2C_CLASS_SOMETHING, .detect = foo_detect, - .address_data = &addr_data, + .address_list = normal_i2c, .shutdown = foo_shutdown, /* optional */ .suspend = foo_suspend, /* optional */ diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c index 3ebcdd9c25a5..d7af039a4d43 100644 --- a/drivers/hwmon/adm1021.c +++ b/drivers/hwmon/adm1021.c @@ -130,7 +130,7 @@ static struct i2c_driver adm1021_driver = { .remove = adm1021_remove, .id_table = adm1021_id, .detect = adm1021_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static ssize_t show_temp(struct device *dev, diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c index 357c9ffa147e..e17651083b09 100644 --- a/drivers/hwmon/adm1025.c +++ b/drivers/hwmon/adm1025.c @@ -137,7 +137,7 @@ static struct i2c_driver adm1025_driver = { .remove = adm1025_remove, .id_table = adm1025_id, .detect = adm1025_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c index 8deb17a402da..85bf23aea7db 100644 --- a/drivers/hwmon/adm1026.c +++ b/drivers/hwmon/adm1026.c @@ -319,7 +319,7 @@ static struct i2c_driver adm1026_driver = { .remove = adm1026_remove, .id_table = adm1026_id, .detect = adm1026_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static int adm1026_read_value(struct i2c_client *client, u8 reg) diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c index 9bc9dbcacdbd..a006ae5fbd2b 100644 --- a/drivers/hwmon/adm1029.c +++ 
b/drivers/hwmon/adm1029.c @@ -142,7 +142,7 @@ static struct i2c_driver adm1029_driver = { .remove = adm1029_remove, .id_table = adm1029_id, .detect = adm1029_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c index cebfbf6926da..1e02799b870e 100644 --- a/drivers/hwmon/adm1031.c +++ b/drivers/hwmon/adm1031.c @@ -125,7 +125,7 @@ static struct i2c_driver adm1031_driver = { .remove = adm1031_remove, .id_table = adm1031_id, .detect = adm1031_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static inline u8 adm1031_read_value(struct i2c_client *client, u8 reg) diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c index 9316e074d690..d9942e74ed4a 100644 --- a/drivers/hwmon/adm9240.c +++ b/drivers/hwmon/adm9240.c @@ -156,7 +156,7 @@ static struct i2c_driver adm9240_driver = { .remove = adm9240_remove, .id_table = adm9240_id, .detect = adm9240_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* per client data */ diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c index b50893111532..3827ce4be071 100644 --- a/drivers/hwmon/ads7828.c +++ b/drivers/hwmon/ads7828.c @@ -183,7 +183,7 @@ static struct i2c_driver ads7828_driver = { .remove = ads7828_remove, .id_table = ads7828_id, .detect = ads7828_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* Return 0 if detection is successful, -ENODEV otherwise */ diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c index 30cf002c677f..325700428ef0 100644 --- a/drivers/hwmon/adt7462.c +++ b/drivers/hwmon/adt7462.c @@ -256,7 +256,7 @@ static struct i2c_driver adt7462_driver = { .remove = adt7462_remove, .id_table = adt7462_id, .detect = adt7462_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c index 9ffe5c6e495d..33aa0fa3e990 100644 --- a/drivers/hwmon/adt7470.c +++ b/drivers/hwmon/adt7470.c @@ -196,7 +196,7 @@ static struct i2c_driver adt7470_driver = { .remove = adt7470_remove, .id_table = adt7470_id, .detect = adt7470_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* diff --git a/drivers/hwmon/adt7473.c b/drivers/hwmon/adt7473.c index 78fadaa431e2..1535733ddf19 100644 --- a/drivers/hwmon/adt7473.c +++ b/drivers/hwmon/adt7473.c @@ -184,7 +184,7 @@ static struct i2c_driver adt7473_driver = { .remove = adt7473_remove, .id_table = adt7473_id, .detect = adt7473_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c index 3c0571551a9f..1fb8940428c6 100644 --- a/drivers/hwmon/adt7475.c +++ b/drivers/hwmon/adt7475.c @@ -1412,7 +1412,7 @@ static struct i2c_driver adt7475_driver = { .remove = adt7475_remove, .id_table = adt7475_id, .detect = adt7475_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static void adt7475_read_hystersis(struct i2c_client *client) diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c index 507e116d5456..a92512a4a366 100644 --- a/drivers/hwmon/asb100.c +++ b/drivers/hwmon/asb100.c @@ -230,7 +230,7 @@ static struct i2c_driver asb100_driver = { .remove = asb100_remove, .id_table = asb100_id, .detect = asb100_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* 7 Voltages */ diff --git a/drivers/hwmon/atxp1.c b/drivers/hwmon/atxp1.c index 6b7459745b66..b0c3051d8a4f 100644 --- a/drivers/hwmon/atxp1.c +++ b/drivers/hwmon/atxp1.c @@ 
-67,7 +67,7 @@ static struct i2c_driver atxp1_driver = { .remove = atxp1_remove, .id_table = atxp1_id, .detect = atxp1_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; struct atxp1_data { diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c index 7024617c1194..a3af09f9dbad 100644 --- a/drivers/hwmon/dme1737.c +++ b/drivers/hwmon/dme1737.c @@ -2318,7 +2318,7 @@ static struct i2c_driver dme1737_i2c_driver = { .remove = dme1737_i2c_remove, .id_table = dme1737_id, .detect = dme1737_i2c_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* --------------------------------------------------------------------- diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c index 5fde2f5139fd..dfa4329090d4 100644 --- a/drivers/hwmon/ds1621.c +++ b/drivers/hwmon/ds1621.c @@ -321,7 +321,7 @@ static struct i2c_driver ds1621_driver = { .remove = ds1621_remove, .id_table = ds1621_id, .detect = ds1621_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static int __init ds1621_init(void) diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c index 2ffcf56b6f4e..f8992c935666 100644 --- a/drivers/hwmon/f75375s.c +++ b/drivers/hwmon/f75375s.c @@ -135,7 +135,7 @@ static struct i2c_driver f75375_driver = { .remove = f75375_remove, .id_table = f75375_id, .detect = f75375_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static inline int f75375_read8(struct i2c_client *client, u8 reg) diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c index bce18e0f1d61..4eebbbeba2d1 100644 --- a/drivers/hwmon/fschmd.c +++ b/drivers/hwmon/fschmd.c @@ -251,7 +251,7 @@ static struct i2c_driver fschmd_driver = { .remove = fschmd_remove, .id_table = fschmd_id, .detect = fschmd_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* diff --git a/drivers/hwmon/gl518sm.c b/drivers/hwmon/gl518sm.c index 34f83c6a3f45..e9407acd72cb 100644 --- a/drivers/hwmon/gl518sm.c +++ b/drivers/hwmon/gl518sm.c @@ -162,7 +162,7 @@ static struct i2c_driver gl518_driver = { .remove = gl518_remove, .id_table = gl518_id, .detect = gl518_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* diff --git a/drivers/hwmon/gl520sm.c b/drivers/hwmon/gl520sm.c index d03ba692fc42..c0ec8c28731e 100644 --- a/drivers/hwmon/gl520sm.c +++ b/drivers/hwmon/gl520sm.c @@ -104,7 +104,7 @@ static struct i2c_driver gl520_driver = { .remove = gl520_remove, .id_table = gl520_id, .detect = gl520_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* Client data */ diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c index 26844fc4a66d..1426a455071c 100644 --- a/drivers/hwmon/lm63.c +++ b/drivers/hwmon/lm63.c @@ -156,7 +156,7 @@ static struct i2c_driver lm63_driver = { .remove = lm63_remove, .id_table = lm63_id, .detect = lm63_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c index e610da9bd80c..fb6ab9a9a608 100644 --- a/drivers/hwmon/lm73.c +++ b/drivers/hwmon/lm73.c @@ -182,7 +182,7 @@ static struct i2c_driver lm73_driver = { .remove = lm73_remove, .id_table = lm73_ids, .detect = lm73_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* module glue */ diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c index 8fd759d28ddf..ce2423cd8198 100644 --- a/drivers/hwmon/lm75.c +++ b/drivers/hwmon/lm75.c @@ -295,7 +295,7 @@ static struct i2c_driver lm75_driver = { .remove = lm75_remove, .id_table = lm75_ids, .detect 
= lm75_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /*-----------------------------------------------------------------------*/ diff --git a/drivers/hwmon/lm77.c b/drivers/hwmon/lm77.c index 6373ab2cd952..b6105e570b0e 100644 --- a/drivers/hwmon/lm77.c +++ b/drivers/hwmon/lm77.c @@ -91,7 +91,7 @@ static struct i2c_driver lm77_driver = { .remove = lm77_remove, .id_table = lm77_id, .detect = lm77_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* straight from the datasheet */ diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c index f58850a9d9e0..cd6a9ea921f6 100644 --- a/drivers/hwmon/lm78.c +++ b/drivers/hwmon/lm78.c @@ -173,7 +173,7 @@ static struct i2c_driver lm78_driver = { .remove = lm78_i2c_remove, .id_table = lm78_i2c_id, .detect = lm78_i2c_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static struct platform_driver lm78_isa_driver = { diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c index e3222f3d4a5e..1cf5ff5bfa43 100644 --- a/drivers/hwmon/lm80.c +++ b/drivers/hwmon/lm80.c @@ -159,7 +159,7 @@ static struct i2c_driver lm80_driver = { .remove = lm80_remove, .id_table = lm80_id, .detect = lm80_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c index bfb7477cb6b3..b80ae182f851 100644 --- a/drivers/hwmon/lm83.c +++ b/drivers/hwmon/lm83.c @@ -145,7 +145,7 @@ static struct i2c_driver lm83_driver = { .remove = lm83_remove, .id_table = lm83_id, .detect = lm83_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c index f5fc45ac6fe5..d29bd34a265e 100644 --- a/drivers/hwmon/lm85.c +++ b/drivers/hwmon/lm85.c @@ -356,7 +356,7 @@ static struct i2c_driver lm85_driver = { .remove = lm85_remove, .id_table = lm85_id, .detect = lm85_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c index d545b8596b05..60d34bc578cb 100644 --- a/drivers/hwmon/lm87.c +++ b/drivers/hwmon/lm87.c @@ -184,7 +184,7 @@ static struct i2c_driver lm87_driver = { .remove = lm87_remove, .id_table = lm87_id, .detect = lm87_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c index 3acbacadac77..3e916ac97ea8 100644 --- a/drivers/hwmon/lm90.c +++ b/drivers/hwmon/lm90.c @@ -191,7 +191,7 @@ static struct i2c_driver lm90_driver = { .remove = lm90_remove, .id_table = lm90_id, .detect = lm90_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c index da354222468d..b582b3b7fdee 100644 --- a/drivers/hwmon/lm92.c +++ b/drivers/hwmon/lm92.c @@ -416,7 +416,7 @@ static struct i2c_driver lm92_driver = { .remove = lm92_remove, .id_table = lm92_id, .detect = lm92_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static int __init sensors_lm92_init(void) diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c index 7b9c97b72eb8..d160dfdb513f 100644 --- a/drivers/hwmon/lm93.c +++ b/drivers/hwmon/lm93.c @@ -2616,7 +2616,7 @@ static struct i2c_driver lm93_driver = { .remove = lm93_remove, .id_table = lm93_id, .detect = lm93_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static int __init lm93_init(void) diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c index 05ede4137e22..55e3bfd49706 100644 --- a/drivers/hwmon/lm95241.c +++ 
b/drivers/hwmon/lm95241.c @@ -460,7 +460,7 @@ static struct i2c_driver lm95241_driver = { .remove = lm95241_remove, .id_table = lm95241_id, .detect = lm95241_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static int __init sensors_lm95241_init(void) diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c index 4baf94efd372..94cea29f157b 100644 --- a/drivers/hwmon/max1619.c +++ b/drivers/hwmon/max1619.c @@ -113,7 +113,7 @@ static struct i2c_driver max1619_driver = { .remove = max1619_remove, .id_table = max1619_id, .detect = max1619_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c index fd5d1acfcc95..c7c126cf22dd 100644 --- a/drivers/hwmon/max6650.c +++ b/drivers/hwmon/max6650.c @@ -141,7 +141,7 @@ static struct i2c_driver max6650_driver = { .remove = max6650_remove, .id_table = max6650_id, .detect = max6650_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* diff --git a/drivers/hwmon/pcf8591.c b/drivers/hwmon/pcf8591.c index 4355aada01f2..c19e61bd393c 100644 --- a/drivers/hwmon/pcf8591.c +++ b/drivers/hwmon/pcf8591.c @@ -299,7 +299,7 @@ static struct i2c_driver pcf8591_driver = { .class = I2C_CLASS_HWMON, /* Nearest choice */ .detect = pcf8591_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static int __init pcf8591_init(void) diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c index 1683fc76759f..34df2e2ee28a 100644 --- a/drivers/hwmon/smsc47m192.c +++ b/drivers/hwmon/smsc47m192.c @@ -135,7 +135,7 @@ static struct i2c_driver smsc47m192_driver = { .remove = smsc47m192_remove, .id_table = smsc47m192_id, .detect = smsc47m192_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* Voltages */ diff --git a/drivers/hwmon/thmc50.c b/drivers/hwmon/thmc50.c index 02ac0d4323a4..866d66507596 100644 --- a/drivers/hwmon/thmc50.c +++ b/drivers/hwmon/thmc50.c @@ -108,7 +108,7 @@ static struct i2c_driver thmc50_driver = { .remove = thmc50_remove, .id_table = thmc50_id, .detect = thmc50_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static ssize_t show_analog_out(struct device *dev, diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c index 7cf1d541a5fa..ed086491cc9a 100644 --- a/drivers/hwmon/tmp401.c +++ b/drivers/hwmon/tmp401.c @@ -123,7 +123,7 @@ static struct i2c_driver tmp401_driver = { .remove = tmp401_remove, .id_table = tmp401_id, .detect = tmp401_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c index 34eb34c548ff..018ad028c179 100644 --- a/drivers/hwmon/tmp421.c +++ b/drivers/hwmon/tmp421.c @@ -322,7 +322,7 @@ static struct i2c_driver tmp421_driver = { .remove = tmp421_remove, .id_table = tmp421_id, .detect = tmp421_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static int __init tmp421_init(void) diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c index 44704d2dee63..bfaa888f6e47 100644 --- a/drivers/hwmon/w83781d.c +++ b/drivers/hwmon/w83781d.c @@ -1536,7 +1536,7 @@ static struct i2c_driver w83781d_driver = { .remove = w83781d_remove, .id_table = w83781d_ids, .detect = w83781d_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c index b3e91b6e651a..e059cf0471b0 100644 --- a/drivers/hwmon/w83791d.c +++ b/drivers/hwmon/w83791d.c @@ -355,7 +355,7 @@ static struct 
i2c_driver w83791d_driver = { .remove = w83791d_remove, .id_table = w83791d_id, .detect = w83791d_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* following are the sysfs callback functions */ diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c index 03b836cdafa6..c6f198a3d924 100644 --- a/drivers/hwmon/w83792d.c +++ b/drivers/hwmon/w83792d.c @@ -328,7 +328,7 @@ static struct i2c_driver w83792d_driver = { .remove = w83792d_remove, .id_table = w83792d_id, .detect = w83792d_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static inline long in_count_from_reg(int nr, struct w83792d_data *data) diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c index acf35e6e2ab9..ed32b18fbc42 100644 --- a/drivers/hwmon/w83793.c +++ b/drivers/hwmon/w83793.c @@ -252,7 +252,7 @@ static struct i2c_driver w83793_driver = { .remove = w83793_remove, .id_table = w83793_id, .detect = w83793_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static ssize_t diff --git a/drivers/hwmon/w83l785ts.c b/drivers/hwmon/w83l785ts.c index ec6e4b7fb741..81c59937cf0d 100644 --- a/drivers/hwmon/w83l785ts.c +++ b/drivers/hwmon/w83l785ts.c @@ -108,7 +108,7 @@ static struct i2c_driver w83l785ts_driver = { .remove = w83l785ts_remove, .id_table = w83l785ts_id, .detect = w83l785ts_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; /* diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c index 12a5fd67bee0..a427347ae82b 100644 --- a/drivers/hwmon/w83l786ng.c +++ b/drivers/hwmon/w83l786ng.c @@ -168,7 +168,7 @@ static struct i2c_driver w83l786ng_driver = { .remove = w83l786ng_remove, .id_table = w83l786ng_id, .detect = w83l786ng_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static u8 diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index c1047d644d8f..9065c7238b5e 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -1214,13 +1214,13 @@ static int i2c_detect_address(struct i2c_client *temp_client, static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver) { - const struct i2c_client_address_data *address_data; + const unsigned short *address_list; struct i2c_client *temp_client; int i, err = 0; int adap_id = i2c_adapter_id(adapter); - address_data = driver->address_data; - if (!driver->detect || !address_data) + address_list = driver->address_list; + if (!driver->detect || !address_list) return 0; /* Set up a temporary client to help detect callback */ @@ -1235,7 +1235,7 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver) /* Stop here if we can't use SMBUS_QUICK */ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_QUICK)) { - if (address_data->normal_i2c[0] == I2C_CLIENT_END) + if (address_list[0] == I2C_CLIENT_END) goto exit_free; dev_warn(&adapter->dev, "SMBus Quick command not supported, " @@ -1244,11 +1244,10 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver) goto exit_free; } - for (i = 0; address_data->normal_i2c[i] != I2C_CLIENT_END; i += 1) { + for (i = 0; address_list[i] != I2C_CLIENT_END; i += 1) { dev_dbg(&adapter->dev, "found normal entry for adapter %d, " - "addr 0x%02x\n", adap_id, - address_data->normal_i2c[i]); - temp_client->addr = address_data->normal_i2c[i]; + "addr 0x%02x\n", adap_id, address_list[i]); + temp_client->addr = address_list[i]; err = i2c_detect_address(temp_client, driver); if (err) goto exit_free; diff --git a/drivers/misc/eeprom/eeprom.c 
b/drivers/misc/eeprom/eeprom.c index 2c428f464539..3dc5e3db2c12 100644 --- a/drivers/misc/eeprom/eeprom.c +++ b/drivers/misc/eeprom/eeprom.c @@ -232,7 +232,7 @@ static struct i2c_driver eeprom_driver = { .class = I2C_CLASS_DDC | I2C_CLASS_SPD, .detect = eeprom_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static int __init eeprom_init(void) diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c index 75097ec43edd..d8a84718d687 100644 --- a/drivers/misc/ics932s401.c +++ b/drivers/misc/ics932s401.c @@ -125,7 +125,7 @@ static struct i2c_driver ics932s401_driver = { .remove = ics932s401_remove, .id_table = ics932s401_id, .detect = ics932s401_detect, - .address_data = &addr_data, + .address_list = normal_i2c, }; static struct ics932s401_data *ics932s401_update_device(struct device *dev) diff --git a/include/linux/i2c.h b/include/linux/i2c.h index f6f2c080ba67..fb9df1416ad5 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -110,7 +110,7 @@ extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client, * @driver: Device driver model driver * @id_table: List of I2C devices supported by this driver * @detect: Callback for device detection - * @address_data: The I2C addresses to probe (for detect) + * @address_list: The I2C addresses to probe (for detect) * @clients: List of detected clients we created (for i2c-core use only) * * The driver.owner field should be set to the module owner of this driver. @@ -162,7 +162,7 @@ struct i2c_driver { /* Device detection callback for automatic device creation */ int (*detect)(struct i2c_client *, struct i2c_board_info *); - const struct i2c_client_address_data *address_data; + const unsigned short *address_list; struct list_head clients; }; #define to_i2c_driver(d) container_of(d, struct i2c_driver, driver) @@ -391,14 +391,6 @@ static inline void i2c_unlock_adapter(struct i2c_adapter *adapter) #define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */ #define I2C_CLASS_SPD (1<<7) /* SPD EEPROMs and similar */ -/* i2c_client_address_data is the struct for holding default client - * addresses for a driver and for the parameters supplied on the - * command line - */ -struct i2c_client_address_data { - const unsigned short *normal_i2c; -}; - /* Internal numbers to terminate lists */ #define I2C_CLIENT_END 0xfffeU @@ -610,48 +602,34 @@ union i2c_smbus_data { module_param_array(var, short, &var##_num, 0); \ MODULE_PARM_DESC(var, desc) -#define I2C_CLIENT_INSMOD_COMMON \ -static const struct i2c_client_address_data addr_data = { \ - .normal_i2c = normal_i2c, \ -} - /* These are the ones you want to use in your own drivers. Pick the one which matches the number of devices the driver differenciates between. 
*/ -#define I2C_CLIENT_INSMOD \ -I2C_CLIENT_INSMOD_COMMON +#define I2C_CLIENT_INSMOD #define I2C_CLIENT_INSMOD_1(chip1) \ -enum chips { any_chip, chip1 }; \ -I2C_CLIENT_INSMOD_COMMON +enum chips { any_chip, chip1 } #define I2C_CLIENT_INSMOD_2(chip1, chip2) \ -enum chips { any_chip, chip1, chip2 }; \ -I2C_CLIENT_INSMOD_COMMON +enum chips { any_chip, chip1, chip2 } #define I2C_CLIENT_INSMOD_3(chip1, chip2, chip3) \ -enum chips { any_chip, chip1, chip2, chip3 }; \ -I2C_CLIENT_INSMOD_COMMON +enum chips { any_chip, chip1, chip2, chip3 } #define I2C_CLIENT_INSMOD_4(chip1, chip2, chip3, chip4) \ -enum chips { any_chip, chip1, chip2, chip3, chip4 }; \ -I2C_CLIENT_INSMOD_COMMON +enum chips { any_chip, chip1, chip2, chip3, chip4 } #define I2C_CLIENT_INSMOD_5(chip1, chip2, chip3, chip4, chip5) \ -enum chips { any_chip, chip1, chip2, chip3, chip4, chip5 }; \ -I2C_CLIENT_INSMOD_COMMON +enum chips { any_chip, chip1, chip2, chip3, chip4, chip5 } #define I2C_CLIENT_INSMOD_6(chip1, chip2, chip3, chip4, chip5, chip6) \ -enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6 }; \ -I2C_CLIENT_INSMOD_COMMON +enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6 } #define I2C_CLIENT_INSMOD_7(chip1, chip2, chip3, chip4, chip5, chip6, chip7) \ enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6, \ - chip7 }; \ -I2C_CLIENT_INSMOD_COMMON + chip7 } #define I2C_CLIENT_INSMOD_8(chip1, chip2, chip3, chip4, chip5, chip6, chip7, chip8) \ enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6, \ - chip7, chip8 }; \ -I2C_CLIENT_INSMOD_COMMON + chip7, chip8 } #endif /* __KERNEL__ */ #endif /* _LINUX_I2C_H */ -- cgit v1.2.3 From 1f86df49ddfd0067cce941187d57b2fd2f749a9e Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Mon, 14 Dec 2009 21:17:26 +0100 Subject: i2c: Drop I2C_CLIENT_INSMOD_1 This macro simply declares an enum, so drivers might as well declare it themselves. 
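For a driver that only ever differentiates one chip type, the conversion is mechanical, as the per-driver hunks below show: the I2C_CLIENT_INSMOD_1() line is dropped and the driver_data slot of the i2c_device_id table becomes 0. A minimal sketch of the resulting pattern (the "foochip" name and the 0x48 address are placeholders, not taken from any driver touched here):

#include <linux/i2c.h>
#include <linux/module.h>

/* Addresses to scan; still handed to the core via .address_list */
static const unsigned short normal_i2c[] = { 0x48, I2C_CLIENT_END };

/* I2C_CLIENT_INSMOD_1(foochip) is gone: a single chip type needs no enum */
static const struct i2c_device_id foochip_id[] = {
	{ "foochip", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, foochip_id);
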
Signed-off-by: Jean Delvare Tested-by: Wolfram Sang --- drivers/hwmon/adm1026.c | 5 +---- drivers/hwmon/adm1029.c | 8 +------- drivers/hwmon/ads7828.c | 7 ++----- drivers/hwmon/adt7462.c | 5 +---- drivers/hwmon/adt7470.c | 5 +---- drivers/hwmon/adt7473.c | 5 +---- drivers/hwmon/asb100.c | 5 +---- drivers/hwmon/atxp1.c | 4 +--- drivers/hwmon/ds1621.c | 5 ++--- drivers/hwmon/gl520sm.c | 5 +---- drivers/hwmon/lm63.c | 8 +------- drivers/hwmon/lm73.c | 5 +---- drivers/hwmon/lm75.c | 10 ++-------- drivers/hwmon/lm77.c | 5 +---- drivers/hwmon/lm80.c | 5 +---- drivers/hwmon/lm92.c | 5 +---- drivers/hwmon/lm93.c | 3 +-- drivers/hwmon/lm95241.c | 5 +---- drivers/hwmon/max1619.c | 8 +------- drivers/hwmon/max6650.c | 4 +--- drivers/hwmon/pcf8591.c | 1 - drivers/hwmon/smsc47m192.c | 5 +---- drivers/hwmon/w83791d.c | 3 +-- drivers/hwmon/w83792d.c | 3 +-- drivers/hwmon/w83793.c | 3 +-- drivers/hwmon/w83l785ts.c | 8 +------- drivers/hwmon/w83l786ng.c | 3 +-- drivers/misc/eeprom/eeprom.c | 3 --- drivers/misc/ics932s401.c | 5 +---- include/linux/i2c.h | 5 ----- 30 files changed, 30 insertions(+), 121 deletions(-) (limited to 'include') diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c index 85bf23aea7db..65335b268fa9 100644 --- a/drivers/hwmon/adm1026.c +++ b/drivers/hwmon/adm1026.c @@ -37,9 +37,6 @@ /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(adm1026); - static int gpio_input[17] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }; static int gpio_output[17] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, @@ -305,7 +302,7 @@ static void adm1026_init_client(struct i2c_client *client); static const struct i2c_device_id adm1026_id[] = { - { "adm1026", adm1026 }, + { "adm1026", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, adm1026_id); diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c index a006ae5fbd2b..0b8a3b145bd2 100644 --- a/drivers/hwmon/adm1029.c +++ b/drivers/hwmon/adm1029.c @@ -43,12 +43,6 @@ static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END }; -/* - * Insmod parameters - */ - -I2C_CLIENT_INSMOD_1(adm1029); - /* * The ADM1029 registers * Manufacturer ID is 0x41 for Analog Devices @@ -128,7 +122,7 @@ static int adm1029_init_client(struct i2c_client *client); */ static const struct i2c_device_id adm1029_id[] = { - { "adm1029", adm1029 }, + { "adm1029", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, adm1029_id); diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c index 3827ce4be071..aac85f3aed50 100644 --- a/drivers/hwmon/ads7828.c +++ b/drivers/hwmon/ads7828.c @@ -47,10 +47,7 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(ads7828); - -/* Other module parameters */ +/* Module parameters */ static int se_input = 1; /* Default is SE, 0 == diff */ static int int_vref = 1; /* Default is internal ref ON */ static int vref_mv = ADS7828_INT_VREF_MV; /* set if vref != 2.5V */ @@ -168,7 +165,7 @@ static int ads7828_remove(struct i2c_client *client) } static const struct i2c_device_id ads7828_id[] = { - { "ads7828", ads7828 }, + { "ads7828", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, ads7828_id); diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c index 325700428ef0..a1a7ef14b519 100644 --- a/drivers/hwmon/adt7462.c +++ b/drivers/hwmon/adt7462.c @@ -32,9 +32,6 @@ /* Addresses to scan */ static const unsigned short normal_i2c[] 
= { 0x58, 0x5C, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(adt7462); - /* ADT7462 registers */ #define ADT7462_REG_DEVICE 0x3D #define ADT7462_REG_VENDOR 0x3E @@ -242,7 +239,7 @@ static int adt7462_detect(struct i2c_client *client, static int adt7462_remove(struct i2c_client *client); static const struct i2c_device_id adt7462_id[] = { - { "adt7462", adt7462 }, + { "adt7462", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, adt7462_id); diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c index 33aa0fa3e990..3445ce1cba81 100644 --- a/drivers/hwmon/adt7470.c +++ b/drivers/hwmon/adt7470.c @@ -33,9 +33,6 @@ /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2C, 0x2E, 0x2F, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(adt7470); - /* ADT7470 registers */ #define ADT7470_REG_BASE_ADDR 0x20 #define ADT7470_REG_TEMP_BASE_ADDR 0x20 @@ -182,7 +179,7 @@ static int adt7470_detect(struct i2c_client *client, static int adt7470_remove(struct i2c_client *client); static const struct i2c_device_id adt7470_id[] = { - { "adt7470", adt7470 }, + { "adt7470", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, adt7470_id); diff --git a/drivers/hwmon/adt7473.c b/drivers/hwmon/adt7473.c index 1535733ddf19..434576f61c84 100644 --- a/drivers/hwmon/adt7473.c +++ b/drivers/hwmon/adt7473.c @@ -32,9 +32,6 @@ /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2C, 0x2D, 0x2E, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(adt7473); - /* ADT7473 registers */ #define ADT7473_REG_BASE_ADDR 0x20 @@ -171,7 +168,7 @@ static int adt7473_detect(struct i2c_client *client, static int adt7473_remove(struct i2c_client *client); static const struct i2c_device_id adt7473_id[] = { - { "adt7473", adt7473 }, + { "adt7473", 0 }, { } }; diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c index a92512a4a366..7dada559b3a1 100644 --- a/drivers/hwmon/asb100.c +++ b/drivers/hwmon/asb100.c @@ -51,9 +51,6 @@ /* I2C addresses to scan */ static const unsigned short normal_i2c[] = { 0x2d, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(asb100); - static unsigned short force_subclients[4]; module_param_array(force_subclients, short, NULL, 0); MODULE_PARM_DESC(force_subclients, "List of subclient addresses: " @@ -216,7 +213,7 @@ static struct asb100_data *asb100_update_device(struct device *dev); static void asb100_init_client(struct i2c_client *client); static const struct i2c_device_id asb100_id[] = { - { "asb100", asb100 }, + { "asb100", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, asb100_id); diff --git a/drivers/hwmon/atxp1.c b/drivers/hwmon/atxp1.c index b0c3051d8a4f..94cadc19f0c5 100644 --- a/drivers/hwmon/atxp1.c +++ b/drivers/hwmon/atxp1.c @@ -44,8 +44,6 @@ MODULE_AUTHOR("Sebastian Witt "); static const unsigned short normal_i2c[] = { 0x37, 0x4e, I2C_CLIENT_END }; -I2C_CLIENT_INSMOD_1(atxp1); - static int atxp1_probe(struct i2c_client *client, const struct i2c_device_id *id); static int atxp1_remove(struct i2c_client *client); @@ -53,7 +51,7 @@ static struct atxp1_data * atxp1_update_device(struct device *dev); static int atxp1_detect(struct i2c_client *client, struct i2c_board_info *info); static const struct i2c_device_id atxp1_id[] = { - { "atxp1", atxp1 }, + { "atxp1", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, atxp1_id); diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c index dfa4329090d4..e11363467a8d 100644 --- a/drivers/hwmon/ds1621.c +++ b/drivers/hwmon/ds1621.c @@ -38,7 +38,6 @@ static const unsigned short normal_i2c[] = { 0x48, 
0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; /* Insmod parameters */ -I2C_CLIENT_INSMOD_1(ds1621); static int polarity = -1; module_param(polarity, int, 0); MODULE_PARM_DESC(polarity, "Output's polarity: 0 = active high, 1 = active low"); @@ -305,8 +304,8 @@ static int ds1621_remove(struct i2c_client *client) } static const struct i2c_device_id ds1621_id[] = { - { "ds1621", ds1621 }, - { "ds1625", ds1621 }, + { "ds1621", 0 }, + { "ds1625", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, ds1621_id); diff --git a/drivers/hwmon/gl520sm.c b/drivers/hwmon/gl520sm.c index c0ec8c28731e..ec588026f0a9 100644 --- a/drivers/hwmon/gl520sm.c +++ b/drivers/hwmon/gl520sm.c @@ -41,9 +41,6 @@ MODULE_PARM_DESC(extra_sensor_type, "Type of extra sensor (0=autodetect, 1=tempe /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(gl520sm); - /* Many GL520 constants specified below One of the inputs can be configured as either temp or voltage. That's why _TEMP2 and _IN4 access the same register @@ -90,7 +87,7 @@ static struct gl520_data *gl520_update_device(struct device *dev); /* Driver data */ static const struct i2c_device_id gl520_id[] = { - { "gl520sm", gl520sm }, + { "gl520sm", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, gl520_id); diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c index 1426a455071c..bf81aff7051d 100644 --- a/drivers/hwmon/lm63.c +++ b/drivers/hwmon/lm63.c @@ -55,12 +55,6 @@ static const unsigned short normal_i2c[] = { 0x4c, I2C_CLIENT_END }; -/* - * Insmod parameters - */ - -I2C_CLIENT_INSMOD_1(lm63); - /* * The LM63 registers */ @@ -142,7 +136,7 @@ static void lm63_init_client(struct i2c_client *client); */ static const struct i2c_device_id lm63_id[] = { - { "lm63", lm63 }, + { "lm63", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, lm63_id); diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c index fb6ab9a9a608..c5f39ba103c0 100644 --- a/drivers/hwmon/lm73.c +++ b/drivers/hwmon/lm73.c @@ -27,9 +27,6 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(lm73); - /* LM73 registers */ #define LM73_REG_INPUT 0x00 #define LM73_REG_CONF 0x01 @@ -145,7 +142,7 @@ static int lm73_remove(struct i2c_client *client) } static const struct i2c_device_id lm73_ids[] = { - { "lm73", lm73 }, + { "lm73", 0 }, { /* LIST END */ } }; MODULE_DEVICE_TABLE(i2c, lm73_ids); diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c index ce2423cd8198..8ae2cfe2d827 100644 --- a/drivers/hwmon/lm75.c +++ b/drivers/hwmon/lm75.c @@ -32,15 +32,12 @@ /* * This driver handles the LM75 and compatible digital temperature sensors. - * Only types which are _not_ listed in I2C_CLIENT_INSMOD_*() need to be - * listed here. We start at 9 since I2C_CLIENT_INSMOD_*() currently allow - * definition of up to 8 chip types (plus zero). 
*/ enum lm75_type { /* keep sorted in alphabetical order */ - ds1775 = 9, + ds1775, ds75, - /* lm75 -- in I2C_CLIENT_INSMOD_1() */ + lm75, lm75a, max6625, max6626, @@ -58,9 +55,6 @@ enum lm75_type { /* keep sorted in alphabetical order */ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(lm75); - /* The LM75 registers */ #define LM75_REG_CONF 0x01 diff --git a/drivers/hwmon/lm77.c b/drivers/hwmon/lm77.c index b6105e570b0e..b28a297be50c 100644 --- a/drivers/hwmon/lm77.c +++ b/drivers/hwmon/lm77.c @@ -39,9 +39,6 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(lm77); - /* The LM77 registers */ #define LM77_REG_TEMP 0x00 #define LM77_REG_CONF 0x01 @@ -76,7 +73,7 @@ static struct lm77_data *lm77_update_device(struct device *dev); static const struct i2c_device_id lm77_id[] = { - { "lm77", lm77 }, + { "lm77", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, lm77_id); diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c index 1cf5ff5bfa43..18a0e6c5fe88 100644 --- a/drivers/hwmon/lm80.c +++ b/drivers/hwmon/lm80.c @@ -35,9 +35,6 @@ static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(lm80); - /* Many LM80 constants specified below */ /* The LM80 registers */ @@ -145,7 +142,7 @@ static int lm80_write_value(struct i2c_client *client, u8 reg, u8 value); */ static const struct i2c_device_id lm80_id[] = { - { "lm80", lm80 }, + { "lm80", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, lm80_id); diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c index b582b3b7fdee..7c31e6205f85 100644 --- a/drivers/hwmon/lm92.c +++ b/drivers/hwmon/lm92.c @@ -54,9 +54,6 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(lm92); - /* The LM92 registers */ #define LM92_REG_CONFIG 0x01 /* 8-bit, RW */ #define LM92_REG_TEMP 0x00 /* 16-bit, RO */ @@ -401,7 +398,7 @@ static int lm92_remove(struct i2c_client *client) */ static const struct i2c_device_id lm92_id[] = { - { "lm92", lm92 }, + { "lm92", 0 }, /* max6635 could be added here */ { } }; diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c index d160dfdb513f..6669255aadcf 100644 --- a/drivers/hwmon/lm93.c +++ b/drivers/hwmon/lm93.c @@ -145,7 +145,6 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; /* Insmod parameters */ -I2C_CLIENT_INSMOD_1(lm93); static int disable_block; module_param(disable_block, bool, 0); @@ -2602,7 +2601,7 @@ static int lm93_remove(struct i2c_client *client) } static const struct i2c_device_id lm93_id[] = { - { "lm93", lm93 }, + { "lm93", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, lm93_id); diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c index 55e3bfd49706..8fc8eb8cba47 100644 --- a/drivers/hwmon/lm95241.c +++ b/drivers/hwmon/lm95241.c @@ -39,9 +39,6 @@ static const unsigned short normal_i2c[] = { 0x19, 0x2a, 0x2b, I2C_CLIENT_END}; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(lm95241); - /* LM95241 registers */ #define LM95241_REG_R_MAN_ID 0xFE #define LM95241_REG_R_CHIP_ID 0xFF @@ -446,7 +443,7 @@ static struct lm95241_data *lm95241_update_device(struct device *dev) /* Driver data (common to all clients) */ static const struct i2c_device_id lm95241_id[] = { - { "lm95241", lm95241 }, + { "lm95241", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, 
lm95241_id); diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c index 94cea29f157b..022ded098100 100644 --- a/drivers/hwmon/max1619.c +++ b/drivers/hwmon/max1619.c @@ -40,12 +40,6 @@ static const unsigned short normal_i2c[] = { 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END }; -/* - * Insmod parameters - */ - -I2C_CLIENT_INSMOD_1(max1619); - /* * The MAX1619 registers */ @@ -99,7 +93,7 @@ static struct max1619_data *max1619_update_device(struct device *dev); */ static const struct i2c_device_id max1619_id[] = { - { "max1619", max1619 }, + { "max1619", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, max1619_id); diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c index c7c126cf22dd..a0160ee5caef 100644 --- a/drivers/hwmon/max6650.c +++ b/drivers/hwmon/max6650.c @@ -62,8 +62,6 @@ module_param(fan_voltage, int, S_IRUGO); module_param(prescaler, int, S_IRUGO); module_param(clock, int, S_IRUGO); -I2C_CLIENT_INSMOD_1(max6650); - /* * MAX 6650/6651 registers */ @@ -127,7 +125,7 @@ static struct max6650_data *max6650_update_device(struct device *dev); */ static const struct i2c_device_id max6650_id[] = { - { "max6650", max6650 }, + { "max6650", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, max6650_id); diff --git a/drivers/hwmon/pcf8591.c b/drivers/hwmon/pcf8591.c index c19e61bd393c..d44787949851 100644 --- a/drivers/hwmon/pcf8591.c +++ b/drivers/hwmon/pcf8591.c @@ -29,7 +29,6 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; /* Insmod parameters */ -I2C_CLIENT_INSMOD_1(pcf8591); static int input_mode; module_param(input_mode, int, 0); diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c index 34df2e2ee28a..40b26673d87f 100644 --- a/drivers/hwmon/smsc47m192.c +++ b/drivers/hwmon/smsc47m192.c @@ -36,9 +36,6 @@ /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(smsc47m192); - /* SMSC47M192 registers */ #define SMSC47M192_REG_IN(nr) ((nr)<6 ? 
(0x20 + (nr)) : \ (0x50 + (nr) - 6)) @@ -121,7 +118,7 @@ static int smsc47m192_remove(struct i2c_client *client); static struct smsc47m192_data *smsc47m192_update_device(struct device *dev); static const struct i2c_device_id smsc47m192_id[] = { - { "smsc47m192", smsc47m192 }, + { "smsc47m192", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, smsc47m192_id); diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c index e059cf0471b0..400a88bde278 100644 --- a/drivers/hwmon/w83791d.c +++ b/drivers/hwmon/w83791d.c @@ -52,7 +52,6 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END }; /* Insmod parameters */ -I2C_CLIENT_INSMOD_1(w83791d); static unsigned short force_subclients[4]; module_param_array(force_subclients, short, NULL, 0); @@ -341,7 +340,7 @@ static void w83791d_print_debug(struct w83791d_data *data, struct device *dev); static void w83791d_init_client(struct i2c_client *client); static const struct i2c_device_id w83791d_id[] = { - { "w83791d", w83791d }, + { "w83791d", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, w83791d_id); diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c index c6f198a3d924..679718e6b017 100644 --- a/drivers/hwmon/w83792d.c +++ b/drivers/hwmon/w83792d.c @@ -50,7 +50,6 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END }; /* Insmod parameters */ -I2C_CLIENT_INSMOD_1(w83792d); static unsigned short force_subclients[4]; module_param_array(force_subclients, short, NULL, 0); @@ -314,7 +313,7 @@ static void w83792d_print_debug(struct w83792d_data *data, struct device *dev); static void w83792d_init_client(struct i2c_client *client); static const struct i2c_device_id w83792d_id[] = { - { "w83792d", w83792d }, + { "w83792d", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, w83792d_id); diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c index ed32b18fbc42..9a2022b67495 100644 --- a/drivers/hwmon/w83793.c +++ b/drivers/hwmon/w83793.c @@ -41,7 +41,6 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END }; /* Insmod parameters */ -I2C_CLIENT_INSMOD_1(w83793); static unsigned short force_subclients[4]; module_param_array(force_subclients, short, NULL, 0); @@ -238,7 +237,7 @@ static void w83793_update_nonvolatile(struct device *dev); static struct w83793_data *w83793_update_device(struct device *dev); static const struct i2c_device_id w83793_id[] = { - { "w83793", w83793 }, + { "w83793", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, w83793_id); diff --git a/drivers/hwmon/w83l785ts.c b/drivers/hwmon/w83l785ts.c index 81c59937cf0d..20781def65ed 100644 --- a/drivers/hwmon/w83l785ts.c +++ b/drivers/hwmon/w83l785ts.c @@ -51,12 +51,6 @@ static const unsigned short normal_i2c[] = { 0x2e, I2C_CLIENT_END }; -/* - * Insmod parameters - */ - -I2C_CLIENT_INSMOD_1(w83l785ts); - /* * The W83L785TS-S registers * Manufacturer ID is 0x5CA3 for Winbond. 
@@ -94,7 +88,7 @@ static struct w83l785ts_data *w83l785ts_update_device(struct device *dev); */ static const struct i2c_device_id w83l785ts_id[] = { - { "w83l785ts", w83l785ts }, + { "w83l785ts", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, w83l785ts_id); diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c index a427347ae82b..0254e181893d 100644 --- a/drivers/hwmon/w83l786ng.c +++ b/drivers/hwmon/w83l786ng.c @@ -38,7 +38,6 @@ static const unsigned short normal_i2c[] = { 0x2e, 0x2f, I2C_CLIENT_END }; /* Insmod parameters */ -I2C_CLIENT_INSMOD_1(w83l786ng); static int reset; module_param(reset, bool, 0); @@ -154,7 +153,7 @@ static void w83l786ng_init_client(struct i2c_client *client); static struct w83l786ng_data *w83l786ng_update_device(struct device *dev); static const struct i2c_device_id w83l786ng_id[] = { - { "w83l786ng", w83l786ng }, + { "w83l786ng", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, w83l786ng_id); diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c index 3dc5e3db2c12..f939ebc2507c 100644 --- a/drivers/misc/eeprom/eeprom.c +++ b/drivers/misc/eeprom/eeprom.c @@ -32,9 +32,6 @@ static const unsigned short normal_i2c[] = { 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(eeprom); - /* Size of EEPROM in bytes */ #define EEPROM_SIZE 256 diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c index d8a84718d687..395a4ea64e9c 100644 --- a/drivers/misc/ics932s401.c +++ b/drivers/misc/ics932s401.c @@ -30,9 +30,6 @@ /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x69, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_1(ics932s401); - /* ICS932S401 registers */ #define ICS932S401_REG_CFG2 0x01 #define ICS932S401_CFG1_SPREAD 0x01 @@ -111,7 +108,7 @@ static int ics932s401_detect(struct i2c_client *client, static int ics932s401_remove(struct i2c_client *client); static const struct i2c_device_id ics932s401_id[] = { - { "ics932s401", ics932s401 }, + { "ics932s401", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, ics932s401_id); diff --git a/include/linux/i2c.h b/include/linux/i2c.h index fb9df1416ad5..7178b27146ed 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -604,11 +604,6 @@ union i2c_smbus_data { /* These are the ones you want to use in your own drivers. Pick the one which matches the number of devices the driver differenciates between. */ -#define I2C_CLIENT_INSMOD - -#define I2C_CLIENT_INSMOD_1(chip1) \ -enum chips { any_chip, chip1 } - #define I2C_CLIENT_INSMOD_2(chip1, chip2) \ enum chips { any_chip, chip1, chip2 } -- cgit v1.2.3 From e5e9f44c246fbafe723e579e9fe887677beb40e4 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Mon, 14 Dec 2009 21:17:27 +0100 Subject: i2c: Drop I2C_CLIENT_INSMOD_2 to 8 These macros simply declare an enum, so drivers might as well declare it themselves. This puts an end to the arbitrary limit of 8 chip types per i2c driver. 
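Drivers that support several chip types keep the enum, since the i2c_device_id table and the detect callback still need to tell the variants apart; they now simply declare it themselves, with as many entries as they need. A minimal sketch of that pattern with placeholder chip names and addresses (foo_probe, foo_remove and foo_detect stand in for a driver's existing callbacks, they are not real functions):

#include <linux/i2c.h>
#include <linux/module.h>

/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END };

/* Formerly I2C_CLIENT_INSMOD_2(foo1, foo2); now spelled out by the driver */
enum chips { foo1, foo2 };

static const struct i2c_device_id foo_id[] = {
	{ "foo1", foo1 },
	{ "foo2", foo2 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, foo_id);

/* The driver's existing callbacks, defined elsewhere in the file */
static int foo_probe(struct i2c_client *client, const struct i2c_device_id *id);
static int foo_remove(struct i2c_client *client);
static int foo_detect(struct i2c_client *client, struct i2c_board_info *info);

static struct i2c_driver foo_driver = {
	.class		= I2C_CLASS_HWMON,
	.driver = {
		.name	= "foo",
	},
	.probe		= foo_probe,
	.remove		= foo_remove,
	.id_table	= foo_id,
	.detect		= foo_detect,
	.address_list	= normal_i2c,	/* replaces the old .address_data pointer */
};
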
Signed-off-by: Jean Delvare Tested-by: Wolfram Sang --- drivers/hwmon/adm1021.c | 5 ++--- drivers/hwmon/adm1025.c | 6 +----- drivers/hwmon/adm1031.c | 3 +-- drivers/hwmon/adm9240.c | 3 +-- drivers/hwmon/adt7475.c | 2 +- drivers/hwmon/dme1737.c | 6 +----- drivers/hwmon/f75375s.c | 3 +-- drivers/hwmon/fschmd.c | 3 ++- drivers/hwmon/gl518sm.c | 3 +-- drivers/hwmon/lm78.c | 3 +-- drivers/hwmon/lm83.c | 6 +----- drivers/hwmon/lm85.c | 8 +++++--- drivers/hwmon/lm87.c | 6 +----- drivers/hwmon/lm90.c | 7 +------ drivers/hwmon/thmc50.c | 2 +- drivers/hwmon/tmp401.c | 3 +-- drivers/hwmon/tmp421.c | 3 +-- drivers/hwmon/w83781d.c | 5 +++-- include/linux/i2c.h | 24 ------------------------ 19 files changed, 26 insertions(+), 75 deletions(-) (limited to 'include') diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c index d7af039a4d43..1ad0a885c5a5 100644 --- a/drivers/hwmon/adm1021.c +++ b/drivers/hwmon/adm1021.c @@ -34,9 +34,8 @@ static const unsigned short normal_i2c[] = { 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_8(adm1021, adm1023, max1617, max1617a, thmc10, lm84, gl523sm, - mc1066); +enum chips { + adm1021, adm1023, max1617, max1617a, thmc10, lm84, gl523sm, mc1066 }; /* adm1021 constants specified below */ diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c index e17651083b09..251b63165e2a 100644 --- a/drivers/hwmon/adm1025.c +++ b/drivers/hwmon/adm1025.c @@ -64,11 +64,7 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; -/* - * Insmod parameters - */ - -I2C_CLIENT_INSMOD_2(adm1025, ne1619); +enum chips { adm1025, ne1619 }; /* * The ADM1025 registers diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c index 1e02799b870e..1644b92e7cc4 100644 --- a/drivers/hwmon/adm1031.c +++ b/drivers/hwmon/adm1031.c @@ -64,8 +64,7 @@ /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_2(adm1030, adm1031); +enum chips { adm1030, adm1031 }; typedef u8 auto_chan_table_t[8][2]; diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c index d9942e74ed4a..0727ad250793 100644 --- a/drivers/hwmon/adm9240.c +++ b/drivers/hwmon/adm9240.c @@ -55,8 +55,7 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_3(adm9240, ds1780, lm81); +enum chips { adm9240, ds1780, lm81 }; /* ADM9240 registers */ #define ADM9240_REG_MAN_ID 0x3e diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c index 1fb8940428c6..a0c385145686 100644 --- a/drivers/hwmon/adt7475.c +++ b/drivers/hwmon/adt7475.c @@ -148,7 +148,7 @@ static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; -I2C_CLIENT_INSMOD_4(adt7473, adt7475, adt7476, adt7490); +enum chips { adt7473, adt7475, adt7476, adt7490 }; static const struct i2c_device_id adt7475_id[] = { { "adt7473", adt7473 }, diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c index a3af09f9dbad..823dd28a902c 100644 --- a/drivers/hwmon/dme1737.c +++ b/drivers/hwmon/dme1737.c @@ -57,11 +57,7 @@ MODULE_PARM_DESC(probe_all_addr, "Include probing of non-standard LPC " /* Addresses to scan */ static const unsigned short normal_i2c[] = {0x2c, 0x2d, 0x2e, I2C_CLIENT_END}; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_2(dme1737, sch5027); - -/* ISA chip types */ -enum isa_chips { sch311x = sch5027 + 1 }; +enum chips { dme1737, sch5027, sch311x }; /* 
--------------------------------------------------------------------- * Registers diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c index f8992c935666..277398f9c938 100644 --- a/drivers/hwmon/f75375s.c +++ b/drivers/hwmon/f75375s.c @@ -39,8 +39,7 @@ /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2d, 0x2e, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_2(f75373, f75375); +enum chips { f75373, f75375 }; /* Fintek F75375 registers */ #define F75375_REG_CONFIG0 0x0 diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c index 4eebbbeba2d1..bd0fc67e804b 100644 --- a/drivers/hwmon/fschmd.c +++ b/drivers/hwmon/fschmd.c @@ -56,7 +56,8 @@ static int nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, int, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); -I2C_CLIENT_INSMOD_7(fscpos, fscher, fscscy, fschrc, fschmd, fschds, fscsyl); + +enum chips { fscpos, fscher, fscscy, fschrc, fschmd, fschds, fscsyl }; /* * The FSCHMD registers and other defines diff --git a/drivers/hwmon/gl518sm.c b/drivers/hwmon/gl518sm.c index e9407acd72cb..e7ae5743e181 100644 --- a/drivers/hwmon/gl518sm.c +++ b/drivers/hwmon/gl518sm.c @@ -46,8 +46,7 @@ /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_2(gl518sm_r00, gl518sm_r80); +enum chips { gl518sm_r00, gl518sm_r80 }; /* Many GL518 constants specified below */ diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c index cd6a9ea921f6..cadcbd90ff3b 100644 --- a/drivers/hwmon/lm78.c +++ b/drivers/hwmon/lm78.c @@ -41,8 +41,7 @@ static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END }; static unsigned short isa_address = 0x290; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_2(lm78, lm79); +enum chips { lm78, lm79 }; /* Many LM78 constants specified below */ diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c index b80ae182f851..8290476aee4a 100644 --- a/drivers/hwmon/lm83.c +++ b/drivers/hwmon/lm83.c @@ -51,11 +51,7 @@ static const unsigned short normal_i2c[] = { 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END }; -/* - * Insmod parameters - */ - -I2C_CLIENT_INSMOD_2(lm83, lm82); +enum chips { lm83, lm82 }; /* * The LM83 registers diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c index d29bd34a265e..b3841a615595 100644 --- a/drivers/hwmon/lm85.c +++ b/drivers/hwmon/lm85.c @@ -38,9 +38,11 @@ /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_7(lm85b, lm85c, adm1027, adt7463, adt7468, emc6d100, - emc6d102); +enum chips { + any_chip, lm85b, lm85c, + adm1027, adt7463, adt7468, + emc6d100, emc6d102 +}; /* The LM85 registers */ diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c index 60d34bc578cb..f1e6e7512ffa 100644 --- a/drivers/hwmon/lm87.c +++ b/drivers/hwmon/lm87.c @@ -74,11 +74,7 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; -/* - * Insmod parameters - */ - -I2C_CLIENT_INSMOD_2(lm87, adm1024); +enum chips { lm87, adm1024 }; /* * The LM87 registers diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c index 3e916ac97ea8..7c9bdc167426 100644 --- a/drivers/hwmon/lm90.c +++ b/drivers/hwmon/lm90.c @@ -93,12 +93,7 @@ static const unsigned short normal_i2c[] = { 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END }; -/* - * 
Insmod parameters - */ - -I2C_CLIENT_INSMOD_8(lm90, adm1032, lm99, lm86, max6657, adt7461, max6680, - max6646); +enum chips { lm90, adm1032, lm99, lm86, max6657, adt7461, max6680, max6646 }; /* * The LM90 registers diff --git a/drivers/hwmon/thmc50.c b/drivers/hwmon/thmc50.c index 866d66507596..7dfb4dec4c5f 100644 --- a/drivers/hwmon/thmc50.c +++ b/drivers/hwmon/thmc50.c @@ -35,7 +35,7 @@ MODULE_LICENSE("GPL"); static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; /* Insmod parameters */ -I2C_CLIENT_INSMOD_2(thmc50, adm1022); +enum chips { thmc50, adm1022 }; static unsigned short adm1022_temp3[16]; static unsigned int adm1022_temp3_num; diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c index ed086491cc9a..a13b30e8d8d8 100644 --- a/drivers/hwmon/tmp401.c +++ b/drivers/hwmon/tmp401.c @@ -42,8 +42,7 @@ /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x4c, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_2(tmp401, tmp411); +enum chips { tmp401, tmp411 }; /* * The TMP401 registers, note some registers have different addresses for diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c index 018ad028c179..4f7c051e2d7b 100644 --- a/drivers/hwmon/tmp421.c +++ b/drivers/hwmon/tmp421.c @@ -39,8 +39,7 @@ static unsigned short normal_i2c[] = { 0x2a, 0x4c, 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_3(tmp421, tmp422, tmp423); +enum chips { tmp421, tmp422, tmp423 }; /* The TMP421 registers */ #define TMP421_CONFIG_REG_1 0x09 diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c index bfaa888f6e47..05f9225b6f94 100644 --- a/drivers/hwmon/w83781d.c +++ b/drivers/hwmon/w83781d.c @@ -56,9 +56,10 @@ /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_4(w83781d, w83782d, w83783s, as99127f); +enum chips { w83781d, w83782d, w83783s, as99127f }; + +/* Insmod parameters */ static unsigned short force_subclients[4]; module_param_array(force_subclients, short, NULL, 0); MODULE_PARM_DESC(force_subclients, "List of subclient addresses: " diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 7178b27146ed..cc6e1508ae90 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -602,29 +602,5 @@ union i2c_smbus_data { module_param_array(var, short, &var##_num, 0); \ MODULE_PARM_DESC(var, desc) -/* These are the ones you want to use in your own drivers. Pick the one - which matches the number of devices the driver differenciates between. 
*/ -#define I2C_CLIENT_INSMOD_2(chip1, chip2) \ -enum chips { any_chip, chip1, chip2 } - -#define I2C_CLIENT_INSMOD_3(chip1, chip2, chip3) \ -enum chips { any_chip, chip1, chip2, chip3 } - -#define I2C_CLIENT_INSMOD_4(chip1, chip2, chip3, chip4) \ -enum chips { any_chip, chip1, chip2, chip3, chip4 } - -#define I2C_CLIENT_INSMOD_5(chip1, chip2, chip3, chip4, chip5) \ -enum chips { any_chip, chip1, chip2, chip3, chip4, chip5 } - -#define I2C_CLIENT_INSMOD_6(chip1, chip2, chip3, chip4, chip5, chip6) \ -enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6 } - -#define I2C_CLIENT_INSMOD_7(chip1, chip2, chip3, chip4, chip5, chip6, chip7) \ -enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6, \ - chip7 } - -#define I2C_CLIENT_INSMOD_8(chip1, chip2, chip3, chip4, chip5, chip6, chip7, chip8) \ -enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6, \ - chip7, chip8 } #endif /* __KERNEL__ */ #endif /* _LINUX_I2C_H */ -- cgit v1.2.3 From 7f508118b1c1f9856a1c899a2bd4867a962b0225 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Mon, 14 Dec 2009 21:17:29 +0100 Subject: i2c: Get rid of I2C_CLIENT_MODULE_PARM There is no user left of I2C_CLIENT_MODULE_PARM, so we can finally get rid of this ugly macro. Signed-off-by: Jean Delvare Tested-by: Wolfram Sang --- include/linux/i2c.h | 35 ----------------------------------- 1 file changed, 35 deletions(-) (limited to 'include') diff --git a/include/linux/i2c.h b/include/linux/i2c.h index cc6e1508ae90..02fc617782ef 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -568,39 +568,4 @@ union i2c_smbus_data { #define I2C_SMBUS_BLOCK_PROC_CALL 7 /* SMBus 2.0 */ #define I2C_SMBUS_I2C_BLOCK_DATA 8 - -#ifdef __KERNEL__ - -/* These defines are used for probing i2c client addresses */ -/* The length of the option lists */ -#define I2C_CLIENT_MAX_OPTS 48 - -/* Default fill of many variables */ -#define I2C_CLIENT_DEFAULTS {I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ - I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ - I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ - I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ - I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ - I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ - I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ - I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ - I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ - I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ - I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ - I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ - I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ - I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ - I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ - I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END} - -/* I2C_CLIENT_MODULE_PARM creates a module parameter, and puts it in the - module header */ - -#define I2C_CLIENT_MODULE_PARM(var,desc) \ - static unsigned short var[I2C_CLIENT_MAX_OPTS] = I2C_CLIENT_DEFAULTS; \ - static unsigned int var##_num; \ - module_param_array(var, short, &var##_num, 0); \ - MODULE_PARM_DESC(var, desc) - -#endif /* __KERNEL__ */ #endif /* _LINUX_I2C_H */ -- cgit v1.2.3 From be1cb8689c480228ffd2e4bfccc0dab7156cd9ea Mon Sep 17 00:00:00 2001 From: Jakob Bornecrantz Date: Mon, 14 Dec 2009 22:07:45 +0000 Subject: drm/ttm: Add more driver type enums Signed-off-by: Jakob Bornecrantz Signed-off-by: Dave Airlie --- include/drm/ttm/ttm_object.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/drm/ttm/ttm_object.h 
b/include/drm/ttm/ttm_object.h index 703ca4db0a29..0d9db099978b 100644 --- a/include/drm/ttm/ttm_object.h +++ b/include/drm/ttm/ttm_object.h @@ -77,7 +77,11 @@ enum ttm_object_type { ttm_buffer_type, ttm_lock_type, ttm_driver_type0 = 256, - ttm_driver_type1 + ttm_driver_type1, + ttm_driver_type2, + ttm_driver_type3, + ttm_driver_type4, + ttm_driver_type5 }; struct ttm_object_file; -- cgit v1.2.3 From fb1d9738ca053ea8afa5e86af6463155f983b01c Mon Sep 17 00:00:00 2001 From: Jakob Bornecrantz Date: Thu, 10 Dec 2009 00:19:58 +0000 Subject: drm/vmwgfx: Add DRM driver for VMware Virtual GPU MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This commit adds the vmwgfx driver for the VMware Virtual GPU aka SVGA. The driver is under staging the same as Nouveau and Radeon KMS. Hopefully the 2D ioctls are bug free and don't need changing, so that part of the API should be stable. But there is a pretty big chance that the 3D API will change in the future. Signed-off-by: Thomas Hellström Signed-off-by: Jakob Bornecrantz Signed-off-by: Dave Airlie --- drivers/gpu/drm/Makefile | 1 + drivers/gpu/drm/vmwgfx/Kconfig | 13 + drivers/gpu/drm/vmwgfx/Makefile | 9 + drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 229 ++++++ drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 735 ++++++++++++++++++ drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 511 +++++++++++++ drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 516 +++++++++++++ drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 742 +++++++++++++++++++ drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 521 +++++++++++++ drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c | 213 ++++++ drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 81 ++ drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 295 ++++++++ drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 872 ++++++++++++++++++++++ drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 102 +++ drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 516 +++++++++++++ drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 634 ++++++++++++++++ drivers/gpu/drm/vmwgfx/vmwgfx_reg.h | 57 ++ drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 1192 ++++++++++++++++++++++++++++++ drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c | 99 +++ drivers/staging/Kconfig | 2 + include/drm/Kbuild | 1 + include/drm/vmwgfx_drm.h | 574 ++++++++++++++ 22 files changed, 7915 insertions(+) create mode 100644 drivers/gpu/drm/vmwgfx/Kconfig create mode 100644 drivers/gpu/drm/vmwgfx/Makefile create mode 100644 drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c create mode 100644 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c create mode 100644 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h create mode 100644 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c create mode 100644 drivers/gpu/drm/vmwgfx/vmwgfx_fb.c create mode 100644 drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c create mode 100644 drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c create mode 100644 drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c create mode 100644 drivers/gpu/drm/vmwgfx/vmwgfx_irq.c create mode 100644 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c create mode 100644 drivers/gpu/drm/vmwgfx/vmwgfx_kms.h create mode 100644 drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c create mode 100644 drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c create mode 100644 drivers/gpu/drm/vmwgfx/vmwgfx_reg.h create mode 100644 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c create mode 100644 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c create mode 100644 include/drm/vmwgfx_drm.h (limited to 'include') diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 91567ac806f1..bc14ba7c3b6f 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -30,4 +30,5 @@ obj-$(CONFIG_DRM_I830) += i830/ obj-$(CONFIG_DRM_I915) += 
i915/ obj-$(CONFIG_DRM_SIS) += sis/ obj-$(CONFIG_DRM_SAVAGE)+= savage/ +obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/ obj-$(CONFIG_DRM_VIA) +=via/ diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig new file mode 100644 index 000000000000..f20b8bcbef39 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/Kconfig @@ -0,0 +1,13 @@ +config DRM_VMWGFX + tristate "DRM driver for VMware Virtual GPU" + depends on DRM && PCI + select FB_DEFERRED_IO + select FB_CFB_FILLRECT + select FB_CFB_COPYAREA + select FB_CFB_IMAGEBLIT + select DRM_TTM + help + KMS enabled DRM driver for SVGA2 virtual hardware. + + If unsure say n. The compiled module will be + called vmwgfx.ko diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile new file mode 100644 index 000000000000..1a3cb6816d1c --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/Makefile @@ -0,0 +1,9 @@ + +ccflags-y := -Iinclude/drm + +vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ + vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \ + vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ + vmwgfx_overlay.o + +obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c new file mode 100644 index 000000000000..d6f2d2b882e9 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c @@ -0,0 +1,229 @@ +/************************************************************************** + * + * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + **************************************************************************/ + +#include "vmwgfx_drv.h" +#include "ttm/ttm_bo_driver.h" +#include "ttm/ttm_placement.h" + +static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM | + TTM_PL_FLAG_CACHED; + +static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM | + TTM_PL_FLAG_CACHED | + TTM_PL_FLAG_NO_EVICT; + +static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM | + TTM_PL_FLAG_CACHED; + +struct ttm_placement vmw_vram_placement = { + .fpfn = 0, + .lpfn = 0, + .num_placement = 1, + .placement = &vram_placement_flags, + .num_busy_placement = 1, + .busy_placement = &vram_placement_flags +}; + +struct ttm_placement vmw_vram_ne_placement = { + .fpfn = 0, + .lpfn = 0, + .num_placement = 1, + .placement = &vram_ne_placement_flags, + .num_busy_placement = 1, + .busy_placement = &vram_ne_placement_flags +}; + +struct ttm_placement vmw_sys_placement = { + .fpfn = 0, + .lpfn = 0, + .num_placement = 1, + .placement = &sys_placement_flags, + .num_busy_placement = 1, + .busy_placement = &sys_placement_flags +}; + +struct vmw_ttm_backend { + struct ttm_backend backend; +}; + +static int vmw_ttm_populate(struct ttm_backend *backend, + unsigned long num_pages, struct page **pages, + struct page *dummy_read_page) +{ + return 0; +} + +static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem) +{ + return 0; +} + +static int vmw_ttm_unbind(struct ttm_backend *backend) +{ + return 0; +} + +static void vmw_ttm_clear(struct ttm_backend *backend) +{ +} + +static void vmw_ttm_destroy(struct ttm_backend *backend) +{ + struct vmw_ttm_backend *vmw_be = + container_of(backend, struct vmw_ttm_backend, backend); + + kfree(vmw_be); +} + +static struct ttm_backend_func vmw_ttm_func = { + .populate = vmw_ttm_populate, + .clear = vmw_ttm_clear, + .bind = vmw_ttm_bind, + .unbind = vmw_ttm_unbind, + .destroy = vmw_ttm_destroy, +}; + +struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev) +{ + struct vmw_ttm_backend *vmw_be; + + vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL); + if (!vmw_be) + return NULL; + + vmw_be->backend.func = &vmw_ttm_func; + + return &vmw_be->backend; +} + +int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) +{ + return 0; +} + +int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, + struct ttm_mem_type_manager *man) +{ + struct vmw_private *dev_priv = + container_of(bdev, struct vmw_private, bdev); + + switch (type) { + case TTM_PL_SYSTEM: + /* System memory */ + + man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; + man->available_caching = TTM_PL_MASK_CACHING; + man->default_caching = TTM_PL_FLAG_CACHED; + break; + case TTM_PL_VRAM: + /* "On-card" video ram */ + man->gpu_offset = 0; + man->io_offset = dev_priv->vram_start; + man->io_size = dev_priv->vram_size; + man->flags = TTM_MEMTYPE_FLAG_FIXED | + TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | TTM_MEMTYPE_FLAG_MAPPABLE; + man->io_addr = NULL; + man->available_caching = TTM_PL_MASK_CACHING; + man->default_caching = TTM_PL_FLAG_WC; + break; + default: + DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); + return -EINVAL; + } + return 0; +} + +void vmw_evict_flags(struct ttm_buffer_object *bo, + struct ttm_placement *placement) +{ + *placement = vmw_sys_placement; +} + +/** + * FIXME: Proper access checks on buffers. + */ + +static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp) +{ + return 0; +} + +/** + * FIXME: We're using the old vmware polling method to sync. + * Do this with fences instead. 
+ */ + +static void *vmw_sync_obj_ref(void *sync_obj) +{ + return sync_obj; +} + +static void vmw_sync_obj_unref(void **sync_obj) +{ + *sync_obj = NULL; +} + +static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg) +{ + struct vmw_private *dev_priv = (struct vmw_private *)sync_arg; + + mutex_lock(&dev_priv->hw_mutex); + vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); + mutex_unlock(&dev_priv->hw_mutex); + return 0; +} + +static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg) +{ + struct vmw_private *dev_priv = (struct vmw_private *)sync_arg; + uint32_t sequence = (unsigned long) sync_obj; + + return vmw_fence_signaled(dev_priv, sequence); +} + +static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg, + bool lazy, bool interruptible) +{ + struct vmw_private *dev_priv = (struct vmw_private *)sync_arg; + uint32_t sequence = (unsigned long) sync_obj; + + return vmw_wait_fence(dev_priv, false, sequence, false, 3*HZ); +} + +struct ttm_bo_driver vmw_bo_driver = { + .create_ttm_backend_entry = vmw_ttm_backend_init, + .invalidate_caches = vmw_invalidate_caches, + .init_mem_type = vmw_init_mem_type, + .evict_flags = vmw_evict_flags, + .move = NULL, + .verify_access = vmw_verify_access, + .sync_obj_signaled = vmw_sync_obj_signaled, + .sync_obj_wait = vmw_sync_obj_wait, + .sync_obj_flush = vmw_sync_obj_flush, + .sync_obj_unref = vmw_sync_obj_unref, + .sync_obj_ref = vmw_sync_obj_ref +}; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c new file mode 100644 index 000000000000..7b48bb3b63b2 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -0,0 +1,735 @@ +/************************************************************************** + * + * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +#include "drmP.h" +#include "vmwgfx_drv.h" +#include "ttm/ttm_placement.h" +#include "ttm/ttm_bo_driver.h" +#include "ttm/ttm_object.h" +#include "ttm/ttm_module.h" + +#define VMWGFX_DRIVER_NAME "vmwgfx" +#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" +#define VMWGFX_CHIP_SVGAII 0 +#define VMW_FB_RESERVATION 0 + +/** + * Fully encoded drm commands. 
Might move to vmw_drm.h + */ + +#define DRM_IOCTL_VMW_GET_PARAM \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \ + struct drm_vmw_getparam_arg) +#define DRM_IOCTL_VMW_ALLOC_DMABUF \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \ + union drm_vmw_alloc_dmabuf_arg) +#define DRM_IOCTL_VMW_UNREF_DMABUF \ + DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \ + struct drm_vmw_unref_dmabuf_arg) +#define DRM_IOCTL_VMW_CURSOR_BYPASS \ + DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \ + struct drm_vmw_cursor_bypass_arg) + +#define DRM_IOCTL_VMW_CONTROL_STREAM \ + DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \ + struct drm_vmw_control_stream_arg) +#define DRM_IOCTL_VMW_CLAIM_STREAM \ + DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \ + struct drm_vmw_stream_arg) +#define DRM_IOCTL_VMW_UNREF_STREAM \ + DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \ + struct drm_vmw_stream_arg) + +#define DRM_IOCTL_VMW_CREATE_CONTEXT \ + DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \ + struct drm_vmw_context_arg) +#define DRM_IOCTL_VMW_UNREF_CONTEXT \ + DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \ + struct drm_vmw_context_arg) +#define DRM_IOCTL_VMW_CREATE_SURFACE \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \ + union drm_vmw_surface_create_arg) +#define DRM_IOCTL_VMW_UNREF_SURFACE \ + DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \ + struct drm_vmw_surface_arg) +#define DRM_IOCTL_VMW_REF_SURFACE \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \ + union drm_vmw_surface_reference_arg) +#define DRM_IOCTL_VMW_EXECBUF \ + DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \ + struct drm_vmw_execbuf_arg) +#define DRM_IOCTL_VMW_FIFO_DEBUG \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FIFO_DEBUG, \ + struct drm_vmw_fifo_debug_arg) +#define DRM_IOCTL_VMW_FENCE_WAIT \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \ + struct drm_vmw_fence_wait_arg) + + +/** + * The core DRM version of this macro doesn't account for + * DRM_COMMAND_BASE. + */ + +#define VMW_IOCTL_DEF(ioctl, func, flags) \ + [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func} + +/** + * Ioctl definitions. 
+ */ + +static struct drm_ioctl_desc vmw_ioctls[] = { + VMW_IOCTL_DEF(DRM_IOCTL_VMW_GET_PARAM, vmw_getparam_ioctl, 0), + VMW_IOCTL_DEF(DRM_IOCTL_VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl, + 0), + VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl, + 0), + VMW_IOCTL_DEF(DRM_IOCTL_VMW_CURSOR_BYPASS, + vmw_kms_cursor_bypass_ioctl, 0), + + VMW_IOCTL_DEF(DRM_IOCTL_VMW_CONTROL_STREAM, vmw_overlay_ioctl, + 0), + VMW_IOCTL_DEF(DRM_IOCTL_VMW_CLAIM_STREAM, vmw_stream_claim_ioctl, + 0), + VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_STREAM, vmw_stream_unref_ioctl, + 0), + + VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_CONTEXT, vmw_context_define_ioctl, + 0), + VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl, + 0), + VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_SURFACE, vmw_surface_define_ioctl, + 0), + VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl, + 0), + VMW_IOCTL_DEF(DRM_IOCTL_VMW_REF_SURFACE, vmw_surface_reference_ioctl, + 0), + VMW_IOCTL_DEF(DRM_IOCTL_VMW_EXECBUF, vmw_execbuf_ioctl, + 0), + VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl, + 0), + VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl, + 0) +}; + +static struct pci_device_id vmw_pci_id_list[] = { + {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII}, + {0, 0, 0} +}; + +static char *vmw_devname = "vmwgfx"; + +static int vmw_probe(struct pci_dev *, const struct pci_device_id *); +static void vmw_master_init(struct vmw_master *); + +static void vmw_print_capabilities(uint32_t capabilities) +{ + DRM_INFO("Capabilities:\n"); + if (capabilities & SVGA_CAP_RECT_COPY) + DRM_INFO(" Rect copy.\n"); + if (capabilities & SVGA_CAP_CURSOR) + DRM_INFO(" Cursor.\n"); + if (capabilities & SVGA_CAP_CURSOR_BYPASS) + DRM_INFO(" Cursor bypass.\n"); + if (capabilities & SVGA_CAP_CURSOR_BYPASS_2) + DRM_INFO(" Cursor bypass 2.\n"); + if (capabilities & SVGA_CAP_8BIT_EMULATION) + DRM_INFO(" 8bit emulation.\n"); + if (capabilities & SVGA_CAP_ALPHA_CURSOR) + DRM_INFO(" Alpha cursor.\n"); + if (capabilities & SVGA_CAP_3D) + DRM_INFO(" 3D.\n"); + if (capabilities & SVGA_CAP_EXTENDED_FIFO) + DRM_INFO(" Extended Fifo.\n"); + if (capabilities & SVGA_CAP_MULTIMON) + DRM_INFO(" Multimon.\n"); + if (capabilities & SVGA_CAP_PITCHLOCK) + DRM_INFO(" Pitchlock.\n"); + if (capabilities & SVGA_CAP_IRQMASK) + DRM_INFO(" Irq mask.\n"); + if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) + DRM_INFO(" Display Topology.\n"); + if (capabilities & SVGA_CAP_GMR) + DRM_INFO(" GMR.\n"); + if (capabilities & SVGA_CAP_TRACES) + DRM_INFO(" Traces.\n"); +} + +static int vmw_request_device(struct vmw_private *dev_priv) +{ + int ret; + + vmw_kms_save_vga(dev_priv); + + ret = vmw_fifo_init(dev_priv, &dev_priv->fifo); + if (unlikely(ret != 0)) { + DRM_ERROR("Unable to initialize FIFO.\n"); + return ret; + } + + return 0; +} + +static void vmw_release_device(struct vmw_private *dev_priv) +{ + vmw_fifo_release(dev_priv, &dev_priv->fifo); + vmw_kms_restore_vga(dev_priv); +} + + +static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) +{ + struct vmw_private *dev_priv; + int ret; + + dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); + if (unlikely(dev_priv == NULL)) { + DRM_ERROR("Failed allocating a device private struct.\n"); + return -ENOMEM; + } + memset(dev_priv, 0, sizeof(*dev_priv)); + + dev_priv->dev = dev; + dev_priv->vmw_chipset = chipset; + mutex_init(&dev_priv->hw_mutex); + mutex_init(&dev_priv->cmdbuf_mutex); + rwlock_init(&dev_priv->resource_lock); + idr_init(&dev_priv->context_idr); + 
idr_init(&dev_priv->surface_idr); + idr_init(&dev_priv->stream_idr); + ida_init(&dev_priv->gmr_ida); + mutex_init(&dev_priv->init_mutex); + init_waitqueue_head(&dev_priv->fence_queue); + init_waitqueue_head(&dev_priv->fifo_queue); + atomic_set(&dev_priv->fence_queue_waiters, 0); + atomic_set(&dev_priv->fifo_queue_waiters, 0); + INIT_LIST_HEAD(&dev_priv->gmr_lru); + + dev_priv->io_start = pci_resource_start(dev->pdev, 0); + dev_priv->vram_start = pci_resource_start(dev->pdev, 1); + dev_priv->mmio_start = pci_resource_start(dev->pdev, 2); + + mutex_lock(&dev_priv->hw_mutex); + dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES); + + if (dev_priv->capabilities & SVGA_CAP_GMR) { + dev_priv->max_gmr_descriptors = + vmw_read(dev_priv, + SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH); + dev_priv->max_gmr_ids = + vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS); + } + + dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE); + dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE); + dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH); + dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT); + + mutex_unlock(&dev_priv->hw_mutex); + + vmw_print_capabilities(dev_priv->capabilities); + + if (dev_priv->capabilities & SVGA_CAP_GMR) { + DRM_INFO("Max GMR ids is %u\n", + (unsigned)dev_priv->max_gmr_ids); + DRM_INFO("Max GMR descriptors is %u\n", + (unsigned)dev_priv->max_gmr_descriptors); + } + DRM_INFO("VRAM at 0x%08x size is %u kiB\n", + dev_priv->vram_start, dev_priv->vram_size / 1024); + DRM_INFO("MMIO at 0x%08x size is %u kiB\n", + dev_priv->mmio_start, dev_priv->mmio_size / 1024); + + ret = vmw_ttm_global_init(dev_priv); + if (unlikely(ret != 0)) + goto out_err0; + + + vmw_master_init(&dev_priv->fbdev_master); + ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); + dev_priv->active_master = &dev_priv->fbdev_master; + + + ret = ttm_bo_device_init(&dev_priv->bdev, + dev_priv->bo_global_ref.ref.object, + &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET, + false); + if (unlikely(ret != 0)) { + DRM_ERROR("Failed initializing TTM buffer object driver.\n"); + goto out_err1; + } + + ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, + (dev_priv->vram_size >> PAGE_SHIFT)); + if (unlikely(ret != 0)) { + DRM_ERROR("Failed initializing memory manager for VRAM.\n"); + goto out_err2; + } + + dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start, + dev_priv->mmio_size, DRM_MTRR_WC); + + dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start, + dev_priv->mmio_size); + + if (unlikely(dev_priv->mmio_virt == NULL)) { + ret = -ENOMEM; + DRM_ERROR("Failed mapping MMIO.\n"); + goto out_err3; + } + + dev_priv->tdev = ttm_object_device_init + (dev_priv->mem_global_ref.object, 12); + + if (unlikely(dev_priv->tdev == NULL)) { + DRM_ERROR("Unable to initialize TTM object management.\n"); + ret = -ENOMEM; + goto out_err4; + } + + dev->dev_private = dev_priv; + + if (!dev->devname) + dev->devname = vmw_devname; + + if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { + ret = drm_irq_install(dev); + if (unlikely(ret != 0)) { + DRM_ERROR("Failed installing irq: %d\n", ret); + goto out_no_irq; + } + } + + ret = pci_request_regions(dev->pdev, "vmwgfx probe"); + dev_priv->stealth = (ret != 0); + if (dev_priv->stealth) { + /** + * Request at least the mmio PCI resource. + */ + + DRM_INFO("It appears like vesafb is loaded. " + "Ignore above error if any. 
Entering stealth mode.\n"); + ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe"); + if (unlikely(ret != 0)) { + DRM_ERROR("Failed reserving the SVGA MMIO resource.\n"); + goto out_no_device; + } + vmw_kms_init(dev_priv); + vmw_overlay_init(dev_priv); + } else { + ret = vmw_request_device(dev_priv); + if (unlikely(ret != 0)) + goto out_no_device; + vmw_kms_init(dev_priv); + vmw_overlay_init(dev_priv); + vmw_fb_init(dev_priv); + } + + return 0; + +out_no_device: + if (dev_priv->capabilities & SVGA_CAP_IRQMASK) + drm_irq_uninstall(dev_priv->dev); + if (dev->devname == vmw_devname) + dev->devname = NULL; +out_no_irq: + ttm_object_device_release(&dev_priv->tdev); +out_err4: + iounmap(dev_priv->mmio_virt); +out_err3: + drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, + dev_priv->mmio_size, DRM_MTRR_WC); + (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); +out_err2: + (void)ttm_bo_device_release(&dev_priv->bdev); +out_err1: + vmw_ttm_global_release(dev_priv); +out_err0: + ida_destroy(&dev_priv->gmr_ida); + idr_destroy(&dev_priv->surface_idr); + idr_destroy(&dev_priv->context_idr); + idr_destroy(&dev_priv->stream_idr); + kfree(dev_priv); + return ret; +} + +static int vmw_driver_unload(struct drm_device *dev) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + + DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n"); + + if (!dev_priv->stealth) { + vmw_fb_close(dev_priv); + vmw_kms_close(dev_priv); + vmw_overlay_close(dev_priv); + vmw_release_device(dev_priv); + pci_release_regions(dev->pdev); + } else { + vmw_kms_close(dev_priv); + vmw_overlay_close(dev_priv); + pci_release_region(dev->pdev, 2); + } + if (dev_priv->capabilities & SVGA_CAP_IRQMASK) + drm_irq_uninstall(dev_priv->dev); + if (dev->devname == vmw_devname) + dev->devname = NULL; + ttm_object_device_release(&dev_priv->tdev); + iounmap(dev_priv->mmio_virt); + drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, + dev_priv->mmio_size, DRM_MTRR_WC); + (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); + (void)ttm_bo_device_release(&dev_priv->bdev); + vmw_ttm_global_release(dev_priv); + ida_destroy(&dev_priv->gmr_ida); + idr_destroy(&dev_priv->surface_idr); + idr_destroy(&dev_priv->context_idr); + idr_destroy(&dev_priv->stream_idr); + + kfree(dev_priv); + + return 0; +} + +static void vmw_postclose(struct drm_device *dev, + struct drm_file *file_priv) +{ + struct vmw_fpriv *vmw_fp; + + vmw_fp = vmw_fpriv(file_priv); + ttm_object_file_release(&vmw_fp->tfile); + if (vmw_fp->locked_master) + drm_master_put(&vmw_fp->locked_master); + kfree(vmw_fp); +} + +static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + struct vmw_fpriv *vmw_fp; + int ret = -ENOMEM; + + vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL); + if (unlikely(vmw_fp == NULL)) + return ret; + + vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10); + if (unlikely(vmw_fp->tfile == NULL)) + goto out_no_tfile; + + file_priv->driver_priv = vmw_fp; + + if (unlikely(dev_priv->bdev.dev_mapping == NULL)) + dev_priv->bdev.dev_mapping = + file_priv->filp->f_path.dentry->d_inode->i_mapping; + + return 0; + +out_no_tfile: + kfree(vmw_fp); + return ret; +} + +static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct drm_file *file_priv = filp->private_data; + struct drm_device *dev = file_priv->minor->dev; + unsigned int nr = DRM_IOCTL_NR(cmd); + long ret; + + /* + * The driver private ioctls and TTM ioctls should be + * thread-safe. 
+ */ + + if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) + && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) { + struct drm_ioctl_desc *ioctl = + &vmw_ioctls[nr - DRM_COMMAND_BASE]; + + if (unlikely(ioctl->cmd != cmd)) { + DRM_ERROR("Invalid command format, ioctl %d\n", + nr - DRM_COMMAND_BASE); + return -EINVAL; + } + return drm_ioctl(filp->f_path.dentry->d_inode, + filp, cmd, arg); + } + + /* + * Not all old drm ioctls are thread-safe. + */ + + lock_kernel(); + ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg); + unlock_kernel(); + return ret; +} + +static int vmw_firstopen(struct drm_device *dev) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + dev_priv->is_opened = true; + + return 0; +} + +static void vmw_lastclose(struct drm_device *dev) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + struct drm_crtc *crtc; + struct drm_mode_set set; + int ret; + + /** + * Do nothing on the lastclose call from drm_unload. + */ + + if (!dev_priv->is_opened) + return; + + dev_priv->is_opened = false; + set.x = 0; + set.y = 0; + set.fb = NULL; + set.mode = NULL; + set.connectors = NULL; + set.num_connectors = 0; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + set.crtc = crtc; + ret = crtc->funcs->set_config(&set); + WARN_ON(ret != 0); + } + +} + +static void vmw_master_init(struct vmw_master *vmaster) +{ + ttm_lock_init(&vmaster->lock); +} + +static int vmw_master_create(struct drm_device *dev, + struct drm_master *master) +{ + struct vmw_master *vmaster; + + DRM_INFO("Master create.\n"); + vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL); + if (unlikely(vmaster == NULL)) + return -ENOMEM; + + ttm_lock_init(&vmaster->lock); + ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); + master->driver_priv = vmaster; + + return 0; +} + +static void vmw_master_destroy(struct drm_device *dev, + struct drm_master *master) +{ + struct vmw_master *vmaster = vmw_master(master); + + DRM_INFO("Master destroy.\n"); + master->driver_priv = NULL; + kfree(vmaster); +} + + +static int vmw_master_set(struct drm_device *dev, + struct drm_file *file_priv, + bool from_open) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); + struct vmw_master *active = dev_priv->active_master; + struct vmw_master *vmaster = vmw_master(file_priv->master); + int ret = 0; + + DRM_INFO("Master set.\n"); + if (dev_priv->stealth) { + ret = vmw_request_device(dev_priv); + if (unlikely(ret != 0)) + return ret; + } + + if (active) { + BUG_ON(active != &dev_priv->fbdev_master); + ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); + if (unlikely(ret != 0)) + goto out_no_active_lock; + + ttm_lock_set_kill(&active->lock, true, SIGTERM); + ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); + if (unlikely(ret != 0)) { + DRM_ERROR("Unable to clean VRAM on " + "master drop.\n"); + } + + dev_priv->active_master = NULL; + } + + ttm_lock_set_kill(&vmaster->lock, false, SIGTERM); + if (!from_open) { + ttm_vt_unlock(&vmaster->lock); + BUG_ON(vmw_fp->locked_master != file_priv->master); + drm_master_put(&vmw_fp->locked_master); + } + + dev_priv->active_master = vmaster; + + return 0; + +out_no_active_lock: + vmw_release_device(dev_priv); + return ret; +} + +static void vmw_master_drop(struct drm_device *dev, + struct drm_file *file_priv, + bool from_release) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); + struct vmw_master *vmaster = vmw_master(file_priv->master); + int ret; + + DRM_INFO("Master 
drop.\n"); + + /** + * Make sure the master doesn't disappear while we have + * it locked. + */ + + vmw_fp->locked_master = drm_master_get(file_priv->master); + ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); + + if (unlikely((ret != 0))) { + DRM_ERROR("Unable to lock TTM at VT switch.\n"); + drm_master_put(&vmw_fp->locked_master); + } + + ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); + + if (dev_priv->stealth) { + ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); + if (unlikely(ret != 0)) + DRM_ERROR("Unable to clean VRAM on master drop.\n"); + vmw_release_device(dev_priv); + } + dev_priv->active_master = &dev_priv->fbdev_master; + ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); + ttm_vt_unlock(&dev_priv->fbdev_master.lock); + + if (!dev_priv->stealth) + vmw_fb_on(dev_priv); +} + + +static void vmw_remove(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + + drm_put_dev(dev); +} + +static struct drm_driver driver = { + .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | + DRIVER_MODESET, + .load = vmw_driver_load, + .unload = vmw_driver_unload, + .firstopen = vmw_firstopen, + .lastclose = vmw_lastclose, + .irq_preinstall = vmw_irq_preinstall, + .irq_postinstall = vmw_irq_postinstall, + .irq_uninstall = vmw_irq_uninstall, + .irq_handler = vmw_irq_handler, + .reclaim_buffers_locked = NULL, + .get_map_ofs = drm_core_get_map_ofs, + .get_reg_ofs = drm_core_get_reg_ofs, + .ioctls = vmw_ioctls, + .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls), + .dma_quiescent = NULL, /*vmw_dma_quiescent, */ + .master_create = vmw_master_create, + .master_destroy = vmw_master_destroy, + .master_set = vmw_master_set, + .master_drop = vmw_master_drop, + .open = vmw_driver_open, + .postclose = vmw_postclose, + .fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = vmw_unlocked_ioctl, + .mmap = vmw_mmap, + .poll = drm_poll, + .fasync = drm_fasync, +#if defined(CONFIG_COMPAT) + .compat_ioctl = drm_compat_ioctl, +#endif + }, + .pci_driver = { + .name = VMWGFX_DRIVER_NAME, + .id_table = vmw_pci_id_list, + .probe = vmw_probe, + .remove = vmw_remove + }, + .name = VMWGFX_DRIVER_NAME, + .desc = VMWGFX_DRIVER_DESC, + .date = VMWGFX_DRIVER_DATE, + .major = VMWGFX_DRIVER_MAJOR, + .minor = VMWGFX_DRIVER_MINOR, + .patchlevel = VMWGFX_DRIVER_PATCHLEVEL +}; + +static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + return drm_get_dev(pdev, ent, &driver); +} + +static int __init vmwgfx_init(void) +{ + int ret; + ret = drm_init(&driver); + if (ret) + DRM_ERROR("Failed initializing DRM.\n"); + return ret; +} + +static void __exit vmwgfx_exit(void) +{ + drm_exit(&driver); +} + +module_init(vmwgfx_init); +module_exit(vmwgfx_exit); + +MODULE_AUTHOR("VMware Inc. and others"); +MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device"); +MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h new file mode 100644 index 000000000000..43546d09d1b0 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -0,0 +1,511 @@ +/************************************************************************** + * + * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +#ifndef _VMWGFX_DRV_H_ +#define _VMWGFX_DRV_H_ + +#include "vmwgfx_reg.h" +#include "drmP.h" +#include "vmwgfx_drm.h" +#include "drm_hashtab.h" +#include "ttm/ttm_bo_driver.h" +#include "ttm/ttm_object.h" +#include "ttm/ttm_lock.h" +#include "ttm/ttm_execbuf_util.h" +#include "ttm/ttm_module.h" + +#define VMWGFX_DRIVER_DATE "20090724" +#define VMWGFX_DRIVER_MAJOR 0 +#define VMWGFX_DRIVER_MINOR 1 +#define VMWGFX_DRIVER_PATCHLEVEL 2 +#define VMWGFX_FILE_PAGE_OFFSET 0x00100000 +#define VMWGFX_FIFO_STATIC_SIZE (1024*1024) +#define VMWGFX_MAX_RELOCATIONS 2048 +#define VMWGFX_MAX_GMRS 2048 + +struct vmw_fpriv { + struct drm_master *locked_master; + struct ttm_object_file *tfile; +}; + +struct vmw_dma_buffer { + struct ttm_buffer_object base; + struct list_head validate_list; + struct list_head gmr_lru; + uint32_t gmr_id; + bool gmr_bound; + uint32_t cur_validate_node; + bool on_validate_list; +}; + +struct vmw_resource { + struct kref kref; + struct vmw_private *dev_priv; + struct idr *idr; + int id; + enum ttm_object_type res_type; + bool avail; + void (*hw_destroy) (struct vmw_resource *res); + void (*res_free) (struct vmw_resource *res); + + /* TODO is a generic snooper needed? 
*/ +#if 0 + void (*snoop)(struct vmw_resource *res, + struct ttm_object_file *tfile, + SVGA3dCmdHeader *header); + void *snoop_priv; +#endif +}; + +struct vmw_cursor_snooper { + struct drm_crtc *crtc; + size_t age; + uint32_t *image; +}; + +struct vmw_surface { + struct vmw_resource res; + uint32_t flags; + uint32_t format; + uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES]; + struct drm_vmw_size *sizes; + uint32_t num_sizes; + + /* TODO so far just a extra pointer */ + struct vmw_cursor_snooper snooper; +}; + +struct vmw_fifo_state { + unsigned long reserved_size; + __le32 *dynamic_buffer; + __le32 *static_buffer; + __le32 *last_buffer; + uint32_t last_data_size; + uint32_t last_buffer_size; + bool last_buffer_add; + unsigned long static_buffer_size; + bool using_bounce_buffer; + uint32_t capabilities; + struct rw_semaphore rwsem; +}; + +struct vmw_relocation { + SVGAGuestPtr *location; + uint32_t index; +}; + +struct vmw_sw_context{ + struct ida bo_list; + uint32_t last_cid; + bool cid_valid; + uint32_t last_sid; + bool sid_valid; + struct ttm_object_file *tfile; + struct list_head validate_nodes; + struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS]; + uint32_t cur_reloc; + struct ttm_validate_buffer val_bufs[VMWGFX_MAX_GMRS]; + uint32_t cur_val_buf; +}; + +struct vmw_legacy_display; +struct vmw_overlay; + +struct vmw_master { + struct ttm_lock lock; +}; + +struct vmw_private { + struct ttm_bo_device bdev; + struct ttm_bo_global_ref bo_global_ref; + struct ttm_global_reference mem_global_ref; + + struct vmw_fifo_state fifo; + + struct drm_device *dev; + unsigned long vmw_chipset; + unsigned int io_start; + uint32_t vram_start; + uint32_t vram_size; + uint32_t mmio_start; + uint32_t mmio_size; + uint32_t fb_max_width; + uint32_t fb_max_height; + __le32 __iomem *mmio_virt; + int mmio_mtrr; + uint32_t capabilities; + uint32_t max_gmr_descriptors; + uint32_t max_gmr_ids; + struct mutex hw_mutex; + + /* + * VGA registers. + */ + + uint32_t vga_width; + uint32_t vga_height; + uint32_t vga_depth; + uint32_t vga_bpp; + uint32_t vga_pseudo; + uint32_t vga_red_mask; + uint32_t vga_blue_mask; + uint32_t vga_green_mask; + + /* + * Framebuffer info. + */ + + void *fb_info; + struct vmw_legacy_display *ldu_priv; + struct vmw_overlay *overlay_priv; + + /* + * Context and surface management. + */ + + rwlock_t resource_lock; + struct idr context_idr; + struct idr surface_idr; + struct idr stream_idr; + + /* + * Block lastclose from racing with firstopen. + */ + + struct mutex init_mutex; + + /* + * A resource manager for kernel-only surfaces and + * contexts. + */ + + struct ttm_object_device *tdev; + + /* + * Fencing and IRQs. + */ + + uint32_t fence_seq; + wait_queue_head_t fence_queue; + wait_queue_head_t fifo_queue; + atomic_t fence_queue_waiters; + atomic_t fifo_queue_waiters; + uint32_t last_read_sequence; + spinlock_t irq_lock; + + /* + * Device state + */ + + uint32_t traces_state; + uint32_t enable_state; + uint32_t config_done_state; + + /** + * Execbuf + */ + /** + * Protected by the cmdbuf mutex. + */ + + struct vmw_sw_context ctx; + uint32_t val_seq; + struct mutex cmdbuf_mutex; + + /** + * GMR management. Protected by the lru spinlock. + */ + + struct ida gmr_ida; + struct list_head gmr_lru; + + + /** + * Operating mode. + */ + + bool stealth; + bool is_opened; + + /** + * Master management. 
+ */ + + struct vmw_master *active_master; + struct vmw_master fbdev_master; +}; + +static inline struct vmw_private *vmw_priv(struct drm_device *dev) +{ + return (struct vmw_private *)dev->dev_private; +} + +static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv) +{ + return (struct vmw_fpriv *)file_priv->driver_priv; +} + +static inline struct vmw_master *vmw_master(struct drm_master *master) +{ + return (struct vmw_master *) master->driver_priv; +} + +static inline void vmw_write(struct vmw_private *dev_priv, + unsigned int offset, uint32_t value) +{ + outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); + outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT); +} + +static inline uint32_t vmw_read(struct vmw_private *dev_priv, + unsigned int offset) +{ + uint32_t val; + + outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); + val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT); + return val; +} + +/** + * GMR utilities - vmwgfx_gmr.c + */ + +extern int vmw_gmr_bind(struct vmw_private *dev_priv, + struct ttm_buffer_object *bo); +extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id); + +/** + * Resource utilities - vmwgfx_resource.c + */ + +extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv); +extern void vmw_resource_unreference(struct vmw_resource **p_res); +extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res); +extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int vmw_context_define_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int vmw_context_check(struct vmw_private *dev_priv, + struct ttm_object_file *tfile, + int id); +extern void vmw_surface_res_free(struct vmw_resource *res); +extern int vmw_surface_init(struct vmw_private *dev_priv, + struct vmw_surface *srf, + void (*res_free) (struct vmw_resource *res)); +extern int vmw_user_surface_lookup(struct vmw_private *dev_priv, + struct ttm_object_file *tfile, + int sid, struct vmw_surface **out); +extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int vmw_surface_check(struct vmw_private *dev_priv, + struct ttm_object_file *tfile, + int id); +extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo); +extern int vmw_dmabuf_init(struct vmw_private *dev_priv, + struct vmw_dma_buffer *vmw_bo, + size_t size, struct ttm_placement *placement, + bool interuptable, + void (*bo_free) (struct ttm_buffer_object *bo)); +extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo, + uint32_t cur_validate_node); +extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo); +extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, + uint32_t id, struct vmw_dma_buffer **out); +extern uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo); +extern void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id); +extern int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id); +extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, + struct vmw_dma_buffer *bo); 
+extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv, + struct vmw_dma_buffer *bo); +extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int vmw_user_stream_lookup(struct vmw_private *dev_priv, + struct ttm_object_file *tfile, + uint32_t *inout_id, + struct vmw_resource **out); + + +/** + * Misc Ioctl functionality - vmwgfx_ioctl.c + */ + +extern int vmw_getparam_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +/** + * Fifo utilities - vmwgfx_fifo.c + */ + +extern int vmw_fifo_init(struct vmw_private *dev_priv, + struct vmw_fifo_state *fifo); +extern void vmw_fifo_release(struct vmw_private *dev_priv, + struct vmw_fifo_state *fifo); +extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes); +extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes); +extern int vmw_fifo_send_fence(struct vmw_private *dev_priv, + uint32_t *sequence); +extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason); +extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma); + +/** + * TTM glue - vmwgfx_ttm_glue.c + */ + +extern int vmw_ttm_global_init(struct vmw_private *dev_priv); +extern void vmw_ttm_global_release(struct vmw_private *dev_priv); +extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma); + +/** + * TTM buffer object driver - vmwgfx_buffer.c + */ + +extern struct ttm_placement vmw_vram_placement; +extern struct ttm_placement vmw_vram_ne_placement; +extern struct ttm_placement vmw_sys_placement; +extern struct ttm_bo_driver vmw_bo_driver; +extern int vmw_dma_quiescent(struct drm_device *dev); + +/** + * Command submission - vmwgfx_execbuf.c + */ + +extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +/** + * IRQs and wating - vmwgfx_irq.c + */ + +extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS); +extern int vmw_wait_fence(struct vmw_private *dev_priv, bool lazy, + uint32_t sequence, bool interruptible, + unsigned long timeout); +extern void vmw_irq_preinstall(struct drm_device *dev); +extern int vmw_irq_postinstall(struct drm_device *dev); +extern void vmw_irq_uninstall(struct drm_device *dev); +extern bool vmw_fence_signaled(struct vmw_private *dev_priv, + uint32_t sequence); +extern int vmw_fence_wait_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int vmw_fallback_wait(struct vmw_private *dev_priv, + bool lazy, + bool fifo_idle, + uint32_t sequence, + bool interruptible, + unsigned long timeout); + +/** + * Kernel framebuffer - vmwgfx_fb.c + */ + +int vmw_fb_init(struct vmw_private *vmw_priv); +int vmw_fb_close(struct vmw_private *dev_priv); +int vmw_fb_off(struct vmw_private *vmw_priv); +int vmw_fb_on(struct vmw_private *vmw_priv); + +/** + * Kernel modesetting - vmwgfx_kms.c + */ + +int vmw_kms_init(struct vmw_private *dev_priv); +int vmw_kms_close(struct vmw_private *dev_priv); +int vmw_kms_save_vga(struct vmw_private *vmw_priv); +int vmw_kms_restore_vga(struct vmw_private *vmw_priv); +int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv); +void vmw_kms_cursor_snoop(struct vmw_surface *srf, + struct ttm_object_file *tfile, + struct 
ttm_buffer_object *bo, + SVGA3dCmdHeader *header); + +/** + * Overlay control - vmwgfx_overlay.c + */ + +int vmw_overlay_init(struct vmw_private *dev_priv); +int vmw_overlay_close(struct vmw_private *dev_priv); +int vmw_overlay_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int vmw_overlay_stop_all(struct vmw_private *dev_priv); +int vmw_overlay_resume_all(struct vmw_private *dev_priv); +int vmw_overlay_pause_all(struct vmw_private *dev_priv); +int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out); +int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id); +int vmw_overlay_num_overlays(struct vmw_private *dev_priv); +int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv); + +/** + * Inline helper functions + */ + +static inline void vmw_surface_unreference(struct vmw_surface **srf) +{ + struct vmw_surface *tmp_srf = *srf; + struct vmw_resource *res = &tmp_srf->res; + *srf = NULL; + + vmw_resource_unreference(&res); +} + +static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf) +{ + (void) vmw_resource_reference(&srf->res); + return srf; +} + +static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf) +{ + struct vmw_dma_buffer *tmp_buf = *buf; + struct ttm_buffer_object *bo = &tmp_buf->base; + *buf = NULL; + + ttm_bo_unref(&bo); +} + +static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf) +{ + if (ttm_bo_reference(&buf->base)) + return buf; + return NULL; +} + +#endif diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c new file mode 100644 index 000000000000..7a39f3e6dc2c --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -0,0 +1,516 @@ +/************************************************************************** + * + * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +#include "vmwgfx_drv.h" +#include "vmwgfx_reg.h" +#include "ttm/ttm_bo_api.h" +#include "ttm/ttm_placement.h" + +static int vmw_cmd_invalid(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + return capable(CAP_SYS_ADMIN) ? 
: -EINVAL; +} + +static int vmw_cmd_ok(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + return 0; +} + +static int vmw_cmd_cid_check(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_cid_cmd { + SVGA3dCmdHeader header; + __le32 cid; + } *cmd; + int ret; + + cmd = container_of(header, struct vmw_cid_cmd, header); + if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid)) + return 0; + + ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid); + if (unlikely(ret != 0)) { + DRM_ERROR("Could not find or use context %u\n", + (unsigned) cmd->cid); + return ret; + } + + sw_context->last_cid = cmd->cid; + sw_context->cid_valid = true; + + return 0; +} + +static int vmw_cmd_sid_check(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + uint32_t sid) +{ + if (unlikely((!sw_context->sid_valid || sid != sw_context->last_sid) && + sid != SVGA3D_INVALID_ID)) { + int ret = vmw_surface_check(dev_priv, sw_context->tfile, sid); + + if (unlikely(ret != 0)) { + DRM_ERROR("Could ot find or use surface %u\n", + (unsigned) sid); + return ret; + } + + sw_context->last_sid = sid; + sw_context->sid_valid = true; + } + return 0; +} + + +static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_sid_cmd { + SVGA3dCmdHeader header; + SVGA3dCmdSetRenderTarget body; + } *cmd; + int ret; + + ret = vmw_cmd_cid_check(dev_priv, sw_context, header); + if (unlikely(ret != 0)) + return ret; + + cmd = container_of(header, struct vmw_sid_cmd, header); + return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.target.sid); +} + +static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_sid_cmd { + SVGA3dCmdHeader header; + SVGA3dCmdSurfaceCopy body; + } *cmd; + int ret; + + cmd = container_of(header, struct vmw_sid_cmd, header); + ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.src.sid); + if (unlikely(ret != 0)) + return ret; + return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.dest.sid); +} + +static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_sid_cmd { + SVGA3dCmdHeader header; + SVGA3dCmdSurfaceStretchBlt body; + } *cmd; + int ret; + + cmd = container_of(header, struct vmw_sid_cmd, header); + ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.src.sid); + if (unlikely(ret != 0)) + return ret; + return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.dest.sid); +} + +static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_sid_cmd { + SVGA3dCmdHeader header; + SVGA3dCmdBlitSurfaceToScreen body; + } *cmd; + + cmd = container_of(header, struct vmw_sid_cmd, header); + return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.srcImage.sid); +} + +static int vmw_cmd_present_check(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_sid_cmd { + SVGA3dCmdHeader header; + SVGA3dCmdPresent body; + } *cmd; + + cmd = container_of(header, struct vmw_sid_cmd, header); + return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.sid); +} + +static int vmw_cmd_dma(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ 
+ uint32_t handle; + struct vmw_dma_buffer *vmw_bo = NULL; + struct ttm_buffer_object *bo; + struct vmw_surface *srf = NULL; + struct vmw_dma_cmd { + SVGA3dCmdHeader header; + SVGA3dCmdSurfaceDMA dma; + } *cmd; + struct vmw_relocation *reloc; + int ret; + uint32_t cur_validate_node; + struct ttm_validate_buffer *val_buf; + + + cmd = container_of(header, struct vmw_dma_cmd, header); + ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->dma.host.sid); + if (unlikely(ret != 0)) + return ret; + + handle = cmd->dma.guest.ptr.gmrId; + ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); + if (unlikely(ret != 0)) { + DRM_ERROR("Could not find or use GMR region.\n"); + return -EINVAL; + } + bo = &vmw_bo->base; + + if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { + DRM_ERROR("Max number of DMA commands per submission" + " exceeded\n"); + ret = -EINVAL; + goto out_no_reloc; + } + + reloc = &sw_context->relocs[sw_context->cur_reloc++]; + reloc->location = &cmd->dma.guest.ptr; + + cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf); + if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) { + DRM_ERROR("Max number of DMA buffers per submission" + " exceeded.\n"); + ret = -EINVAL; + goto out_no_reloc; + } + + reloc->index = cur_validate_node; + if (unlikely(cur_validate_node == sw_context->cur_val_buf)) { + val_buf = &sw_context->val_bufs[cur_validate_node]; + val_buf->bo = ttm_bo_reference(bo); + val_buf->new_sync_obj_arg = (void *) dev_priv; + list_add_tail(&val_buf->head, &sw_context->validate_nodes); + ++sw_context->cur_val_buf; + } + + ret = vmw_user_surface_lookup(dev_priv, sw_context->tfile, + cmd->dma.host.sid, &srf); + if (ret) { + DRM_ERROR("could not find surface\n"); + goto out_no_reloc; + } + + vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header); + vmw_surface_unreference(&srf); + +out_no_reloc: + vmw_dmabuf_unreference(&vmw_bo); + return ret; +} + + +typedef int (*vmw_cmd_func) (struct vmw_private *, + struct vmw_sw_context *, + SVGA3dCmdHeader *); + +#define VMW_CMD_DEF(cmd, func) \ + [cmd - SVGA_3D_CMD_BASE] = func + +static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = { + VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid), + VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid), + VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check), + VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check), + VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma), + VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid), + VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid), + VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check), + VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check), + VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check), + VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET, + &vmw_cmd_set_render_target_check), + VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_cid_check), + VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check), + VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check), + VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check), + VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check), + VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check), + VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check), + VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check), + VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check), + VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check), + VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check), + 
VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check), + VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_cid_check), + VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check), + VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check), + VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check), + VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_cid_check), + VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok), + VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, + &vmw_cmd_blt_surf_screen_check) +}; + +static int vmw_cmd_check(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + void *buf, uint32_t *size) +{ + uint32_t cmd_id; + SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf; + int ret; + + cmd_id = ((uint32_t *)buf)[0]; + if (cmd_id == SVGA_CMD_UPDATE) { + *size = 5 << 2; + return 0; + } + + cmd_id = le32_to_cpu(header->id); + *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader); + + cmd_id -= SVGA_3D_CMD_BASE; + if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE)) + goto out_err; + + ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header); + if (unlikely(ret != 0)) + goto out_err; + + return 0; +out_err: + DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n", + cmd_id + SVGA_3D_CMD_BASE); + return -EINVAL; +} + +static int vmw_cmd_check_all(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + void *buf, uint32_t size) +{ + int32_t cur_size = size; + int ret; + + while (cur_size > 0) { + ret = vmw_cmd_check(dev_priv, sw_context, buf, &size); + if (unlikely(ret != 0)) + return ret; + buf = (void *)((unsigned long) buf + size); + cur_size -= size; + } + + if (unlikely(cur_size != 0)) { + DRM_ERROR("Command verifier out of sync.\n"); + return -EINVAL; + } + + return 0; +} + +static void vmw_free_relocations(struct vmw_sw_context *sw_context) +{ + sw_context->cur_reloc = 0; +} + +static void vmw_apply_relocations(struct vmw_sw_context *sw_context) +{ + uint32_t i; + struct vmw_relocation *reloc; + struct ttm_validate_buffer *validate; + struct ttm_buffer_object *bo; + + for (i = 0; i < sw_context->cur_reloc; ++i) { + reloc = &sw_context->relocs[i]; + validate = &sw_context->val_bufs[reloc->index]; + bo = validate->bo; + reloc->location->offset += bo->offset; + reloc->location->gmrId = vmw_dmabuf_gmr(bo); + } + vmw_free_relocations(sw_context); +} + +static void vmw_clear_validations(struct vmw_sw_context *sw_context) +{ + struct ttm_validate_buffer *entry, *next; + + list_for_each_entry_safe(entry, next, &sw_context->validate_nodes, + head) { + list_del(&entry->head); + vmw_dmabuf_validate_clear(entry->bo); + ttm_bo_unref(&entry->bo); + sw_context->cur_val_buf--; + } + BUG_ON(sw_context->cur_val_buf != 0); +} + +static int vmw_validate_single_buffer(struct vmw_private *dev_priv, + struct ttm_buffer_object *bo) +{ + int ret; + + if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL) + return 0; + + ret = vmw_gmr_bind(dev_priv, bo); + if (likely(ret == 0 || ret == -ERESTART)) + return ret; + + + ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false); + return ret; +} + + +static int vmw_validate_buffers(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context) +{ + struct ttm_validate_buffer *entry; + int ret; + + list_for_each_entry(entry, &sw_context->validate_nodes, head) { + ret = vmw_validate_single_buffer(dev_priv, entry->bo); + if (unlikely(ret != 0)) + return ret; + } + return 0; +} + +int vmw_execbuf_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + 
struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data; + struct drm_vmw_fence_rep fence_rep; + struct drm_vmw_fence_rep __user *user_fence_rep; + int ret; + void *user_cmd; + void *cmd; + uint32_t sequence; + struct vmw_sw_context *sw_context = &dev_priv->ctx; + struct vmw_master *vmaster = vmw_master(file_priv->master); + + ret = ttm_read_lock(&vmaster->lock, true); + if (unlikely(ret != 0)) + return ret; + + ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex); + if (unlikely(ret != 0)) { + ret = -ERESTART; + goto out_no_cmd_mutex; + } + + cmd = vmw_fifo_reserve(dev_priv, arg->command_size); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving fifo space for commands.\n"); + ret = -ENOMEM; + goto out_unlock; + } + + user_cmd = (void __user *)(unsigned long)arg->commands; + ret = copy_from_user(cmd, user_cmd, arg->command_size); + + if (unlikely(ret != 0)) { + DRM_ERROR("Failed copying commands.\n"); + goto out_commit; + } + + sw_context->tfile = vmw_fpriv(file_priv)->tfile; + sw_context->cid_valid = false; + sw_context->sid_valid = false; + sw_context->cur_reloc = 0; + sw_context->cur_val_buf = 0; + + INIT_LIST_HEAD(&sw_context->validate_nodes); + + ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size); + if (unlikely(ret != 0)) + goto out_err; + ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes, + dev_priv->val_seq++); + if (unlikely(ret != 0)) + goto out_err; + + ret = vmw_validate_buffers(dev_priv, sw_context); + if (unlikely(ret != 0)) + goto out_err; + + vmw_apply_relocations(sw_context); + vmw_fifo_commit(dev_priv, arg->command_size); + + ret = vmw_fifo_send_fence(dev_priv, &sequence); + + ttm_eu_fence_buffer_objects(&sw_context->validate_nodes, + (void *)(unsigned long) sequence); + vmw_clear_validations(sw_context); + mutex_unlock(&dev_priv->cmdbuf_mutex); + + /* + * This error is harmless, because if fence submission fails, + * vmw_fifo_send_fence will sync. + */ + + if (ret != 0) + DRM_ERROR("Fence submission error. Syncing.\n"); + + fence_rep.error = ret; + fence_rep.fence_seq = (uint64_t) sequence; + + user_fence_rep = (struct drm_vmw_fence_rep __user *) + (unsigned long)arg->fence_rep; + + /* + * copy_to_user errors will be detected by user space not + * seeing fence_rep::error filled in. + */ + + ret = copy_to_user(user_fence_rep, &fence_rep, sizeof(fence_rep)); + + vmw_kms_cursor_post_execbuf(dev_priv); + ttm_read_unlock(&vmaster->lock); + return 0; +out_err: + vmw_free_relocations(sw_context); + ttm_eu_backoff_reservation(&sw_context->validate_nodes); + vmw_clear_validations(sw_context); +out_commit: + vmw_fifo_commit(dev_priv, 0); +out_unlock: + mutex_unlock(&dev_priv->cmdbuf_mutex); +out_no_cmd_mutex: + ttm_read_unlock(&vmaster->lock); + return ret; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c new file mode 100644 index 000000000000..641dde76ada1 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -0,0 +1,742 @@ +/************************************************************************** + * + * Copyright © 2007 David Airlie + * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +#include "drmP.h" +#include "vmwgfx_drv.h" + +#include "ttm/ttm_placement.h" + +#define VMW_DIRTY_DELAY (HZ / 30) + +struct vmw_fb_par { + struct vmw_private *vmw_priv; + + void *vmalloc; + + struct vmw_dma_buffer *vmw_bo; + struct ttm_bo_kmap_obj map; + + u32 pseudo_palette[17]; + + unsigned depth; + unsigned bpp; + + unsigned max_width; + unsigned max_height; + + void *bo_ptr; + unsigned bo_size; + bool bo_iowrite; + + struct { + spinlock_t lock; + bool active; + unsigned x1; + unsigned y1; + unsigned x2; + unsigned y2; + } dirty; +}; + +static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green, + unsigned blue, unsigned transp, + struct fb_info *info) +{ + struct vmw_fb_par *par = info->par; + u32 *pal = par->pseudo_palette; + + if (regno > 15) { + DRM_ERROR("Bad regno %u.\n", regno); + return 1; + } + + switch (par->depth) { + case 24: + case 32: + pal[regno] = ((red & 0xff00) << 8) | + (green & 0xff00) | + ((blue & 0xff00) >> 8); + break; + default: + DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp); + return 1; + } + + return 0; +} + +static int vmw_fb_check_var(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + int depth = var->bits_per_pixel; + struct vmw_fb_par *par = info->par; + struct vmw_private *vmw_priv = par->vmw_priv; + + switch (var->bits_per_pixel) { + case 32: + depth = (var->transp.length > 0) ? 
32 : 24; + break; + default: + DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel); + return -EINVAL; + } + + switch (depth) { + case 24: + var->red.offset = 16; + var->green.offset = 8; + var->blue.offset = 0; + var->red.length = 8; + var->green.length = 8; + var->blue.length = 8; + var->transp.length = 0; + var->transp.offset = 0; + break; + case 32: + var->red.offset = 16; + var->green.offset = 8; + var->blue.offset = 0; + var->red.length = 8; + var->green.length = 8; + var->blue.length = 8; + var->transp.length = 8; + var->transp.offset = 24; + break; + default: + DRM_ERROR("Bad depth %u.\n", depth); + return -EINVAL; + } + + /* without multimon its hard to resize */ + if (!(vmw_priv->capabilities & SVGA_CAP_MULTIMON) && + (var->xres != par->max_width || + var->yres != par->max_height)) { + DRM_ERROR("Tried to resize, but we don't have multimon\n"); + return -EINVAL; + } + + if (var->xres > par->max_width || + var->yres > par->max_height) { + DRM_ERROR("Requested geom can not fit in framebuffer\n"); + return -EINVAL; + } + + return 0; +} + +static int vmw_fb_set_par(struct fb_info *info) +{ + struct vmw_fb_par *par = info->par; + struct vmw_private *vmw_priv = par->vmw_priv; + + if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) { + vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); + vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0); + vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); + vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0); + vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0); + vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0); + vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0); + vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); + + vmw_write(vmw_priv, SVGA_REG_ENABLE, 1); + vmw_write(vmw_priv, SVGA_REG_WIDTH, par->max_width); + vmw_write(vmw_priv, SVGA_REG_HEIGHT, par->max_height); + vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, par->bpp); + vmw_write(vmw_priv, SVGA_REG_DEPTH, par->depth); + vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000); + vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00); + vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff); + + /* TODO check if pitch and offset changes */ + + vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); + vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0); + vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); + vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset); + vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset); + vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres); + vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres); + vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); + } else { + vmw_write(vmw_priv, SVGA_REG_WIDTH, info->var.xres); + vmw_write(vmw_priv, SVGA_REG_HEIGHT, info->var.yres); + + /* TODO check if pitch and offset changes */ + } + + return 0; +} + +static int vmw_fb_pan_display(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + return 0; +} + +static int vmw_fb_blank(int blank, struct fb_info *info) +{ + return 0; +} + +/* + * Dirty code + */ + +static void vmw_fb_dirty_flush(struct vmw_fb_par *par) +{ + struct vmw_private *vmw_priv = par->vmw_priv; + struct fb_info *info = vmw_priv->fb_info; + int stride = (info->fix.line_length / 4); + int *src = (int *)info->screen_base; + __le32 __iomem *vram_mem = par->bo_ptr; + unsigned long flags; + unsigned x, y, w, h; + int i, k; + struct { + uint32_t header; + SVGAFifoCmdUpdate body; + } *cmd; + + spin_lock_irqsave(&par->dirty.lock, flags); + if (!par->dirty.active) { + 
spin_unlock_irqrestore(&par->dirty.lock, flags); + return; + } + x = par->dirty.x1; + y = par->dirty.y1; + w = min(par->dirty.x2, info->var.xres) - x; + h = min(par->dirty.y2, info->var.yres) - y; + par->dirty.x1 = par->dirty.x2 = 0; + par->dirty.y1 = par->dirty.y2 = 0; + spin_unlock_irqrestore(&par->dirty.lock, flags); + + for (i = y * stride; i < info->fix.smem_len / 4; i += stride) { + for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++) + iowrite32(src[k], vram_mem + k); + } + +#if 0 + DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h); +#endif + + cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd)); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Fifo reserve failed.\n"); + return; + } + + cmd->header = cpu_to_le32(SVGA_CMD_UPDATE); + cmd->body.x = cpu_to_le32(x); + cmd->body.y = cpu_to_le32(y); + cmd->body.width = cpu_to_le32(w); + cmd->body.height = cpu_to_le32(h); + vmw_fifo_commit(vmw_priv, sizeof(*cmd)); +} + +static void vmw_fb_dirty_mark(struct vmw_fb_par *par, + unsigned x1, unsigned y1, + unsigned width, unsigned height) +{ + struct fb_info *info = par->vmw_priv->fb_info; + unsigned long flags; + unsigned x2 = x1 + width; + unsigned y2 = y1 + height; + + spin_lock_irqsave(&par->dirty.lock, flags); + if (par->dirty.x1 == par->dirty.x2) { + par->dirty.x1 = x1; + par->dirty.y1 = y1; + par->dirty.x2 = x2; + par->dirty.y2 = y2; + /* if we are active start the dirty work + * we share the work with the defio system */ + if (par->dirty.active) + schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY); + } else { + if (x1 < par->dirty.x1) + par->dirty.x1 = x1; + if (y1 < par->dirty.y1) + par->dirty.y1 = y1; + if (x2 > par->dirty.x2) + par->dirty.x2 = x2; + if (y2 > par->dirty.y2) + par->dirty.y2 = y2; + } + spin_unlock_irqrestore(&par->dirty.lock, flags); +} + +static void vmw_deferred_io(struct fb_info *info, + struct list_head *pagelist) +{ + struct vmw_fb_par *par = info->par; + unsigned long start, end, min, max; + unsigned long flags; + struct page *page; + int y1, y2; + + min = ULONG_MAX; + max = 0; + list_for_each_entry(page, pagelist, lru) { + start = page->index << PAGE_SHIFT; + end = start + PAGE_SIZE - 1; + min = min(min, start); + max = max(max, end); + } + + if (min < max) { + y1 = min / info->fix.line_length; + y2 = (max / info->fix.line_length) + 1; + + spin_lock_irqsave(&par->dirty.lock, flags); + par->dirty.x1 = 0; + par->dirty.y1 = y1; + par->dirty.x2 = info->var.xres; + par->dirty.y2 = y2; + spin_unlock_irqrestore(&par->dirty.lock, flags); + } + + vmw_fb_dirty_flush(par); +}; + +struct fb_deferred_io vmw_defio = { + .delay = VMW_DIRTY_DELAY, + .deferred_io = vmw_deferred_io, +}; + +/* + * Draw code + */ + +static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) +{ + cfb_fillrect(info, rect); + vmw_fb_dirty_mark(info->par, rect->dx, rect->dy, + rect->width, rect->height); +} + +static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region) +{ + cfb_copyarea(info, region); + vmw_fb_dirty_mark(info->par, region->dx, region->dy, + region->width, region->height); +} + +static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image) +{ + cfb_imageblit(info, image); + vmw_fb_dirty_mark(info->par, image->dx, image->dy, + image->width, image->height); +} + +/* + * Bring up code + */ + +static struct fb_ops vmw_fb_ops = { + .owner = THIS_MODULE, + .fb_check_var = vmw_fb_check_var, + .fb_set_par = vmw_fb_set_par, + .fb_setcolreg = vmw_fb_setcolreg, + .fb_fillrect = vmw_fb_fillrect, + .fb_copyarea = 
vmw_fb_copyarea, + .fb_imageblit = vmw_fb_imageblit, + .fb_pan_display = vmw_fb_pan_display, + .fb_blank = vmw_fb_blank, +}; + +static int vmw_fb_create_bo(struct vmw_private *vmw_priv, + size_t size, struct vmw_dma_buffer **out) +{ + struct vmw_dma_buffer *vmw_bo; + struct ttm_placement ne_placement = vmw_vram_ne_placement; + int ret; + + ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; + + /* interuptable? */ + ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false); + if (unlikely(ret != 0)) + return ret; + + vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL); + if (!vmw_bo) + goto err_unlock; + + ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size, + &ne_placement, + false, + &vmw_dmabuf_bo_free); + if (unlikely(ret != 0)) + goto err_unlock; /* init frees the buffer on failure */ + + *out = vmw_bo; + + ttm_write_unlock(&vmw_priv->fbdev_master.lock); + + return 0; + +err_unlock: + ttm_write_unlock(&vmw_priv->fbdev_master.lock); + return ret; +} + +int vmw_fb_init(struct vmw_private *vmw_priv) +{ + struct device *device = &vmw_priv->dev->pdev->dev; + struct vmw_fb_par *par; + struct fb_info *info; + unsigned initial_width, initial_height; + unsigned fb_width, fb_height; + unsigned fb_bbp, fb_depth, fb_offset, fb_pitch, fb_size; + int ret; + + initial_width = 800; + initial_height = 600; + + fb_bbp = 32; + fb_depth = 24; + + if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) { + fb_width = min(vmw_priv->fb_max_width, (unsigned)2048); + fb_height = min(vmw_priv->fb_max_height, (unsigned)2048); + } else { + fb_width = min(vmw_priv->fb_max_width, initial_width); + fb_height = min(vmw_priv->fb_max_height, initial_height); + } + + initial_width = min(fb_width, initial_width); + initial_height = min(fb_height, initial_height); + + vmw_write(vmw_priv, SVGA_REG_WIDTH, fb_width); + vmw_write(vmw_priv, SVGA_REG_HEIGHT, fb_height); + vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, fb_bbp); + vmw_write(vmw_priv, SVGA_REG_DEPTH, fb_depth); + vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000); + vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00); + vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff); + + fb_size = vmw_read(vmw_priv, SVGA_REG_FB_SIZE); + fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET); + fb_pitch = vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE); + + DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_WIDTH)); + DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_HEIGHT)); + DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_WIDTH)); + DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_HEIGHT)); + DRM_DEBUG("bpp %u\n", vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL)); + DRM_DEBUG("depth %u\n", vmw_read(vmw_priv, SVGA_REG_DEPTH)); + DRM_DEBUG("bpl %u\n", vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE)); + DRM_DEBUG("r mask %08x\n", vmw_read(vmw_priv, SVGA_REG_RED_MASK)); + DRM_DEBUG("g mask %08x\n", vmw_read(vmw_priv, SVGA_REG_GREEN_MASK)); + DRM_DEBUG("b mask %08x\n", vmw_read(vmw_priv, SVGA_REG_BLUE_MASK)); + DRM_DEBUG("fb_offset 0x%08x\n", fb_offset); + DRM_DEBUG("fb_pitch %u\n", fb_pitch); + DRM_DEBUG("fb_size %u kiB\n", fb_size / 1024); + + info = framebuffer_alloc(sizeof(*par), device); + if (!info) + return -ENOMEM; + + /* + * Par + */ + vmw_priv->fb_info = info; + par = info->par; + par->vmw_priv = vmw_priv; + par->depth = fb_depth; + par->bpp = fb_bbp; + par->vmalloc = NULL; + par->max_width = fb_width; + par->max_height = fb_height; + + /* + * Create buffers and alloc memory + */ + par->vmalloc = vmalloc(fb_size); + if (unlikely(par->vmalloc == NULL)) { + ret = -ENOMEM; + goto 
err_free; + } + + ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo); + if (unlikely(ret != 0)) + goto err_free; + + ret = ttm_bo_kmap(&par->vmw_bo->base, + 0, + par->vmw_bo->base.num_pages, + &par->map); + if (unlikely(ret != 0)) + goto err_unref; + par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite); + par->bo_size = fb_size; + + /* + * Fixed and var + */ + strcpy(info->fix.id, "svgadrmfb"); + info->fix.type = FB_TYPE_PACKED_PIXELS; + info->fix.visual = FB_VISUAL_TRUECOLOR; + info->fix.type_aux = 0; + info->fix.xpanstep = 1; /* doing it in hw */ + info->fix.ypanstep = 1; /* doing it in hw */ + info->fix.ywrapstep = 0; + info->fix.accel = FB_ACCEL_NONE; + info->fix.line_length = fb_pitch; + + info->fix.smem_start = 0; + info->fix.smem_len = fb_size; + + info->fix.mmio_start = 0; + info->fix.mmio_len = 0; + + info->pseudo_palette = par->pseudo_palette; + info->screen_base = par->vmalloc; + info->screen_size = fb_size; + + info->flags = FBINFO_DEFAULT; + info->fbops = &vmw_fb_ops; + + /* 24 depth per default */ + info->var.red.offset = 16; + info->var.green.offset = 8; + info->var.blue.offset = 0; + info->var.red.length = 8; + info->var.green.length = 8; + info->var.blue.length = 8; + info->var.transp.offset = 0; + info->var.transp.length = 0; + + info->var.xres_virtual = fb_width; + info->var.yres_virtual = fb_height; + info->var.bits_per_pixel = par->bpp; + info->var.xoffset = 0; + info->var.yoffset = 0; + info->var.activate = FB_ACTIVATE_NOW; + info->var.height = -1; + info->var.width = -1; + + info->var.xres = initial_width; + info->var.yres = initial_height; + +#if 0 + info->pixmap.size = 64*1024; + info->pixmap.buf_align = 8; + info->pixmap.access_align = 32; + info->pixmap.flags = FB_PIXMAP_SYSTEM; + info->pixmap.scan_align = 1; +#else + info->pixmap.size = 0; + info->pixmap.buf_align = 8; + info->pixmap.access_align = 32; + info->pixmap.flags = FB_PIXMAP_SYSTEM; + info->pixmap.scan_align = 1; +#endif + + /* + * Dirty & Deferred IO + */ + par->dirty.x1 = par->dirty.x2 = 0; + par->dirty.y1 = par->dirty.y1 = 0; + par->dirty.active = true; + spin_lock_init(&par->dirty.lock); + info->fbdefio = &vmw_defio; + fb_deferred_io_init(info); + + ret = register_framebuffer(info); + if (unlikely(ret != 0)) + goto err_defio; + + return 0; + +err_defio: + fb_deferred_io_cleanup(info); + ttm_bo_kunmap(&par->map); +err_unref: + ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo); +err_free: + vfree(par->vmalloc); + framebuffer_release(info); + vmw_priv->fb_info = NULL; + + return ret; +} + +int vmw_fb_close(struct vmw_private *vmw_priv) +{ + struct fb_info *info; + struct vmw_fb_par *par; + struct ttm_buffer_object *bo; + + if (!vmw_priv->fb_info) + return 0; + + info = vmw_priv->fb_info; + par = info->par; + bo = &par->vmw_bo->base; + par->vmw_bo = NULL; + + /* ??? 
order */ + fb_deferred_io_cleanup(info); + unregister_framebuffer(info); + + ttm_bo_kunmap(&par->map); + ttm_bo_unref(&bo); + + vfree(par->vmalloc); + framebuffer_release(info); + + return 0; +} + +int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv, + struct vmw_dma_buffer *vmw_bo) +{ + struct ttm_buffer_object *bo = &vmw_bo->base; + int ret = 0; + + ret = ttm_bo_reserve(bo, false, false, false, 0); + if (unlikely(ret != 0)) + return ret; + + ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false); + ttm_bo_unreserve(bo); + + return ret; +} + +int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, + struct vmw_dma_buffer *vmw_bo) +{ + struct ttm_buffer_object *bo = &vmw_bo->base; + struct ttm_placement ne_placement = vmw_vram_ne_placement; + int ret = 0; + + ne_placement.lpfn = bo->num_pages; + + /* interuptable? */ + ret = ttm_write_lock(&vmw_priv->active_master->lock, false); + if (unlikely(ret != 0)) + return ret; + + ret = ttm_bo_reserve(bo, false, false, false, 0); + if (unlikely(ret != 0)) + goto err_unlock; + + if (vmw_bo->gmr_bound) { + vmw_gmr_unbind(vmw_priv, vmw_bo->gmr_id); + spin_lock(&bo->glob->lru_lock); + ida_remove(&vmw_priv->gmr_ida, vmw_bo->gmr_id); + spin_unlock(&bo->glob->lru_lock); + vmw_bo->gmr_bound = NULL; + } + + ret = ttm_bo_validate(bo, &ne_placement, false, false); + ttm_bo_unreserve(bo); +err_unlock: + ttm_write_unlock(&vmw_priv->active_master->lock); + + return ret; +} + +int vmw_fb_off(struct vmw_private *vmw_priv) +{ + struct fb_info *info; + struct vmw_fb_par *par; + unsigned long flags; + + if (!vmw_priv->fb_info) + return -EINVAL; + + info = vmw_priv->fb_info; + par = info->par; + + spin_lock_irqsave(&par->dirty.lock, flags); + par->dirty.active = false; + spin_unlock_irqrestore(&par->dirty.lock, flags); + + flush_scheduled_work(); + + par->bo_ptr = NULL; + ttm_bo_kunmap(&par->map); + + vmw_dmabuf_from_vram(vmw_priv, par->vmw_bo); + + return 0; +} + +int vmw_fb_on(struct vmw_private *vmw_priv) +{ + struct fb_info *info; + struct vmw_fb_par *par; + unsigned long flags; + bool dummy; + int ret; + + if (!vmw_priv->fb_info) + return -EINVAL; + + info = vmw_priv->fb_info; + par = info->par; + + /* we are already active */ + if (par->bo_ptr != NULL) + return 0; + + /* Make sure that all overlays are stoped when we take over */ + vmw_overlay_stop_all(vmw_priv); + + ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo); + if (unlikely(ret != 0)) { + DRM_ERROR("could not move buffer to start of VRAM\n"); + goto err_no_buffer; + } + + ret = ttm_bo_kmap(&par->vmw_bo->base, + 0, + par->vmw_bo->base.num_pages, + &par->map); + BUG_ON(ret != 0); + par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy); + + spin_lock_irqsave(&par->dirty.lock, flags); + par->dirty.active = true; + spin_unlock_irqrestore(&par->dirty.lock, flags); + +err_no_buffer: + vmw_fb_set_par(info); + + vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres); + + /* If there already was stuff dirty we wont + * schedule a new work, so lets do it now */ + schedule_delayed_work(&info->deferred_work, 0); + + return 0; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c new file mode 100644 index 000000000000..76b0693e2458 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c @@ -0,0 +1,521 @@ +/************************************************************************** + * + * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +#include "vmwgfx_drv.h" +#include "drmP.h" +#include "ttm/ttm_placement.h" + +int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) +{ + __le32 __iomem *fifo_mem = dev_priv->mmio_virt; + uint32_t max; + uint32_t min; + uint32_t dummy; + int ret; + + fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE; + fifo->static_buffer = vmalloc(fifo->static_buffer_size); + if (unlikely(fifo->static_buffer == NULL)) + return -ENOMEM; + + fifo->last_buffer_size = VMWGFX_FIFO_STATIC_SIZE; + fifo->last_data_size = 0; + fifo->last_buffer_add = false; + fifo->last_buffer = vmalloc(fifo->last_buffer_size); + if (unlikely(fifo->last_buffer == NULL)) { + ret = -ENOMEM; + goto out_err; + } + + fifo->dynamic_buffer = NULL; + fifo->reserved_size = 0; + fifo->using_bounce_buffer = false; + + init_rwsem(&fifo->rwsem); + + /* + * Allow mapping the first page read-only to user-space. 
+ */ + + DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH)); + DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT)); + DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL)); + + mutex_lock(&dev_priv->hw_mutex); + dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); + dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); + vmw_write(dev_priv, SVGA_REG_ENABLE, 1); + + min = 4; + if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO) + min = vmw_read(dev_priv, SVGA_REG_MEM_REGS); + min <<= 2; + + if (min < PAGE_SIZE) + min = PAGE_SIZE; + + iowrite32(min, fifo_mem + SVGA_FIFO_MIN); + iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX); + wmb(); + iowrite32(min, fifo_mem + SVGA_FIFO_NEXT_CMD); + iowrite32(min, fifo_mem + SVGA_FIFO_STOP); + iowrite32(0, fifo_mem + SVGA_FIFO_BUSY); + mb(); + + vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1); + mutex_unlock(&dev_priv->hw_mutex); + + max = ioread32(fifo_mem + SVGA_FIFO_MAX); + min = ioread32(fifo_mem + SVGA_FIFO_MIN); + fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES); + + DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n", + (unsigned int) max, + (unsigned int) min, + (unsigned int) fifo->capabilities); + + dev_priv->fence_seq = (uint32_t) -100; + dev_priv->last_read_sequence = (uint32_t) -100; + iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE); + + return vmw_fifo_send_fence(dev_priv, &dummy); +out_err: + vfree(fifo->static_buffer); + fifo->static_buffer = NULL; + return ret; +} + +void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) +{ + __le32 __iomem *fifo_mem = dev_priv->mmio_virt; + + mutex_lock(&dev_priv->hw_mutex); + + if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) { + iowrite32(1, fifo_mem + SVGA_FIFO_BUSY); + vmw_write(dev_priv, SVGA_REG_SYNC, reason); + } + + mutex_unlock(&dev_priv->hw_mutex); +} + +void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) +{ + __le32 __iomem *fifo_mem = dev_priv->mmio_virt; + + mutex_lock(&dev_priv->hw_mutex); + + while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0) + vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); + + dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE); + + vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, + dev_priv->config_done_state); + vmw_write(dev_priv, SVGA_REG_ENABLE, + dev_priv->enable_state); + + mutex_unlock(&dev_priv->hw_mutex); + + if (likely(fifo->last_buffer != NULL)) { + vfree(fifo->last_buffer); + fifo->last_buffer = NULL; + } + + if (likely(fifo->static_buffer != NULL)) { + vfree(fifo->static_buffer); + fifo->static_buffer = NULL; + } + + if (likely(fifo->dynamic_buffer != NULL)) { + vfree(fifo->dynamic_buffer); + fifo->dynamic_buffer = NULL; + } +} + +static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes) +{ + __le32 __iomem *fifo_mem = dev_priv->mmio_virt; + uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX); + uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD); + uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN); + uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP); + + return ((max - next_cmd) + (stop - min) <= bytes); +} + +static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv, + uint32_t bytes, bool interruptible, + unsigned long timeout) +{ + int ret = 0; + unsigned long end_jiffies = jiffies + timeout; + DEFINE_WAIT(__wait); + + DRM_INFO("Fifo wait noirq.\n"); + + for (;;) { + prepare_to_wait(&dev_priv->fifo_queue, &__wait, + (interruptible) ? 
+ TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); + if (!vmw_fifo_is_full(dev_priv, bytes)) + break; + if (time_after_eq(jiffies, end_jiffies)) { + ret = -EBUSY; + DRM_ERROR("SVGA device lockup.\n"); + break; + } + schedule_timeout(1); + if (interruptible && signal_pending(current)) { + ret = -ERESTART; + break; + } + } + finish_wait(&dev_priv->fifo_queue, &__wait); + wake_up_all(&dev_priv->fifo_queue); + DRM_INFO("Fifo noirq exit.\n"); + return ret; +} + +static int vmw_fifo_wait(struct vmw_private *dev_priv, + uint32_t bytes, bool interruptible, + unsigned long timeout) +{ + long ret = 1L; + unsigned long irq_flags; + + if (likely(!vmw_fifo_is_full(dev_priv, bytes))) + return 0; + + vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL); + if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) + return vmw_fifo_wait_noirq(dev_priv, bytes, + interruptible, timeout); + + mutex_lock(&dev_priv->hw_mutex); + if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) { + spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); + outl(SVGA_IRQFLAG_FIFO_PROGRESS, + dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); + vmw_write(dev_priv, SVGA_REG_IRQMASK, + vmw_read(dev_priv, SVGA_REG_IRQMASK) | + SVGA_IRQFLAG_FIFO_PROGRESS); + spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); + } + mutex_unlock(&dev_priv->hw_mutex); + + if (interruptible) + ret = wait_event_interruptible_timeout + (dev_priv->fifo_queue, + !vmw_fifo_is_full(dev_priv, bytes), timeout); + else + ret = wait_event_timeout + (dev_priv->fifo_queue, + !vmw_fifo_is_full(dev_priv, bytes), timeout); + + if (unlikely(ret == -ERESTARTSYS)) + ret = -ERESTART; + else if (unlikely(ret == 0)) + ret = -EBUSY; + else if (likely(ret > 0)) + ret = 0; + + mutex_lock(&dev_priv->hw_mutex); + if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) { + spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); + vmw_write(dev_priv, SVGA_REG_IRQMASK, + vmw_read(dev_priv, SVGA_REG_IRQMASK) & + ~SVGA_IRQFLAG_FIFO_PROGRESS); + spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); + } + mutex_unlock(&dev_priv->hw_mutex); + + return ret; +} + +void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes) +{ + struct vmw_fifo_state *fifo_state = &dev_priv->fifo; + __le32 __iomem *fifo_mem = dev_priv->mmio_virt; + uint32_t max; + uint32_t min; + uint32_t next_cmd; + uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE; + int ret; + + down_write(&fifo_state->rwsem); + max = ioread32(fifo_mem + SVGA_FIFO_MAX); + min = ioread32(fifo_mem + SVGA_FIFO_MIN); + next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD); + + if (unlikely(bytes >= (max - min))) + goto out_err; + + BUG_ON(fifo_state->reserved_size != 0); + BUG_ON(fifo_state->dynamic_buffer != NULL); + + fifo_state->reserved_size = bytes; + + while (1) { + uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP); + bool need_bounce = false; + bool reserve_in_place = false; + + if (next_cmd >= stop) { + if (likely((next_cmd + bytes < max || + (next_cmd + bytes == max && stop > min)))) + reserve_in_place = true; + + else if (vmw_fifo_is_full(dev_priv, bytes)) { + ret = vmw_fifo_wait(dev_priv, bytes, + false, 3 * HZ); + if (unlikely(ret != 0)) + goto out_err; + } else + need_bounce = true; + + } else { + + if (likely((next_cmd + bytes < stop))) + reserve_in_place = true; + else { + ret = vmw_fifo_wait(dev_priv, bytes, + false, 3 * HZ); + if (unlikely(ret != 0)) + goto out_err; + } + } + + if (reserve_in_place) { + if (reserveable || bytes <= sizeof(uint32_t)) { + fifo_state->using_bounce_buffer = false; + + if 
(reserveable) + iowrite32(bytes, fifo_mem + + SVGA_FIFO_RESERVED); + return fifo_mem + (next_cmd >> 2); + } else { + need_bounce = true; + } + } + + if (need_bounce) { + fifo_state->using_bounce_buffer = true; + if (bytes < fifo_state->static_buffer_size) + return fifo_state->static_buffer; + else { + fifo_state->dynamic_buffer = vmalloc(bytes); + return fifo_state->dynamic_buffer; + } + } + } +out_err: + fifo_state->reserved_size = 0; + up_write(&fifo_state->rwsem); + return NULL; +} + +static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state, + __le32 __iomem *fifo_mem, + uint32_t next_cmd, + uint32_t max, uint32_t min, uint32_t bytes) +{ + uint32_t chunk_size = max - next_cmd; + uint32_t rest; + uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ? + fifo_state->dynamic_buffer : fifo_state->static_buffer; + + if (bytes < chunk_size) + chunk_size = bytes; + + iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED); + mb(); + memcpy_toio(fifo_mem + (next_cmd >> 2), buffer, chunk_size); + rest = bytes - chunk_size; + if (rest) + memcpy_toio(fifo_mem + (min >> 2), buffer + (chunk_size >> 2), + rest); +} + +static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state, + __le32 __iomem *fifo_mem, + uint32_t next_cmd, + uint32_t max, uint32_t min, uint32_t bytes) +{ + uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ? + fifo_state->dynamic_buffer : fifo_state->static_buffer; + + while (bytes > 0) { + iowrite32(*buffer++, fifo_mem + (next_cmd >> 2)); + next_cmd += sizeof(uint32_t); + if (unlikely(next_cmd == max)) + next_cmd = min; + mb(); + iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD); + mb(); + bytes -= sizeof(uint32_t); + } +} + +void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes) +{ + struct vmw_fifo_state *fifo_state = &dev_priv->fifo; + __le32 __iomem *fifo_mem = dev_priv->mmio_virt; + uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD); + uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX); + uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN); + bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE; + + BUG_ON((bytes & 3) != 0); + BUG_ON(bytes > fifo_state->reserved_size); + + fifo_state->reserved_size = 0; + + if (fifo_state->using_bounce_buffer) { + if (reserveable) + vmw_fifo_res_copy(fifo_state, fifo_mem, + next_cmd, max, min, bytes); + else + vmw_fifo_slow_copy(fifo_state, fifo_mem, + next_cmd, max, min, bytes); + + if (fifo_state->dynamic_buffer) { + vfree(fifo_state->dynamic_buffer); + fifo_state->dynamic_buffer = NULL; + } + + } + + if (fifo_state->using_bounce_buffer || reserveable) { + next_cmd += bytes; + if (next_cmd >= max) + next_cmd -= max - min; + mb(); + iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD); + } + + if (reserveable) + iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED); + mb(); + vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); + up_write(&fifo_state->rwsem); +} + +int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence) +{ + struct vmw_fifo_state *fifo_state = &dev_priv->fifo; + struct svga_fifo_cmd_fence *cmd_fence; + void *fm; + int ret = 0; + uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence); + + fm = vmw_fifo_reserve(dev_priv, bytes); + if (unlikely(fm == NULL)) { + down_write(&fifo_state->rwsem); + *sequence = dev_priv->fence_seq; + up_write(&fifo_state->rwsem); + ret = -ENOMEM; + (void)vmw_fallback_wait(dev_priv, false, true, *sequence, + false, 3*HZ); + goto out_err; + } + + do { + *sequence = dev_priv->fence_seq++; + } while (*sequence == 0); + + if (!(fifo_state->capabilities & 
SVGA_FIFO_CAP_FENCE)) { + + /* + * Don't request hardware to send a fence. The + * waiting code in vmwgfx_irq.c will emulate this. + */ + + vmw_fifo_commit(dev_priv, 0); + return 0; + } + + *(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE); + cmd_fence = (struct svga_fifo_cmd_fence *) + ((unsigned long)fm + sizeof(__le32)); + + iowrite32(*sequence, &cmd_fence->fence); + fifo_state->last_buffer_add = true; + vmw_fifo_commit(dev_priv, bytes); + fifo_state->last_buffer_add = false; + +out_err: + return ret; +} + +/** + * Map the first page of the FIFO read-only to user-space. + */ + +static int vmw_fifo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + int ret; + unsigned long address = (unsigned long)vmf->virtual_address; + + if (address != vma->vm_start) + return VM_FAULT_SIGBUS; + + ret = vm_insert_pfn(vma, address, vma->vm_pgoff); + if (likely(ret == -EBUSY || ret == 0)) + return VM_FAULT_NOPAGE; + else if (ret == -ENOMEM) + return VM_FAULT_OOM; + + return VM_FAULT_SIGBUS; +} + +static struct vm_operations_struct vmw_fifo_vm_ops = { + .fault = vmw_fifo_vm_fault, + .open = NULL, + .close = NULL +}; + +int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct drm_file *file_priv; + struct vmw_private *dev_priv; + + file_priv = (struct drm_file *)filp->private_data; + dev_priv = vmw_priv(file_priv->minor->dev); + + if (vma->vm_pgoff != (dev_priv->mmio_start >> PAGE_SHIFT) || + (vma->vm_end - vma->vm_start) != PAGE_SIZE) + return -EINVAL; + + vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE); + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_SHARED; + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + vma->vm_page_prot = ttm_io_prot(TTM_PL_FLAG_UNCACHED, + vma->vm_page_prot); + vma->vm_ops = &vmw_fifo_vm_ops; + return 0; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c new file mode 100644 index 000000000000..5f8908a5d7fd --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c @@ -0,0 +1,213 @@ +/************************************************************************** + * + * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + **************************************************************************/ + +#include "vmwgfx_drv.h" +#include "drmP.h" +#include "ttm/ttm_bo_driver.h" + +/** + * FIXME: Adjust to the ttm lowmem / highmem storage to minimize + * the number of used descriptors. + */ + +static int vmw_gmr_build_descriptors(struct list_head *desc_pages, + struct page *pages[], + unsigned long num_pages) +{ + struct page *page, *next; + struct svga_guest_mem_descriptor *page_virtual = NULL; + struct svga_guest_mem_descriptor *desc_virtual = NULL; + unsigned int desc_per_page; + unsigned long prev_pfn; + unsigned long pfn; + int ret; + + desc_per_page = PAGE_SIZE / + sizeof(struct svga_guest_mem_descriptor) - 1; + + while (likely(num_pages != 0)) { + page = alloc_page(__GFP_HIGHMEM); + if (unlikely(page == NULL)) { + ret = -ENOMEM; + goto out_err; + } + + list_add_tail(&page->lru, desc_pages); + + /* + * Point previous page terminating descriptor to this + * page before unmapping it. + */ + + if (likely(page_virtual != NULL)) { + desc_virtual->ppn = page_to_pfn(page); + kunmap_atomic(page_virtual, KM_USER0); + } + + page_virtual = kmap_atomic(page, KM_USER0); + desc_virtual = page_virtual - 1; + prev_pfn = ~(0UL); + + while (likely(num_pages != 0)) { + pfn = page_to_pfn(*pages); + + if (pfn != prev_pfn + 1) { + + if (desc_virtual - page_virtual == + desc_per_page - 1) + break; + + (++desc_virtual)->ppn = cpu_to_le32(pfn); + desc_virtual->num_pages = cpu_to_le32(1); + } else { + uint32_t tmp = + le32_to_cpu(desc_virtual->num_pages); + desc_virtual->num_pages = cpu_to_le32(tmp + 1); + } + prev_pfn = pfn; + --num_pages; + ++pages; + } + + (++desc_virtual)->ppn = cpu_to_le32(0); + desc_virtual->num_pages = cpu_to_le32(0); + } + + if (likely(page_virtual != NULL)) + kunmap_atomic(page_virtual, KM_USER0); + + return 0; +out_err: + list_for_each_entry_safe(page, next, desc_pages, lru) { + list_del_init(&page->lru); + __free_page(page); + } + return ret; +} + +static inline void vmw_gmr_free_descriptors(struct list_head *desc_pages) +{ + struct page *page, *next; + + list_for_each_entry_safe(page, next, desc_pages, lru) { + list_del_init(&page->lru); + __free_page(page); + } +} + +static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv, + int gmr_id, struct list_head *desc_pages) +{ + struct page *page; + + if (unlikely(list_empty(desc_pages))) + return; + + page = list_entry(desc_pages->next, struct page, lru); + + mutex_lock(&dev_priv->hw_mutex); + + vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id); + wmb(); + vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, page_to_pfn(page)); + mb(); + + mutex_unlock(&dev_priv->hw_mutex); + +} + +/** + * FIXME: Adjust to the ttm lowmem / highmem storage to minimize + * the number of used descriptors. 
+ */ + +static unsigned long vmw_gmr_count_descriptors(struct page *pages[], + unsigned long num_pages) +{ + unsigned long prev_pfn = ~(0UL); + unsigned long pfn; + unsigned long descriptors = 0; + + while (num_pages--) { + pfn = page_to_pfn(*pages++); + if (prev_pfn + 1 != pfn) + ++descriptors; + prev_pfn = pfn; + } + + return descriptors; +} + +int vmw_gmr_bind(struct vmw_private *dev_priv, + struct ttm_buffer_object *bo) +{ + struct ttm_tt *ttm = bo->ttm; + unsigned long descriptors; + int ret; + uint32_t id; + struct list_head desc_pages; + + if (!(dev_priv->capabilities & SVGA_CAP_GMR)) + return -EINVAL; + + ret = ttm_tt_populate(ttm); + if (unlikely(ret != 0)) + return ret; + + descriptors = vmw_gmr_count_descriptors(ttm->pages, ttm->num_pages); + if (unlikely(descriptors > dev_priv->max_gmr_descriptors)) + return -EINVAL; + + INIT_LIST_HEAD(&desc_pages); + ret = vmw_gmr_build_descriptors(&desc_pages, ttm->pages, + ttm->num_pages); + if (unlikely(ret != 0)) + return ret; + + ret = vmw_gmr_id_alloc(dev_priv, &id); + if (unlikely(ret != 0)) + goto out_no_id; + + vmw_gmr_fire_descriptors(dev_priv, id, &desc_pages); + vmw_gmr_free_descriptors(&desc_pages); + vmw_dmabuf_set_gmr(bo, id); + return 0; + +out_no_id: + vmw_gmr_free_descriptors(&desc_pages); + return ret; +} + +void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id) +{ + mutex_lock(&dev_priv->hw_mutex); + vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id); + wmb(); + vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0); + mb(); + mutex_unlock(&dev_priv->hw_mutex); +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c new file mode 100644 index 000000000000..5fa6a4ed238a --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -0,0 +1,81 @@ +/************************************************************************** + * + * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + **************************************************************************/ + +#include "vmwgfx_drv.h" +#include "vmwgfx_drm.h" + +int vmw_getparam_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + struct drm_vmw_getparam_arg *param = + (struct drm_vmw_getparam_arg *)data; + + switch (param->param) { + case DRM_VMW_PARAM_NUM_STREAMS: + param->value = vmw_overlay_num_overlays(dev_priv); + break; + case DRM_VMW_PARAM_NUM_FREE_STREAMS: + param->value = vmw_overlay_num_free_overlays(dev_priv); + break; + case DRM_VMW_PARAM_3D: + param->value = dev_priv->capabilities & SVGA_CAP_3D ? 1 : 0; + break; + case DRM_VMW_PARAM_FIFO_OFFSET: + param->value = dev_priv->mmio_start; + break; + default: + DRM_ERROR("Illegal vmwgfx get param request: %d\n", + param->param); + return -EINVAL; + } + + return 0; +} + +int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + struct vmw_fifo_state *fifo_state = &dev_priv->fifo; + struct drm_vmw_fifo_debug_arg *arg = + (struct drm_vmw_fifo_debug_arg *)data; + __le32 __user *buffer = (__le32 __user *) + (unsigned long)arg->debug_buffer; + + if (unlikely(fifo_state->last_buffer == NULL)) + return -EINVAL; + + if (arg->debug_buffer_size < fifo_state->last_data_size) { + arg->used_size = arg->debug_buffer_size; + arg->did_not_fit = 1; + } else { + arg->used_size = fifo_state->last_data_size; + arg->did_not_fit = 0; + } + return copy_to_user(buffer, fifo_state->last_buffer, arg->used_size); +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c new file mode 100644 index 000000000000..9e0f0306eedb --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c @@ -0,0 +1,295 @@ +/************************************************************************** + * + * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + **************************************************************************/ + +#include "drmP.h" +#include "vmwgfx_drv.h" + +#define VMW_FENCE_WRAP (1 << 24) + +irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS) +{ + struct drm_device *dev = (struct drm_device *)arg; + struct vmw_private *dev_priv = vmw_priv(dev); + uint32_t status; + + spin_lock(&dev_priv->irq_lock); + status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); + spin_unlock(&dev_priv->irq_lock); + + if (status & SVGA_IRQFLAG_ANY_FENCE) + wake_up_all(&dev_priv->fence_queue); + if (status & SVGA_IRQFLAG_FIFO_PROGRESS) + wake_up_all(&dev_priv->fifo_queue); + + if (likely(status)) { + outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence) +{ + uint32_t busy; + + mutex_lock(&dev_priv->hw_mutex); + busy = vmw_read(dev_priv, SVGA_REG_BUSY); + mutex_unlock(&dev_priv->hw_mutex); + + return (busy == 0); +} + + +bool vmw_fence_signaled(struct vmw_private *dev_priv, + uint32_t sequence) +{ + __le32 __iomem *fifo_mem = dev_priv->mmio_virt; + struct vmw_fifo_state *fifo_state; + bool ret; + + if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP)) + return true; + + dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE); + if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP)) + return true; + + fifo_state = &dev_priv->fifo; + if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) && + vmw_fifo_idle(dev_priv, sequence)) + return true; + + /** + * Below is to signal stale fences that have wrapped. + * First, block fence submission. + */ + + down_read(&fifo_state->rwsem); + + /** + * Then check if the sequence is higher than what we've actually + * emitted. Then the fence is stale and signaled. + */ + + ret = ((dev_priv->fence_seq - sequence) > VMW_FENCE_WRAP); + up_read(&fifo_state->rwsem); + + return ret; +} + +int vmw_fallback_wait(struct vmw_private *dev_priv, + bool lazy, + bool fifo_idle, + uint32_t sequence, + bool interruptible, + unsigned long timeout) +{ + struct vmw_fifo_state *fifo_state = &dev_priv->fifo; + + uint32_t count = 0; + uint32_t signal_seq; + int ret; + unsigned long end_jiffies = jiffies + timeout; + bool (*wait_condition)(struct vmw_private *, uint32_t); + DEFINE_WAIT(__wait); + + wait_condition = (fifo_idle) ? &vmw_fifo_idle : + &vmw_fence_signaled; + + /** + * Block command submission while waiting for idle. + */ + + if (fifo_idle) + down_read(&fifo_state->rwsem); + signal_seq = dev_priv->fence_seq; + ret = 0; + + for (;;) { + prepare_to_wait(&dev_priv->fence_queue, &__wait, + (interruptible) ? + TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); + if (wait_condition(dev_priv, sequence)) + break; + if (time_after_eq(jiffies, end_jiffies)) { + DRM_ERROR("SVGA device lockup.\n"); + break; + } + if (lazy) + schedule_timeout(1); + else if ((++count & 0x0F) == 0) { + /** + * FIXME: Use schedule_hr_timeout here for + * newer kernels and lower CPU utilization. + */ + + __set_current_state(TASK_RUNNING); + schedule(); + __set_current_state((interruptible) ? 
+ TASK_INTERRUPTIBLE : + TASK_UNINTERRUPTIBLE); + } + if (interruptible && signal_pending(current)) { + ret = -ERESTART; + break; + } + } + finish_wait(&dev_priv->fence_queue, &__wait); + if (ret == 0 && fifo_idle) { + __le32 __iomem *fifo_mem = dev_priv->mmio_virt; + iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE); + } + wake_up_all(&dev_priv->fence_queue); + if (fifo_idle) + up_read(&fifo_state->rwsem); + + return ret; +} + +int vmw_wait_fence(struct vmw_private *dev_priv, + bool lazy, uint32_t sequence, + bool interruptible, unsigned long timeout) +{ + long ret; + unsigned long irq_flags; + struct vmw_fifo_state *fifo = &dev_priv->fifo; + + if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP)) + return 0; + + if (likely(vmw_fence_signaled(dev_priv, sequence))) + return 0; + + vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); + + if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE)) + return vmw_fallback_wait(dev_priv, lazy, true, sequence, + interruptible, timeout); + + if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) + return vmw_fallback_wait(dev_priv, lazy, false, sequence, + interruptible, timeout); + + mutex_lock(&dev_priv->hw_mutex); + if (atomic_add_return(1, &dev_priv->fence_queue_waiters) > 0) { + spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); + outl(SVGA_IRQFLAG_ANY_FENCE, + dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); + vmw_write(dev_priv, SVGA_REG_IRQMASK, + vmw_read(dev_priv, SVGA_REG_IRQMASK) | + SVGA_IRQFLAG_ANY_FENCE); + spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); + } + mutex_unlock(&dev_priv->hw_mutex); + + if (interruptible) + ret = wait_event_interruptible_timeout + (dev_priv->fence_queue, + vmw_fence_signaled(dev_priv, sequence), + timeout); + else + ret = wait_event_timeout + (dev_priv->fence_queue, + vmw_fence_signaled(dev_priv, sequence), + timeout); + + if (unlikely(ret == -ERESTARTSYS)) + ret = -ERESTART; + else if (unlikely(ret == 0)) + ret = -EBUSY; + else if (likely(ret > 0)) + ret = 0; + + mutex_lock(&dev_priv->hw_mutex); + if (atomic_dec_and_test(&dev_priv->fence_queue_waiters)) { + spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); + vmw_write(dev_priv, SVGA_REG_IRQMASK, + vmw_read(dev_priv, SVGA_REG_IRQMASK) & + ~SVGA_IRQFLAG_ANY_FENCE); + spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); + } + mutex_unlock(&dev_priv->hw_mutex); + + return ret; +} + +void vmw_irq_preinstall(struct drm_device *dev) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + uint32_t status; + + if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) + return; + + spin_lock_init(&dev_priv->irq_lock); + status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); + outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); +} + +int vmw_irq_postinstall(struct drm_device *dev) +{ + return 0; +} + +void vmw_irq_uninstall(struct drm_device *dev) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + uint32_t status; + + if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) + return; + + mutex_lock(&dev_priv->hw_mutex); + vmw_write(dev_priv, SVGA_REG_IRQMASK, 0); + mutex_unlock(&dev_priv->hw_mutex); + + status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); + outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); +} + +#define VMW_FENCE_WAIT_TIMEOUT 3*HZ; + +int vmw_fence_wait_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_vmw_fence_wait_arg *arg = + (struct drm_vmw_fence_wait_arg *)data; + unsigned long timeout; + + if (!arg->cookie_valid) { + arg->cookie_valid = 1; + arg->kernel_cookie = jiffies + 
VMW_FENCE_WAIT_TIMEOUT; + } + + timeout = jiffies; + if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) + return -EBUSY; + + timeout = (unsigned long)arg->kernel_cookie - timeout; + return vmw_wait_fence(vmw_priv(dev), true, arg->sequence, true, timeout); +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c new file mode 100644 index 000000000000..e9403be446fe --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -0,0 +1,872 @@ +/************************************************************************** + * + * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +#include "vmwgfx_kms.h" + +/* Might need a hrtimer here? */ +#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1) + + +void vmw_display_unit_cleanup(struct vmw_display_unit *du) +{ + if (du->cursor_surface) + vmw_surface_unreference(&du->cursor_surface); + if (du->cursor_dmabuf) + vmw_dmabuf_unreference(&du->cursor_dmabuf); + drm_crtc_cleanup(&du->crtc); + drm_encoder_cleanup(&du->encoder); + drm_connector_cleanup(&du->connector); +} + +/* + * Display Unit Cursor functions + */ + +int vmw_cursor_update_image(struct vmw_private *dev_priv, + u32 *image, u32 width, u32 height, + u32 hotspotX, u32 hotspotY) +{ + struct { + u32 cmd; + SVGAFifoCmdDefineAlphaCursor cursor; + } *cmd; + u32 image_size = width * height * 4; + u32 cmd_size = sizeof(*cmd) + image_size; + + if (!image) + return -EINVAL; + + cmd = vmw_fifo_reserve(dev_priv, cmd_size); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Fifo reserve failed.\n"); + return -ENOMEM; + } + + memset(cmd, 0, sizeof(*cmd)); + + memcpy(&cmd[1], image, image_size); + + cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR); + cmd->cursor.id = cpu_to_le32(0); + cmd->cursor.width = cpu_to_le32(width); + cmd->cursor.height = cpu_to_le32(height); + cmd->cursor.hotspotX = cpu_to_le32(hotspotX); + cmd->cursor.hotspotY = cpu_to_le32(hotspotY); + + vmw_fifo_commit(dev_priv, cmd_size); + + return 0; +} + +void vmw_cursor_update_position(struct vmw_private *dev_priv, + bool show, int x, int y) +{ + __le32 __iomem *fifo_mem = dev_priv->mmio_virt; + uint32_t count; + + iowrite32(show ? 
1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON); + iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X); + iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y); + count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT); + iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT); +} + +int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, + uint32_t handle, uint32_t width, uint32_t height) +{ + struct vmw_private *dev_priv = vmw_priv(crtc->dev); + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + struct vmw_display_unit *du = vmw_crtc_to_du(crtc); + struct vmw_surface *surface = NULL; + struct vmw_dma_buffer *dmabuf = NULL; + int ret; + + if (handle) { + ret = vmw_user_surface_lookup(dev_priv, tfile, + handle, &surface); + if (!ret) { + if (!surface->snooper.image) { + DRM_ERROR("surface not suitable for cursor\n"); + return -EINVAL; + } + } else { + ret = vmw_user_dmabuf_lookup(tfile, + handle, &dmabuf); + if (ret) { + DRM_ERROR("failed to find surface or dmabuf: %i\n", ret); + return -EINVAL; + } + } + } + + /* takedown old cursor */ + if (du->cursor_surface) { + du->cursor_surface->snooper.crtc = NULL; + vmw_surface_unreference(&du->cursor_surface); + } + if (du->cursor_dmabuf) + vmw_dmabuf_unreference(&du->cursor_dmabuf); + + /* setup new image */ + if (surface) { + /* vmw_user_surface_lookup takes one reference */ + du->cursor_surface = surface; + + du->cursor_surface->snooper.crtc = crtc; + du->cursor_age = du->cursor_surface->snooper.age; + vmw_cursor_update_image(dev_priv, surface->snooper.image, + 64, 64, du->hotspot_x, du->hotspot_y); + } else if (dmabuf) { + struct ttm_bo_kmap_obj map; + unsigned long kmap_offset; + unsigned long kmap_num; + void *virtual; + bool dummy; + + /* vmw_user_surface_lookup takes one reference */ + du->cursor_dmabuf = dmabuf; + + kmap_offset = 0; + kmap_num = (64*64*4) >> PAGE_SHIFT; + + ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0); + if (unlikely(ret != 0)) { + DRM_ERROR("reserve failed\n"); + return -EINVAL; + } + + ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map); + if (unlikely(ret != 0)) + goto err_unreserve; + + virtual = ttm_kmap_obj_virtual(&map, &dummy); + vmw_cursor_update_image(dev_priv, virtual, 64, 64, + du->hotspot_x, du->hotspot_y); + + ttm_bo_kunmap(&map); +err_unreserve: + ttm_bo_unreserve(&dmabuf->base); + + } else { + vmw_cursor_update_position(dev_priv, false, 0, 0); + return 0; + } + + vmw_cursor_update_position(dev_priv, true, du->cursor_x, du->cursor_y); + + return 0; +} + +int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) +{ + struct vmw_private *dev_priv = vmw_priv(crtc->dev); + struct vmw_display_unit *du = vmw_crtc_to_du(crtc); + bool shown = du->cursor_surface || du->cursor_dmabuf ? 
true : false; + + du->cursor_x = x + crtc->x; + du->cursor_y = y + crtc->y; + + vmw_cursor_update_position(dev_priv, shown, + du->cursor_x, du->cursor_y); + + return 0; +} + +void vmw_kms_cursor_snoop(struct vmw_surface *srf, + struct ttm_object_file *tfile, + struct ttm_buffer_object *bo, + SVGA3dCmdHeader *header) +{ + struct ttm_bo_kmap_obj map; + unsigned long kmap_offset; + unsigned long kmap_num; + SVGA3dCopyBox *box; + unsigned box_count; + void *virtual; + bool dummy; + struct vmw_dma_cmd { + SVGA3dCmdHeader header; + SVGA3dCmdSurfaceDMA dma; + } *cmd; + int ret; + + cmd = container_of(header, struct vmw_dma_cmd, header); + + /* No snooper installed */ + if (!srf->snooper.image) + return; + + if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) { + DRM_ERROR("face and mipmap for cursors should never != 0\n"); + return; + } + + if (cmd->header.size < 64) { + DRM_ERROR("at least one full copy box must be given\n"); + return; + } + + box = (SVGA3dCopyBox *)&cmd[1]; + box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) / + sizeof(SVGA3dCopyBox); + + if (cmd->dma.guest.pitch != (64 * 4) || + cmd->dma.guest.ptr.offset % PAGE_SIZE || + box->x != 0 || box->y != 0 || box->z != 0 || + box->srcx != 0 || box->srcy != 0 || box->srcz != 0 || + box->w != 64 || box->h != 64 || box->d != 1 || + box_count != 1) { + /* TODO handle none page aligned offsets */ + /* TODO handle partial uploads and pitch != 256 */ + /* TODO handle more then one copy (size != 64) */ + DRM_ERROR("lazy programer, cant handle wierd stuff\n"); + return; + } + + kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT; + kmap_num = (64*64*4) >> PAGE_SHIFT; + + ret = ttm_bo_reserve(bo, true, false, false, 0); + if (unlikely(ret != 0)) { + DRM_ERROR("reserve failed\n"); + return; + } + + ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map); + if (unlikely(ret != 0)) + goto err_unreserve; + + virtual = ttm_kmap_obj_virtual(&map, &dummy); + + memcpy(srf->snooper.image, virtual, 64*64*4); + srf->snooper.age++; + + /* we can't call this function from this function since execbuf has + * reserved fifo space. 
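+	 * (vmw_cursor_update_image() would try to reserve fifo space again.)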
+ * + * if (srf->snooper.crtc) + * vmw_ldu_crtc_cursor_update_image(dev_priv, + * srf->snooper.image, 64, 64, + * du->hotspot_x, du->hotspot_y); + */ + + ttm_bo_kunmap(&map); +err_unreserve: + ttm_bo_unreserve(bo); +} + +void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; + struct vmw_display_unit *du; + struct drm_crtc *crtc; + + mutex_lock(&dev->mode_config.mutex); + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + du = vmw_crtc_to_du(crtc); + if (!du->cursor_surface || + du->cursor_age == du->cursor_surface->snooper.age) + continue; + + du->cursor_age = du->cursor_surface->snooper.age; + vmw_cursor_update_image(dev_priv, + du->cursor_surface->snooper.image, + 64, 64, du->hotspot_x, du->hotspot_y); + } + + mutex_unlock(&dev->mode_config.mutex); +} + +/* + * Generic framebuffer code + */ + +int vmw_framebuffer_create_handle(struct drm_framebuffer *fb, + struct drm_file *file_priv, + unsigned int *handle) +{ + if (handle) + handle = 0; + + return 0; +} + +/* + * Surface framebuffer code + */ + +#define vmw_framebuffer_to_vfbs(x) \ + container_of(x, struct vmw_framebuffer_surface, base.base) + +struct vmw_framebuffer_surface { + struct vmw_framebuffer base; + struct vmw_surface *surface; + struct delayed_work d_work; + struct mutex work_lock; + bool present_fs; +}; + +void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer) +{ + struct vmw_framebuffer_surface *vfb = + vmw_framebuffer_to_vfbs(framebuffer); + + cancel_delayed_work_sync(&vfb->d_work); + drm_framebuffer_cleanup(framebuffer); + vmw_surface_unreference(&vfb->surface); + + kfree(framebuffer); +} + +static void vmw_framebuffer_present_fs_callback(struct work_struct *work) +{ + struct delayed_work *d_work = + container_of(work, struct delayed_work, work); + struct vmw_framebuffer_surface *vfbs = + container_of(d_work, struct vmw_framebuffer_surface, d_work); + struct vmw_surface *surf = vfbs->surface; + struct drm_framebuffer *framebuffer = &vfbs->base.base; + struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); + + struct { + SVGA3dCmdHeader header; + SVGA3dCmdPresent body; + SVGA3dCopyRect cr; + } *cmd; + + mutex_lock(&vfbs->work_lock); + if (!vfbs->present_fs) + goto out_unlock; + + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); + if (unlikely(cmd == NULL)) + goto out_resched; + + cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT); + cmd->header.size = cpu_to_le32(sizeof(cmd->body) + sizeof(cmd->cr)); + cmd->body.sid = cpu_to_le32(surf->res.id); + cmd->cr.x = cpu_to_le32(0); + cmd->cr.y = cpu_to_le32(0); + cmd->cr.srcx = cmd->cr.x; + cmd->cr.srcy = cmd->cr.y; + cmd->cr.w = cpu_to_le32(framebuffer->width); + cmd->cr.h = cpu_to_le32(framebuffer->height); + vfbs->present_fs = false; + vmw_fifo_commit(dev_priv, sizeof(*cmd)); +out_resched: + /** + * Will not re-add if already pending. 
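+	 * schedule_delayed_work() simply returns false if the work is already queued.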
+ */ + schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE); +out_unlock: + mutex_unlock(&vfbs->work_lock); +} + + +int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, + unsigned flags, unsigned color, + struct drm_clip_rect *clips, + unsigned num_clips) +{ + struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); + struct vmw_framebuffer_surface *vfbs = + vmw_framebuffer_to_vfbs(framebuffer); + struct vmw_surface *surf = vfbs->surface; + struct drm_clip_rect norect; + SVGA3dCopyRect *cr; + int i, inc = 1; + + struct { + SVGA3dCmdHeader header; + SVGA3dCmdPresent body; + SVGA3dCopyRect cr; + } *cmd; + + if (!num_clips || + !(dev_priv->fifo.capabilities & + SVGA_FIFO_CAP_SCREEN_OBJECT)) { + int ret; + + mutex_lock(&vfbs->work_lock); + vfbs->present_fs = true; + ret = schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE); + mutex_unlock(&vfbs->work_lock); + if (ret) { + /** + * No work pending, Force immediate present. + */ + vmw_framebuffer_present_fs_callback(&vfbs->d_work.work); + } + return 0; + } + + if (!num_clips) { + num_clips = 1; + clips = &norect; + norect.x1 = norect.y1 = 0; + norect.x2 = framebuffer->width; + norect.y2 = framebuffer->height; + } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) { + num_clips /= 2; + inc = 2; /* skip source rects */ + } + + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr)); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Fifo reserve failed.\n"); + return -ENOMEM; + } + + memset(cmd, 0, sizeof(*cmd)); + + cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT); + cmd->header.size = cpu_to_le32(sizeof(cmd->body) + num_clips * sizeof(cmd->cr)); + cmd->body.sid = cpu_to_le32(surf->res.id); + + for (i = 0, cr = &cmd->cr; i < num_clips; i++, cr++, clips += inc) { + cr->x = cpu_to_le16(clips->x1); + cr->y = cpu_to_le16(clips->y1); + cr->srcx = cr->x; + cr->srcy = cr->y; + cr->w = cpu_to_le16(clips->x2 - clips->x1); + cr->h = cpu_to_le16(clips->y2 - clips->y1); + } + + vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr)); + + return 0; +} + +static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = { + .destroy = vmw_framebuffer_surface_destroy, + .dirty = vmw_framebuffer_surface_dirty, + .create_handle = vmw_framebuffer_create_handle, +}; + +int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, + struct vmw_surface *surface, + struct vmw_framebuffer **out, + unsigned width, unsigned height) + +{ + struct drm_device *dev = dev_priv->dev; + struct vmw_framebuffer_surface *vfbs; + int ret; + + vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL); + if (!vfbs) { + ret = -ENOMEM; + goto out_err1; + } + + ret = drm_framebuffer_init(dev, &vfbs->base.base, + &vmw_framebuffer_surface_funcs); + if (ret) + goto out_err2; + + if (!vmw_surface_reference(surface)) { + DRM_ERROR("failed to reference surface %p\n", surface); + goto out_err3; + } + + /* XXX get the first 3 from the surface info */ + vfbs->base.base.bits_per_pixel = 32; + vfbs->base.base.pitch = width * 32 / 4; + vfbs->base.base.depth = 24; + vfbs->base.base.width = width; + vfbs->base.base.height = height; + vfbs->base.pin = NULL; + vfbs->base.unpin = NULL; + vfbs->surface = surface; + mutex_init(&vfbs->work_lock); + INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback); + *out = &vfbs->base; + + return 0; + +out_err3: + drm_framebuffer_cleanup(&vfbs->base.base); +out_err2: + kfree(vfbs); +out_err1: + return ret; +} + +/* + * Dmabuf framebuffer code + */ + +#define vmw_framebuffer_to_vfbd(x) \ + 
container_of(x, struct vmw_framebuffer_dmabuf, base.base) + +struct vmw_framebuffer_dmabuf { + struct vmw_framebuffer base; + struct vmw_dma_buffer *buffer; +}; + +void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer) +{ + struct vmw_framebuffer_dmabuf *vfbd = + vmw_framebuffer_to_vfbd(framebuffer); + + drm_framebuffer_cleanup(framebuffer); + vmw_dmabuf_unreference(&vfbd->buffer); + + kfree(vfbd); +} + +int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, + unsigned flags, unsigned color, + struct drm_clip_rect *clips, + unsigned num_clips) +{ + struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); + struct drm_clip_rect norect; + struct { + uint32_t header; + SVGAFifoCmdUpdate body; + } *cmd; + int i, increment = 1; + + if (!num_clips || + !(dev_priv->fifo.capabilities & + SVGA_FIFO_CAP_SCREEN_OBJECT)) { + num_clips = 1; + clips = &norect; + norect.x1 = norect.y1 = 0; + norect.x2 = framebuffer->width; + norect.y2 = framebuffer->height; + } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) { + num_clips /= 2; + increment = 2; + } + + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Fifo reserve failed.\n"); + return -ENOMEM; + } + + for (i = 0; i < num_clips; i++, clips += increment) { + cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE); + cmd[i].body.x = cpu_to_le32(clips[i].x1); + cmd[i].body.y = cpu_to_le32(clips[i].y1); + cmd[i].body.width = cpu_to_le32(clips[i].x2 - clips[i].x1); + cmd[i].body.height = cpu_to_le32(clips[i].y2 - clips[i].y1); + } + + vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips); + + return 0; +} + +static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = { + .destroy = vmw_framebuffer_dmabuf_destroy, + .dirty = vmw_framebuffer_dmabuf_dirty, + .create_handle = vmw_framebuffer_create_handle, +}; + +static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb) +{ + struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); + struct vmw_framebuffer_dmabuf *vfbd = + vmw_framebuffer_to_vfbd(&vfb->base); + int ret; + + vmw_overlay_pause_all(dev_priv); + + ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer); + + if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { + vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); + vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, 0); + vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); + vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0); + vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0); + vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0); + vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0); + vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); + + vmw_write(dev_priv, SVGA_REG_ENABLE, 1); + vmw_write(dev_priv, SVGA_REG_WIDTH, vfb->base.width); + vmw_write(dev_priv, SVGA_REG_HEIGHT, vfb->base.height); + vmw_write(dev_priv, SVGA_REG_BITS_PER_PIXEL, vfb->base.bits_per_pixel); + vmw_write(dev_priv, SVGA_REG_DEPTH, vfb->base.depth); + vmw_write(dev_priv, SVGA_REG_RED_MASK, 0x00ff0000); + vmw_write(dev_priv, SVGA_REG_GREEN_MASK, 0x0000ff00); + vmw_write(dev_priv, SVGA_REG_BLUE_MASK, 0x000000ff); + } else + WARN_ON(true); + + vmw_overlay_resume_all(dev_priv); + + return 0; +} + +static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb) +{ + struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); + struct vmw_framebuffer_dmabuf *vfbd = + vmw_framebuffer_to_vfbd(&vfb->base); + + if (!vfbd->buffer) { + WARN_ON(!vfbd->buffer); + return 0; + } + + return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer); +} + +int 
vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, + struct vmw_dma_buffer *dmabuf, + struct vmw_framebuffer **out, + unsigned width, unsigned height) + +{ + struct drm_device *dev = dev_priv->dev; + struct vmw_framebuffer_dmabuf *vfbd; + int ret; + + vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL); + if (!vfbd) { + ret = -ENOMEM; + goto out_err1; + } + + ret = drm_framebuffer_init(dev, &vfbd->base.base, + &vmw_framebuffer_dmabuf_funcs); + if (ret) + goto out_err2; + + if (!vmw_dmabuf_reference(dmabuf)) { + DRM_ERROR("failed to reference dmabuf %p\n", dmabuf); + goto out_err3; + } + + /* XXX get the first 3 from the surface info */ + vfbd->base.base.bits_per_pixel = 32; + vfbd->base.base.pitch = width * 32 / 4; + vfbd->base.base.depth = 24; + vfbd->base.base.width = width; + vfbd->base.base.height = height; + vfbd->base.pin = vmw_framebuffer_dmabuf_pin; + vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin; + vfbd->buffer = dmabuf; + *out = &vfbd->base; + + return 0; + +out_err3: + drm_framebuffer_cleanup(&vfbd->base.base); +out_err2: + kfree(vfbd); +out_err1: + return ret; +} + +/* + * Generic Kernel modesetting functions + */ + +static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, + struct drm_file *file_priv, + struct drm_mode_fb_cmd *mode_cmd) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + struct vmw_framebuffer *vfb = NULL; + struct vmw_surface *surface = NULL; + struct vmw_dma_buffer *bo = NULL; + int ret; + + ret = vmw_user_surface_lookup(dev_priv, tfile, + mode_cmd->handle, &surface); + if (ret) + goto try_dmabuf; + + ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb, + mode_cmd->width, mode_cmd->height); + + /* vmw_user_surface_lookup takes one ref so does new_fb */ + vmw_surface_unreference(&surface); + + if (ret) { + DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret); + return NULL; + } + return &vfb->base; + +try_dmabuf: + DRM_INFO("%s: trying buffer\n", __func__); + + ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo); + if (ret) { + DRM_ERROR("failed to find buffer: %i\n", ret); + return NULL; + } + + ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb, + mode_cmd->width, mode_cmd->height); + + /* vmw_user_dmabuf_lookup takes one ref so does new_fb */ + vmw_dmabuf_unreference(&bo); + + if (ret) { + DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret); + return NULL; + } + + return &vfb->base; +} + +static int vmw_kms_fb_changed(struct drm_device *dev) +{ + return 0; +} + +static struct drm_mode_config_funcs vmw_kms_funcs = { + .fb_create = vmw_kms_fb_create, + .fb_changed = vmw_kms_fb_changed, +}; + +int vmw_kms_init(struct vmw_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; + int ret; + + drm_mode_config_init(dev); + dev->mode_config.funcs = &vmw_kms_funcs; + dev->mode_config.min_width = 640; + dev->mode_config.min_height = 480; + dev->mode_config.max_width = 2048; + dev->mode_config.max_height = 2048; + + ret = vmw_kms_init_legacy_display_system(dev_priv); + + return 0; +} + +int vmw_kms_close(struct vmw_private *dev_priv) +{ + /* + * Docs says we should take the lock before calling this function + * but since it destroys encoders and our destructor calls + * drm_encoder_cleanup which takes the lock we deadlock. 
+ */ + drm_mode_config_cleanup(dev_priv->dev); + vmw_kms_close_legacy_display_system(dev_priv); + return 0; +} + +int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_vmw_cursor_bypass_arg *arg = data; + struct vmw_display_unit *du; + struct drm_mode_object *obj; + struct drm_crtc *crtc; + int ret = 0; + + + mutex_lock(&dev->mode_config.mutex); + if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) { + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + du = vmw_crtc_to_du(crtc); + du->hotspot_x = arg->xhot; + du->hotspot_y = arg->yhot; + } + + mutex_unlock(&dev->mode_config.mutex); + return 0; + } + + obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC); + if (!obj) { + ret = -EINVAL; + goto out; + } + + crtc = obj_to_crtc(obj); + du = vmw_crtc_to_du(crtc); + + du->hotspot_x = arg->xhot; + du->hotspot_y = arg->yhot; + +out: + mutex_unlock(&dev->mode_config.mutex); + + return ret; +} + +int vmw_kms_save_vga(struct vmw_private *vmw_priv) +{ + /* + * setup a single multimon monitor with the size + * of 0x0, this stops the UI from resizing when we + * change the framebuffer size + */ + if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) { + vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); + vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0); + vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); + vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0); + vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0); + vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0); + vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0); + vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); + } + + vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH); + vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT); + vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL); + vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH); + vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR); + vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK); + vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK); + vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK); + + return 0; +} + +int vmw_kms_restore_vga(struct vmw_private *vmw_priv) +{ + vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width); + vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height); + vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp); + vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth); + vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo); + vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask); + vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask); + vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask); + + /* TODO check for multimon */ + vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0); + + return 0; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h new file mode 100644 index 000000000000..8b95249f0531 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -0,0 +1,102 @@ +/************************************************************************** + * + * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +#ifndef VMWGFX_KMS_H_ +#define VMWGFX_KMS_H_ + +#include "drmP.h" +#include "vmwgfx_drv.h" + + +#define vmw_framebuffer_to_vfb(x) \ + container_of(x, struct vmw_framebuffer, base) + +/** + * Base class for framebuffers + * + * @pin is called the when ever a crtc uses this framebuffer + * @unpin is called + */ +struct vmw_framebuffer { + struct drm_framebuffer base; + int (*pin)(struct vmw_framebuffer *fb); + int (*unpin)(struct vmw_framebuffer *fb); +}; + + +#define vmw_crtc_to_du(x) \ + container_of(x, struct vmw_display_unit, crtc) + +/* + * Basic cursor manipulation + */ +int vmw_cursor_update_image(struct vmw_private *dev_priv, + u32 *image, u32 width, u32 height, + u32 hotspotX, u32 hotspotY); +void vmw_cursor_update_position(struct vmw_private *dev_priv, + bool show, int x, int y); + +/** + * Base class display unit. + * + * Since the SVGA hw doesn't have a concept of a crtc, encoder or connector + * so the display unit is all of them at the same time. This is true for both + * legacy multimon and screen objects. + */ +struct vmw_display_unit { + struct drm_crtc crtc; + struct drm_encoder encoder; + struct drm_connector connector; + + struct vmw_surface *cursor_surface; + struct vmw_dma_buffer *cursor_dmabuf; + size_t cursor_age; + + int cursor_x; + int cursor_y; + + int hotspot_x; + int hotspot_y; + + unsigned unit; +}; + +/* + * Shared display unit functions - vmwgfx_kms.c + */ +void vmw_display_unit_cleanup(struct vmw_display_unit *du); +int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, + uint32_t handle, uint32_t width, uint32_t height); +int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); + +/* + * Legacy display unit functions - vmwgfx_ldu.h + */ +int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv); +int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv); + +#endif diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c new file mode 100644 index 000000000000..90891593bf6c --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -0,0 +1,516 @@ +/************************************************************************** + * + * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +#include "vmwgfx_kms.h" + +#define vmw_crtc_to_ldu(x) \ + container_of(x, struct vmw_legacy_display_unit, base.crtc) +#define vmw_encoder_to_ldu(x) \ + container_of(x, struct vmw_legacy_display_unit, base.encoder) +#define vmw_connector_to_ldu(x) \ + container_of(x, struct vmw_legacy_display_unit, base.connector) + +struct vmw_legacy_display { + struct list_head active; + + unsigned num_active; + + struct vmw_framebuffer *fb; +}; + +/** + * Display unit using the legacy register interface. + */ +struct vmw_legacy_display_unit { + struct vmw_display_unit base; + + struct list_head active; + + unsigned unit; +}; + +static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu) +{ + list_del_init(&ldu->active); + vmw_display_unit_cleanup(&ldu->base); + kfree(ldu); +} + + +/* + * Legacy Display Unit CRTC functions + */ + +static void vmw_ldu_crtc_save(struct drm_crtc *crtc) +{ +} + +static void vmw_ldu_crtc_restore(struct drm_crtc *crtc) +{ +} + +static void vmw_ldu_crtc_gamma_set(struct drm_crtc *crtc, + u16 *r, u16 *g, u16 *b, + uint32_t size) +{ +} + +static void vmw_ldu_crtc_destroy(struct drm_crtc *crtc) +{ + vmw_ldu_destroy(vmw_crtc_to_ldu(crtc)); +} + +static int vmw_ldu_commit_list(struct vmw_private *dev_priv) +{ + struct vmw_legacy_display *lds = dev_priv->ldu_priv; + struct vmw_legacy_display_unit *entry; + struct drm_crtc *crtc; + int i = 0; + + /* to stop the screen from changing size on resize */ + vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0); + for (i = 0; i < lds->num_active; i++) { + vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i); + vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i); + vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0); + vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0); + vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0); + vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0); + vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); + } + + /* Now set the mode */ + vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, lds->num_active); + i = 0; + list_for_each_entry(entry, &lds->active, active) { + crtc = &entry->base.crtc; + + vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i); + vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i); + vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, crtc->x); + vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 
crtc->y); + vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, crtc->mode.hdisplay); + vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, crtc->mode.vdisplay); + vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); + + i++; + } + + return 0; +} + +static int vmw_ldu_del_active(struct vmw_private *vmw_priv, + struct vmw_legacy_display_unit *ldu) +{ + struct vmw_legacy_display *ld = vmw_priv->ldu_priv; + if (list_empty(&ldu->active)) + return 0; + + list_del_init(&ldu->active); + if (--(ld->num_active) == 0) { + BUG_ON(!ld->fb); + if (ld->fb->unpin) + ld->fb->unpin(ld->fb); + ld->fb = NULL; + } + + return 0; +} + +static int vmw_ldu_add_active(struct vmw_private *vmw_priv, + struct vmw_legacy_display_unit *ldu, + struct vmw_framebuffer *vfb) +{ + struct vmw_legacy_display *ld = vmw_priv->ldu_priv; + struct vmw_legacy_display_unit *entry; + struct list_head *at; + + if (!list_empty(&ldu->active)) + return 0; + + at = &ld->active; + list_for_each_entry(entry, &ld->active, active) { + if (entry->unit > ldu->unit) + break; + + at = &entry->active; + } + + list_add(&ldu->active, at); + if (ld->num_active++ == 0) { + BUG_ON(ld->fb); + if (vfb->pin) + vfb->pin(vfb); + ld->fb = vfb; + } + + return 0; +} + +static int vmw_ldu_crtc_set_config(struct drm_mode_set *set) +{ + struct vmw_private *dev_priv; + struct vmw_legacy_display_unit *ldu; + struct drm_connector *connector; + struct drm_display_mode *mode; + struct drm_encoder *encoder; + struct vmw_framebuffer *vfb; + struct drm_framebuffer *fb; + struct drm_crtc *crtc; + + if (!set) + return -EINVAL; + + if (!set->crtc) + return -EINVAL; + + /* get the ldu */ + crtc = set->crtc; + ldu = vmw_crtc_to_ldu(crtc); + vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL; + dev_priv = vmw_priv(crtc->dev); + + if (set->num_connectors > 1) { + DRM_ERROR("to many connectors\n"); + return -EINVAL; + } + + if (set->num_connectors == 1 && + set->connectors[0] != &ldu->base.connector) { + DRM_ERROR("connector doesn't match %p %p\n", + set->connectors[0], &ldu->base.connector); + return -EINVAL; + } + + /* ldu only supports one fb active at the time */ + if (dev_priv->ldu_priv->fb && vfb && + dev_priv->ldu_priv->fb != vfb) { + DRM_ERROR("Multiple framebuffers not supported\n"); + return -EINVAL; + } + + /* since they always map one to one these are safe */ + connector = &ldu->base.connector; + encoder = &ldu->base.encoder; + + /* should we turn the crtc off? 
*/ + if (set->num_connectors == 0 || !set->mode || !set->fb) { + + connector->encoder = NULL; + encoder->crtc = NULL; + crtc->fb = NULL; + + vmw_ldu_del_active(dev_priv, ldu); + + vmw_ldu_commit_list(dev_priv); + + return 0; + } + + + /* we now know we want to set a mode */ + mode = set->mode; + fb = set->fb; + + if (set->x + mode->hdisplay > fb->width || + set->y + mode->vdisplay > fb->height) { + DRM_ERROR("set outside of framebuffer\n"); + return -EINVAL; + } + + vmw_fb_off(dev_priv); + + crtc->fb = fb; + encoder->crtc = crtc; + connector->encoder = encoder; + crtc->x = set->x; + crtc->y = set->y; + crtc->mode = *mode; + + vmw_ldu_add_active(dev_priv, ldu, vfb); + + vmw_ldu_commit_list(dev_priv); + + return 0; +} + +static struct drm_crtc_funcs vmw_legacy_crtc_funcs = { + .save = vmw_ldu_crtc_save, + .restore = vmw_ldu_crtc_restore, + .cursor_set = vmw_du_crtc_cursor_set, + .cursor_move = vmw_du_crtc_cursor_move, + .gamma_set = vmw_ldu_crtc_gamma_set, + .destroy = vmw_ldu_crtc_destroy, + .set_config = vmw_ldu_crtc_set_config, +}; + +/* + * Legacy Display Unit encoder functions + */ + +static void vmw_ldu_encoder_destroy(struct drm_encoder *encoder) +{ + vmw_ldu_destroy(vmw_encoder_to_ldu(encoder)); +} + +static struct drm_encoder_funcs vmw_legacy_encoder_funcs = { + .destroy = vmw_ldu_encoder_destroy, +}; + +/* + * Legacy Display Unit connector functions + */ + +static void vmw_ldu_connector_dpms(struct drm_connector *connector, int mode) +{ +} + +static void vmw_ldu_connector_save(struct drm_connector *connector) +{ +} + +static void vmw_ldu_connector_restore(struct drm_connector *connector) +{ +} + +static enum drm_connector_status + vmw_ldu_connector_detect(struct drm_connector *connector) +{ + /* XXX vmwctrl should control connection status */ + if (vmw_connector_to_ldu(connector)->base.unit == 0) + return connector_status_connected; + return connector_status_disconnected; +} + +static struct drm_display_mode vmw_ldu_connector_builtin[] = { + /* 640x480@60Hz */ + { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, + 752, 800, 0, 480, 489, 492, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 800x600@60Hz */ + { DRM_MODE("800x600", + DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, + 40000, 800, 840, 968, 1056, 0, 600, 601, 605, 628, + 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1024x768@60Hz */ + { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, + 1184, 1344, 0, 768, 771, 777, 806, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 1152x864@75Hz */ + { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, + 1344, 1600, 0, 864, 865, 868, 900, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1280x768@60Hz */ + { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344, + 1472, 1664, 0, 768, 771, 778, 798, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1280x800@60Hz */ + { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352, + 1480, 1680, 0, 800, 803, 809, 831, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 1280x960@60Hz */ + { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376, + 1488, 1800, 0, 960, 961, 964, 1000, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1280x1024@60Hz */ + { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328, + 1440, 1688, 0, 1024, 1025, 1028, 1066, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1360x768@60Hz */ + { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424, + 1536, 1792, 0, 768, 771, 777, 
795, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1440x1050@60Hz */ + { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488, + 1632, 1864, 0, 1050, 1053, 1057, 1089, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1440x900@60Hz */ + { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520, + 1672, 1904, 0, 900, 903, 909, 934, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1600x1200@60Hz */ + { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664, + 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1680x1050@60Hz */ + { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784, + 1960, 2240, 0, 1050, 1053, 1059, 1089, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1792x1344@60Hz */ + { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920, + 2120, 2448, 0, 1344, 1345, 1348, 1394, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1853x1392@60Hz */ + { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952, + 2176, 2528, 0, 1392, 1393, 1396, 1439, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1920x1200@60Hz */ + { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056, + 2256, 2592, 0, 1200, 1203, 1209, 1245, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1920x1440@60Hz */ + { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048, + 2256, 2600, 0, 1440, 1441, 1444, 1500, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 2560x1600@60Hz */ + { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752, + 3032, 3504, 0, 1600, 1603, 1609, 1658, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* Terminate */ + { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) }, +}; + +static int vmw_ldu_connector_fill_modes(struct drm_connector *connector, + uint32_t max_width, uint32_t max_height) +{ + struct drm_device *dev = connector->dev; + struct drm_display_mode *mode = NULL; + int i; + + for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) { + if (vmw_ldu_connector_builtin[i].hdisplay > max_width || + vmw_ldu_connector_builtin[i].vdisplay > max_height) + continue; + + mode = drm_mode_duplicate(dev, &vmw_ldu_connector_builtin[i]); + if (!mode) + return 0; + mode->vrefresh = drm_mode_vrefresh(mode); + + drm_mode_probed_add(connector, mode); + } + + drm_mode_connector_list_update(connector); + + return 1; +} + +static int vmw_ldu_connector_set_property(struct drm_connector *connector, + struct drm_property *property, + uint64_t val) +{ + return 0; +} + +static void vmw_ldu_connector_destroy(struct drm_connector *connector) +{ + vmw_ldu_destroy(vmw_connector_to_ldu(connector)); +} + +static struct drm_connector_funcs vmw_legacy_connector_funcs = { + .dpms = vmw_ldu_connector_dpms, + .save = vmw_ldu_connector_save, + .restore = vmw_ldu_connector_restore, + .detect = vmw_ldu_connector_detect, + .fill_modes = vmw_ldu_connector_fill_modes, + .set_property = vmw_ldu_connector_set_property, + .destroy = vmw_ldu_connector_destroy, +}; + +static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) +{ + struct vmw_legacy_display_unit *ldu; + struct drm_device *dev = dev_priv->dev; + struct drm_connector *connector; + struct drm_encoder *encoder; + struct drm_crtc *crtc; + + ldu = kzalloc(sizeof(*ldu), GFP_KERNEL); + if (!ldu) + return -ENOMEM; + + ldu->unit = unit; + crtc = &ldu->base.crtc; + encoder = &ldu->base.encoder; + connector = &ldu->base.connector; + + drm_connector_init(dev, connector, 
&vmw_legacy_connector_funcs, + DRM_MODE_CONNECTOR_LVDS); + /* Initial status */ + if (unit == 0) + connector->status = connector_status_connected; + else + connector->status = connector_status_disconnected; + + drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs, + DRM_MODE_ENCODER_LVDS); + drm_mode_connector_attach_encoder(connector, encoder); + encoder->possible_crtcs = (1 << unit); + encoder->possible_clones = 0; + + INIT_LIST_HEAD(&ldu->active); + + drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs); + + drm_connector_attach_property(connector, + dev->mode_config.dirty_info_property, + 1); + + return 0; +} + +int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) +{ + if (dev_priv->ldu_priv) { + DRM_INFO("ldu system already on\n"); + return -EINVAL; + } + + dev_priv->ldu_priv = kmalloc(GFP_KERNEL, sizeof(*dev_priv->ldu_priv)); + + if (!dev_priv->ldu_priv) + return -ENOMEM; + + INIT_LIST_HEAD(&dev_priv->ldu_priv->active); + dev_priv->ldu_priv->num_active = 0; + dev_priv->ldu_priv->fb = NULL; + + drm_mode_create_dirty_info_property(dev_priv->dev); + + vmw_ldu_init(dev_priv, 0); + vmw_ldu_init(dev_priv, 1); + vmw_ldu_init(dev_priv, 2); + vmw_ldu_init(dev_priv, 3); + vmw_ldu_init(dev_priv, 4); + vmw_ldu_init(dev_priv, 5); + vmw_ldu_init(dev_priv, 6); + vmw_ldu_init(dev_priv, 7); + + return 0; +} + +int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv) +{ + if (!dev_priv->ldu_priv) + return -ENOSYS; + + BUG_ON(!list_empty(&dev_priv->ldu_priv->active)); + + kfree(dev_priv->ldu_priv); + + return 0; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c new file mode 100644 index 000000000000..bb6e6a096d25 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c @@ -0,0 +1,634 @@ +/************************************************************************** + * + * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + **************************************************************************/ + + +#include "drmP.h" +#include "vmwgfx_drv.h" + +#include "ttm/ttm_placement.h" + +#include "svga_overlay.h" +#include "svga_escape.h" + +#define VMW_MAX_NUM_STREAMS 1 + +struct vmw_stream { + struct vmw_dma_buffer *buf; + bool claimed; + bool paused; + struct drm_vmw_control_stream_arg saved; +}; + +/** + * Overlay control + */ +struct vmw_overlay { + /* + * Each stream is a single overlay. In Xv these are called ports. + */ + struct mutex mutex; + struct vmw_stream stream[VMW_MAX_NUM_STREAMS]; +}; + +static inline struct vmw_overlay *vmw_overlay(struct drm_device *dev) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + return dev_priv ? dev_priv->overlay_priv : NULL; +} + +struct vmw_escape_header { + uint32_t cmd; + SVGAFifoCmdEscape body; +}; + +struct vmw_escape_video_flush { + struct vmw_escape_header escape; + SVGAEscapeVideoFlush flush; +}; + +static inline void fill_escape(struct vmw_escape_header *header, + uint32_t size) +{ + header->cmd = SVGA_CMD_ESCAPE; + header->body.nsid = SVGA_ESCAPE_NSID_VMWARE; + header->body.size = size; +} + +static inline void fill_flush(struct vmw_escape_video_flush *cmd, + uint32_t stream_id) +{ + fill_escape(&cmd->escape, sizeof(cmd->flush)); + cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH; + cmd->flush.streamId = stream_id; +} + +/** + * Pin or unpin a buffer in vram. + * + * @dev_priv: Driver private. + * @buf: DMA buffer to pin or unpin. + * @pin: Pin buffer in vram if true. + * @interruptible: Use interruptible wait. + * + * Takes the current masters ttm lock in read. + * + * Returns + * -ERESTARTSYS if interrupted by a signal. + */ +static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv, + struct vmw_dma_buffer *buf, + bool pin, bool interruptible) +{ + struct ttm_buffer_object *bo = &buf->base; + struct ttm_bo_global *glob = bo->glob; + struct ttm_placement *overlay_placement = &vmw_vram_placement; + int ret; + + ret = ttm_read_lock(&dev_priv->active_master->lock, interruptible); + if (unlikely(ret != 0)) + return ret; + + ret = ttm_bo_reserve(bo, interruptible, false, false, 0); + if (unlikely(ret != 0)) + goto err; + + if (buf->gmr_bound) { + vmw_gmr_unbind(dev_priv, buf->gmr_id); + spin_lock(&glob->lru_lock); + ida_remove(&dev_priv->gmr_ida, buf->gmr_id); + spin_unlock(&glob->lru_lock); + buf->gmr_bound = NULL; + } + + if (pin) + overlay_placement = &vmw_vram_ne_placement; + + ret = ttm_bo_validate(bo, overlay_placement, interruptible, false); + + ttm_bo_unreserve(bo); + +err: + ttm_read_unlock(&dev_priv->active_master->lock); + + return ret; +} + +/** + * Send put command to hw. + * + * Returns + * -ERESTARTSYS if interrupted by a signal. 
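+ * 0 on success.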
+ */ +static int vmw_overlay_send_put(struct vmw_private *dev_priv, + struct vmw_dma_buffer *buf, + struct drm_vmw_control_stream_arg *arg, + bool interruptible) +{ + struct { + struct vmw_escape_header escape; + struct { + struct { + uint32_t cmdType; + uint32_t streamId; + } header; + struct { + uint32_t registerId; + uint32_t value; + } items[SVGA_VIDEO_PITCH_3 + 1]; + } body; + struct vmw_escape_video_flush flush; + } *cmds; + uint32_t offset; + int i, ret; + + for (;;) { + cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds)); + if (cmds) + break; + + ret = vmw_fallback_wait(dev_priv, false, true, 0, + interruptible, 3*HZ); + if (interruptible && ret == -ERESTARTSYS) + return ret; + else + BUG_ON(ret != 0); + } + + fill_escape(&cmds->escape, sizeof(cmds->body)); + cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS; + cmds->body.header.streamId = arg->stream_id; + + for (i = 0; i <= SVGA_VIDEO_PITCH_3; i++) + cmds->body.items[i].registerId = i; + + offset = buf->base.offset + arg->offset; + + cmds->body.items[SVGA_VIDEO_ENABLED].value = true; + cmds->body.items[SVGA_VIDEO_FLAGS].value = arg->flags; + cmds->body.items[SVGA_VIDEO_DATA_OFFSET].value = offset; + cmds->body.items[SVGA_VIDEO_FORMAT].value = arg->format; + cmds->body.items[SVGA_VIDEO_COLORKEY].value = arg->color_key; + cmds->body.items[SVGA_VIDEO_SIZE].value = arg->size; + cmds->body.items[SVGA_VIDEO_WIDTH].value = arg->width; + cmds->body.items[SVGA_VIDEO_HEIGHT].value = arg->height; + cmds->body.items[SVGA_VIDEO_SRC_X].value = arg->src.x; + cmds->body.items[SVGA_VIDEO_SRC_Y].value = arg->src.y; + cmds->body.items[SVGA_VIDEO_SRC_WIDTH].value = arg->src.w; + cmds->body.items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h; + cmds->body.items[SVGA_VIDEO_DST_X].value = arg->dst.x; + cmds->body.items[SVGA_VIDEO_DST_Y].value = arg->dst.y; + cmds->body.items[SVGA_VIDEO_DST_WIDTH].value = arg->dst.w; + cmds->body.items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h; + cmds->body.items[SVGA_VIDEO_PITCH_1].value = arg->pitch[0]; + cmds->body.items[SVGA_VIDEO_PITCH_2].value = arg->pitch[1]; + cmds->body.items[SVGA_VIDEO_PITCH_3].value = arg->pitch[2]; + + fill_flush(&cmds->flush, arg->stream_id); + + vmw_fifo_commit(dev_priv, sizeof(*cmds)); + + return 0; +} + +/** + * Send stop command to hw. + * + * Returns + * -ERESTARTSYS if interrupted by a signal. + */ +static int vmw_overlay_send_stop(struct vmw_private *dev_priv, + uint32_t stream_id, + bool interruptible) +{ + struct { + struct vmw_escape_header escape; + SVGAEscapeVideoSetRegs body; + struct vmw_escape_video_flush flush; + } *cmds; + int ret; + + for (;;) { + cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds)); + if (cmds) + break; + + ret = vmw_fallback_wait(dev_priv, false, true, 0, + interruptible, 3*HZ); + if (interruptible && ret == -ERESTARTSYS) + return ret; + else + BUG_ON(ret != 0); + } + + fill_escape(&cmds->escape, sizeof(cmds->body)); + cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS; + cmds->body.header.streamId = stream_id; + cmds->body.items[0].registerId = SVGA_VIDEO_ENABLED; + cmds->body.items[0].value = false; + fill_flush(&cmds->flush, stream_id); + + vmw_fifo_commit(dev_priv, sizeof(*cmds)); + + return 0; +} + +/** + * Stop or pause a stream. + * + * If the stream is paused the no evict flag is removed from the buffer + * but left in vram. This allows for instance mode_set to evict it + * should it need to. + * + * The caller must hold the overlay lock. + * + * @stream_id which stream to stop/pause. 
+ * @pause true to pause, false to stop completely. + */ +static int vmw_overlay_stop(struct vmw_private *dev_priv, + uint32_t stream_id, bool pause, + bool interruptible) +{ + struct vmw_overlay *overlay = dev_priv->overlay_priv; + struct vmw_stream *stream = &overlay->stream[stream_id]; + int ret; + + /* no buffer attached the stream is completely stopped */ + if (!stream->buf) + return 0; + + /* If the stream is paused this is already done */ + if (!stream->paused) { + ret = vmw_overlay_send_stop(dev_priv, stream_id, + interruptible); + if (ret) + return ret; + + /* We just remove the NO_EVICT flag so no -ENOMEM */ + ret = vmw_dmabuf_pin_in_vram(dev_priv, stream->buf, false, + interruptible); + if (interruptible && ret == -ERESTARTSYS) + return ret; + else + BUG_ON(ret != 0); + } + + if (!pause) { + vmw_dmabuf_unreference(&stream->buf); + stream->paused = false; + } else { + stream->paused = true; + } + + return 0; +} + +/** + * Update a stream and send any put or stop fifo commands needed. + * + * The caller must hold the overlay lock. + * + * Returns + * -ENOMEM if buffer doesn't fit in vram. + * -ERESTARTSYS if interrupted. + */ +static int vmw_overlay_update_stream(struct vmw_private *dev_priv, + struct vmw_dma_buffer *buf, + struct drm_vmw_control_stream_arg *arg, + bool interruptible) +{ + struct vmw_overlay *overlay = dev_priv->overlay_priv; + struct vmw_stream *stream = &overlay->stream[arg->stream_id]; + int ret = 0; + + if (!buf) + return -EINVAL; + + DRM_DEBUG(" %s: old %p, new %p, %spaused\n", __func__, + stream->buf, buf, stream->paused ? "" : "not "); + + if (stream->buf != buf) { + ret = vmw_overlay_stop(dev_priv, arg->stream_id, + false, interruptible); + if (ret) + return ret; + } else if (!stream->paused) { + /* If the buffers match and not paused then just send + * the put command, no need to do anything else. + */ + ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible); + if (ret == 0) + stream->saved = *arg; + else + BUG_ON(!interruptible); + + return ret; + } + + /* We don't start the old stream if we are interrupted. + * Might return -ENOMEM if it can't fit the buffer in vram. + */ + ret = vmw_dmabuf_pin_in_vram(dev_priv, buf, true, interruptible); + if (ret) + return ret; + + ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible); + if (ret) { + /* This one needs to happen no matter what. We only remove + * the NO_EVICT flag so this is safe from -ENOMEM. + */ + BUG_ON(vmw_dmabuf_pin_in_vram(dev_priv, buf, false, false) != 0); + return ret; + } + + if (stream->buf != buf) + stream->buf = vmw_dmabuf_reference(buf); + stream->saved = *arg; + + return 0; +} + +/** + * Stop all streams. + * + * Used by the fb code when starting. + * + * Takes the overlay lock. + */ +int vmw_overlay_stop_all(struct vmw_private *dev_priv) +{ + struct vmw_overlay *overlay = dev_priv->overlay_priv; + int i, ret; + + if (!overlay) + return 0; + + mutex_lock(&overlay->mutex); + + for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) { + struct vmw_stream *stream = &overlay->stream[i]; + if (!stream->buf) + continue; + + ret = vmw_overlay_stop(dev_priv, i, false, false); + WARN_ON(ret != 0); + } + + mutex_unlock(&overlay->mutex); + + return 0; +} + +/** + * Try to resume all paused streams. + * + * Used by the kms code after moving a new scanout buffer to vram. + * + * Takes the overlay lock. 
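+ * Streams that fail to resume are only reported with a DRM_INFO warning.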
+ */ +int vmw_overlay_resume_all(struct vmw_private *dev_priv) +{ + struct vmw_overlay *overlay = dev_priv->overlay_priv; + int i, ret; + + if (!overlay) + return 0; + + mutex_lock(&overlay->mutex); + + for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) { + struct vmw_stream *stream = &overlay->stream[i]; + if (!stream->paused) + continue; + + ret = vmw_overlay_update_stream(dev_priv, stream->buf, + &stream->saved, false); + if (ret != 0) + DRM_INFO("%s: *warning* failed to resume stream %i\n", + __func__, i); + } + + mutex_unlock(&overlay->mutex); + + return 0; +} + +/** + * Pauses all active streams. + * + * Used by the kms code when moving a new scanout buffer to vram. + * + * Takes the overlay lock. + */ +int vmw_overlay_pause_all(struct vmw_private *dev_priv) +{ + struct vmw_overlay *overlay = dev_priv->overlay_priv; + int i, ret; + + if (!overlay) + return 0; + + mutex_lock(&overlay->mutex); + + for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) { + if (overlay->stream[i].paused) + DRM_INFO("%s: *warning* stream %i already paused\n", + __func__, i); + ret = vmw_overlay_stop(dev_priv, i, true, false); + WARN_ON(ret != 0); + } + + mutex_unlock(&overlay->mutex); + + return 0; +} + +int vmw_overlay_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + struct vmw_private *dev_priv = vmw_priv(dev); + struct vmw_overlay *overlay = dev_priv->overlay_priv; + struct drm_vmw_control_stream_arg *arg = + (struct drm_vmw_control_stream_arg *)data; + struct vmw_dma_buffer *buf; + struct vmw_resource *res; + int ret; + + if (!overlay) + return -ENOSYS; + + ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res); + if (ret) + return ret; + + mutex_lock(&overlay->mutex); + + if (!arg->enabled) { + ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true); + goto out_unlock; + } + + ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf); + if (ret) + goto out_unlock; + + ret = vmw_overlay_update_stream(dev_priv, buf, arg, true); + + vmw_dmabuf_unreference(&buf); + +out_unlock: + mutex_unlock(&overlay->mutex); + vmw_resource_unreference(&res); + + return ret; +} + +int vmw_overlay_num_overlays(struct vmw_private *dev_priv) +{ + if (!dev_priv->overlay_priv) + return 0; + + return VMW_MAX_NUM_STREAMS; +} + +int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv) +{ + struct vmw_overlay *overlay = dev_priv->overlay_priv; + int i, k; + + if (!overlay) + return 0; + + mutex_lock(&overlay->mutex); + + for (i = 0, k = 0; i < VMW_MAX_NUM_STREAMS; i++) + if (!overlay->stream[i].claimed) + k++; + + mutex_unlock(&overlay->mutex); + + return k; +} + +int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out) +{ + struct vmw_overlay *overlay = dev_priv->overlay_priv; + int i; + + if (!overlay) + return -ENOSYS; + + mutex_lock(&overlay->mutex); + + for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) { + + if (overlay->stream[i].claimed) + continue; + + overlay->stream[i].claimed = true; + *out = i; + mutex_unlock(&overlay->mutex); + return 0; + } + + mutex_unlock(&overlay->mutex); + return -ESRCH; +} + +int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id) +{ + struct vmw_overlay *overlay = dev_priv->overlay_priv; + + BUG_ON(stream_id >= VMW_MAX_NUM_STREAMS); + + if (!overlay) + return -ENOSYS; + + mutex_lock(&overlay->mutex); + + WARN_ON(!overlay->stream[stream_id].claimed); + vmw_overlay_stop(dev_priv, stream_id, false, false); + overlay->stream[stream_id].claimed = false; + + mutex_unlock(&overlay->mutex); 
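+	/* The stream id can now be claimed again by vmw_overlay_claim(). */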
+ return 0; +} + +int vmw_overlay_init(struct vmw_private *dev_priv) +{ + struct vmw_overlay *overlay; + int i; + + if (dev_priv->overlay_priv) + return -EINVAL; + + if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_VIDEO) && + (dev_priv->fifo.capabilities & SVGA_FIFO_CAP_ESCAPE)) { + DRM_INFO("hardware doesn't support overlays\n"); + return -ENOSYS; + } + + overlay = kmalloc(GFP_KERNEL, sizeof(*overlay)); + if (!overlay) + return -ENOMEM; + + memset(overlay, 0, sizeof(*overlay)); + mutex_init(&overlay->mutex); + for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) { + overlay->stream[i].buf = NULL; + overlay->stream[i].paused = false; + overlay->stream[i].claimed = false; + } + + dev_priv->overlay_priv = overlay; + + return 0; +} + +int vmw_overlay_close(struct vmw_private *dev_priv) +{ + struct vmw_overlay *overlay = dev_priv->overlay_priv; + bool forgotten_buffer = false; + int i; + + if (!overlay) + return -ENOSYS; + + for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) { + if (overlay->stream[i].buf) { + forgotten_buffer = true; + vmw_overlay_stop(dev_priv, i, false, false); + } + } + + WARN_ON(forgotten_buffer); + + dev_priv->overlay_priv = NULL; + kfree(overlay); + + return 0; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h new file mode 100644 index 000000000000..9d0dd3a342eb --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h @@ -0,0 +1,57 @@ +/************************************************************************** + * + * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +/** + * This file contains virtual hardware defines for kernel space. 
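+ * (I/O port offsets, guest memory descriptors and the fifo fence command.)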
+ */ + +#ifndef _VMWGFX_REG_H_ +#define _VMWGFX_REG_H_ + +#include + +#define VMWGFX_INDEX_PORT 0x0 +#define VMWGFX_VALUE_PORT 0x1 +#define VMWGFX_IRQSTATUS_PORT 0x8 + +struct svga_guest_mem_descriptor { + __le32 ppn; + __le32 num_pages; +}; + +struct svga_fifo_cmd_fence { + __le32 fence; +}; + +#define SVGA_SYNC_GENERIC 1 +#define SVGA_SYNC_FIFOFULL 2 + +#include "svga_types.h" + +#include "svga3d_reg.h" + +#endif diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c new file mode 100644 index 000000000000..a1ceed0c8e07 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -0,0 +1,1192 @@ +/************************************************************************** + * + * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + **************************************************************************/ + +#include "vmwgfx_drv.h" +#include "vmwgfx_drm.h" +#include "ttm/ttm_object.h" +#include "ttm/ttm_placement.h" +#include "drmP.h" + +#define VMW_RES_CONTEXT ttm_driver_type0 +#define VMW_RES_SURFACE ttm_driver_type1 +#define VMW_RES_STREAM ttm_driver_type2 + +struct vmw_user_context { + struct ttm_base_object base; + struct vmw_resource res; +}; + +struct vmw_user_surface { + struct ttm_base_object base; + struct vmw_surface srf; +}; + +struct vmw_user_dma_buffer { + struct ttm_base_object base; + struct vmw_dma_buffer dma; +}; + +struct vmw_bo_user_rep { + uint32_t handle; + uint64_t map_handle; +}; + +struct vmw_stream { + struct vmw_resource res; + uint32_t stream_id; +}; + +struct vmw_user_stream { + struct ttm_base_object base; + struct vmw_stream stream; +}; + +static inline struct vmw_dma_buffer * +vmw_dma_buffer(struct ttm_buffer_object *bo) +{ + return container_of(bo, struct vmw_dma_buffer, base); +} + +static inline struct vmw_user_dma_buffer * +vmw_user_dma_buffer(struct ttm_buffer_object *bo) +{ + struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); + return container_of(vmw_bo, struct vmw_user_dma_buffer, dma); +} + +struct vmw_resource *vmw_resource_reference(struct vmw_resource *res) +{ + kref_get(&res->kref); + return res; +} + +static void vmw_resource_release(struct kref *kref) +{ + struct vmw_resource *res = + container_of(kref, struct vmw_resource, kref); + struct vmw_private *dev_priv = res->dev_priv; + + idr_remove(res->idr, res->id); + write_unlock(&dev_priv->resource_lock); + + if (likely(res->hw_destroy != NULL)) + res->hw_destroy(res); + + if (res->res_free != NULL) + res->res_free(res); + else + kfree(res); + + write_lock(&dev_priv->resource_lock); +} + +void vmw_resource_unreference(struct vmw_resource **p_res) +{ + struct vmw_resource *res = *p_res; + struct vmw_private *dev_priv = res->dev_priv; + + *p_res = NULL; + write_lock(&dev_priv->resource_lock); + kref_put(&res->kref, vmw_resource_release); + write_unlock(&dev_priv->resource_lock); +} + +static int vmw_resource_init(struct vmw_private *dev_priv, + struct vmw_resource *res, + struct idr *idr, + enum ttm_object_type obj_type, + void (*res_free) (struct vmw_resource *res)) +{ + int ret; + + kref_init(&res->kref); + res->hw_destroy = NULL; + res->res_free = res_free; + res->res_type = obj_type; + res->idr = idr; + res->avail = false; + res->dev_priv = dev_priv; + + do { + if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0)) + return -ENOMEM; + + write_lock(&dev_priv->resource_lock); + ret = idr_get_new_above(idr, res, 1, &res->id); + write_unlock(&dev_priv->resource_lock); + + } while (ret == -EAGAIN); + + return ret; +} + +/** + * vmw_resource_activate + * + * @res: Pointer to the newly created resource + * @hw_destroy: Destroy function. NULL if none. + * + * Activate a resource after the hardware has been made aware of it. + * Set tye destroy function to @destroy. Typically this frees the + * resource and destroys the hardware resources associated with it. + * Activate basically means that the function vmw_resource_lookup will + * find it. 
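+ * The resource is marked available under dev_priv->resource_lock.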
+ */ + +static void vmw_resource_activate(struct vmw_resource *res, + void (*hw_destroy) (struct vmw_resource *)) +{ + struct vmw_private *dev_priv = res->dev_priv; + + write_lock(&dev_priv->resource_lock); + res->avail = true; + res->hw_destroy = hw_destroy; + write_unlock(&dev_priv->resource_lock); +} + +struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv, + struct idr *idr, int id) +{ + struct vmw_resource *res; + + read_lock(&dev_priv->resource_lock); + res = idr_find(idr, id); + if (res && res->avail) + kref_get(&res->kref); + else + res = NULL; + read_unlock(&dev_priv->resource_lock); + + if (unlikely(res == NULL)) + return NULL; + + return res; +} + +/** + * Context management: + */ + +static void vmw_hw_context_destroy(struct vmw_resource *res) +{ + + struct vmw_private *dev_priv = res->dev_priv; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDestroyContext body; + } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); + + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for surface " + "destruction.\n"); + return; + } + + cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY); + cmd->header.size = cpu_to_le32(sizeof(cmd->body)); + cmd->body.cid = cpu_to_le32(res->id); + + vmw_fifo_commit(dev_priv, sizeof(*cmd)); +} + +static int vmw_context_init(struct vmw_private *dev_priv, + struct vmw_resource *res, + void (*res_free) (struct vmw_resource *res)) +{ + int ret; + + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDefineContext body; + } *cmd; + + ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr, + VMW_RES_CONTEXT, res_free); + + if (unlikely(ret != 0)) { + if (res_free == NULL) + kfree(res); + else + res_free(res); + return ret; + } + + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Fifo reserve failed.\n"); + vmw_resource_unreference(&res); + return -ENOMEM; + } + + cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE); + cmd->header.size = cpu_to_le32(sizeof(cmd->body)); + cmd->body.cid = cpu_to_le32(res->id); + + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_resource_activate(res, vmw_hw_context_destroy); + return 0; +} + +struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv) +{ + struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL); + int ret; + + if (unlikely(res == NULL)) + return NULL; + + ret = vmw_context_init(dev_priv, res, NULL); + return (ret == 0) ? res : NULL; +} + +/** + * User-space context management: + */ + +static void vmw_user_context_free(struct vmw_resource *res) +{ + struct vmw_user_context *ctx = + container_of(res, struct vmw_user_context, res); + + kfree(ctx); +} + +/** + * This function is called when user space has no more references on the + * base object. It releases the base-object's reference on the resource object. 
+ */ + +static void vmw_user_context_base_release(struct ttm_base_object **p_base) +{ + struct ttm_base_object *base = *p_base; + struct vmw_user_context *ctx = + container_of(base, struct vmw_user_context, base); + struct vmw_resource *res = &ctx->res; + + *p_base = NULL; + vmw_resource_unreference(&res); +} + +int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + struct vmw_resource *res; + struct vmw_user_context *ctx; + struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + int ret = 0; + + res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid); + if (unlikely(res == NULL)) + return -EINVAL; + + if (res->res_free != &vmw_user_context_free) { + ret = -EINVAL; + goto out; + } + + ctx = container_of(res, struct vmw_user_context, res); + if (ctx->base.tfile != tfile && !ctx->base.shareable) { + ret = -EPERM; + goto out; + } + + ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE); +out: + vmw_resource_unreference(&res); + return ret; +} + +int vmw_context_define_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); + struct vmw_resource *res; + struct vmw_resource *tmp; + struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + int ret; + + if (unlikely(ctx == NULL)) + return -ENOMEM; + + res = &ctx->res; + ctx->base.shareable = false; + ctx->base.tfile = NULL; + + ret = vmw_context_init(dev_priv, res, vmw_user_context_free); + if (unlikely(ret != 0)) + return ret; + + tmp = vmw_resource_reference(&ctx->res); + ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT, + &vmw_user_context_base_release, NULL); + + if (unlikely(ret != 0)) { + vmw_resource_unreference(&tmp); + goto out_err; + } + + arg->cid = res->id; +out_err: + vmw_resource_unreference(&res); + return ret; + +} + +int vmw_context_check(struct vmw_private *dev_priv, + struct ttm_object_file *tfile, + int id) +{ + struct vmw_resource *res; + int ret = 0; + + read_lock(&dev_priv->resource_lock); + res = idr_find(&dev_priv->context_idr, id); + if (res && res->avail) { + struct vmw_user_context *ctx = + container_of(res, struct vmw_user_context, res); + if (ctx->base.tfile != tfile && !ctx->base.shareable) + ret = -EPERM; + } else + ret = -EINVAL; + read_unlock(&dev_priv->resource_lock); + + return ret; +} + + +/** + * Surface management. 
+ */ + +static void vmw_hw_surface_destroy(struct vmw_resource *res) +{ + + struct vmw_private *dev_priv = res->dev_priv; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDestroySurface body; + } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); + + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for surface " + "destruction.\n"); + return; + } + + cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY); + cmd->header.size = cpu_to_le32(sizeof(cmd->body)); + cmd->body.sid = cpu_to_le32(res->id); + + vmw_fifo_commit(dev_priv, sizeof(*cmd)); +} + +void vmw_surface_res_free(struct vmw_resource *res) +{ + struct vmw_surface *srf = container_of(res, struct vmw_surface, res); + + kfree(srf->sizes); + kfree(srf->snooper.image); + kfree(srf); +} + +int vmw_surface_init(struct vmw_private *dev_priv, + struct vmw_surface *srf, + void (*res_free) (struct vmw_resource *res)) +{ + int ret; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDefineSurface body; + } *cmd; + SVGA3dSize *cmd_size; + struct vmw_resource *res = &srf->res; + struct drm_vmw_size *src_size; + size_t submit_size; + uint32_t cmd_len; + int i; + + BUG_ON(res_free == NULL); + ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr, + VMW_RES_SURFACE, res_free); + + if (unlikely(ret != 0)) { + res_free(res); + return ret; + } + + submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize); + cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize); + + cmd = vmw_fifo_reserve(dev_priv, submit_size); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Fifo reserve failed for create surface.\n"); + vmw_resource_unreference(&res); + return -ENOMEM; + } + + cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE); + cmd->header.size = cpu_to_le32(cmd_len); + cmd->body.sid = cpu_to_le32(res->id); + cmd->body.surfaceFlags = cpu_to_le32(srf->flags); + cmd->body.format = cpu_to_le32(srf->format); + for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) { + cmd->body.face[i].numMipLevels = + cpu_to_le32(srf->mip_levels[i]); + } + + cmd += 1; + cmd_size = (SVGA3dSize *) cmd; + src_size = srf->sizes; + + for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) { + cmd_size->width = cpu_to_le32(src_size->width); + cmd_size->height = cpu_to_le32(src_size->height); + cmd_size->depth = cpu_to_le32(src_size->depth); + } + + vmw_fifo_commit(dev_priv, submit_size); + vmw_resource_activate(res, vmw_hw_surface_destroy); + return 0; +} + +static void vmw_user_surface_free(struct vmw_resource *res) +{ + struct vmw_surface *srf = container_of(res, struct vmw_surface, res); + struct vmw_user_surface *user_srf = + container_of(srf, struct vmw_user_surface, srf); + + kfree(srf->sizes); + kfree(srf->snooper.image); + kfree(user_srf); +} + +int vmw_user_surface_lookup(struct vmw_private *dev_priv, + struct ttm_object_file *tfile, + int sid, struct vmw_surface **out) +{ + struct vmw_resource *res; + struct vmw_surface *srf; + struct vmw_user_surface *user_srf; + + res = vmw_resource_lookup(dev_priv, &dev_priv->surface_idr, sid); + if (unlikely(res == NULL)) + return -EINVAL; + + if (res->res_free != &vmw_user_surface_free) + return -EINVAL; + + srf = container_of(res, struct vmw_surface, res); + user_srf = container_of(srf, struct vmw_user_surface, srf); + if (user_srf->base.tfile != tfile && !user_srf->base.shareable) + return -EPERM; + + *out = srf; + return 0; +} + +static void vmw_user_surface_base_release(struct ttm_base_object **p_base) +{ + struct ttm_base_object *base = *p_base; + struct vmw_user_surface *user_srf = + 
container_of(base, struct vmw_user_surface, base); + struct vmw_resource *res = &user_srf->srf.res; + + *p_base = NULL; + vmw_resource_unreference(&res); +} + +int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + struct vmw_resource *res; + struct vmw_surface *srf; + struct vmw_user_surface *user_srf; + struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data; + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + int ret = 0; + + res = vmw_resource_lookup(dev_priv, &dev_priv->surface_idr, arg->sid); + if (unlikely(res == NULL)) + return -EINVAL; + + if (res->res_free != &vmw_user_surface_free) { + ret = -EINVAL; + goto out; + } + + srf = container_of(res, struct vmw_surface, res); + user_srf = container_of(srf, struct vmw_user_surface, srf); + if (user_srf->base.tfile != tfile && !user_srf->base.shareable) { + ret = -EPERM; + goto out; + } + + ttm_ref_object_base_unref(tfile, user_srf->base.hash.key, + TTM_REF_USAGE); +out: + vmw_resource_unreference(&res); + return ret; +} + +int vmw_surface_define_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + struct vmw_user_surface *user_srf = + kmalloc(sizeof(*user_srf), GFP_KERNEL); + struct vmw_surface *srf; + struct vmw_resource *res; + struct vmw_resource *tmp; + union drm_vmw_surface_create_arg *arg = + (union drm_vmw_surface_create_arg *)data; + struct drm_vmw_surface_create_req *req = &arg->req; + struct drm_vmw_surface_arg *rep = &arg->rep; + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + struct drm_vmw_size __user *user_sizes; + int ret; + int i; + + if (unlikely(user_srf == NULL)) + return -ENOMEM; + + srf = &user_srf->srf; + res = &srf->res; + + srf->flags = req->flags; + srf->format = req->format; + memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); + srf->num_sizes = 0; + for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) + srf->num_sizes += srf->mip_levels[i]; + + if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES * + DRM_VMW_MAX_MIP_LEVELS) { + ret = -EINVAL; + goto out_err0; + } + + srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL); + if (unlikely(srf->sizes == NULL)) { + ret = -ENOMEM; + goto out_err0; + } + + user_sizes = (struct drm_vmw_size __user *)(unsigned long) + req->size_addr; + + ret = copy_from_user(srf->sizes, user_sizes, + srf->num_sizes * sizeof(*srf->sizes)); + if (unlikely(ret != 0)) + goto out_err1; + + user_srf->base.shareable = false; + user_srf->base.tfile = NULL; + + /** + * From this point, the generic resource management functions + * destroy the object on failure. 
+ */ + + ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free); + if (unlikely(ret != 0)) + return ret; + + tmp = vmw_resource_reference(&srf->res); + ret = ttm_base_object_init(tfile, &user_srf->base, + req->shareable, VMW_RES_SURFACE, + &vmw_user_surface_base_release, NULL); + + if (unlikely(ret != 0)) { + vmw_resource_unreference(&tmp); + vmw_resource_unreference(&res); + return ret; + } + + if (srf->flags & (1 << 9) && + srf->num_sizes == 1 && + srf->sizes[0].width == 64 && + srf->sizes[0].height == 64 && + srf->format == SVGA3D_A8R8G8B8) { + + srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL); + /* clear the image */ + if (srf->snooper.image) + memset(srf->snooper.image, 0x00, 64 * 64 * 4); + else + DRM_ERROR("Failed to allocate cursor_image\n"); + + } else { + srf->snooper.image = NULL; + } + srf->snooper.crtc = NULL; + + rep->sid = res->id; + vmw_resource_unreference(&res); + return 0; +out_err1: + kfree(srf->sizes); +out_err0: + kfree(user_srf); + return ret; +} + +int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + union drm_vmw_surface_reference_arg *arg = + (union drm_vmw_surface_reference_arg *)data; + struct drm_vmw_surface_arg *req = &arg->req; + struct drm_vmw_surface_create_req *rep = &arg->rep; + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + struct vmw_resource *res; + struct vmw_surface *srf; + struct vmw_user_surface *user_srf; + struct drm_vmw_size __user *user_sizes; + int ret; + + res = vmw_resource_lookup(dev_priv, &dev_priv->surface_idr, req->sid); + if (unlikely(res == NULL)) + return -EINVAL; + + if (res->res_free != &vmw_user_surface_free) { + ret = -EINVAL; + goto out; + } + + srf = container_of(res, struct vmw_surface, res); + user_srf = container_of(srf, struct vmw_user_surface, srf); + if (user_srf->base.tfile != tfile && !user_srf->base.shareable) { + DRM_ERROR("Tried to reference none shareable surface\n"); + ret = -EPERM; + goto out; + } + + ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL); + if (unlikely(ret != 0)) { + DRM_ERROR("Could not add a reference to a surface.\n"); + goto out; + } + + rep->flags = srf->flags; + rep->format = srf->format; + memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels)); + user_sizes = (struct drm_vmw_size __user *)(unsigned long) + rep->size_addr; + + if (user_sizes) + ret = copy_to_user(user_sizes, srf->sizes, + srf->num_sizes * sizeof(*srf->sizes)); + if (unlikely(ret != 0)) { + DRM_ERROR("copy_to_user failed %p %u\n", + user_sizes, srf->num_sizes); + /** + * FIXME: Unreference surface here? + */ + goto out; + } +out: + vmw_resource_unreference(&res); + return ret; +} + +int vmw_surface_check(struct vmw_private *dev_priv, + struct ttm_object_file *tfile, + int id) +{ + struct vmw_resource *res; + int ret = 0; + + read_lock(&dev_priv->resource_lock); + res = idr_find(&dev_priv->surface_idr, id); + if (res && res->avail) { + struct vmw_surface *srf = + container_of(res, struct vmw_surface, res); + struct vmw_user_surface *usrf = + container_of(srf, struct vmw_user_surface, srf); + + if (usrf->base.tfile != tfile && !usrf->base.shareable) + ret = -EPERM; + } else + ret = -EINVAL; + read_unlock(&dev_priv->resource_lock); + + return ret; +} + +/** + * Buffer management. 
+ */ + +static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob, + unsigned long num_pages) +{ + static size_t bo_user_size = ~0; + + size_t page_array_size = + (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK; + + if (unlikely(bo_user_size == ~0)) { + bo_user_size = glob->ttm_bo_extra_size + + ttm_round_pot(sizeof(struct vmw_dma_buffer)); + } + + return bo_user_size + page_array_size; +} + +void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) +{ + struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); + struct ttm_bo_global *glob = bo->glob; + struct vmw_private *dev_priv = + container_of(bo->bdev, struct vmw_private, bdev); + + ttm_mem_global_free(glob->mem_glob, bo->acc_size); + if (vmw_bo->gmr_bound) { + vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id); + spin_lock(&glob->lru_lock); + ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id); + spin_unlock(&glob->lru_lock); + } + kfree(vmw_bo); +} + +int vmw_dmabuf_init(struct vmw_private *dev_priv, + struct vmw_dma_buffer *vmw_bo, + size_t size, struct ttm_placement *placement, + bool interruptible, + void (*bo_free) (struct ttm_buffer_object *bo)) +{ + struct ttm_bo_device *bdev = &dev_priv->bdev; + struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; + size_t acc_size; + int ret; + + BUG_ON(!bo_free); + + acc_size = + vmw_dmabuf_acc_size(bdev->glob, + (size + PAGE_SIZE - 1) >> PAGE_SHIFT); + + ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false); + if (unlikely(ret != 0)) { + /* we must free the bo here as + * ttm_buffer_object_init does so as well */ + bo_free(&vmw_bo->base); + return ret; + } + + memset(vmw_bo, 0, sizeof(*vmw_bo)); + + INIT_LIST_HEAD(&vmw_bo->gmr_lru); + INIT_LIST_HEAD(&vmw_bo->validate_list); + vmw_bo->gmr_id = 0; + vmw_bo->gmr_bound = false; + + ret = ttm_bo_init(bdev, &vmw_bo->base, size, + ttm_bo_type_device, placement, + 0, 0, interruptible, + NULL, acc_size, bo_free); + return ret; +} + +static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) +{ + struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); + struct vmw_dma_buffer *vmw_bo = &vmw_user_bo->dma; + struct ttm_bo_global *glob = bo->glob; + struct vmw_private *dev_priv = + container_of(bo->bdev, struct vmw_private, bdev); + + ttm_mem_global_free(glob->mem_glob, bo->acc_size); + if (vmw_bo->gmr_bound) { + vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id); + spin_lock(&glob->lru_lock); + ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id); + spin_unlock(&glob->lru_lock); + } + kfree(vmw_user_bo); +} + +static void vmw_user_dmabuf_release(struct ttm_base_object **p_base) +{ + struct vmw_user_dma_buffer *vmw_user_bo; + struct ttm_base_object *base = *p_base; + struct ttm_buffer_object *bo; + + *p_base = NULL; + + if (unlikely(base == NULL)) + return; + + vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base); + bo = &vmw_user_bo->dma.base; + ttm_bo_unref(&bo); +} + +int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + union drm_vmw_alloc_dmabuf_arg *arg = + (union drm_vmw_alloc_dmabuf_arg *)data; + struct drm_vmw_alloc_dmabuf_req *req = &arg->req; + struct drm_vmw_dmabuf_rep *rep = &arg->rep; + struct vmw_user_dma_buffer *vmw_user_bo; + struct ttm_buffer_object *tmp; + struct vmw_master *vmaster = vmw_master(file_priv->master); + int ret; + + vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL); + if (unlikely(vmw_user_bo == NULL)) + return -ENOMEM; + + ret = ttm_read_lock(&vmaster->lock, true); + if (unlikely(ret != 0)) { + 
kfree(vmw_user_bo); + return ret; + } + + ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size, + &vmw_vram_placement, true, + &vmw_user_dmabuf_destroy); + if (unlikely(ret != 0)) + return ret; + + tmp = ttm_bo_reference(&vmw_user_bo->dma.base); + ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile, + &vmw_user_bo->base, + false, + ttm_buffer_type, + &vmw_user_dmabuf_release, NULL); + if (unlikely(ret != 0)) { + ttm_bo_unref(&tmp); + } else { + rep->handle = vmw_user_bo->base.hash.key; + rep->map_handle = vmw_user_bo->dma.base.addr_space_offset; + rep->cur_gmr_id = vmw_user_bo->base.hash.key; + rep->cur_gmr_offset = 0; + } + ttm_bo_unref(&tmp); + + ttm_read_unlock(&vmaster->lock); + + return 0; +} + +int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_vmw_unref_dmabuf_arg *arg = + (struct drm_vmw_unref_dmabuf_arg *)data; + + return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, + arg->handle, + TTM_REF_USAGE); +} + +uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo, + uint32_t cur_validate_node) +{ + struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); + + if (likely(vmw_bo->on_validate_list)) + return vmw_bo->cur_validate_node; + + vmw_bo->cur_validate_node = cur_validate_node; + vmw_bo->on_validate_list = true; + + return cur_validate_node; +} + +void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo) +{ + struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); + + vmw_bo->on_validate_list = false; +} + +uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo) +{ + struct vmw_dma_buffer *vmw_bo; + + if (bo->mem.mem_type == TTM_PL_VRAM) + return SVGA_GMR_FRAMEBUFFER; + + vmw_bo = vmw_dma_buffer(bo); + + return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL; +} + +void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id) +{ + struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); + vmw_bo->gmr_bound = true; + vmw_bo->gmr_id = id; +} + +int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, + uint32_t handle, struct vmw_dma_buffer **out) +{ + struct vmw_user_dma_buffer *vmw_user_bo; + struct ttm_base_object *base; + + base = ttm_base_object_lookup(tfile, handle); + if (unlikely(base == NULL)) { + printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n", + (unsigned long)handle); + return -ESRCH; + } + + if (unlikely(base->object_type != ttm_buffer_type)) { + ttm_base_object_unref(&base); + printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n", + (unsigned long)handle); + return -EINVAL; + } + + vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base); + (void)ttm_bo_reference(&vmw_user_bo->dma.base); + ttm_base_object_unref(&base); + *out = &vmw_user_bo->dma; + + return 0; +} + +/** + * TODO: Implement a gmr id eviction mechanism. Currently we just fail + * when we're out of ids, causing GMR space to be allocated + * out of VRAM. 
+ */ + +int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id) +{ + struct ttm_bo_global *glob = dev_priv->bdev.glob; + int id; + int ret; + + do { + if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0)) + return -ENOMEM; + + spin_lock(&glob->lru_lock); + ret = ida_get_new(&dev_priv->gmr_ida, &id); + spin_unlock(&glob->lru_lock); + } while (ret == -EAGAIN); + + if (unlikely(ret != 0)) + return ret; + + if (unlikely(id >= dev_priv->max_gmr_ids)) { + spin_lock(&glob->lru_lock); + ida_remove(&dev_priv->gmr_ida, id); + spin_unlock(&glob->lru_lock); + return -EBUSY; + } + + *p_id = (uint32_t) id; + return 0; +} + +/* + * Stream managment + */ + +static void vmw_stream_destroy(struct vmw_resource *res) +{ + struct vmw_private *dev_priv = res->dev_priv; + struct vmw_stream *stream; + int ret; + + DRM_INFO("%s: unref\n", __func__); + stream = container_of(res, struct vmw_stream, res); + + ret = vmw_overlay_unref(dev_priv, stream->stream_id); + WARN_ON(ret != 0); +} + +static int vmw_stream_init(struct vmw_private *dev_priv, + struct vmw_stream *stream, + void (*res_free) (struct vmw_resource *res)) +{ + struct vmw_resource *res = &stream->res; + int ret; + + ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr, + VMW_RES_STREAM, res_free); + + if (unlikely(ret != 0)) { + if (res_free == NULL) + kfree(stream); + else + res_free(&stream->res); + return ret; + } + + ret = vmw_overlay_claim(dev_priv, &stream->stream_id); + if (ret) { + vmw_resource_unreference(&res); + return ret; + } + + DRM_INFO("%s: claimed\n", __func__); + + vmw_resource_activate(&stream->res, vmw_stream_destroy); + return 0; +} + +/** + * User-space context management: + */ + +static void vmw_user_stream_free(struct vmw_resource *res) +{ + struct vmw_user_stream *stream = + container_of(res, struct vmw_user_stream, stream.res); + + kfree(stream); +} + +/** + * This function is called when user space has no more references on the + * base object. It releases the base-object's reference on the resource object. 
+ */ + +static void vmw_user_stream_base_release(struct ttm_base_object **p_base) +{ + struct ttm_base_object *base = *p_base; + struct vmw_user_stream *stream = + container_of(base, struct vmw_user_stream, base); + struct vmw_resource *res = &stream->stream.res; + + *p_base = NULL; + vmw_resource_unreference(&res); +} + +int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + struct vmw_resource *res; + struct vmw_user_stream *stream; + struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data; + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + int ret = 0; + + res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id); + if (unlikely(res == NULL)) + return -EINVAL; + + if (res->res_free != &vmw_user_stream_free) { + ret = -EINVAL; + goto out; + } + + stream = container_of(res, struct vmw_user_stream, stream.res); + if (stream->base.tfile != tfile) { + ret = -EINVAL; + goto out; + } + + ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE); +out: + vmw_resource_unreference(&res); + return ret; +} + +int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL); + struct vmw_resource *res; + struct vmw_resource *tmp; + struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data; + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + int ret; + + if (unlikely(stream == NULL)) + return -ENOMEM; + + res = &stream->stream.res; + stream->base.shareable = false; + stream->base.tfile = NULL; + + ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free); + if (unlikely(ret != 0)) + return ret; + + tmp = vmw_resource_reference(res); + ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM, + &vmw_user_stream_base_release, NULL); + + if (unlikely(ret != 0)) { + vmw_resource_unreference(&tmp); + goto out_err; + } + + arg->stream_id = res->id; +out_err: + vmw_resource_unreference(&res); + return ret; +} + +int vmw_user_stream_lookup(struct vmw_private *dev_priv, + struct ttm_object_file *tfile, + uint32_t *inout_id, struct vmw_resource **out) +{ + struct vmw_user_stream *stream; + struct vmw_resource *res; + int ret; + + res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id); + if (unlikely(res == NULL)) + return -EINVAL; + + if (res->res_free != &vmw_user_stream_free) { + ret = -EINVAL; + goto err_ref; + } + + stream = container_of(res, struct vmw_user_stream, stream.res); + if (stream->base.tfile != tfile) { + ret = -EPERM; + goto err_ref; + } + + *inout_id = stream->stream.stream_id; + *out = res; + return 0; +err_ref: + vmw_resource_unreference(&res); + return ret; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c new file mode 100644 index 000000000000..e3df4adfb4d8 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c @@ -0,0 +1,99 @@ +/************************************************************************** + * + * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +#include "drmP.h" +#include "vmwgfx_drv.h" + +int vmw_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct drm_file *file_priv; + struct vmw_private *dev_priv; + + if (unlikely(vma->vm_pgoff < VMWGFX_FILE_PAGE_OFFSET)) { + if (vmw_fifo_mmap(filp, vma) == 0) + return 0; + return drm_mmap(filp, vma); + } + + file_priv = (struct drm_file *)filp->private_data; + dev_priv = vmw_priv(file_priv->minor->dev); + return ttm_bo_mmap(filp, vma, &dev_priv->bdev); +} + +static int vmw_ttm_mem_global_init(struct ttm_global_reference *ref) +{ + DRM_INFO("global init.\n"); + return ttm_mem_global_init(ref->object); +} + +static void vmw_ttm_mem_global_release(struct ttm_global_reference *ref) +{ + ttm_mem_global_release(ref->object); +} + +int vmw_ttm_global_init(struct vmw_private *dev_priv) +{ + struct ttm_global_reference *global_ref; + int ret; + + global_ref = &dev_priv->mem_global_ref; + global_ref->global_type = TTM_GLOBAL_TTM_MEM; + global_ref->size = sizeof(struct ttm_mem_global); + global_ref->init = &vmw_ttm_mem_global_init; + global_ref->release = &vmw_ttm_mem_global_release; + + ret = ttm_global_item_ref(global_ref); + if (unlikely(ret != 0)) { + DRM_ERROR("Failed setting up TTM memory accounting.\n"); + return ret; + } + + dev_priv->bo_global_ref.mem_glob = + dev_priv->mem_global_ref.object; + global_ref = &dev_priv->bo_global_ref.ref; + global_ref->global_type = TTM_GLOBAL_TTM_BO; + global_ref->size = sizeof(struct ttm_bo_global); + global_ref->init = &ttm_bo_global_init; + global_ref->release = &ttm_bo_global_release; + ret = ttm_global_item_ref(global_ref); + + if (unlikely(ret != 0)) { + DRM_ERROR("Failed setting up TTM buffer objects.\n"); + goto out_no_bo; + } + + return 0; +out_no_bo: + ttm_global_item_unref(&dev_priv->mem_global_ref); + return ret; +} + +void vmw_ttm_global_release(struct vmw_private *dev_priv) +{ + ttm_global_item_unref(&dev_priv->bo_global_ref.ref); + ttm_global_item_unref(&dev_priv->mem_global_ref); +} diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index d21b3469f6d7..89f725fe064d 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -101,6 +101,8 @@ source "drivers/staging/p9auth/Kconfig" source "drivers/staging/line6/Kconfig" +source "drivers/gpu/drm/vmwgfx/Kconfig" + source "drivers/gpu/drm/radeon/Kconfig" 
source "drivers/staging/octeon/Kconfig" diff --git a/include/drm/Kbuild b/include/drm/Kbuild index b940fdfa3b25..1e83f85be819 100644 --- a/include/drm/Kbuild +++ b/include/drm/Kbuild @@ -7,4 +7,5 @@ unifdef-y += r128_drm.h unifdef-y += radeon_drm.h unifdef-y += sis_drm.h unifdef-y += savage_drm.h +unifdef-y += vmwgfx_drm.h unifdef-y += via_drm.h diff --git a/include/drm/vmwgfx_drm.h b/include/drm/vmwgfx_drm.h new file mode 100644 index 000000000000..2be7e1249b6f --- /dev/null +++ b/include/drm/vmwgfx_drm.h @@ -0,0 +1,574 @@ +/************************************************************************** + * + * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +#ifndef __VMWGFX_DRM_H__ +#define __VMWGFX_DRM_H__ + +#define DRM_VMW_MAX_SURFACE_FACES 6 +#define DRM_VMW_MAX_MIP_LEVELS 24 + +#define DRM_VMW_EXT_NAME_LEN 128 + +#define DRM_VMW_GET_PARAM 0 +#define DRM_VMW_ALLOC_DMABUF 1 +#define DRM_VMW_UNREF_DMABUF 2 +#define DRM_VMW_CURSOR_BYPASS 3 +/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0*/ +#define DRM_VMW_CONTROL_STREAM 4 +#define DRM_VMW_CLAIM_STREAM 5 +#define DRM_VMW_UNREF_STREAM 6 +/* guarded by DRM_VMW_PARAM_3D == 1 */ +#define DRM_VMW_CREATE_CONTEXT 7 +#define DRM_VMW_UNREF_CONTEXT 8 +#define DRM_VMW_CREATE_SURFACE 9 +#define DRM_VMW_UNREF_SURFACE 10 +#define DRM_VMW_REF_SURFACE 11 +#define DRM_VMW_EXECBUF 12 +#define DRM_VMW_FIFO_DEBUG 13 +#define DRM_VMW_FENCE_WAIT 14 + + +/*************************************************************************/ +/** + * DRM_VMW_GET_PARAM - get device information. + * + * DRM_VMW_PARAM_FIFO_OFFSET: + * Offset to use to map the first page of the FIFO read-only. + * The fifo is mapped using the mmap() system call on the drm device. + * + * DRM_VMW_PARAM_OVERLAY_IOCTL: + * Does the driver support the overlay ioctl. + */ + +#define DRM_VMW_PARAM_NUM_STREAMS 0 +#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1 +#define DRM_VMW_PARAM_3D 2 +#define DRM_VMW_PARAM_FIFO_OFFSET 3 + + +/** + * struct drm_vmw_getparam_arg + * + * @value: Returned value. //Out + * @param: Parameter to query. //In. + * + * Argument to the DRM_VMW_GET_PARAM Ioctl. 
+ */ + +struct drm_vmw_getparam_arg { + uint64_t value; + uint32_t param; + uint32_t pad64; +}; + +/*************************************************************************/ +/** + * DRM_VMW_EXTENSION - Query device extensions. + */ + +/** + * struct drm_vmw_extension_rep + * + * @exists: The queried extension exists. + * @driver_ioctl_offset: Ioctl number of the first ioctl in the extension. + * @driver_sarea_offset: Offset to any space in the DRI SAREA + * used by the extension. + * @major: Major version number of the extension. + * @minor: Minor version number of the extension. + * @pl: Patch level version number of the extension. + * + * Output argument to the DRM_VMW_EXTENSION Ioctl. + */ + +struct drm_vmw_extension_rep { + int32_t exists; + uint32_t driver_ioctl_offset; + uint32_t driver_sarea_offset; + uint32_t major; + uint32_t minor; + uint32_t pl; + uint32_t pad64; +}; + +/** + * union drm_vmw_extension_arg + * + * @extension - Ascii name of the extension to be queried. //In + * @rep - Reply as defined above. //Out + * + * Argument to the DRM_VMW_EXTENSION Ioctl. + */ + +union drm_vmw_extension_arg { + char extension[DRM_VMW_EXT_NAME_LEN]; + struct drm_vmw_extension_rep rep; +}; + +/*************************************************************************/ +/** + * DRM_VMW_CREATE_CONTEXT - Create a host context. + * + * Allocates a device unique context id, and queues a create context command + * for the host. Does not wait for host completion. + */ + +/** + * struct drm_vmw_context_arg + * + * @cid: Device unique context ID. + * + * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl. + * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl. + */ + +struct drm_vmw_context_arg { + int32_t cid; + uint32_t pad64; +}; + +/*************************************************************************/ +/** + * DRM_VMW_UNREF_CONTEXT - Create a host context. + * + * Frees a global context id, and queues a destroy host command for the host. + * Does not wait for host completion. The context ID can be used directly + * in the command stream and shows up as the same context ID on the host. + */ + +/*************************************************************************/ +/** + * DRM_VMW_CREATE_SURFACE - Create a host suface. + * + * Allocates a device unique surface id, and queues a create surface command + * for the host. Does not wait for host completion. The surface ID can be + * used directly in the command stream and shows up as the same surface + * ID on the host. + */ + +/** + * struct drm_wmv_surface_create_req + * + * @flags: Surface flags as understood by the host. + * @format: Surface format as understood by the host. + * @mip_levels: Number of mip levels for each face. + * An unused face should have 0 encoded. + * @size_addr: Address of a user-space array of sruct drm_vmw_size + * cast to an uint64_t for 32-64 bit compatibility. + * The size of the array should equal the total number of mipmap levels. + * @shareable: Boolean whether other clients (as identified by file descriptors) + * may reference this surface. + * + * Input data to the DRM_VMW_CREATE_SURFACE Ioctl. + * Output data from the DRM_VMW_REF_SURFACE Ioctl. + */ + +struct drm_vmw_surface_create_req { + uint32_t flags; + uint32_t format; + uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES]; + uint64_t size_addr; + int32_t shareable; + uint32_t pad64; +}; + +/** + * struct drm_wmv_surface_arg + * + * @sid: Surface id of created surface or surface to destroy or reference. 
+ * + * Output data from the DRM_VMW_CREATE_SURFACE Ioctl. + * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl. + * Input argument to the DRM_VMW_REF_SURFACE Ioctl. + */ + +struct drm_vmw_surface_arg { + int32_t sid; + uint32_t pad64; +}; + +/** + * struct drm_vmw_size ioctl. + * + * @width - mip level width + * @height - mip level height + * @depth - mip level depth + * + * Description of a mip level. + * Input data to the DRM_WMW_CREATE_SURFACE Ioctl. + */ + +struct drm_vmw_size { + uint32_t width; + uint32_t height; + uint32_t depth; + uint32_t pad64; +}; + +/** + * union drm_vmw_surface_create_arg + * + * @rep: Output data as described above. + * @req: Input data as described above. + * + * Argument to the DRM_VMW_CREATE_SURFACE Ioctl. + */ + +union drm_vmw_surface_create_arg { + struct drm_vmw_surface_arg rep; + struct drm_vmw_surface_create_req req; +}; + +/*************************************************************************/ +/** + * DRM_VMW_REF_SURFACE - Reference a host surface. + * + * Puts a reference on a host surface with a give sid, as previously + * returned by the DRM_VMW_CREATE_SURFACE ioctl. + * A reference will make sure the surface isn't destroyed while we hold + * it and will allow the calling client to use the surface ID in the command + * stream. + * + * On successful return, the Ioctl returns the surface information given + * in the DRM_VMW_CREATE_SURFACE ioctl. + */ + +/** + * union drm_vmw_surface_reference_arg + * + * @rep: Output data as described above. + * @req: Input data as described above. + * + * Argument to the DRM_VMW_REF_SURFACE Ioctl. + */ + +union drm_vmw_surface_reference_arg { + struct drm_vmw_surface_create_req rep; + struct drm_vmw_surface_arg req; +}; + +/*************************************************************************/ +/** + * DRM_VMW_UNREF_SURFACE - Unreference a host surface. + * + * Clear a reference previously put on a host surface. + * When all references are gone, including the one implicitly placed + * on creation, + * a destroy surface command will be queued for the host. + * Does not wait for completion. + */ + +/*************************************************************************/ +/** + * DRM_VMW_EXECBUF + * + * Submit a command buffer for execution on the host, and return a + * fence sequence that when signaled, indicates that the command buffer has + * executed. + */ + +/** + * struct drm_vmw_execbuf_arg + * + * @commands: User-space address of a command buffer cast to an uint64_t. + * @command-size: Size in bytes of the command buffer. + * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an + * uint64_t. + * + * Argument to the DRM_VMW_EXECBUF Ioctl. + */ + +struct drm_vmw_execbuf_arg { + uint64_t commands; + uint32_t command_size; + uint32_t pad64; + uint64_t fence_rep; +}; + +/** + * struct drm_vmw_fence_rep + * + * @fence_seq: Fence sequence associated with a command submission. + * @error: This member should've been set to -EFAULT on submission. + * The following actions should be take on completion: + * error == -EFAULT: Fence communication failed. The host is synchronized. + * Use the last fence id read from the FIFO fence register. + * error != 0 && error != -EFAULT: + * Fence submission failed. The host is synchronized. Use the fence_seq member. + * error == 0: All is OK, The host may not be synchronized. + * Use the fence_seq member. + * + * Input / Output data to the DRM_VMW_EXECBUF Ioctl. 
+ */ + +struct drm_vmw_fence_rep { + uint64_t fence_seq; + int32_t error; + uint32_t pad64; +}; + +/*************************************************************************/ +/** + * DRM_VMW_ALLOC_DMABUF + * + * Allocate a DMA buffer that is visible also to the host. + * NOTE: The buffer is + * identified by a handle and an offset, which are private to the guest, but + * useable in the command stream. The guest kernel may translate these + * and patch up the command stream accordingly. In the future, the offset may + * be zero at all times, or it may disappear from the interface before it is + * fixed. + * + * The DMA buffer may stay user-space mapped in the guest at all times, + * and is thus suitable for sub-allocation. + * + * DMA buffers are mapped using the mmap() syscall on the drm device. + */ + +/** + * struct drm_vmw_alloc_dmabuf_req + * + * @size: Required minimum size of the buffer. + * + * Input data to the DRM_VMW_ALLOC_DMABUF Ioctl. + */ + +struct drm_vmw_alloc_dmabuf_req { + uint32_t size; + uint32_t pad64; +}; + +/** + * struct drm_vmw_dmabuf_rep + * + * @map_handle: Offset to use in the mmap() call used to map the buffer. + * @handle: Handle unique to this buffer. Used for unreferencing. + * @cur_gmr_id: GMR id to use in the command stream when this buffer is + * referenced. See not above. + * @cur_gmr_offset: Offset to use in the command stream when this buffer is + * referenced. See note above. + * + * Output data from the DRM_VMW_ALLOC_DMABUF Ioctl. + */ + +struct drm_vmw_dmabuf_rep { + uint64_t map_handle; + uint32_t handle; + uint32_t cur_gmr_id; + uint32_t cur_gmr_offset; + uint32_t pad64; +}; + +/** + * union drm_vmw_dmabuf_arg + * + * @req: Input data as described above. + * @rep: Output data as described above. + * + * Argument to the DRM_VMW_ALLOC_DMABUF Ioctl. + */ + +union drm_vmw_alloc_dmabuf_arg { + struct drm_vmw_alloc_dmabuf_req req; + struct drm_vmw_dmabuf_rep rep; +}; + +/*************************************************************************/ +/** + * DRM_VMW_UNREF_DMABUF - Free a DMA buffer. + * + */ + +/** + * struct drm_vmw_unref_dmabuf_arg + * + * @handle: Handle indicating what buffer to free. Obtained from the + * DRM_VMW_ALLOC_DMABUF Ioctl. + * + * Argument to the DRM_VMW_UNREF_DMABUF Ioctl. + */ + +struct drm_vmw_unref_dmabuf_arg { + uint32_t handle; + uint32_t pad64; +}; + +/*************************************************************************/ +/** + * DRM_VMW_FIFO_DEBUG - Get last FIFO submission. + * + * This IOCTL copies the last FIFO submission directly out of the FIFO buffer. + */ + +/** + * struct drm_vmw_fifo_debug_arg + * + * @debug_buffer: User space address of a debug_buffer cast to an uint64_t //In + * @debug_buffer_size: Size in bytes of debug buffer //In + * @used_size: Number of bytes copied to the buffer // Out + * @did_not_fit: Boolean indicating that the fifo contents did not fit. //Out + * + * Argument to the DRM_VMW_FIFO_DEBUG Ioctl. + */ + +struct drm_vmw_fifo_debug_arg { + uint64_t debug_buffer; + uint32_t debug_buffer_size; + uint32_t used_size; + int32_t did_not_fit; + uint32_t pad64; +}; + +struct drm_vmw_fence_wait_arg { + uint64_t sequence; + uint64_t kernel_cookie; + int32_t cookie_valid; + int32_t pad64; +}; + +/*************************************************************************/ +/** + * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams. + * + * This IOCTL controls the overlay units of the svga device. 
+ * The SVGA overlay units does not work like regular hardware units in + * that they do not automaticaly read back the contents of the given dma + * buffer. But instead only read back for each call to this ioctl, and + * at any point between this call being made and a following call that + * either changes the buffer or disables the stream. + */ + +/** + * struct drm_vmw_rect + * + * Defines a rectangle. Used in the overlay ioctl to define + * source and destination rectangle. + */ + +struct drm_vmw_rect { + int32_t x; + int32_t y; + uint32_t w; + uint32_t h; +}; + +/** + * struct drm_vmw_control_stream_arg + * + * @stream_id: Stearm to control + * @enabled: If false all following arguments are ignored. + * @handle: Handle to buffer for getting data from. + * @format: Format of the overlay as understood by the host. + * @width: Width of the overlay. + * @height: Height of the overlay. + * @size: Size of the overlay in bytes. + * @pitch: Array of pitches, the two last are only used for YUV12 formats. + * @offset: Offset from start of dma buffer to overlay. + * @src: Source rect, must be within the defined area above. + * @dst: Destination rect, x and y may be negative. + * + * Argument to the DRM_VMW_CONTROL_STREAM Ioctl. + */ + +struct drm_vmw_control_stream_arg { + uint32_t stream_id; + uint32_t enabled; + + uint32_t flags; + uint32_t color_key; + + uint32_t handle; + uint32_t offset; + int32_t format; + uint32_t size; + uint32_t width; + uint32_t height; + uint32_t pitch[3]; + + uint32_t pad64; + struct drm_vmw_rect src; + struct drm_vmw_rect dst; +}; + +/*************************************************************************/ +/** + * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass. + * + */ + +#define DRM_VMW_CURSOR_BYPASS_ALL (1 << 0) +#define DRM_VMW_CURSOR_BYPASS_FLAGS (1) + +/** + * struct drm_vmw_cursor_bypass_arg + * + * @flags: Flags. + * @crtc_id: Crtc id, only used if DMR_CURSOR_BYPASS_ALL isn't passed. + * @xpos: X position of cursor. + * @ypos: Y position of cursor. + * @xhot: X hotspot. + * @yhot: Y hotspot. + * + * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl. + */ + +struct drm_vmw_cursor_bypass_arg { + uint32_t flags; + uint32_t crtc_id; + int32_t xpos; + int32_t ypos; + int32_t xhot; + int32_t yhot; +}; + +/*************************************************************************/ +/** + * DRM_VMW_CLAIM_STREAM - Claim a single stream. + */ + +/** + * struct drm_vmw_context_arg + * + * @stream_id: Device unique context ID. + * + * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl. + * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl. + */ + +struct drm_vmw_stream_arg { + uint32_t stream_id; + uint32_t pad64; +}; + +/*************************************************************************/ +/** + * DRM_VMW_UNREF_STREAM - Unclaim a stream. + * + * Return a single stream that was claimed by this process. Also makes + * sure that the stream has been stopped. + */ + +#endif -- cgit v1.2.3 From ef12f10994281e2e44526fa0abf23fdd7d5bd87f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 7 Nov 2009 23:04:15 +0100 Subject: locking: Split rwlock from spinlock headers Move the rwlock defines and inlines into separate header files. This makes the selection for -rt easier. No functional change. 
Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Acked-by: Ingo Molnar --- include/linux/rwlock.h | 125 +++++++++++++++++++++++++++++++++++++++++ include/linux/rwlock_types.h | 56 ++++++++++++++++++ include/linux/spinlock.h | 100 +++------------------------------ include/linux/spinlock_types.h | 43 ++------------ 4 files changed, 195 insertions(+), 129 deletions(-) create mode 100644 include/linux/rwlock.h create mode 100644 include/linux/rwlock_types.h (limited to 'include') diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h new file mode 100644 index 000000000000..73785b0bd6b9 --- /dev/null +++ b/include/linux/rwlock.h @@ -0,0 +1,125 @@ +#ifndef __LINUX_RWLOCK_H +#define __LINUX_RWLOCK_H + +#ifndef __LINUX_SPINLOCK_H +# error "please don't include this file directly" +#endif + +/* + * rwlock related methods + * + * split out from spinlock.h + * + * portions Copyright 2005, Red Hat, Inc., Ingo Molnar + * Released under the General Public License (GPL). + */ + +#ifdef CONFIG_DEBUG_SPINLOCK + extern void __rwlock_init(rwlock_t *lock, const char *name, + struct lock_class_key *key); +# define rwlock_init(lock) \ +do { \ + static struct lock_class_key __key; \ + \ + __rwlock_init((lock), #lock, &__key); \ +} while (0) +#else +# define rwlock_init(lock) \ + do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0) +#endif + +#ifdef CONFIG_DEBUG_SPINLOCK + extern void _raw_read_lock(rwlock_t *lock); +#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock) + extern int _raw_read_trylock(rwlock_t *lock); + extern void _raw_read_unlock(rwlock_t *lock); + extern void _raw_write_lock(rwlock_t *lock); +#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock) + extern int _raw_write_trylock(rwlock_t *lock); + extern void _raw_write_unlock(rwlock_t *lock); +#else +# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock) +# define _raw_read_lock_flags(lock, flags) \ + __raw_read_lock_flags(&(lock)->raw_lock, *(flags)) +# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock) +# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock) +# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock) +# define _raw_write_lock_flags(lock, flags) \ + __raw_write_lock_flags(&(lock)->raw_lock, *(flags)) +# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock) +# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock) +#endif + +#define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock) +#define write_can_lock(rwlock) __raw_write_can_lock(&(rwlock)->raw_lock) + +/* + * Define the various rw_lock methods. Note we define these + * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various + * methods are defined as nops in the case they are not required. 
+ */ +#define read_trylock(lock) __cond_lock(lock, _read_trylock(lock)) +#define write_trylock(lock) __cond_lock(lock, _write_trylock(lock)) + +#define write_lock(lock) _write_lock(lock) +#define read_lock(lock) _read_lock(lock) + +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) + +#define read_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = _read_lock_irqsave(lock); \ + } while (0) +#define write_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = _write_lock_irqsave(lock); \ + } while (0) + +#else + +#define read_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + _read_lock_irqsave(lock, flags); \ + } while (0) +#define write_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + _write_lock_irqsave(lock, flags); \ + } while (0) + +#endif + +#define read_lock_irq(lock) _read_lock_irq(lock) +#define read_lock_bh(lock) _read_lock_bh(lock) +#define write_lock_irq(lock) _write_lock_irq(lock) +#define write_lock_bh(lock) _write_lock_bh(lock) +#define read_unlock(lock) _read_unlock(lock) +#define write_unlock(lock) _write_unlock(lock) +#define read_unlock_irq(lock) _read_unlock_irq(lock) +#define write_unlock_irq(lock) _write_unlock_irq(lock) + +#define read_unlock_irqrestore(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + _read_unlock_irqrestore(lock, flags); \ + } while (0) +#define read_unlock_bh(lock) _read_unlock_bh(lock) + +#define write_unlock_irqrestore(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + _write_unlock_irqrestore(lock, flags); \ + } while (0) +#define write_unlock_bh(lock) _write_unlock_bh(lock) + +#define write_trylock_irqsave(lock, flags) \ +({ \ + local_irq_save(flags); \ + write_trylock(lock) ? \ + 1 : ({ local_irq_restore(flags); 0; }); \ +}) + +#endif /* __LINUX_RWLOCK_H */ diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h new file mode 100644 index 000000000000..f8c935206a41 --- /dev/null +++ b/include/linux/rwlock_types.h @@ -0,0 +1,56 @@ +#ifndef __LINUX_RWLOCK_TYPES_H +#define __LINUX_RWLOCK_TYPES_H + +/* + * include/linux/rwlock_types.h - generic rwlock type definitions + * and initializers + * + * portions Copyright 2005, Red Hat, Inc., Ingo Molnar + * Released under the General Public License (GPL). + */ +typedef struct { + raw_rwlock_t raw_lock; +#ifdef CONFIG_GENERIC_LOCKBREAK + unsigned int break_lock; +#endif +#ifdef CONFIG_DEBUG_SPINLOCK + unsigned int magic, owner_cpu; + void *owner; +#endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} rwlock_t; + +#define RWLOCK_MAGIC 0xdeaf1eed + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } +#else +# define RW_DEP_MAP_INIT(lockname) +#endif + +#ifdef CONFIG_DEBUG_SPINLOCK +#define __RW_LOCK_UNLOCKED(lockname) \ + (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ + .magic = RWLOCK_MAGIC, \ + .owner = SPINLOCK_OWNER_INIT, \ + .owner_cpu = -1, \ + RW_DEP_MAP_INIT(lockname) } +#else +#define __RW_LOCK_UNLOCKED(lockname) \ + (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ + RW_DEP_MAP_INIT(lockname) } +#endif + +/* + * RW_LOCK_UNLOCKED defeat lockdep state tracking and is hence + * deprecated. + * + * Please use DEFINE_RWLOCK() or __RW_LOCK_UNLOCKED() as appropriate. 
+ */ +#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init) + +#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) + +#endif /* __LINUX_RWLOCK_TYPES_H */ diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 71dccfeb0d88..a9aaa709fb93 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -103,20 +103,6 @@ do { \ do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0) #endif -#ifdef CONFIG_DEBUG_SPINLOCK - extern void __rwlock_init(rwlock_t *lock, const char *name, - struct lock_class_key *key); -# define rwlock_init(lock) \ -do { \ - static struct lock_class_key __key; \ - \ - __rwlock_init((lock), #lock, &__key); \ -} while (0) -#else -# define rwlock_init(lock) \ - do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0) -#endif - #define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) #ifdef CONFIG_GENERIC_LOCKBREAK @@ -146,43 +132,21 @@ static inline void smp_mb__after_lock(void) { smp_mb(); } #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) extern int _raw_spin_trylock(spinlock_t *lock); extern void _raw_spin_unlock(spinlock_t *lock); - extern void _raw_read_lock(rwlock_t *lock); -#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock) - extern int _raw_read_trylock(rwlock_t *lock); - extern void _raw_read_unlock(rwlock_t *lock); - extern void _raw_write_lock(rwlock_t *lock); -#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock) - extern int _raw_write_trylock(rwlock_t *lock); - extern void _raw_write_unlock(rwlock_t *lock); #else # define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock) # define _raw_spin_lock_flags(lock, flags) \ __raw_spin_lock_flags(&(lock)->raw_lock, *(flags)) # define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock) # define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) -# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock) -# define _raw_read_lock_flags(lock, flags) \ - __raw_read_lock_flags(&(lock)->raw_lock, *(flags)) -# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock) -# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock) -# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock) -# define _raw_write_lock_flags(lock, flags) \ - __raw_write_lock_flags(&(lock)->raw_lock, *(flags)) -# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock) -# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock) #endif -#define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock) -#define write_can_lock(rwlock) __raw_write_can_lock(&(rwlock)->raw_lock) - /* - * Define the various spin_lock and rw_lock methods. Note we define these - * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various - * methods are defined as nops in the case they are not required. + * Define the various spin_lock methods. Note we define these + * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The + * various methods are defined as nops in the case they are not + * required. 
*/ #define spin_trylock(lock) __cond_lock(lock, _spin_trylock(lock)) -#define read_trylock(lock) __cond_lock(lock, _read_trylock(lock)) -#define write_trylock(lock) __cond_lock(lock, _write_trylock(lock)) #define spin_lock(lock) _spin_lock(lock) @@ -198,9 +162,6 @@ static inline void smp_mb__after_lock(void) { smp_mb(); } # define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock) #endif -#define write_lock(lock) _write_lock(lock) -#define read_lock(lock) _read_lock(lock) - #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) #define spin_lock_irqsave(lock, flags) \ @@ -208,16 +169,6 @@ static inline void smp_mb__after_lock(void) { smp_mb(); } typecheck(unsigned long, flags); \ flags = _spin_lock_irqsave(lock); \ } while (0) -#define read_lock_irqsave(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ - flags = _read_lock_irqsave(lock); \ - } while (0) -#define write_lock_irqsave(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ - flags = _write_lock_irqsave(lock); \ - } while (0) #ifdef CONFIG_DEBUG_LOCK_ALLOC #define spin_lock_irqsave_nested(lock, flags, subclass) \ @@ -240,16 +191,7 @@ static inline void smp_mb__after_lock(void) { smp_mb(); } typecheck(unsigned long, flags); \ _spin_lock_irqsave(lock, flags); \ } while (0) -#define read_lock_irqsave(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ - _read_lock_irqsave(lock, flags); \ - } while (0) -#define write_lock_irqsave(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ - _write_lock_irqsave(lock, flags); \ - } while (0) + #define spin_lock_irqsave_nested(lock, flags, subclass) \ spin_lock_irqsave(lock, flags) @@ -257,16 +199,8 @@ static inline void smp_mb__after_lock(void) { smp_mb(); } #define spin_lock_irq(lock) _spin_lock_irq(lock) #define spin_lock_bh(lock) _spin_lock_bh(lock) -#define read_lock_irq(lock) _read_lock_irq(lock) -#define read_lock_bh(lock) _read_lock_bh(lock) -#define write_lock_irq(lock) _write_lock_irq(lock) -#define write_lock_bh(lock) _write_lock_bh(lock) #define spin_unlock(lock) _spin_unlock(lock) -#define read_unlock(lock) _read_unlock(lock) -#define write_unlock(lock) _write_unlock(lock) #define spin_unlock_irq(lock) _spin_unlock_irq(lock) -#define read_unlock_irq(lock) _read_unlock_irq(lock) -#define write_unlock_irq(lock) _write_unlock_irq(lock) #define spin_unlock_irqrestore(lock, flags) \ do { \ @@ -275,20 +209,6 @@ static inline void smp_mb__after_lock(void) { smp_mb(); } } while (0) #define spin_unlock_bh(lock) _spin_unlock_bh(lock) -#define read_unlock_irqrestore(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ - _read_unlock_irqrestore(lock, flags); \ - } while (0) -#define read_unlock_bh(lock) _read_unlock_bh(lock) - -#define write_unlock_irqrestore(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ - _write_unlock_irqrestore(lock, flags); \ - } while (0) -#define write_unlock_bh(lock) _write_unlock_bh(lock) - #define spin_trylock_bh(lock) __cond_lock(lock, _spin_trylock_bh(lock)) #define spin_trylock_irq(lock) \ @@ -305,13 +225,6 @@ static inline void smp_mb__after_lock(void) { smp_mb(); } 1 : ({ local_irq_restore(flags); 0; }); \ }) -#define write_trylock_irqsave(lock, flags) \ -({ \ - local_irq_save(flags); \ - write_trylock(lock) ? 
\ - 1 : ({ local_irq_restore(flags); 0; }); \ -}) - /* * Pull the atomic_t declaration: * (asm-mips/atomic.h needs above definitions) @@ -335,6 +248,9 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); */ #define spin_can_lock(lock) (!spin_is_locked(lock)) +/* Include rwlock functions */ +#include + /* * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: */ diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index 68d88f71f1a2..f979d5d8a160 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -33,22 +33,6 @@ typedef struct { #define SPINLOCK_MAGIC 0xdead4ead -typedef struct { - raw_rwlock_t raw_lock; -#ifdef CONFIG_GENERIC_LOCKBREAK - unsigned int break_lock; -#endif -#ifdef CONFIG_DEBUG_SPINLOCK - unsigned int magic, owner_cpu; - void *owner; -#endif -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -} rwlock_t; - -#define RWLOCK_MAGIC 0xdeaf1eed - #define SPINLOCK_OWNER_INIT ((void *)-1L) #ifdef CONFIG_DEBUG_LOCK_ALLOC @@ -57,12 +41,6 @@ typedef struct { # define SPIN_DEP_MAP_INIT(lockname) #endif -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } -#else -# define RW_DEP_MAP_INIT(lockname) -#endif - #ifdef CONFIG_DEBUG_SPINLOCK # define __SPIN_LOCK_UNLOCKED(lockname) \ (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ @@ -70,31 +48,22 @@ typedef struct { .owner = SPINLOCK_OWNER_INIT, \ .owner_cpu = -1, \ SPIN_DEP_MAP_INIT(lockname) } -#define __RW_LOCK_UNLOCKED(lockname) \ - (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ - .magic = RWLOCK_MAGIC, \ - .owner = SPINLOCK_OWNER_INIT, \ - .owner_cpu = -1, \ - RW_DEP_MAP_INIT(lockname) } #else # define __SPIN_LOCK_UNLOCKED(lockname) \ (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ SPIN_DEP_MAP_INIT(lockname) } -#define __RW_LOCK_UNLOCKED(lockname) \ - (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ - RW_DEP_MAP_INIT(lockname) } #endif /* - * SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED defeat lockdep state tracking and - * are hence deprecated. - * Please use DEFINE_SPINLOCK()/DEFINE_RWLOCK() or - * __SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() as appropriate. + * SPIN_LOCK_UNLOCKED defeats lockdep state tracking and is hence + * deprecated. + * Please use DEFINE_SPINLOCK() or __SPIN_LOCK_UNLOCKED() as + * appropriate. */ #define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init) -#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init) #define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) -#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) + +#include #endif /* __LINUX_SPINLOCK_TYPES_H */ -- cgit v1.2.3 From 6b6b4792f89346e47437682c7ba3438e6681c0f9 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 16 Nov 2009 18:48:37 +0100 Subject: locking: Separate rwlock api from spinlock api Move the rwlock smp api defines and functions into a separate header file. Makes the -rt selection simpler and less intrusive. No functional change. 
Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Acked-by: Ingo Molnar --- include/linux/rwlock_api_smp.h | 277 +++++++++++++++++++++++++++++++++++++++ include/linux/spinlock_api_smp.h | 252 +---------------------------------- 2 files changed, 280 insertions(+), 249 deletions(-) create mode 100644 include/linux/rwlock_api_smp.h (limited to 'include') diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h new file mode 100644 index 000000000000..090f876f828d --- /dev/null +++ b/include/linux/rwlock_api_smp.h @@ -0,0 +1,277 @@ +#ifndef __LINUX_RWLOCK_API_SMP_H +#define __LINUX_RWLOCK_API_SMP_H + +#ifndef __LINUX_SPINLOCK_API_SMP_H +# error "please don't include this file directly" +#endif + +/* + * include/linux/rwlock_api_smp.h + * + * spinlock API declarations on SMP (and debug) + * (implemented in kernel/spinlock.c) + * + * portions Copyright 2005, Red Hat, Inc., Ingo Molnar + * Released under the General Public License (GPL). + */ + +void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock); +void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock); +void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(lock); +void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(lock); +void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(lock); +void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(lock); +unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) + __acquires(lock); +unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) + __acquires(lock); +int __lockfunc _read_trylock(rwlock_t *lock); +int __lockfunc _write_trylock(rwlock_t *lock); +void __lockfunc _read_unlock(rwlock_t *lock) __releases(lock); +void __lockfunc _write_unlock(rwlock_t *lock) __releases(lock); +void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(lock); +void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(lock); +void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(lock); +void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(lock); +void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) + __releases(lock); +void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) + __releases(lock); + +#ifdef CONFIG_INLINE_READ_LOCK +#define _read_lock(lock) __read_lock(lock) +#endif + +#ifdef CONFIG_INLINE_WRITE_LOCK +#define _write_lock(lock) __write_lock(lock) +#endif + +#ifdef CONFIG_INLINE_READ_LOCK_BH +#define _read_lock_bh(lock) __read_lock_bh(lock) +#endif + +#ifdef CONFIG_INLINE_WRITE_LOCK_BH +#define _write_lock_bh(lock) __write_lock_bh(lock) +#endif + +#ifdef CONFIG_INLINE_READ_LOCK_IRQ +#define _read_lock_irq(lock) __read_lock_irq(lock) +#endif + +#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ +#define _write_lock_irq(lock) __write_lock_irq(lock) +#endif + +#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE +#define _read_lock_irqsave(lock) __read_lock_irqsave(lock) +#endif + +#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE +#define _write_lock_irqsave(lock) __write_lock_irqsave(lock) +#endif + +#ifdef CONFIG_INLINE_READ_TRYLOCK +#define _read_trylock(lock) __read_trylock(lock) +#endif + +#ifdef CONFIG_INLINE_WRITE_TRYLOCK +#define _write_trylock(lock) __write_trylock(lock) +#endif + +#ifdef CONFIG_INLINE_READ_UNLOCK +#define _read_unlock(lock) __read_unlock(lock) +#endif + +#ifdef CONFIG_INLINE_WRITE_UNLOCK +#define _write_unlock(lock) __write_unlock(lock) +#endif + +#ifdef CONFIG_INLINE_READ_UNLOCK_BH +#define _read_unlock_bh(lock) __read_unlock_bh(lock) +#endif + +#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH +#define 
_write_unlock_bh(lock) __write_unlock_bh(lock) +#endif + +#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ +#define _read_unlock_irq(lock) __read_unlock_irq(lock) +#endif + +#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ +#define _write_unlock_irq(lock) __write_unlock_irq(lock) +#endif + +#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE +#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags) +#endif + +#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE +#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags) +#endif + +static inline int __read_trylock(rwlock_t *lock) +{ + preempt_disable(); + if (_raw_read_trylock(lock)) { + rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_); + return 1; + } + preempt_enable(); + return 0; +} + +static inline int __write_trylock(rwlock_t *lock) +{ + preempt_disable(); + if (_raw_write_trylock(lock)) { + rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_); + return 1; + } + preempt_enable(); + return 0; +} + +/* + * If lockdep is enabled then we use the non-preemption spin-ops + * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are + * not re-enabled during lock-acquire (which the preempt-spin-ops do): + */ +#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) + +static inline void __read_lock(rwlock_t *lock) +{ + preempt_disable(); + rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); +} + +static inline unsigned long __read_lock_irqsave(rwlock_t *lock) +{ + unsigned long flags; + + local_irq_save(flags); + preempt_disable(); + rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock, + _raw_read_lock_flags, &flags); + return flags; +} + +static inline void __read_lock_irq(rwlock_t *lock) +{ + local_irq_disable(); + preempt_disable(); + rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); +} + +static inline void __read_lock_bh(rwlock_t *lock) +{ + local_bh_disable(); + preempt_disable(); + rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); +} + +static inline unsigned long __write_lock_irqsave(rwlock_t *lock) +{ + unsigned long flags; + + local_irq_save(flags); + preempt_disable(); + rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock, + _raw_write_lock_flags, &flags); + return flags; +} + +static inline void __write_lock_irq(rwlock_t *lock) +{ + local_irq_disable(); + preempt_disable(); + rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); +} + +static inline void __write_lock_bh(rwlock_t *lock) +{ + local_bh_disable(); + preempt_disable(); + rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); +} + +static inline void __write_lock(rwlock_t *lock) +{ + preempt_disable(); + rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); +} + +#endif /* CONFIG_PREEMPT */ + +static inline void __write_unlock(rwlock_t *lock) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + _raw_write_unlock(lock); + preempt_enable(); +} + +static inline void __read_unlock(rwlock_t *lock) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + _raw_read_unlock(lock); + preempt_enable(); +} + +static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long 
flags) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + _raw_read_unlock(lock); + local_irq_restore(flags); + preempt_enable(); +} + +static inline void __read_unlock_irq(rwlock_t *lock) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + _raw_read_unlock(lock); + local_irq_enable(); + preempt_enable(); +} + +static inline void __read_unlock_bh(rwlock_t *lock) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + _raw_read_unlock(lock); + preempt_enable_no_resched(); + local_bh_enable_ip((unsigned long)__builtin_return_address(0)); +} + +static inline void __write_unlock_irqrestore(rwlock_t *lock, + unsigned long flags) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + _raw_write_unlock(lock); + local_irq_restore(flags); + preempt_enable(); +} + +static inline void __write_unlock_irq(rwlock_t *lock) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + _raw_write_unlock(lock); + local_irq_enable(); + preempt_enable(); +} + +static inline void __write_unlock_bh(rwlock_t *lock) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + _raw_write_unlock(lock); + preempt_enable_no_resched(); + local_bh_enable_ip((unsigned long)__builtin_return_address(0)); +} + +#endif /* __LINUX_RWLOCK_API_SMP_H */ diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 8264a7f459bc..a2b2c9df91de 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -24,102 +24,41 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) __acquires(lock); void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map) __acquires(lock); -void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock); -void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock); void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock); -void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(lock); -void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(lock); void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(lock); -void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(lock); -void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(lock); + unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) __acquires(lock); unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass) __acquires(lock); -unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) - __acquires(lock); -unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) - __acquires(lock); int __lockfunc _spin_trylock(spinlock_t *lock); -int __lockfunc _read_trylock(rwlock_t *lock); -int __lockfunc _write_trylock(rwlock_t *lock); int __lockfunc _spin_trylock_bh(spinlock_t *lock); void __lockfunc _spin_unlock(spinlock_t *lock) __releases(lock); -void __lockfunc _read_unlock(rwlock_t *lock) __releases(lock); -void __lockfunc _write_unlock(rwlock_t *lock) __releases(lock); void __lockfunc _spin_unlock_bh(spinlock_t *lock) __releases(lock); -void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(lock); -void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(lock); void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(lock); -void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(lock); -void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(lock); void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) __releases(lock); -void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) - __releases(lock); -void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) - 
__releases(lock); #ifdef CONFIG_INLINE_SPIN_LOCK #define _spin_lock(lock) __spin_lock(lock) #endif -#ifdef CONFIG_INLINE_READ_LOCK -#define _read_lock(lock) __read_lock(lock) -#endif - -#ifdef CONFIG_INLINE_WRITE_LOCK -#define _write_lock(lock) __write_lock(lock) -#endif - #ifdef CONFIG_INLINE_SPIN_LOCK_BH #define _spin_lock_bh(lock) __spin_lock_bh(lock) #endif -#ifdef CONFIG_INLINE_READ_LOCK_BH -#define _read_lock_bh(lock) __read_lock_bh(lock) -#endif - -#ifdef CONFIG_INLINE_WRITE_LOCK_BH -#define _write_lock_bh(lock) __write_lock_bh(lock) -#endif - #ifdef CONFIG_INLINE_SPIN_LOCK_IRQ #define _spin_lock_irq(lock) __spin_lock_irq(lock) #endif -#ifdef CONFIG_INLINE_READ_LOCK_IRQ -#define _read_lock_irq(lock) __read_lock_irq(lock) -#endif - -#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ -#define _write_lock_irq(lock) __write_lock_irq(lock) -#endif - #ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE #define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock) #endif -#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE -#define _read_lock_irqsave(lock) __read_lock_irqsave(lock) -#endif - -#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE -#define _write_lock_irqsave(lock) __write_lock_irqsave(lock) -#endif - #ifdef CONFIG_INLINE_SPIN_TRYLOCK #define _spin_trylock(lock) __spin_trylock(lock) #endif -#ifdef CONFIG_INLINE_READ_TRYLOCK -#define _read_trylock(lock) __read_trylock(lock) -#endif - -#ifdef CONFIG_INLINE_WRITE_TRYLOCK -#define _write_trylock(lock) __write_trylock(lock) -#endif - #ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH #define _spin_trylock_bh(lock) __spin_trylock_bh(lock) #endif @@ -128,50 +67,18 @@ void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) #define _spin_unlock(lock) __spin_unlock(lock) #endif -#ifdef CONFIG_INLINE_READ_UNLOCK -#define _read_unlock(lock) __read_unlock(lock) -#endif - -#ifdef CONFIG_INLINE_WRITE_UNLOCK -#define _write_unlock(lock) __write_unlock(lock) -#endif - #ifdef CONFIG_INLINE_SPIN_UNLOCK_BH #define _spin_unlock_bh(lock) __spin_unlock_bh(lock) #endif -#ifdef CONFIG_INLINE_READ_UNLOCK_BH -#define _read_unlock_bh(lock) __read_unlock_bh(lock) -#endif - -#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH -#define _write_unlock_bh(lock) __write_unlock_bh(lock) -#endif - #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ #define _spin_unlock_irq(lock) __spin_unlock_irq(lock) #endif -#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ -#define _read_unlock_irq(lock) __read_unlock_irq(lock) -#endif - -#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ -#define _write_unlock_irq(lock) __write_unlock_irq(lock) -#endif - #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE #define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags) #endif -#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE -#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags) -#endif - -#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE -#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags) -#endif - static inline int __spin_trylock(spinlock_t *lock) { preempt_disable(); @@ -183,28 +90,6 @@ static inline int __spin_trylock(spinlock_t *lock) return 0; } -static inline int __read_trylock(rwlock_t *lock) -{ - preempt_disable(); - if (_raw_read_trylock(lock)) { - rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_); - return 1; - } - preempt_enable(); - return 0; -} - -static inline int __write_trylock(rwlock_t *lock) -{ - preempt_disable(); - if (_raw_write_trylock(lock)) { - rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_); - return 1; - } - preempt_enable(); - return 0; -} - /* * If lockdep is enabled then we use the 
non-preemption spin-ops * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are @@ -212,13 +97,6 @@ static inline int __write_trylock(rwlock_t *lock) */ #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) -static inline void __read_lock(rwlock_t *lock) -{ - preempt_disable(); - rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); - LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); -} - static inline unsigned long __spin_lock_irqsave(spinlock_t *lock) { unsigned long flags; @@ -255,62 +133,6 @@ static inline void __spin_lock_bh(spinlock_t *lock) LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); } -static inline unsigned long __read_lock_irqsave(rwlock_t *lock) -{ - unsigned long flags; - - local_irq_save(flags); - preempt_disable(); - rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); - LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock, - _raw_read_lock_flags, &flags); - return flags; -} - -static inline void __read_lock_irq(rwlock_t *lock) -{ - local_irq_disable(); - preempt_disable(); - rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); - LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); -} - -static inline void __read_lock_bh(rwlock_t *lock) -{ - local_bh_disable(); - preempt_disable(); - rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); - LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); -} - -static inline unsigned long __write_lock_irqsave(rwlock_t *lock) -{ - unsigned long flags; - - local_irq_save(flags); - preempt_disable(); - rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); - LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock, - _raw_write_lock_flags, &flags); - return flags; -} - -static inline void __write_lock_irq(rwlock_t *lock) -{ - local_irq_disable(); - preempt_disable(); - rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); - LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); -} - -static inline void __write_lock_bh(rwlock_t *lock) -{ - local_bh_disable(); - preempt_disable(); - rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); - LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); -} - static inline void __spin_lock(spinlock_t *lock) { preempt_disable(); @@ -318,13 +140,6 @@ static inline void __spin_lock(spinlock_t *lock) LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); } -static inline void __write_lock(rwlock_t *lock) -{ - preempt_disable(); - rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); - LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); -} - #endif /* CONFIG_PREEMPT */ static inline void __spin_unlock(spinlock_t *lock) @@ -334,20 +149,6 @@ static inline void __spin_unlock(spinlock_t *lock) preempt_enable(); } -static inline void __write_unlock(rwlock_t *lock) -{ - rwlock_release(&lock->dep_map, 1, _RET_IP_); - _raw_write_unlock(lock); - preempt_enable(); -} - -static inline void __read_unlock(rwlock_t *lock) -{ - rwlock_release(&lock->dep_map, 1, _RET_IP_); - _raw_read_unlock(lock); - preempt_enable(); -} - static inline void __spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) { @@ -373,55 +174,6 @@ static inline void __spin_unlock_bh(spinlock_t *lock) local_bh_enable_ip((unsigned long)__builtin_return_address(0)); } -static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) -{ - rwlock_release(&lock->dep_map, 1, _RET_IP_); - _raw_read_unlock(lock); - local_irq_restore(flags); - preempt_enable(); -} - -static inline void __read_unlock_irq(rwlock_t *lock) -{ - rwlock_release(&lock->dep_map, 1, _RET_IP_); - 
_raw_read_unlock(lock); - local_irq_enable(); - preempt_enable(); -} - -static inline void __read_unlock_bh(rwlock_t *lock) -{ - rwlock_release(&lock->dep_map, 1, _RET_IP_); - _raw_read_unlock(lock); - preempt_enable_no_resched(); - local_bh_enable_ip((unsigned long)__builtin_return_address(0)); -} - -static inline void __write_unlock_irqrestore(rwlock_t *lock, - unsigned long flags) -{ - rwlock_release(&lock->dep_map, 1, _RET_IP_); - _raw_write_unlock(lock); - local_irq_restore(flags); - preempt_enable(); -} - -static inline void __write_unlock_irq(rwlock_t *lock) -{ - rwlock_release(&lock->dep_map, 1, _RET_IP_); - _raw_write_unlock(lock); - local_irq_enable(); - preempt_enable(); -} - -static inline void __write_unlock_bh(rwlock_t *lock) -{ - rwlock_release(&lock->dep_map, 1, _RET_IP_); - _raw_write_unlock(lock); - preempt_enable_no_resched(); - local_bh_enable_ip((unsigned long)__builtin_return_address(0)); -} - static inline int __spin_trylock_bh(spinlock_t *lock) { local_bh_disable(); @@ -435,4 +187,6 @@ static inline int __spin_trylock_bh(spinlock_t *lock) return 0; } +#include + #endif /* __LINUX_SPINLOCK_API_SMP_H */ -- cgit v1.2.3 From 445c89514be242b1b0080056d50bdc1b72adeb5c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 2 Dec 2009 19:49:50 +0100 Subject: locking: Convert raw_spinlock to arch_spinlock The raw_spin* namespace was taken by lockdep for the architecture-specific implementations. raw_spin_* would be the ideal name space for the spinlocks which are not converted to sleeping locks in preempt-rt. Linus suggested converting the raw_ locks to arch_ locks and cleaning up the name space instead of using an artificial name like core_spin, atomic_spin or whatever. No functional change. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Acked-by: David S.
Miller Acked-by: Ingo Molnar Cc: linux-arch@vger.kernel.org --- arch/alpha/include/asm/spinlock.h | 6 +++--- arch/alpha/include/asm/spinlock_types.h | 2 +- arch/arm/include/asm/spinlock.h | 6 +++--- arch/arm/include/asm/spinlock_types.h | 2 +- arch/blackfin/include/asm/spinlock.h | 10 +++++----- arch/blackfin/include/asm/spinlock_types.h | 2 +- arch/cris/include/arch-v32/arch/spinlock.h | 12 ++++++------ arch/ia64/include/asm/spinlock.h | 26 +++++++++++++------------- arch/ia64/include/asm/spinlock_types.h | 2 +- arch/m32r/include/asm/spinlock.h | 6 +++--- arch/m32r/include/asm/spinlock_types.h | 2 +- arch/mips/include/asm/spinlock.h | 10 +++++----- arch/mips/include/asm/spinlock_types.h | 2 +- arch/parisc/include/asm/atomic.h | 6 +++--- arch/parisc/include/asm/spinlock.h | 8 ++++---- arch/parisc/include/asm/spinlock_types.h | 4 ++-- arch/parisc/lib/bitops.c | 2 +- arch/powerpc/include/asm/rtas.h | 2 +- arch/powerpc/include/asm/spinlock.h | 14 +++++++------- arch/powerpc/include/asm/spinlock_types.h | 2 +- arch/powerpc/kernel/rtas.c | 2 +- arch/powerpc/lib/locks.c | 4 ++-- arch/powerpc/platforms/pasemi/setup.c | 2 +- arch/s390/include/asm/spinlock.h | 16 ++++++++-------- arch/s390/include/asm/spinlock_types.h | 2 +- arch/s390/lib/spinlock.c | 8 ++++---- arch/sh/include/asm/spinlock.h | 6 +++--- arch/sh/include/asm/spinlock_types.h | 2 +- arch/sparc/include/asm/spinlock_32.h | 6 +++--- arch/sparc/include/asm/spinlock_64.h | 8 ++++---- arch/sparc/include/asm/spinlock_types.h | 2 +- arch/x86/include/asm/paravirt.h | 12 ++++++------ arch/x86/include/asm/paravirt_types.h | 14 +++++++------- arch/x86/include/asm/spinlock.h | 30 +++++++++++++++--------------- arch/x86/include/asm/spinlock_types.h | 4 ++-- arch/x86/kernel/dumpstack.c | 2 +- arch/x86/kernel/paravirt-spinlocks.c | 2 +- arch/x86/kernel/tsc_sync.c | 2 +- arch/x86/xen/spinlock.c | 16 ++++++++-------- include/asm-generic/bitops/atomic.h | 6 +++--- include/linux/spinlock.h | 4 ++-- include/linux/spinlock_types.h | 2 +- include/linux/spinlock_types_up.h | 4 ++-- include/linux/spinlock_up.h | 8 ++++---- kernel/lockdep.c | 2 +- kernel/trace/ring_buffer.c | 4 ++-- kernel/trace/trace.c | 18 +++++++++--------- kernel/trace/trace_clock.c | 4 ++-- kernel/trace/trace_sched_wakeup.c | 4 ++-- kernel/trace/trace_stack.c | 4 ++-- lib/spinlock_debug.c | 2 +- 51 files changed, 164 insertions(+), 164 deletions(-) (limited to 'include') diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h index e38fb95cb335..bdb26a1940b4 100644 --- a/arch/alpha/include/asm/spinlock.h +++ b/arch/alpha/include/asm/spinlock.h @@ -17,13 +17,13 @@ #define __raw_spin_unlock_wait(x) \ do { cpu_relax(); } while ((x)->lock) -static inline void __raw_spin_unlock(raw_spinlock_t * lock) +static inline void __raw_spin_unlock(arch_spinlock_t * lock) { mb(); lock->lock = 0; } -static inline void __raw_spin_lock(raw_spinlock_t * lock) +static inline void __raw_spin_lock(arch_spinlock_t * lock) { long tmp; @@ -43,7 +43,7 @@ static inline void __raw_spin_lock(raw_spinlock_t * lock) : "m"(lock->lock) : "memory"); } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int __raw_spin_trylock(arch_spinlock_t *lock) { return !test_and_set_bit(0, &lock->lock); } diff --git a/arch/alpha/include/asm/spinlock_types.h b/arch/alpha/include/asm/spinlock_types.h index 8141eb5ebf0d..bb94a51e53d2 100644 --- a/arch/alpha/include/asm/spinlock_types.h +++ b/arch/alpha/include/asm/spinlock_types.h @@ -7,7 +7,7 @@ typedef struct { volatile unsigned int 
lock; -} raw_spinlock_t; +} arch_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 0 } diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index c13681ac1ede..4e7712ee9394 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -23,7 +23,7 @@ #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void __raw_spin_lock(arch_spinlock_t *lock) { unsigned long tmp; @@ -43,7 +43,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) smp_mb(); } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int __raw_spin_trylock(arch_spinlock_t *lock) { unsigned long tmp; @@ -63,7 +63,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) } } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void __raw_spin_unlock(arch_spinlock_t *lock) { smp_mb(); diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h index 43e83f6d2ee5..5e9d3eadd167 100644 --- a/arch/arm/include/asm/spinlock_types.h +++ b/arch/arm/include/asm/spinlock_types.h @@ -7,7 +7,7 @@ typedef struct { volatile unsigned int lock; -} raw_spinlock_t; +} arch_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 0 } diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h index b0c7f0ee4b03..fc16b4c5309b 100644 --- a/arch/blackfin/include/asm/spinlock.h +++ b/arch/blackfin/include/asm/spinlock.h @@ -24,29 +24,29 @@ asmlinkage void __raw_write_lock_asm(volatile int *ptr); asmlinkage int __raw_write_trylock_asm(volatile int *ptr); asmlinkage void __raw_write_unlock_asm(volatile int *ptr); -static inline int __raw_spin_is_locked(raw_spinlock_t *lock) +static inline int __raw_spin_is_locked(arch_spinlock_t *lock) { return __raw_spin_is_locked_asm(&lock->lock); } -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void __raw_spin_lock(arch_spinlock_t *lock) { __raw_spin_lock_asm(&lock->lock); } #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int __raw_spin_trylock(arch_spinlock_t *lock) { return __raw_spin_trylock_asm(&lock->lock); } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void __raw_spin_unlock(arch_spinlock_t *lock) { __raw_spin_unlock_asm(&lock->lock); } -static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) +static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock) { while (__raw_spin_is_locked(lock)) cpu_relax(); diff --git a/arch/blackfin/include/asm/spinlock_types.h b/arch/blackfin/include/asm/spinlock_types.h index be75762c0610..03b377abf5c0 100644 --- a/arch/blackfin/include/asm/spinlock_types.h +++ b/arch/blackfin/include/asm/spinlock_types.h @@ -15,7 +15,7 @@ typedef struct { volatile unsigned int lock; -} raw_spinlock_t; +} arch_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 0 } diff --git a/arch/cris/include/arch-v32/arch/spinlock.h b/arch/cris/include/arch-v32/arch/spinlock.h index 367a53ea10c5..e253457765f2 100644 --- a/arch/cris/include/arch-v32/arch/spinlock.h +++ b/arch/cris/include/arch-v32/arch/spinlock.h @@ -9,12 +9,12 @@ extern void cris_spin_unlock(void *l, int val); extern void cris_spin_lock(void *l); extern int cris_spin_trylock(void *l); -static inline int __raw_spin_is_locked(raw_spinlock_t *x) +static inline int __raw_spin_is_locked(arch_spinlock_t *x) { return *(volatile signed char *)(&(x)->slock) <= 0; } 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void __raw_spin_unlock(arch_spinlock_t *lock) { __asm__ volatile ("move.d %1,%0" \ : "=m" (lock->slock) \ @@ -22,24 +22,24 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) : "memory"); } -static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) +static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock) { while (__raw_spin_is_locked(lock)) cpu_relax(); } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int __raw_spin_trylock(arch_spinlock_t *lock) { return cris_spin_trylock((void *)&lock->slock); } -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void __raw_spin_lock(arch_spinlock_t *lock) { cris_spin_lock((void *)&lock->slock); } static inline void -__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) +__raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { __raw_spin_lock(lock); } diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h index 239ecdc9516d..9fbdf7e61087 100644 --- a/arch/ia64/include/asm/spinlock.h +++ b/arch/ia64/include/asm/spinlock.h @@ -38,7 +38,7 @@ #define TICKET_BITS 15 #define TICKET_MASK ((1 << TICKET_BITS) - 1) -static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock) { int *p = (int *)&lock->lock, ticket, serve; @@ -58,7 +58,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) } } -static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) +static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock) { int tmp = ACCESS_ONCE(lock->lock); @@ -67,7 +67,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) return 0; } -static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) { unsigned short *p = (unsigned short *)&lock->lock + 1, tmp; @@ -75,7 +75,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) ACCESS_ONCE(*p) = (tmp + 2) & ~1; } -static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock) { int *p = (int *)&lock->lock, ticket; @@ -89,53 +89,53 @@ static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock) } } -static inline int __ticket_spin_is_locked(raw_spinlock_t *lock) +static inline int __ticket_spin_is_locked(arch_spinlock_t *lock) { long tmp = ACCESS_ONCE(lock->lock); return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK); } -static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) +static inline int __ticket_spin_is_contended(arch_spinlock_t *lock) { long tmp = ACCESS_ONCE(lock->lock); return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1; } -static inline int __raw_spin_is_locked(raw_spinlock_t *lock) +static inline int __raw_spin_is_locked(arch_spinlock_t *lock) { return __ticket_spin_is_locked(lock); } -static inline int __raw_spin_is_contended(raw_spinlock_t *lock) +static inline int __raw_spin_is_contended(arch_spinlock_t *lock) { return __ticket_spin_is_contended(lock); } #define __raw_spin_is_contended __raw_spin_is_contended -static __always_inline void __raw_spin_lock(raw_spinlock_t *lock) +static __always_inline void __raw_spin_lock(arch_spinlock_t *lock) { __ticket_spin_lock(lock); } -static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock) +static 
__always_inline int __raw_spin_trylock(arch_spinlock_t *lock) { return __ticket_spin_trylock(lock); } -static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock) +static __always_inline void __raw_spin_unlock(arch_spinlock_t *lock) { __ticket_spin_unlock(lock); } -static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock, +static __always_inline void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { __raw_spin_lock(lock); } -static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) +static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock) { __ticket_spin_unlock_wait(lock); } diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h index 474e46f1ab4a..447ccc6ca7a8 100644 --- a/arch/ia64/include/asm/spinlock_types.h +++ b/arch/ia64/include/asm/spinlock_types.h @@ -7,7 +7,7 @@ typedef struct { volatile unsigned int lock; -} raw_spinlock_t; +} arch_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 0 } diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h index dded923883b2..0c0164225bc0 100644 --- a/arch/m32r/include/asm/spinlock.h +++ b/arch/m32r/include/asm/spinlock.h @@ -36,7 +36,7 @@ * __raw_spin_trylock() tries to get the lock and returns a result. * On the m32r, the result value is 1 (= Success) or 0 (= Failure). */ -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int __raw_spin_trylock(arch_spinlock_t *lock) { int oldval; unsigned long tmp1, tmp2; @@ -69,7 +69,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) return (oldval > 0); } -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void __raw_spin_lock(arch_spinlock_t *lock) { unsigned long tmp0, tmp1; @@ -111,7 +111,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) ); } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void __raw_spin_unlock(arch_spinlock_t *lock) { mb(); lock->slock = 1; diff --git a/arch/m32r/include/asm/spinlock_types.h b/arch/m32r/include/asm/spinlock_types.h index 83f52105c0e4..17d15bd6322d 100644 --- a/arch/m32r/include/asm/spinlock_types.h +++ b/arch/m32r/include/asm/spinlock_types.h @@ -7,7 +7,7 @@ typedef struct { volatile int slock; -} raw_spinlock_t; +} arch_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 1 } diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h index 5b60a09a0f08..0f16d0673b4a 100644 --- a/arch/mips/include/asm/spinlock.h +++ b/arch/mips/include/asm/spinlock.h @@ -34,7 +34,7 @@ * becomes equal to the the initial value of the tail. 
*/ -static inline int __raw_spin_is_locked(raw_spinlock_t *lock) +static inline int __raw_spin_is_locked(arch_spinlock_t *lock) { unsigned int counters = ACCESS_ONCE(lock->lock); @@ -45,7 +45,7 @@ static inline int __raw_spin_is_locked(raw_spinlock_t *lock) #define __raw_spin_unlock_wait(x) \ while (__raw_spin_is_locked(x)) { cpu_relax(); } -static inline int __raw_spin_is_contended(raw_spinlock_t *lock) +static inline int __raw_spin_is_contended(arch_spinlock_t *lock) { unsigned int counters = ACCESS_ONCE(lock->lock); @@ -53,7 +53,7 @@ static inline int __raw_spin_is_contended(raw_spinlock_t *lock) } #define __raw_spin_is_contended __raw_spin_is_contended -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void __raw_spin_lock(arch_spinlock_t *lock) { int my_ticket; int tmp; @@ -134,7 +134,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) smp_llsc_mb(); } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void __raw_spin_unlock(arch_spinlock_t *lock) { int tmp; @@ -174,7 +174,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) } } -static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock) +static inline unsigned int __raw_spin_trylock(arch_spinlock_t *lock) { int tmp, tmp2, tmp3; diff --git a/arch/mips/include/asm/spinlock_types.h b/arch/mips/include/asm/spinlock_types.h index adeedaa116c1..2e1060892d3b 100644 --- a/arch/mips/include/asm/spinlock_types.h +++ b/arch/mips/include/asm/spinlock_types.h @@ -12,7 +12,7 @@ typedef struct { * bits 15..28: ticket */ unsigned int lock; -} raw_spinlock_t; +} arch_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 0 } diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 8bc9e96699b2..3a4ea778d4b6 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -27,18 +27,18 @@ # define ATOMIC_HASH_SIZE 4 # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) -extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; +extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; /* Can't use raw_spin_lock_irq because of #include problems, so * this is the substitute */ #define _atomic_spin_lock_irqsave(l,f) do { \ - raw_spinlock_t *s = ATOMIC_HASH(l); \ + arch_spinlock_t *s = ATOMIC_HASH(l); \ local_irq_save(f); \ __raw_spin_lock(s); \ } while(0) #define _atomic_spin_unlock_irqrestore(l,f) do { \ - raw_spinlock_t *s = ATOMIC_HASH(l); \ + arch_spinlock_t *s = ATOMIC_HASH(l); \ __raw_spin_unlock(s); \ local_irq_restore(f); \ } while(0) diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h index fae03e136fa8..69e8dca26744 100644 --- a/arch/parisc/include/asm/spinlock.h +++ b/arch/parisc/include/asm/spinlock.h @@ -5,7 +5,7 @@ #include #include -static inline int __raw_spin_is_locked(raw_spinlock_t *x) +static inline int __raw_spin_is_locked(arch_spinlock_t *x) { volatile unsigned int *a = __ldcw_align(x); return *a == 0; @@ -15,7 +15,7 @@ static inline int __raw_spin_is_locked(raw_spinlock_t *x) #define __raw_spin_unlock_wait(x) \ do { cpu_relax(); } while (__raw_spin_is_locked(x)) -static inline void __raw_spin_lock_flags(raw_spinlock_t *x, +static inline void __raw_spin_lock_flags(arch_spinlock_t *x, unsigned long flags) { volatile unsigned int *a; @@ -33,7 +33,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *x, mb(); } -static inline void __raw_spin_unlock(raw_spinlock_t *x) +static inline void 
__raw_spin_unlock(arch_spinlock_t *x) { volatile unsigned int *a; mb(); @@ -42,7 +42,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *x) mb(); } -static inline int __raw_spin_trylock(raw_spinlock_t *x) +static inline int __raw_spin_trylock(arch_spinlock_t *x) { volatile unsigned int *a; int ret; diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h index 3f72f47cf4b2..735caafb81f5 100644 --- a/arch/parisc/include/asm/spinlock_types.h +++ b/arch/parisc/include/asm/spinlock_types.h @@ -9,10 +9,10 @@ typedef struct { volatile unsigned int lock[4]; # define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } } #endif -} raw_spinlock_t; +} arch_spinlock_t; typedef struct { - raw_spinlock_t lock; + arch_spinlock_t lock; volatile int counter; } raw_rwlock_t; diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c index e3eb739fab19..fdd7f583de54 100644 --- a/arch/parisc/lib/bitops.c +++ b/arch/parisc/lib/bitops.c @@ -12,7 +12,7 @@ #include #ifdef CONFIG_SMP -raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = { +arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = { [0 ... (ATOMIC_HASH_SIZE-1)] = __RAW_SPIN_LOCK_UNLOCKED }; #endif diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 168fce726201..20de73c36682 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -58,7 +58,7 @@ struct rtas_t { unsigned long entry; /* physical address pointer */ unsigned long base; /* physical address pointer */ unsigned long size; - raw_spinlock_t lock; + arch_spinlock_t lock; struct rtas_args args; struct device_node *dev; /* virtual address pointer */ }; diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 198266cf9e2d..c0d44c92ff0e 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -54,7 +54,7 @@ * This returns the old value in the lock, so we succeeded * in getting the lock if the return value is 0. 
*/ -static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock) +static inline unsigned long arch_spin_trylock(arch_spinlock_t *lock) { unsigned long tmp, token; @@ -73,7 +73,7 @@ static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock) return tmp; } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int __raw_spin_trylock(arch_spinlock_t *lock) { CLEAR_IO_SYNC; return arch_spin_trylock(lock) == 0; @@ -96,7 +96,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) /* We only yield to the hypervisor if we are in shared processor mode */ #define SHARED_PROCESSOR (get_lppaca()->shared_proc) -extern void __spin_yield(raw_spinlock_t *lock); +extern void __spin_yield(arch_spinlock_t *lock); extern void __rw_yield(raw_rwlock_t *lock); #else /* SPLPAR || ISERIES */ #define __spin_yield(x) barrier() @@ -104,7 +104,7 @@ extern void __rw_yield(raw_rwlock_t *lock); #define SHARED_PROCESSOR 0 #endif -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void __raw_spin_lock(arch_spinlock_t *lock) { CLEAR_IO_SYNC; while (1) { @@ -120,7 +120,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) } static inline -void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) +void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { unsigned long flags_dis; @@ -140,7 +140,7 @@ void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) } } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void __raw_spin_unlock(arch_spinlock_t *lock) { SYNC_IO; __asm__ __volatile__("# __raw_spin_unlock\n\t" @@ -149,7 +149,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) } #ifdef CONFIG_PPC64 -extern void __raw_spin_unlock_wait(raw_spinlock_t *lock); +extern void __raw_spin_unlock_wait(arch_spinlock_t *lock); #else #define __raw_spin_unlock_wait(lock) \ do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h index 74236c9f05b1..4312e5baaf88 100644 --- a/arch/powerpc/include/asm/spinlock_types.h +++ b/arch/powerpc/include/asm/spinlock_types.h @@ -7,7 +7,7 @@ typedef struct { volatile unsigned int slock; -} raw_spinlock_t; +} arch_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 0 } diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index bf90361bb70f..579069c12152 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -978,7 +978,7 @@ int __init early_init_dt_scan_rtas(unsigned long node, return 1; } -static raw_spinlock_t timebase_lock; +static arch_spinlock_t timebase_lock; static u64 timebase = 0; void __cpuinit rtas_give_timebase(void) diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c index 79d0fa3a470d..b06294cde499 100644 --- a/arch/powerpc/lib/locks.c +++ b/arch/powerpc/lib/locks.c @@ -25,7 +25,7 @@ #include #include -void __spin_yield(raw_spinlock_t *lock) +void __spin_yield(arch_spinlock_t *lock) { unsigned int lock_value, holder_cpu, yield_count; @@ -82,7 +82,7 @@ void __rw_yield(raw_rwlock_t *rw) } #endif -void __raw_spin_unlock_wait(raw_spinlock_t *lock) +void __raw_spin_unlock_wait(arch_spinlock_t *lock) { while (lock->slock) { HMT_low(); diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c index a4619347aa7e..be36fece41d7 100644 --- a/arch/powerpc/platforms/pasemi/setup.c +++ 
b/arch/powerpc/platforms/pasemi/setup.c @@ -71,7 +71,7 @@ static void pas_restart(char *cmd) } #ifdef CONFIG_SMP -static raw_spinlock_t timebase_lock; +static arch_spinlock_t timebase_lock; static unsigned long timebase; static void __devinit pas_give_timebase(void) diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index c9af0d19c7ab..6121fa4b83d9 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h @@ -57,12 +57,12 @@ _raw_compare_and_swap(volatile unsigned int *lock, do { while (__raw_spin_is_locked(lock)) \ _raw_spin_relax(lock); } while (0) -extern void _raw_spin_lock_wait(raw_spinlock_t *); -extern void _raw_spin_lock_wait_flags(raw_spinlock_t *, unsigned long flags); -extern int _raw_spin_trylock_retry(raw_spinlock_t *); -extern void _raw_spin_relax(raw_spinlock_t *lock); +extern void _raw_spin_lock_wait(arch_spinlock_t *); +extern void _raw_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags); +extern int _raw_spin_trylock_retry(arch_spinlock_t *); +extern void _raw_spin_relax(arch_spinlock_t *lock); -static inline void __raw_spin_lock(raw_spinlock_t *lp) +static inline void __raw_spin_lock(arch_spinlock_t *lp) { int old; @@ -72,7 +72,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lp) _raw_spin_lock_wait(lp); } -static inline void __raw_spin_lock_flags(raw_spinlock_t *lp, +static inline void __raw_spin_lock_flags(arch_spinlock_t *lp, unsigned long flags) { int old; @@ -83,7 +83,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lp, _raw_spin_lock_wait_flags(lp, flags); } -static inline int __raw_spin_trylock(raw_spinlock_t *lp) +static inline int __raw_spin_trylock(arch_spinlock_t *lp) { int old; @@ -93,7 +93,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lp) return _raw_spin_trylock_retry(lp); } -static inline void __raw_spin_unlock(raw_spinlock_t *lp) +static inline void __raw_spin_unlock(arch_spinlock_t *lp) { _raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0); } diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h index 654abc40de04..a93638eee3f7 100644 --- a/arch/s390/include/asm/spinlock_types.h +++ b/arch/s390/include/asm/spinlock_types.h @@ -7,7 +7,7 @@ typedef struct { volatile unsigned int owner_cpu; -} __attribute__ ((aligned (4))) raw_spinlock_t; +} __attribute__ ((aligned (4))) arch_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 0 } diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c index f7e0d30250b7..d4cbf71a6077 100644 --- a/arch/s390/lib/spinlock.c +++ b/arch/s390/lib/spinlock.c @@ -39,7 +39,7 @@ static inline void _raw_yield_cpu(int cpu) _raw_yield(); } -void _raw_spin_lock_wait(raw_spinlock_t *lp) +void _raw_spin_lock_wait(arch_spinlock_t *lp) { int count = spin_retry; unsigned int cpu = ~smp_processor_id(); @@ -59,7 +59,7 @@ void _raw_spin_lock_wait(raw_spinlock_t *lp) } EXPORT_SYMBOL(_raw_spin_lock_wait); -void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags) +void _raw_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) { int count = spin_retry; unsigned int cpu = ~smp_processor_id(); @@ -82,7 +82,7 @@ void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags) } EXPORT_SYMBOL(_raw_spin_lock_wait_flags); -int _raw_spin_trylock_retry(raw_spinlock_t *lp) +int _raw_spin_trylock_retry(arch_spinlock_t *lp) { unsigned int cpu = ~smp_processor_id(); int count; @@ -97,7 +97,7 @@ int _raw_spin_trylock_retry(raw_spinlock_t *lp) } EXPORT_SYMBOL(_raw_spin_trylock_retry); 
-void _raw_spin_relax(raw_spinlock_t *lock) +void _raw_spin_relax(arch_spinlock_t *lock) { unsigned int cpu = lock->owner_cpu; if (cpu != 0) diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h index a28c9f0053fd..5a05b3fcefbe 100644 --- a/arch/sh/include/asm/spinlock.h +++ b/arch/sh/include/asm/spinlock.h @@ -34,7 +34,7 @@ * * We make no fairness assumptions. They have a cost. */ -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void __raw_spin_lock(arch_spinlock_t *lock) { unsigned long tmp; unsigned long oldval; @@ -54,7 +54,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) ); } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void __raw_spin_unlock(arch_spinlock_t *lock) { unsigned long tmp; @@ -67,7 +67,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) ); } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int __raw_spin_trylock(arch_spinlock_t *lock) { unsigned long tmp, oldval; diff --git a/arch/sh/include/asm/spinlock_types.h b/arch/sh/include/asm/spinlock_types.h index b4d244e7b60c..37712c32ba99 100644 --- a/arch/sh/include/asm/spinlock_types.h +++ b/arch/sh/include/asm/spinlock_types.h @@ -7,7 +7,7 @@ typedef struct { volatile unsigned int lock; -} raw_spinlock_t; +} arch_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 1 } diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h index 857630cff636..b2d8a67f727e 100644 --- a/arch/sparc/include/asm/spinlock_32.h +++ b/arch/sparc/include/asm/spinlock_32.h @@ -15,7 +15,7 @@ #define __raw_spin_unlock_wait(lock) \ do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void __raw_spin_lock(arch_spinlock_t *lock) { __asm__ __volatile__( "\n1:\n\t" @@ -35,7 +35,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) : "g2", "memory", "cc"); } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int __raw_spin_trylock(arch_spinlock_t *lock) { unsigned int result; __asm__ __volatile__("ldstub [%1], %0" @@ -45,7 +45,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) return (result == 0); } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void __raw_spin_unlock(arch_spinlock_t *lock) { __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory"); } diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h index 43e514783582..38e16c40efc4 100644 --- a/arch/sparc/include/asm/spinlock_64.h +++ b/arch/sparc/include/asm/spinlock_64.h @@ -27,7 +27,7 @@ do { rmb(); \ } while((lp)->lock) -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void __raw_spin_lock(arch_spinlock_t *lock) { unsigned long tmp; @@ -46,7 +46,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) : "memory"); } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int __raw_spin_trylock(arch_spinlock_t *lock) { unsigned long result; @@ -59,7 +59,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) return (result == 0UL); } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void __raw_spin_unlock(arch_spinlock_t *lock) { __asm__ __volatile__( " stb %%g0, [%0]" @@ -68,7 +68,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) : "memory"); } -static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) +static inline void 
__raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { unsigned long tmp1, tmp2; diff --git a/arch/sparc/include/asm/spinlock_types.h b/arch/sparc/include/asm/spinlock_types.h index 37cbe01c585b..41d9a8fec13d 100644 --- a/arch/sparc/include/asm/spinlock_types.h +++ b/arch/sparc/include/asm/spinlock_types.h @@ -7,7 +7,7 @@ typedef struct { volatile unsigned char lock; -} raw_spinlock_t; +} arch_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 0 } diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index efb38994859c..5655f75f10b7 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -731,34 +731,34 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS) -static inline int __raw_spin_is_locked(struct raw_spinlock *lock) +static inline int __raw_spin_is_locked(struct arch_spinlock *lock) { return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock); } -static inline int __raw_spin_is_contended(struct raw_spinlock *lock) +static inline int __raw_spin_is_contended(struct arch_spinlock *lock) { return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock); } #define __raw_spin_is_contended __raw_spin_is_contended -static __always_inline void __raw_spin_lock(struct raw_spinlock *lock) +static __always_inline void __raw_spin_lock(struct arch_spinlock *lock) { PVOP_VCALL1(pv_lock_ops.spin_lock, lock); } -static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock, +static __always_inline void __raw_spin_lock_flags(struct arch_spinlock *lock, unsigned long flags) { PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags); } -static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock) +static __always_inline int __raw_spin_trylock(struct arch_spinlock *lock) { return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock); } -static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock) +static __always_inline void __raw_spin_unlock(struct arch_spinlock *lock) { PVOP_VCALL1(pv_lock_ops.spin_unlock, lock); } diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 9357473c8da0..b1e70d51e40c 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -318,14 +318,14 @@ struct pv_mmu_ops { phys_addr_t phys, pgprot_t flags); }; -struct raw_spinlock; +struct arch_spinlock; struct pv_lock_ops { - int (*spin_is_locked)(struct raw_spinlock *lock); - int (*spin_is_contended)(struct raw_spinlock *lock); - void (*spin_lock)(struct raw_spinlock *lock); - void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags); - int (*spin_trylock)(struct raw_spinlock *lock); - void (*spin_unlock)(struct raw_spinlock *lock); + int (*spin_is_locked)(struct arch_spinlock *lock); + int (*spin_is_contended)(struct arch_spinlock *lock); + void (*spin_lock)(struct arch_spinlock *lock); + void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags); + int (*spin_trylock)(struct arch_spinlock *lock); + void (*spin_unlock)(struct arch_spinlock *lock); }; /* This contains all the paravirt structures: we get a convenient diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index 4e77853321db..204b524fcf57 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h @@ -58,7 +58,7 @@ #if (NR_CPUS < 256) #define TICKET_SHIFT 8 -static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) +static 
__always_inline void __ticket_spin_lock(arch_spinlock_t *lock) { short inc = 0x0100; @@ -77,7 +77,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) : "memory", "cc"); } -static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) +static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock) { int tmp, new; @@ -96,7 +96,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) return tmp; } -static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) { asm volatile(UNLOCK_LOCK_PREFIX "incb %0" : "+m" (lock->slock) @@ -106,7 +106,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) #else #define TICKET_SHIFT 16 -static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock) { int inc = 0x00010000; int tmp; @@ -127,7 +127,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) : "memory", "cc"); } -static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) +static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock) { int tmp; int new; @@ -149,7 +149,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) return tmp; } -static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) { asm volatile(UNLOCK_LOCK_PREFIX "incw %0" : "+m" (lock->slock) @@ -158,14 +158,14 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) } #endif -static inline int __ticket_spin_is_locked(raw_spinlock_t *lock) +static inline int __ticket_spin_is_locked(arch_spinlock_t *lock) { int tmp = ACCESS_ONCE(lock->slock); return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1)); } -static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) +static inline int __ticket_spin_is_contended(arch_spinlock_t *lock) { int tmp = ACCESS_ONCE(lock->slock); @@ -174,33 +174,33 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) #ifndef CONFIG_PARAVIRT_SPINLOCKS -static inline int __raw_spin_is_locked(raw_spinlock_t *lock) +static inline int __raw_spin_is_locked(arch_spinlock_t *lock) { return __ticket_spin_is_locked(lock); } -static inline int __raw_spin_is_contended(raw_spinlock_t *lock) +static inline int __raw_spin_is_contended(arch_spinlock_t *lock) { return __ticket_spin_is_contended(lock); } #define __raw_spin_is_contended __raw_spin_is_contended -static __always_inline void __raw_spin_lock(raw_spinlock_t *lock) +static __always_inline void __raw_spin_lock(arch_spinlock_t *lock) { __ticket_spin_lock(lock); } -static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock) +static __always_inline int __raw_spin_trylock(arch_spinlock_t *lock) { return __ticket_spin_trylock(lock); } -static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock) +static __always_inline void __raw_spin_unlock(arch_spinlock_t *lock) { __ticket_spin_unlock(lock); } -static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock, +static __always_inline void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { __raw_spin_lock(lock); @@ -208,7 +208,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock, #endif /* CONFIG_PARAVIRT_SPINLOCKS */ -static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) +static inline void __raw_spin_unlock_wait(arch_spinlock_t 
*lock) { while (__raw_spin_is_locked(lock)) cpu_relax(); diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h index 845f81c87091..2ae7637ed524 100644 --- a/arch/x86/include/asm/spinlock_types.h +++ b/arch/x86/include/asm/spinlock_types.h @@ -5,9 +5,9 @@ # error "please don't include this file directly" #endif -typedef struct raw_spinlock { +typedef struct arch_spinlock { unsigned int slock; -} raw_spinlock_t; +} arch_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 0 } diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index b8ce165dde5d..0862d9d89c92 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -188,7 +188,7 @@ void dump_stack(void) } EXPORT_SYMBOL(dump_stack); -static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED; +static arch_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED; static int die_owner = -1; static unsigned int die_nest_count; diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c index 3a7c5a44082e..a0f39e090684 100644 --- a/arch/x86/kernel/paravirt-spinlocks.c +++ b/arch/x86/kernel/paravirt-spinlocks.c @@ -8,7 +8,7 @@ #include static inline void -default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) +default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { __raw_spin_lock(lock); } diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index eed156851f5d..9f908b9d1abe 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c @@ -33,7 +33,7 @@ static __cpuinitdata atomic_t stop_count; * we want to have the fastest, inlined, non-debug version * of a critical section, to be able to prove TSC time-warps: */ -static __cpuinitdata raw_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED; +static __cpuinitdata arch_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED; static __cpuinitdata cycles_t last_tsc; static __cpuinitdata cycles_t max_warp; diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index 36a5141108df..24ded31b5aec 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c @@ -120,14 +120,14 @@ struct xen_spinlock { unsigned short spinners; /* count of waiting cpus */ }; -static int xen_spin_is_locked(struct raw_spinlock *lock) +static int xen_spin_is_locked(struct arch_spinlock *lock) { struct xen_spinlock *xl = (struct xen_spinlock *)lock; return xl->lock != 0; } -static int xen_spin_is_contended(struct raw_spinlock *lock) +static int xen_spin_is_contended(struct arch_spinlock *lock) { struct xen_spinlock *xl = (struct xen_spinlock *)lock; @@ -136,7 +136,7 @@ static int xen_spin_is_contended(struct raw_spinlock *lock) return xl->spinners != 0; } -static int xen_spin_trylock(struct raw_spinlock *lock) +static int xen_spin_trylock(struct arch_spinlock *lock) { struct xen_spinlock *xl = (struct xen_spinlock *)lock; u8 old = 1; @@ -181,7 +181,7 @@ static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock __get_cpu_var(lock_spinners) = prev; } -static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable) +static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable) { struct xen_spinlock *xl = (struct xen_spinlock *)lock; struct xen_spinlock *prev; @@ -254,7 +254,7 @@ out: return ret; } -static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable) +static inline void __xen_spin_lock(struct arch_spinlock *lock, bool irq_enable) { struct xen_spinlock *xl = (struct xen_spinlock *)lock; unsigned timeout; 
@@ -291,12 +291,12 @@ static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable) spin_time_accum_total(start_spin); } -static void xen_spin_lock(struct raw_spinlock *lock) +static void xen_spin_lock(struct arch_spinlock *lock) { __xen_spin_lock(lock, false); } -static void xen_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags) +static void xen_spin_lock_flags(struct arch_spinlock *lock, unsigned long flags) { __xen_spin_lock(lock, !raw_irqs_disabled_flags(flags)); } @@ -317,7 +317,7 @@ static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl) } } -static void xen_spin_unlock(struct raw_spinlock *lock) +static void xen_spin_unlock(struct arch_spinlock *lock) { struct xen_spinlock *xl = (struct xen_spinlock *)lock; diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h index c8946465e63a..dcf0afad4a7f 100644 --- a/include/asm-generic/bitops/atomic.h +++ b/include/asm-generic/bitops/atomic.h @@ -15,18 +15,18 @@ # define ATOMIC_HASH_SIZE 4 # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) -extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; +extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; /* Can't use raw_spin_lock_irq because of #include problems, so * this is the substitute */ #define _atomic_spin_lock_irqsave(l,f) do { \ - raw_spinlock_t *s = ATOMIC_HASH(l); \ + arch_spinlock_t *s = ATOMIC_HASH(l); \ local_irq_save(f); \ __raw_spin_lock(s); \ } while(0) #define _atomic_spin_unlock_irqrestore(l,f) do { \ - raw_spinlock_t *s = ATOMIC_HASH(l); \ + arch_spinlock_t *s = ATOMIC_HASH(l); \ __raw_spin_unlock(s); \ local_irq_restore(f); \ } while(0) diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index a9aaa709fb93..5ef7a4c060b5 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -8,7 +8,7 @@ * * on SMP builds: * - * asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the + * asm/spinlock_types.h: contains the arch_spinlock_t/raw_rwlock_t and the * initializers * * linux/spinlock_types.h: @@ -75,7 +75,7 @@ #define __lockfunc __attribute__((section(".spinlock.text"))) /* - * Pull the raw_spinlock_t and raw_rwlock_t definitions: + * Pull the arch_spinlock_t and raw_rwlock_t definitions: */ #include diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index f979d5d8a160..d4af2d7a86ea 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -18,7 +18,7 @@ #include typedef struct { - raw_spinlock_t raw_lock; + arch_spinlock_t raw_lock; #ifdef CONFIG_GENERIC_LOCKBREAK unsigned int break_lock; #endif diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h index 04135b0e198e..34d36691c4ec 100644 --- a/include/linux/spinlock_types_up.h +++ b/include/linux/spinlock_types_up.h @@ -16,13 +16,13 @@ typedef struct { volatile unsigned int slock; -} raw_spinlock_t; +} arch_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 1 } #else -typedef struct { } raw_spinlock_t; +typedef struct { } arch_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { } diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h index d4841ed8215b..8ee2ac1bf636 100644 --- a/include/linux/spinlock_up.h +++ b/include/linux/spinlock_up.h @@ -20,19 +20,19 @@ #ifdef CONFIG_DEBUG_SPINLOCK #define __raw_spin_is_locked(x) ((x)->slock == 0) -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void __raw_spin_lock(arch_spinlock_t 
*lock) { lock->slock = 0; } static inline void -__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) +__raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { local_irq_save(flags); lock->slock = 0; } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int __raw_spin_trylock(arch_spinlock_t *lock) { char oldval = lock->slock; @@ -41,7 +41,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) return oldval > 0; } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void __raw_spin_unlock(arch_spinlock_t *lock) { lock->slock = 1; } diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 429540c70d3f..7cc50c62af59 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -73,7 +73,7 @@ module_param(lock_stat, int, 0644); * to use a raw spinlock - we really dont want the spinlock * code to recurse back into the lockdep code... */ -static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; +static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; static int graph_lock(void) { diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index a1ca4956ab5e..5ac8ee0a9e35 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -423,7 +423,7 @@ struct ring_buffer_per_cpu { int cpu; struct ring_buffer *buffer; spinlock_t reader_lock; /* serialize readers */ - raw_spinlock_t lock; + arch_spinlock_t lock; struct lock_class_key lock_key; struct list_head *pages; struct buffer_page *head_page; /* read from head */ @@ -998,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) cpu_buffer->buffer = buffer; spin_lock_init(&cpu_buffer->reader_lock); lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); - cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + cpu_buffer->lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), GFP_KERNEL, cpu_to_node(cpu)); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index c82dfd92fdfd..7d56cecc2c6e 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -493,15 +493,15 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) * protected by per_cpu spinlocks. But the action of the swap * needs its own lock. * - * This is defined as a raw_spinlock_t in order to help + * This is defined as a arch_spinlock_t in order to help * with performance when lockdep debugging is enabled. * * It is also used in other places outside the update_max_tr * so it needs to be defined outside of the * CONFIG_TRACER_MAX_TRACE. 
*/ -static raw_spinlock_t ftrace_max_lock = - (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; +static arch_spinlock_t ftrace_max_lock = + (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; #ifdef CONFIG_TRACER_MAX_TRACE unsigned long __read_mostly tracing_max_latency; @@ -802,7 +802,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; static unsigned map_cmdline_to_pid[SAVED_CMDLINES]; static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN]; static int cmdline_idx; -static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED; +static arch_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED; /* temporary disable recording */ static atomic_t trace_record_cmdline_disabled __read_mostly; @@ -1251,8 +1251,8 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) */ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) { - static raw_spinlock_t trace_buf_lock = - (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + static arch_spinlock_t trace_buf_lock = + (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; static u32 trace_buf[TRACE_BUF_SIZE]; struct ftrace_event_call *call = &event_bprint; @@ -1334,7 +1334,7 @@ int trace_array_printk(struct trace_array *tr, int trace_array_vprintk(struct trace_array *tr, unsigned long ip, const char *fmt, va_list args) { - static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED; + static arch_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED; static char trace_buf[TRACE_BUF_SIZE]; struct ftrace_event_call *call = &event_print; @@ -4307,8 +4307,8 @@ trace_printk_seq(struct trace_seq *s) static void __ftrace_dump(bool disable_tracing) { - static raw_spinlock_t ftrace_dump_lock = - (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + static arch_spinlock_t ftrace_dump_lock = + (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; /* use static because iter can be a bit big for the stack */ static struct trace_iterator iter; unsigned int old_userobj; diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c index 878c03f386ba..206ec3d4b3c2 100644 --- a/kernel/trace/trace_clock.c +++ b/kernel/trace/trace_clock.c @@ -71,10 +71,10 @@ u64 notrace trace_clock(void) /* keep prev_time and lock in the same cacheline. 
*/ static struct { u64 prev_time; - raw_spinlock_t lock; + arch_spinlock_t lock; } trace_clock_struct ____cacheline_aligned_in_smp = { - .lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED, + .lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED, }; u64 notrace trace_clock_global(void) diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 26185d727676..4cf7e83ec235 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -28,8 +28,8 @@ static int wakeup_current_cpu; static unsigned wakeup_prio = -1; static int wakeup_rt; -static raw_spinlock_t wakeup_lock = - (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; +static arch_spinlock_t wakeup_lock = + (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; static void __wakeup_reset(struct trace_array *tr); diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 8504ac71e4e8..9a82d568fdec 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c @@ -27,8 +27,8 @@ static struct stack_trace max_stack_trace = { }; static unsigned long max_stack_size; -static raw_spinlock_t max_stack_lock = - (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; +static arch_spinlock_t max_stack_lock = + (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; static int stack_trace_disabled __read_mostly; static DEFINE_PER_CPU(int, trace_active); diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c index 9c4b0256490b..2acd501b3826 100644 --- a/lib/spinlock_debug.c +++ b/lib/spinlock_debug.c @@ -23,7 +23,7 @@ void __spin_lock_init(spinlock_t *lock, const char *name, debug_check_no_locks_freed((void *)lock, sizeof(*lock)); lockdep_init_map(&lock->dep_map, name, key, 0); #endif - lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + lock->raw_lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; lock->magic = SPINLOCK_MAGIC; lock->owner = SPINLOCK_OWNER_INIT; lock->owner_cpu = -1; -- cgit v1.2.3 From edc35bd72e2079b25f99c5da7d7a65dbbffc4a26 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 3 Dec 2009 12:38:57 +0100 Subject: locking: Rename __RAW_SPIN_LOCK_UNLOCKED to __ARCH_SPIN_LOCK_UNLOCKED Further name space cleanup. No functional change Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Acked-by: David S. 
Miller Acked-by: Ingo Molnar Cc: linux-arch@vger.kernel.org --- arch/alpha/include/asm/spinlock_types.h | 2 +- arch/arm/include/asm/spinlock_types.h | 2 +- arch/blackfin/include/asm/spinlock_types.h | 2 +- arch/ia64/include/asm/spinlock_types.h | 2 +- arch/m32r/include/asm/spinlock_types.h | 2 +- arch/mips/include/asm/spinlock_types.h | 2 +- arch/parisc/include/asm/spinlock_types.h | 6 +++--- arch/parisc/lib/bitops.c | 2 +- arch/powerpc/include/asm/spinlock_types.h | 2 +- arch/powerpc/kernel/rtas.c | 2 +- arch/s390/include/asm/spinlock_types.h | 2 +- arch/sh/include/asm/spinlock_types.h | 2 +- arch/sparc/include/asm/spinlock_types.h | 2 +- arch/x86/include/asm/spinlock_types.h | 2 +- arch/x86/kernel/dumpstack.c | 2 +- arch/x86/kernel/tsc_sync.c | 2 +- include/linux/spinlock_types.h | 4 ++-- include/linux/spinlock_types_up.h | 4 ++-- kernel/lockdep.c | 2 +- kernel/trace/ring_buffer.c | 2 +- kernel/trace/trace.c | 10 +++++----- kernel/trace/trace_clock.c | 2 +- kernel/trace/trace_sched_wakeup.c | 2 +- kernel/trace/trace_stack.c | 2 +- lib/spinlock_debug.c | 2 +- 25 files changed, 33 insertions(+), 33 deletions(-) (limited to 'include') diff --git a/arch/alpha/include/asm/spinlock_types.h b/arch/alpha/include/asm/spinlock_types.h index bb94a51e53d2..08975ee0a100 100644 --- a/arch/alpha/include/asm/spinlock_types.h +++ b/arch/alpha/include/asm/spinlock_types.h @@ -9,7 +9,7 @@ typedef struct { volatile unsigned int lock; } arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile unsigned int lock; diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h index 5e9d3eadd167..9622e126a8de 100644 --- a/arch/arm/include/asm/spinlock_types.h +++ b/arch/arm/include/asm/spinlock_types.h @@ -9,7 +9,7 @@ typedef struct { volatile unsigned int lock; } arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile unsigned int lock; diff --git a/arch/blackfin/include/asm/spinlock_types.h b/arch/blackfin/include/asm/spinlock_types.h index 03b377abf5c0..c8a3928a58c5 100644 --- a/arch/blackfin/include/asm/spinlock_types.h +++ b/arch/blackfin/include/asm/spinlock_types.h @@ -17,7 +17,7 @@ typedef struct { volatile unsigned int lock; } arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile unsigned int lock; diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h index 447ccc6ca7a8..6a11b65fa66d 100644 --- a/arch/ia64/include/asm/spinlock_types.h +++ b/arch/ia64/include/asm/spinlock_types.h @@ -9,7 +9,7 @@ typedef struct { volatile unsigned int lock; } arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile unsigned int read_counter : 31; diff --git a/arch/m32r/include/asm/spinlock_types.h b/arch/m32r/include/asm/spinlock_types.h index 17d15bd6322d..5873a8701107 100644 --- a/arch/m32r/include/asm/spinlock_types.h +++ b/arch/m32r/include/asm/spinlock_types.h @@ -9,7 +9,7 @@ typedef struct { volatile int slock; } arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 1 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 1 } typedef struct { volatile int lock; diff --git a/arch/mips/include/asm/spinlock_types.h b/arch/mips/include/asm/spinlock_types.h index 2e1060892d3b..b4c5efaadb9c 100644 --- a/arch/mips/include/asm/spinlock_types.h +++ b/arch/mips/include/asm/spinlock_types.h @@ -14,7 +14,7 @@ 
typedef struct { unsigned int lock; } arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile unsigned int lock; diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h index 735caafb81f5..396d2746ca57 100644 --- a/arch/parisc/include/asm/spinlock_types.h +++ b/arch/parisc/include/asm/spinlock_types.h @@ -4,10 +4,10 @@ typedef struct { #ifdef CONFIG_PA20 volatile unsigned int slock; -# define __RAW_SPIN_LOCK_UNLOCKED { 1 } +# define __ARCH_SPIN_LOCK_UNLOCKED { 1 } #else volatile unsigned int lock[4]; -# define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } } +# define __ARCH_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } } #endif } arch_spinlock_t; @@ -16,6 +16,6 @@ typedef struct { volatile int counter; } raw_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { __RAW_SPIN_LOCK_UNLOCKED, 0 } +#define __RAW_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 } #endif diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c index fdd7f583de54..353963d42059 100644 --- a/arch/parisc/lib/bitops.c +++ b/arch/parisc/lib/bitops.c @@ -13,7 +13,7 @@ #ifdef CONFIG_SMP arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = { - [0 ... (ATOMIC_HASH_SIZE-1)] = __RAW_SPIN_LOCK_UNLOCKED + [0 ... (ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED }; #endif diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h index 4312e5baaf88..f5f39d82711f 100644 --- a/arch/powerpc/include/asm/spinlock_types.h +++ b/arch/powerpc/include/asm/spinlock_types.h @@ -9,7 +9,7 @@ typedef struct { volatile unsigned int slock; } arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile signed int lock; diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 579069c12152..57dfa414cfb8 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -42,7 +42,7 @@ #include struct rtas_t rtas = { - .lock = __RAW_SPIN_LOCK_UNLOCKED + .lock = __ARCH_SPIN_LOCK_UNLOCKED }; EXPORT_SYMBOL(rtas); diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h index a93638eee3f7..e25c0370f6cd 100644 --- a/arch/s390/include/asm/spinlock_types.h +++ b/arch/s390/include/asm/spinlock_types.h @@ -9,7 +9,7 @@ typedef struct { volatile unsigned int owner_cpu; } __attribute__ ((aligned (4))) arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile unsigned int lock; diff --git a/arch/sh/include/asm/spinlock_types.h b/arch/sh/include/asm/spinlock_types.h index 37712c32ba99..a3be2db960ed 100644 --- a/arch/sh/include/asm/spinlock_types.h +++ b/arch/sh/include/asm/spinlock_types.h @@ -9,7 +9,7 @@ typedef struct { volatile unsigned int lock; } arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 1 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 1 } typedef struct { volatile unsigned int lock; diff --git a/arch/sparc/include/asm/spinlock_types.h b/arch/sparc/include/asm/spinlock_types.h index 41d9a8fec13d..c145e63a5d66 100644 --- a/arch/sparc/include/asm/spinlock_types.h +++ b/arch/sparc/include/asm/spinlock_types.h @@ -9,7 +9,7 @@ typedef struct { volatile unsigned char lock; } arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile unsigned int lock; diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h index 
2ae7637ed524..696f8364a4f3 100644 --- a/arch/x86/include/asm/spinlock_types.h +++ b/arch/x86/include/asm/spinlock_types.h @@ -9,7 +9,7 @@ typedef struct arch_spinlock { unsigned int slock; } arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { unsigned int lock; diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 0862d9d89c92..5b75afac8a38 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -188,7 +188,7 @@ void dump_stack(void) } EXPORT_SYMBOL(dump_stack); -static arch_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED; +static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED; static int die_owner = -1; static unsigned int die_nest_count; diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index 9f908b9d1abe..f1714697a09a 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c @@ -33,7 +33,7 @@ static __cpuinitdata atomic_t stop_count; * we want to have the fastest, inlined, non-debug version * of a critical section, to be able to prove TSC time-warps: */ -static __cpuinitdata arch_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED; +static __cpuinitdata arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED; static __cpuinitdata cycles_t last_tsc; static __cpuinitdata cycles_t max_warp; diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index d4af2d7a86ea..7dadce303ebf 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -43,14 +43,14 @@ typedef struct { #ifdef CONFIG_DEBUG_SPINLOCK # define __SPIN_LOCK_UNLOCKED(lockname) \ - (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ + (spinlock_t) { .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ .magic = SPINLOCK_MAGIC, \ .owner = SPINLOCK_OWNER_INIT, \ .owner_cpu = -1, \ SPIN_DEP_MAP_INIT(lockname) } #else # define __SPIN_LOCK_UNLOCKED(lockname) \ - (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ + (spinlock_t) { .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ SPIN_DEP_MAP_INIT(lockname) } #endif diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h index 34d36691c4ec..10db021f4875 100644 --- a/include/linux/spinlock_types_up.h +++ b/include/linux/spinlock_types_up.h @@ -18,13 +18,13 @@ typedef struct { volatile unsigned int slock; } arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 1 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 1 } #else typedef struct { } arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { } +#define __ARCH_SPIN_LOCK_UNLOCKED { } #endif diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 7cc50c62af59..2389e3f85cf6 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -73,7 +73,7 @@ module_param(lock_stat, int, 0644); * to use a raw spinlock - we really dont want the spinlock * code to recurse back into the lockdep code... 
*/ -static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; +static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; static int graph_lock(void) { diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 5ac8ee0a9e35..fb7a0fa508b9 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -998,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) cpu_buffer->buffer = buffer; spin_lock_init(&cpu_buffer->reader_lock); lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); - cpu_buffer->lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), GFP_KERNEL, cpu_to_node(cpu)); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 7d56cecc2c6e..63bc1cc38219 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -501,7 +501,7 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) * CONFIG_TRACER_MAX_TRACE. */ static arch_spinlock_t ftrace_max_lock = - (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; #ifdef CONFIG_TRACER_MAX_TRACE unsigned long __read_mostly tracing_max_latency; @@ -802,7 +802,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; static unsigned map_cmdline_to_pid[SAVED_CMDLINES]; static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN]; static int cmdline_idx; -static arch_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED; +static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED; /* temporary disable recording */ static atomic_t trace_record_cmdline_disabled __read_mostly; @@ -1252,7 +1252,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) { static arch_spinlock_t trace_buf_lock = - (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; static u32 trace_buf[TRACE_BUF_SIZE]; struct ftrace_event_call *call = &event_bprint; @@ -1334,7 +1334,7 @@ int trace_array_printk(struct trace_array *tr, int trace_array_vprintk(struct trace_array *tr, unsigned long ip, const char *fmt, va_list args) { - static arch_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED; + static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED; static char trace_buf[TRACE_BUF_SIZE]; struct ftrace_event_call *call = &event_print; @@ -4308,7 +4308,7 @@ trace_printk_seq(struct trace_seq *s) static void __ftrace_dump(bool disable_tracing) { static arch_spinlock_t ftrace_dump_lock = - (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; /* use static because iter can be a bit big for the stack */ static struct trace_iterator iter; unsigned int old_userobj; diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c index 206ec3d4b3c2..433e2eda2d01 100644 --- a/kernel/trace/trace_clock.c +++ b/kernel/trace/trace_clock.c @@ -74,7 +74,7 @@ static struct { arch_spinlock_t lock; } trace_clock_struct ____cacheline_aligned_in_smp = { - .lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED, + .lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED, }; u64 notrace trace_clock_global(void) diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 4cf7e83ec235..e347853564e9 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -29,7 +29,7 @@ 
static unsigned wakeup_prio = -1; static int wakeup_rt; static arch_spinlock_t wakeup_lock = - (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; static void __wakeup_reset(struct trace_array *tr); diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 9a82d568fdec..728c35221483 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c @@ -28,7 +28,7 @@ static struct stack_trace max_stack_trace = { static unsigned long max_stack_size; static arch_spinlock_t max_stack_lock = - (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; static int stack_trace_disabled __read_mostly; static DEFINE_PER_CPU(int, trace_active); diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c index 2acd501b3826..f73004137141 100644 --- a/lib/spinlock_debug.c +++ b/lib/spinlock_debug.c @@ -23,7 +23,7 @@ void __spin_lock_init(spinlock_t *lock, const char *name, debug_check_no_locks_freed((void *)lock, sizeof(*lock)); lockdep_init_map(&lock->dep_map, name, key, 0); #endif - lock->raw_lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; lock->magic = SPINLOCK_MAGIC; lock->owner = SPINLOCK_OWNER_INIT; lock->owner_cpu = -1; -- cgit v1.2.3 From 0199c4e68d1f02894bdefe4b5d9e9ee4aedd8d62 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 2 Dec 2009 20:01:25 +0100 Subject: locking: Convert __raw_spin* functions to arch_spin* Name space cleanup. No functional change. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Acked-by: David S. Miller Acked-by: Ingo Molnar Cc: linux-arch@vger.kernel.org --- arch/alpha/include/asm/spinlock.h | 18 ++++++------ arch/arm/include/asm/spinlock.h | 20 ++++++------- arch/blackfin/include/asm/spinlock.h | 20 ++++++------- arch/cris/include/arch-v32/arch/spinlock.h | 46 +++++++++++++++--------------- arch/ia64/include/asm/bitops.h | 2 +- arch/ia64/include/asm/spinlock.h | 26 ++++++++--------- arch/m32r/include/asm/spinlock.h | 28 +++++++++--------- arch/mips/include/asm/spinlock.h | 36 +++++++++++------------ arch/parisc/include/asm/atomic.h | 4 +-- arch/parisc/include/asm/spinlock.h | 44 ++++++++++++++-------------- arch/powerpc/include/asm/spinlock.h | 32 ++++++++++----------- arch/powerpc/kernel/rtas.c | 12 ++++---- arch/powerpc/lib/locks.c | 4 +-- arch/powerpc/platforms/pasemi/setup.c | 8 +++--- arch/s390/include/asm/spinlock.h | 34 +++++++++++----------- arch/s390/lib/spinlock.c | 22 +++++++------- arch/sh/include/asm/spinlock.h | 26 ++++++++--------- arch/sparc/include/asm/spinlock_32.h | 20 ++++++------- arch/sparc/include/asm/spinlock_64.h | 18 ++++++------ arch/x86/include/asm/paravirt.h | 14 ++++----- arch/x86/include/asm/spinlock.h | 26 ++++++++--------- arch/x86/kernel/dumpstack.c | 6 ++-- arch/x86/kernel/paravirt-spinlocks.c | 2 +- arch/x86/kernel/tsc_sync.c | 8 +++--- include/asm-generic/bitops/atomic.h | 4 +-- include/linux/spinlock.h | 22 +++++++------- include/linux/spinlock_up.h | 26 ++++++++--------- kernel/lockdep.c | 18 ++++++------ kernel/mutex-debug.h | 4 +-- kernel/spinlock.c | 4 +-- kernel/trace/ring_buffer.c | 12 ++++---- kernel/trace/trace.c | 32 ++++++++++----------- kernel/trace/trace_clock.c | 4 +-- kernel/trace/trace_sched_wakeup.c | 12 ++++---- kernel/trace/trace_selftest.c | 4 +-- kernel/trace/trace_stack.c | 12 ++++---- lib/spinlock_debug.c | 8 +++--- 37 files changed, 319 insertions(+), 319 deletions(-) (limited to 'include') diff --git a/arch/alpha/include/asm/spinlock.h 
b/arch/alpha/include/asm/spinlock.h index bdb26a1940b4..4dac79f504c3 100644 --- a/arch/alpha/include/asm/spinlock.h +++ b/arch/alpha/include/asm/spinlock.h @@ -12,18 +12,18 @@ * We make no fairness assumptions. They have a cost. */ -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) -#define __raw_spin_is_locked(x) ((x)->lock != 0) -#define __raw_spin_unlock_wait(x) \ +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) +#define arch_spin_is_locked(x) ((x)->lock != 0) +#define arch_spin_unlock_wait(x) \ do { cpu_relax(); } while ((x)->lock) -static inline void __raw_spin_unlock(arch_spinlock_t * lock) +static inline void arch_spin_unlock(arch_spinlock_t * lock) { mb(); lock->lock = 0; } -static inline void __raw_spin_lock(arch_spinlock_t * lock) +static inline void arch_spin_lock(arch_spinlock_t * lock) { long tmp; @@ -43,7 +43,7 @@ static inline void __raw_spin_lock(arch_spinlock_t * lock) : "m"(lock->lock) : "memory"); } -static inline int __raw_spin_trylock(arch_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { return !test_and_set_bit(0, &lock->lock); } @@ -169,8 +169,8 @@ static inline void __raw_write_unlock(raw_rwlock_t * lock) #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* _ALPHA_SPINLOCK_H */ diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index 4e7712ee9394..de62eb098f68 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -17,13 +17,13 @@ * Locked value: 1 */ -#define __raw_spin_is_locked(x) ((x)->lock != 0) -#define __raw_spin_unlock_wait(lock) \ - do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) +#define arch_spin_is_locked(x) ((x)->lock != 0) +#define arch_spin_unlock_wait(lock) \ + do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) -static inline void __raw_spin_lock(arch_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { unsigned long tmp; @@ -43,7 +43,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock) smp_mb(); } -static inline int __raw_spin_trylock(arch_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { unsigned long tmp; @@ -63,7 +63,7 @@ static inline int __raw_spin_trylock(arch_spinlock_t *lock) } } -static inline void __raw_spin_unlock(arch_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { smp_mb(); @@ -220,8 +220,8 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* __ASM_SPINLOCK_H */ diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h index fc16b4c5309b..62d49540e02b 100644 --- 
a/arch/blackfin/include/asm/spinlock.h +++ b/arch/blackfin/include/asm/spinlock.h @@ -24,31 +24,31 @@ asmlinkage void __raw_write_lock_asm(volatile int *ptr); asmlinkage int __raw_write_trylock_asm(volatile int *ptr); asmlinkage void __raw_write_unlock_asm(volatile int *ptr); -static inline int __raw_spin_is_locked(arch_spinlock_t *lock) +static inline int arch_spin_is_locked(arch_spinlock_t *lock) { return __raw_spin_is_locked_asm(&lock->lock); } -static inline void __raw_spin_lock(arch_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { __raw_spin_lock_asm(&lock->lock); } -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) -static inline int __raw_spin_trylock(arch_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { return __raw_spin_trylock_asm(&lock->lock); } -static inline void __raw_spin_unlock(arch_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { __raw_spin_unlock_asm(&lock->lock); } -static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock) +static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) { - while (__raw_spin_is_locked(lock)) + while (arch_spin_is_locked(lock)) cpu_relax(); } @@ -92,9 +92,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) __raw_write_unlock_asm(&rw->lock); } -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif diff --git a/arch/cris/include/arch-v32/arch/spinlock.h b/arch/cris/include/arch-v32/arch/spinlock.h index e253457765f2..a2e8a394d555 100644 --- a/arch/cris/include/arch-v32/arch/spinlock.h +++ b/arch/cris/include/arch-v32/arch/spinlock.h @@ -9,12 +9,12 @@ extern void cris_spin_unlock(void *l, int val); extern void cris_spin_lock(void *l); extern int cris_spin_trylock(void *l); -static inline int __raw_spin_is_locked(arch_spinlock_t *x) +static inline int arch_spin_is_locked(arch_spinlock_t *x) { return *(volatile signed char *)(&(x)->slock) <= 0; } -static inline void __raw_spin_unlock(arch_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { __asm__ volatile ("move.d %1,%0" \ : "=m" (lock->slock) \ @@ -22,26 +22,26 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock) : "memory"); } -static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock) +static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) { - while (__raw_spin_is_locked(lock)) + while (arch_spin_is_locked(lock)) cpu_relax(); } -static inline int __raw_spin_trylock(arch_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { return cris_spin_trylock((void *)&lock->slock); } -static inline void __raw_spin_lock(arch_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { cris_spin_lock((void *)&lock->slock); } static inline void -__raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) +arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { - __raw_spin_lock(lock); + arch_spin_lock(lock); } /* @@ -68,64 +68,64 @@ static inline int __raw_write_can_lock(raw_rwlock_t *x) static inline void __raw_read_lock(raw_rwlock_t *rw) { - __raw_spin_lock(&rw->slock); + arch_spin_lock(&rw->slock); while (rw->lock == 0); rw->lock--; - __raw_spin_unlock(&rw->slock); + arch_spin_unlock(&rw->slock); } 
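/*
 * Aside: the CRIS helpers in this hunk build a reader/writer lock out of a
 * counter guarded by the arch spinlock -- the counter starts at RW_LOCK_BIAS,
 * each reader decrements it, and a writer takes it all the way to zero.  The
 * sketch below shows the same counter-behind-a-spinlock idea as standalone,
 * user-space C; it is not part of this patch, the demo_* names and the
 * DEMO_RW_BIAS value are made up, and the retry loop releases the guard lock
 * between attempts instead of spinning while holding it, so it illustrates
 * the pattern rather than reproducing the CRIS code exactly.
 *
 *   #include <pthread.h>
 *
 *   #define DEMO_RW_BIAS 0x01000000   // counter value when nobody holds the lock
 *
 *   struct demo_rwlock {
 *           pthread_spinlock_t slock;  // guards 'count'
 *           int count;                 // DEMO_RW_BIAS minus active readers, 0 if write-locked
 *   };
 *
 *   static void demo_rwlock_init(struct demo_rwlock *rw)
 *   {
 *           pthread_spin_init(&rw->slock, PTHREAD_PROCESS_PRIVATE);
 *           rw->count = DEMO_RW_BIAS;
 *   }
 *
 *   static void demo_read_lock(struct demo_rwlock *rw)
 *   {
 *           for (;;) {
 *                   pthread_spin_lock(&rw->slock);
 *                   if (rw->count > 0) {                  // no writer present
 *                           rw->count--;                  // register this reader
 *                           pthread_spin_unlock(&rw->slock);
 *                           return;
 *                   }
 *                   pthread_spin_unlock(&rw->slock);      // writer active, back off and retry
 *           }
 *   }
 *
 *   static void demo_read_unlock(struct demo_rwlock *rw)
 *   {
 *           pthread_spin_lock(&rw->slock);
 *           rw->count++;                                  // give the reader slot back
 *           pthread_spin_unlock(&rw->slock);
 *   }
 */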
static inline void __raw_write_lock(raw_rwlock_t *rw) { - __raw_spin_lock(&rw->slock); + arch_spin_lock(&rw->slock); while (rw->lock != RW_LOCK_BIAS); rw->lock = 0; - __raw_spin_unlock(&rw->slock); + arch_spin_unlock(&rw->slock); } static inline void __raw_read_unlock(raw_rwlock_t *rw) { - __raw_spin_lock(&rw->slock); + arch_spin_lock(&rw->slock); rw->lock++; - __raw_spin_unlock(&rw->slock); + arch_spin_unlock(&rw->slock); } static inline void __raw_write_unlock(raw_rwlock_t *rw) { - __raw_spin_lock(&rw->slock); + arch_spin_lock(&rw->slock); while (rw->lock != RW_LOCK_BIAS); rw->lock = RW_LOCK_BIAS; - __raw_spin_unlock(&rw->slock); + arch_spin_unlock(&rw->slock); } static inline int __raw_read_trylock(raw_rwlock_t *rw) { int ret = 0; - __raw_spin_lock(&rw->slock); + arch_spin_lock(&rw->slock); if (rw->lock != 0) { rw->lock--; ret = 1; } - __raw_spin_unlock(&rw->slock); + arch_spin_unlock(&rw->slock); return ret; } static inline int __raw_write_trylock(raw_rwlock_t *rw) { int ret = 0; - __raw_spin_lock(&rw->slock); + arch_spin_lock(&rw->slock); if (rw->lock == RW_LOCK_BIAS) { rw->lock = 0; ret = 1; } - __raw_spin_unlock(&rw->slock); + arch_spin_unlock(&rw->slock); return 1; } #define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock) #define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* __ASM_ARCH_SPINLOCK_H */ diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h index 57a2787bc9fb..6ebc229a1c51 100644 --- a/arch/ia64/include/asm/bitops.h +++ b/arch/ia64/include/asm/bitops.h @@ -127,7 +127,7 @@ clear_bit_unlock (int nr, volatile void *addr) * @addr: Address to start counting from * * Similarly to clear_bit_unlock, the implementation uses a store - * with release semantics. See also __raw_spin_unlock(). + * with release semantics. See also arch_spin_unlock(). 
*/ static __inline__ void __clear_bit_unlock(int nr, void *addr) diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h index 9fbdf7e61087..b06165f6352f 100644 --- a/arch/ia64/include/asm/spinlock.h +++ b/arch/ia64/include/asm/spinlock.h @@ -17,7 +17,7 @@ #include #include -#define __raw_spin_lock_init(x) ((x)->lock = 0) +#define arch_spin_lock_init(x) ((x)->lock = 0) /* * Ticket locks are conceptually two parts, one indicating the current head of @@ -103,39 +103,39 @@ static inline int __ticket_spin_is_contended(arch_spinlock_t *lock) return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1; } -static inline int __raw_spin_is_locked(arch_spinlock_t *lock) +static inline int arch_spin_is_locked(arch_spinlock_t *lock) { return __ticket_spin_is_locked(lock); } -static inline int __raw_spin_is_contended(arch_spinlock_t *lock) +static inline int arch_spin_is_contended(arch_spinlock_t *lock) { return __ticket_spin_is_contended(lock); } -#define __raw_spin_is_contended __raw_spin_is_contended +#define arch_spin_is_contended arch_spin_is_contended -static __always_inline void __raw_spin_lock(arch_spinlock_t *lock) +static __always_inline void arch_spin_lock(arch_spinlock_t *lock) { __ticket_spin_lock(lock); } -static __always_inline int __raw_spin_trylock(arch_spinlock_t *lock) +static __always_inline int arch_spin_trylock(arch_spinlock_t *lock) { return __ticket_spin_trylock(lock); } -static __always_inline void __raw_spin_unlock(arch_spinlock_t *lock) +static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) { __ticket_spin_unlock(lock); } -static __always_inline void __raw_spin_lock_flags(arch_spinlock_t *lock, +static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { - __raw_spin_lock(lock); + arch_spin_lock(lock); } -static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock) +static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) { __ticket_spin_unlock_wait(lock); } @@ -285,8 +285,8 @@ static inline int __raw_read_trylock(raw_rwlock_t *x) return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word; } -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* _ASM_IA64_SPINLOCK_H */ diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h index 0c0164225bc0..8acac950a43c 100644 --- a/arch/m32r/include/asm/spinlock.h +++ b/arch/m32r/include/asm/spinlock.h @@ -24,19 +24,19 @@ * We make no fairness assumptions. They have a cost. */ -#define __raw_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0) -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) -#define __raw_spin_unlock_wait(x) \ - do { cpu_relax(); } while (__raw_spin_is_locked(x)) +#define arch_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0) +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) +#define arch_spin_unlock_wait(x) \ + do { cpu_relax(); } while (arch_spin_is_locked(x)) /** - * __raw_spin_trylock - Try spin lock and return a result + * arch_spin_trylock - Try spin lock and return a result * @lock: Pointer to the lock variable * - * __raw_spin_trylock() tries to get the lock and returns a result. + * arch_spin_trylock() tries to get the lock and returns a result. * On the m32r, the result value is 1 (= Success) or 0 (= Failure). 
*/ -static inline int __raw_spin_trylock(arch_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { int oldval; unsigned long tmp1, tmp2; @@ -50,7 +50,7 @@ static inline int __raw_spin_trylock(arch_spinlock_t *lock) * } */ __asm__ __volatile__ ( - "# __raw_spin_trylock \n\t" + "# arch_spin_trylock \n\t" "ldi %1, #0; \n\t" "mvfc %2, psw; \n\t" "clrpsw #0x40 -> nop; \n\t" @@ -69,7 +69,7 @@ static inline int __raw_spin_trylock(arch_spinlock_t *lock) return (oldval > 0); } -static inline void __raw_spin_lock(arch_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { unsigned long tmp0, tmp1; @@ -84,7 +84,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock) * } */ __asm__ __volatile__ ( - "# __raw_spin_lock \n\t" + "# arch_spin_lock \n\t" ".fillinsn \n" "1: \n\t" "mvfc %1, psw; \n\t" @@ -111,7 +111,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock) ); } -static inline void __raw_spin_unlock(arch_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { mb(); lock->slock = 1; @@ -319,8 +319,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock) #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* _ASM_M32R_SPINLOCK_H */ diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h index 0f16d0673b4a..95edebaaf22a 100644 --- a/arch/mips/include/asm/spinlock.h +++ b/arch/mips/include/asm/spinlock.h @@ -34,33 +34,33 @@ * becomes equal to the the initial value of the tail. 
*/ -static inline int __raw_spin_is_locked(arch_spinlock_t *lock) +static inline int arch_spin_is_locked(arch_spinlock_t *lock) { unsigned int counters = ACCESS_ONCE(lock->lock); return ((counters >> 14) ^ counters) & 0x1fff; } -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) -#define __raw_spin_unlock_wait(x) \ - while (__raw_spin_is_locked(x)) { cpu_relax(); } +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) +#define arch_spin_unlock_wait(x) \ + while (arch_spin_is_locked(x)) { cpu_relax(); } -static inline int __raw_spin_is_contended(arch_spinlock_t *lock) +static inline int arch_spin_is_contended(arch_spinlock_t *lock) { unsigned int counters = ACCESS_ONCE(lock->lock); return (((counters >> 14) - counters) & 0x1fff) > 1; } -#define __raw_spin_is_contended __raw_spin_is_contended +#define arch_spin_is_contended arch_spin_is_contended -static inline void __raw_spin_lock(arch_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { int my_ticket; int tmp; if (R10000_LLSC_WAR) { __asm__ __volatile__ ( - " .set push # __raw_spin_lock \n" + " .set push # arch_spin_lock \n" " .set noreorder \n" " \n" "1: ll %[ticket], %[ticket_ptr] \n" @@ -94,7 +94,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock) [my_ticket] "=&r" (my_ticket)); } else { __asm__ __volatile__ ( - " .set push # __raw_spin_lock \n" + " .set push # arch_spin_lock \n" " .set noreorder \n" " \n" " ll %[ticket], %[ticket_ptr] \n" @@ -134,7 +134,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock) smp_llsc_mb(); } -static inline void __raw_spin_unlock(arch_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { int tmp; @@ -142,7 +142,7 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock) if (R10000_LLSC_WAR) { __asm__ __volatile__ ( - " # __raw_spin_unlock \n" + " # arch_spin_unlock \n" "1: ll %[ticket], %[ticket_ptr] \n" " addiu %[ticket], %[ticket], 1 \n" " ori %[ticket], %[ticket], 0x2000 \n" @@ -153,7 +153,7 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock) [ticket] "=&r" (tmp)); } else { __asm__ __volatile__ ( - " .set push # __raw_spin_unlock \n" + " .set push # arch_spin_unlock \n" " .set noreorder \n" " \n" " ll %[ticket], %[ticket_ptr] \n" @@ -174,13 +174,13 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock) } } -static inline unsigned int __raw_spin_trylock(arch_spinlock_t *lock) +static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) { int tmp, tmp2, tmp3; if (R10000_LLSC_WAR) { __asm__ __volatile__ ( - " .set push # __raw_spin_trylock \n" + " .set push # arch_spin_trylock \n" " .set noreorder \n" " \n" "1: ll %[ticket], %[ticket_ptr] \n" @@ -204,7 +204,7 @@ static inline unsigned int __raw_spin_trylock(arch_spinlock_t *lock) [now_serving] "=&r" (tmp3)); } else { __asm__ __volatile__ ( - " .set push # __raw_spin_trylock \n" + " .set push # arch_spin_trylock \n" " .set noreorder \n" " \n" " ll %[ticket], %[ticket_ptr] \n" @@ -483,8 +483,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* _ASM_SPINLOCK_H */ diff --git a/arch/parisc/include/asm/atomic.h 
b/arch/parisc/include/asm/atomic.h index 3a4ea778d4b6..716634d1f546 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -34,12 +34,12 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; #define _atomic_spin_lock_irqsave(l,f) do { \ arch_spinlock_t *s = ATOMIC_HASH(l); \ local_irq_save(f); \ - __raw_spin_lock(s); \ + arch_spin_lock(s); \ } while(0) #define _atomic_spin_unlock_irqrestore(l,f) do { \ arch_spinlock_t *s = ATOMIC_HASH(l); \ - __raw_spin_unlock(s); \ + arch_spin_unlock(s); \ local_irq_restore(f); \ } while(0) diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h index 69e8dca26744..235e7e386e2a 100644 --- a/arch/parisc/include/asm/spinlock.h +++ b/arch/parisc/include/asm/spinlock.h @@ -5,17 +5,17 @@ #include #include -static inline int __raw_spin_is_locked(arch_spinlock_t *x) +static inline int arch_spin_is_locked(arch_spinlock_t *x) { volatile unsigned int *a = __ldcw_align(x); return *a == 0; } -#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0) -#define __raw_spin_unlock_wait(x) \ - do { cpu_relax(); } while (__raw_spin_is_locked(x)) +#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0) +#define arch_spin_unlock_wait(x) \ + do { cpu_relax(); } while (arch_spin_is_locked(x)) -static inline void __raw_spin_lock_flags(arch_spinlock_t *x, +static inline void arch_spin_lock_flags(arch_spinlock_t *x, unsigned long flags) { volatile unsigned int *a; @@ -33,7 +33,7 @@ static inline void __raw_spin_lock_flags(arch_spinlock_t *x, mb(); } -static inline void __raw_spin_unlock(arch_spinlock_t *x) +static inline void arch_spin_unlock(arch_spinlock_t *x) { volatile unsigned int *a; mb(); @@ -42,7 +42,7 @@ static inline void __raw_spin_unlock(arch_spinlock_t *x) mb(); } -static inline int __raw_spin_trylock(arch_spinlock_t *x) +static inline int arch_spin_trylock(arch_spinlock_t *x) { volatile unsigned int *a; int ret; @@ -73,9 +73,9 @@ static __inline__ void __raw_read_lock(raw_rwlock_t *rw) { unsigned long flags; local_irq_save(flags); - __raw_spin_lock_flags(&rw->lock, flags); + arch_spin_lock_flags(&rw->lock, flags); rw->counter++; - __raw_spin_unlock(&rw->lock); + arch_spin_unlock(&rw->lock); local_irq_restore(flags); } @@ -85,9 +85,9 @@ static __inline__ void __raw_read_unlock(raw_rwlock_t *rw) { unsigned long flags; local_irq_save(flags); - __raw_spin_lock_flags(&rw->lock, flags); + arch_spin_lock_flags(&rw->lock, flags); rw->counter--; - __raw_spin_unlock(&rw->lock); + arch_spin_unlock(&rw->lock); local_irq_restore(flags); } @@ -98,9 +98,9 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw) unsigned long flags; retry: local_irq_save(flags); - if (__raw_spin_trylock(&rw->lock)) { + if (arch_spin_trylock(&rw->lock)) { rw->counter++; - __raw_spin_unlock(&rw->lock); + arch_spin_unlock(&rw->lock); local_irq_restore(flags); return 1; } @@ -111,7 +111,7 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw) return 0; /* Wait until we have a realistic chance at the lock */ - while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0) + while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0) cpu_relax(); goto retry; @@ -124,10 +124,10 @@ static __inline__ void __raw_write_lock(raw_rwlock_t *rw) unsigned long flags; retry: local_irq_save(flags); - __raw_spin_lock_flags(&rw->lock, flags); + arch_spin_lock_flags(&rw->lock, flags); if (rw->counter != 0) { - __raw_spin_unlock(&rw->lock); + arch_spin_unlock(&rw->lock); local_irq_restore(flags); while (rw->counter != 0) @@ 
-144,7 +144,7 @@ retry: static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) { rw->counter = 0; - __raw_spin_unlock(&rw->lock); + arch_spin_unlock(&rw->lock); } /* Note that we have to ensure interrupts are disabled in case we're @@ -155,13 +155,13 @@ static __inline__ int __raw_write_trylock(raw_rwlock_t *rw) int result = 0; local_irq_save(flags); - if (__raw_spin_trylock(&rw->lock)) { + if (arch_spin_trylock(&rw->lock)) { if (rw->counter == 0) { rw->counter = -1; result = 1; } else { /* Read-locked. Oh well. */ - __raw_spin_unlock(&rw->lock); + arch_spin_unlock(&rw->lock); } } local_irq_restore(flags); @@ -190,8 +190,8 @@ static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw) #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* __ASM_SPINLOCK_H */ diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index c0d44c92ff0e..cdcaf6b97087 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -28,7 +28,7 @@ #include #include -#define __raw_spin_is_locked(x) ((x)->slock != 0) +#define arch_spin_is_locked(x) ((x)->slock != 0) #ifdef CONFIG_PPC64 /* use 0x800000yy when locked, where yy == CPU number */ @@ -54,7 +54,7 @@ * This returns the old value in the lock, so we succeeded * in getting the lock if the return value is 0. */ -static inline unsigned long arch_spin_trylock(arch_spinlock_t *lock) +static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock) { unsigned long tmp, token; @@ -73,10 +73,10 @@ static inline unsigned long arch_spin_trylock(arch_spinlock_t *lock) return tmp; } -static inline int __raw_spin_trylock(arch_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { CLEAR_IO_SYNC; - return arch_spin_trylock(lock) == 0; + return __arch_spin_trylock(lock) == 0; } /* @@ -104,11 +104,11 @@ extern void __rw_yield(raw_rwlock_t *lock); #define SHARED_PROCESSOR 0 #endif -static inline void __raw_spin_lock(arch_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { CLEAR_IO_SYNC; while (1) { - if (likely(arch_spin_trylock(lock) == 0)) + if (likely(__arch_spin_trylock(lock) == 0)) break; do { HMT_low(); @@ -120,13 +120,13 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock) } static inline -void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) +void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { unsigned long flags_dis; CLEAR_IO_SYNC; while (1) { - if (likely(arch_spin_trylock(lock) == 0)) + if (likely(__arch_spin_trylock(lock) == 0)) break; local_save_flags(flags_dis); local_irq_restore(flags); @@ -140,19 +140,19 @@ void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) } } -static inline void __raw_spin_unlock(arch_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { SYNC_IO; - __asm__ __volatile__("# __raw_spin_unlock\n\t" + __asm__ __volatile__("# arch_spin_unlock\n\t" LWSYNC_ON_SMP: : :"memory"); lock->slock = 0; } #ifdef CONFIG_PPC64 -extern void __raw_spin_unlock_wait(arch_spinlock_t *lock); +extern void arch_spin_unlock_wait(arch_spinlock_t *lock); #else -#define __raw_spin_unlock_wait(lock) \ - do { while 
(__raw_spin_is_locked(lock)) cpu_relax(); } while (0) +#define arch_spin_unlock_wait(lock) \ + do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) #endif /* @@ -290,9 +290,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) -#define _raw_spin_relax(lock) __spin_yield(lock) -#define _raw_read_relax(lock) __rw_yield(lock) -#define _raw_write_relax(lock) __rw_yield(lock) +#define arch_spin_relax(lock) __spin_yield(lock) +#define arch_read_relax(lock) __rw_yield(lock) +#define arch_write_relax(lock) __rw_yield(lock) #endif /* __KERNEL__ */ #endif /* __ASM_SPINLOCK_H */ diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 57dfa414cfb8..fd0d29493fd6 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -80,13 +80,13 @@ static unsigned long lock_rtas(void) local_irq_save(flags); preempt_disable(); - __raw_spin_lock_flags(&rtas.lock, flags); + arch_spin_lock_flags(&rtas.lock, flags); return flags; } static void unlock_rtas(unsigned long flags) { - __raw_spin_unlock(&rtas.lock); + arch_spin_unlock(&rtas.lock); local_irq_restore(flags); preempt_enable(); } @@ -987,10 +987,10 @@ void __cpuinit rtas_give_timebase(void) local_irq_save(flags); hard_irq_disable(); - __raw_spin_lock(&timebase_lock); + arch_spin_lock(&timebase_lock); rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL); timebase = get_tb(); - __raw_spin_unlock(&timebase_lock); + arch_spin_unlock(&timebase_lock); while (timebase) barrier(); @@ -1002,8 +1002,8 @@ void __cpuinit rtas_take_timebase(void) { while (!timebase) barrier(); - __raw_spin_lock(&timebase_lock); + arch_spin_lock(&timebase_lock); set_tb(timebase >> 32, timebase & 0xffffffff); timebase = 0; - __raw_spin_unlock(&timebase_lock); + arch_spin_unlock(&timebase_lock); } diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c index b06294cde499..ee395e392115 100644 --- a/arch/powerpc/lib/locks.c +++ b/arch/powerpc/lib/locks.c @@ -82,7 +82,7 @@ void __rw_yield(raw_rwlock_t *rw) } #endif -void __raw_spin_unlock_wait(arch_spinlock_t *lock) +void arch_spin_unlock_wait(arch_spinlock_t *lock) { while (lock->slock) { HMT_low(); @@ -92,4 +92,4 @@ void __raw_spin_unlock_wait(arch_spinlock_t *lock) HMT_medium(); } -EXPORT_SYMBOL(__raw_spin_unlock_wait); +EXPORT_SYMBOL(arch_spin_unlock_wait); diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c index be36fece41d7..242f8095c2df 100644 --- a/arch/powerpc/platforms/pasemi/setup.c +++ b/arch/powerpc/platforms/pasemi/setup.c @@ -80,11 +80,11 @@ static void __devinit pas_give_timebase(void) local_irq_save(flags); hard_irq_disable(); - __raw_spin_lock(&timebase_lock); + arch_spin_lock(&timebase_lock); mtspr(SPRN_TBCTL, TBCTL_FREEZE); isync(); timebase = get_tb(); - __raw_spin_unlock(&timebase_lock); + arch_spin_unlock(&timebase_lock); while (timebase) barrier(); @@ -97,10 +97,10 @@ static void __devinit pas_take_timebase(void) while (!timebase) smp_rmb(); - __raw_spin_lock(&timebase_lock); + arch_spin_lock(&timebase_lock); set_tb(timebase >> 32, timebase & 0xffffffff); timebase = 0; - __raw_spin_unlock(&timebase_lock); + arch_spin_unlock(&timebase_lock); } struct smp_ops_t pas_smp_ops = { diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index 6121fa4b83d9..a94c146657a9 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h @@ -52,27 +52,27 @@ 
_raw_compare_and_swap(volatile unsigned int *lock, * (the type definitions are in asm/spinlock_types.h) */ -#define __raw_spin_is_locked(x) ((x)->owner_cpu != 0) -#define __raw_spin_unlock_wait(lock) \ - do { while (__raw_spin_is_locked(lock)) \ - _raw_spin_relax(lock); } while (0) +#define arch_spin_is_locked(x) ((x)->owner_cpu != 0) +#define arch_spin_unlock_wait(lock) \ + do { while (arch_spin_is_locked(lock)) \ + arch_spin_relax(lock); } while (0) -extern void _raw_spin_lock_wait(arch_spinlock_t *); -extern void _raw_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags); -extern int _raw_spin_trylock_retry(arch_spinlock_t *); -extern void _raw_spin_relax(arch_spinlock_t *lock); +extern void arch_spin_lock_wait(arch_spinlock_t *); +extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags); +extern int arch_spin_trylock_retry(arch_spinlock_t *); +extern void arch_spin_relax(arch_spinlock_t *lock); -static inline void __raw_spin_lock(arch_spinlock_t *lp) +static inline void arch_spin_lock(arch_spinlock_t *lp) { int old; old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); if (likely(old == 0)) return; - _raw_spin_lock_wait(lp); + arch_spin_lock_wait(lp); } -static inline void __raw_spin_lock_flags(arch_spinlock_t *lp, +static inline void arch_spin_lock_flags(arch_spinlock_t *lp, unsigned long flags) { int old; @@ -80,20 +80,20 @@ static inline void __raw_spin_lock_flags(arch_spinlock_t *lp, old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); if (likely(old == 0)) return; - _raw_spin_lock_wait_flags(lp, flags); + arch_spin_lock_wait_flags(lp, flags); } -static inline int __raw_spin_trylock(arch_spinlock_t *lp) +static inline int arch_spin_trylock(arch_spinlock_t *lp) { int old; old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); if (likely(old == 0)) return 1; - return _raw_spin_trylock_retry(lp); + return arch_spin_trylock_retry(lp); } -static inline void __raw_spin_unlock(arch_spinlock_t *lp) +static inline void arch_spin_unlock(arch_spinlock_t *lp) { _raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0); } @@ -188,7 +188,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) return _raw_write_trylock_retry(rw); } -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* __ASM_SPINLOCK_H */ diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c index d4cbf71a6077..f4596452f072 100644 --- a/arch/s390/lib/spinlock.c +++ b/arch/s390/lib/spinlock.c @@ -39,7 +39,7 @@ static inline void _raw_yield_cpu(int cpu) _raw_yield(); } -void _raw_spin_lock_wait(arch_spinlock_t *lp) +void arch_spin_lock_wait(arch_spinlock_t *lp) { int count = spin_retry; unsigned int cpu = ~smp_processor_id(); @@ -51,15 +51,15 @@ void _raw_spin_lock_wait(arch_spinlock_t *lp) _raw_yield_cpu(~owner); count = spin_retry; } - if (__raw_spin_is_locked(lp)) + if (arch_spin_is_locked(lp)) continue; if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) return; } } -EXPORT_SYMBOL(_raw_spin_lock_wait); +EXPORT_SYMBOL(arch_spin_lock_wait); -void _raw_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) +void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) { int count = spin_retry; unsigned int cpu = ~smp_processor_id(); @@ -72,7 +72,7 @@ void _raw_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) _raw_yield_cpu(~owner); count = spin_retry; } - if 
(__raw_spin_is_locked(lp)) + if (arch_spin_is_locked(lp)) continue; local_irq_disable(); if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) @@ -80,30 +80,30 @@ void _raw_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) local_irq_restore(flags); } } -EXPORT_SYMBOL(_raw_spin_lock_wait_flags); +EXPORT_SYMBOL(arch_spin_lock_wait_flags); -int _raw_spin_trylock_retry(arch_spinlock_t *lp) +int arch_spin_trylock_retry(arch_spinlock_t *lp) { unsigned int cpu = ~smp_processor_id(); int count; for (count = spin_retry; count > 0; count--) { - if (__raw_spin_is_locked(lp)) + if (arch_spin_is_locked(lp)) continue; if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) return 1; } return 0; } -EXPORT_SYMBOL(_raw_spin_trylock_retry); +EXPORT_SYMBOL(arch_spin_trylock_retry); -void _raw_spin_relax(arch_spinlock_t *lock) +void arch_spin_relax(arch_spinlock_t *lock) { unsigned int cpu = lock->owner_cpu; if (cpu != 0) _raw_yield_cpu(~cpu); } -EXPORT_SYMBOL(_raw_spin_relax); +EXPORT_SYMBOL(arch_spin_relax); void _raw_read_lock_wait(raw_rwlock_t *rw) { diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h index 5a05b3fcefbe..da1c6491ed4b 100644 --- a/arch/sh/include/asm/spinlock.h +++ b/arch/sh/include/asm/spinlock.h @@ -23,10 +23,10 @@ * Your basic SMP spinlocks, allowing only a single CPU anywhere */ -#define __raw_spin_is_locked(x) ((x)->lock <= 0) -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) -#define __raw_spin_unlock_wait(x) \ - do { while (__raw_spin_is_locked(x)) cpu_relax(); } while (0) +#define arch_spin_is_locked(x) ((x)->lock <= 0) +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) +#define arch_spin_unlock_wait(x) \ + do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0) /* * Simple spin lock operations. There are two variants, one clears IRQ's @@ -34,14 +34,14 @@ * * We make no fairness assumptions. They have a cost. */ -static inline void __raw_spin_lock(arch_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { unsigned long tmp; unsigned long oldval; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%2, %0 ! __raw_spin_lock \n\t" + "movli.l @%2, %0 ! arch_spin_lock \n\t" "mov %0, %1 \n\t" "mov #0, %0 \n\t" "movco.l %0, @%2 \n\t" @@ -54,12 +54,12 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock) ); } -static inline void __raw_spin_unlock(arch_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { unsigned long tmp; __asm__ __volatile__ ( - "mov #1, %0 ! __raw_spin_unlock \n\t" + "mov #1, %0 ! arch_spin_unlock \n\t" "mov.l %0, @%1 \n\t" : "=&z" (tmp) : "r" (&lock->lock) @@ -67,13 +67,13 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock) ); } -static inline int __raw_spin_trylock(arch_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { unsigned long tmp, oldval; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%2, %0 ! __raw_spin_trylock \n\t" + "movli.l @%2, %0 ! 
arch_spin_trylock \n\t" "mov %0, %1 \n\t" "mov #0, %0 \n\t" "movco.l %0, @%2 \n\t" @@ -219,8 +219,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* __ASM_SH_SPINLOCK_H */ diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h index b2d8a67f727e..9b0f2f53c81c 100644 --- a/arch/sparc/include/asm/spinlock_32.h +++ b/arch/sparc/include/asm/spinlock_32.h @@ -10,12 +10,12 @@ #include -#define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0) +#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0) -#define __raw_spin_unlock_wait(lock) \ - do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) +#define arch_spin_unlock_wait(lock) \ + do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) -static inline void __raw_spin_lock(arch_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { __asm__ __volatile__( "\n1:\n\t" @@ -35,7 +35,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock) : "g2", "memory", "cc"); } -static inline int __raw_spin_trylock(arch_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { unsigned int result; __asm__ __volatile__("ldstub [%1], %0" @@ -45,7 +45,7 @@ static inline int __raw_spin_trylock(arch_spinlock_t *lock) return (result == 0); } -static inline void __raw_spin_unlock(arch_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory"); } @@ -176,13 +176,13 @@ static inline int arch_read_trylock(raw_rwlock_t *rw) #define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0) -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) #define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw) #define __raw_write_lock_flags(rw, flags) __raw_write_lock(rw) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #define __raw_read_can_lock(rw) (!((rw)->lock & 0xff)) #define __raw_write_can_lock(rw) (!(rw)->lock) diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h index 38e16c40efc4..7cf58a2fcda4 100644 --- a/arch/sparc/include/asm/spinlock_64.h +++ b/arch/sparc/include/asm/spinlock_64.h @@ -21,13 +21,13 @@ * the spinner sections must be pre-V9 branches. 
*/ -#define __raw_spin_is_locked(lp) ((lp)->lock != 0) +#define arch_spin_is_locked(lp) ((lp)->lock != 0) -#define __raw_spin_unlock_wait(lp) \ +#define arch_spin_unlock_wait(lp) \ do { rmb(); \ } while((lp)->lock) -static inline void __raw_spin_lock(arch_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { unsigned long tmp; @@ -46,7 +46,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock) : "memory"); } -static inline int __raw_spin_trylock(arch_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { unsigned long result; @@ -59,7 +59,7 @@ static inline int __raw_spin_trylock(arch_spinlock_t *lock) return (result == 0UL); } -static inline void __raw_spin_unlock(arch_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { __asm__ __volatile__( " stb %%g0, [%0]" @@ -68,7 +68,7 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock) : "memory"); } -static inline void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) +static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { unsigned long tmp1, tmp2; @@ -222,9 +222,9 @@ static int inline arch_write_trylock(raw_rwlock_t *lock) #define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) #define __raw_write_can_lock(rw) (!(rw)->lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* !(__ASSEMBLY__) */ diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 5655f75f10b7..dd59a85a918f 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -731,34 +731,34 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS) -static inline int __raw_spin_is_locked(struct arch_spinlock *lock) +static inline int arch_spin_is_locked(struct arch_spinlock *lock) { return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock); } -static inline int __raw_spin_is_contended(struct arch_spinlock *lock) +static inline int arch_spin_is_contended(struct arch_spinlock *lock) { return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock); } -#define __raw_spin_is_contended __raw_spin_is_contended +#define arch_spin_is_contended arch_spin_is_contended -static __always_inline void __raw_spin_lock(struct arch_spinlock *lock) +static __always_inline void arch_spin_lock(struct arch_spinlock *lock) { PVOP_VCALL1(pv_lock_ops.spin_lock, lock); } -static __always_inline void __raw_spin_lock_flags(struct arch_spinlock *lock, +static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock, unsigned long flags) { PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags); } -static __always_inline int __raw_spin_trylock(struct arch_spinlock *lock) +static __always_inline int arch_spin_trylock(struct arch_spinlock *lock) { return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock); } -static __always_inline void __raw_spin_unlock(struct arch_spinlock *lock) +static __always_inline void arch_spin_unlock(struct arch_spinlock *lock) { PVOP_VCALL1(pv_lock_ops.spin_unlock, lock); } diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index 204b524fcf57..ab9055fd57d9 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h @@ -174,43 +174,43 @@ static 
inline int __ticket_spin_is_contended(arch_spinlock_t *lock) #ifndef CONFIG_PARAVIRT_SPINLOCKS -static inline int __raw_spin_is_locked(arch_spinlock_t *lock) +static inline int arch_spin_is_locked(arch_spinlock_t *lock) { return __ticket_spin_is_locked(lock); } -static inline int __raw_spin_is_contended(arch_spinlock_t *lock) +static inline int arch_spin_is_contended(arch_spinlock_t *lock) { return __ticket_spin_is_contended(lock); } -#define __raw_spin_is_contended __raw_spin_is_contended +#define arch_spin_is_contended arch_spin_is_contended -static __always_inline void __raw_spin_lock(arch_spinlock_t *lock) +static __always_inline void arch_spin_lock(arch_spinlock_t *lock) { __ticket_spin_lock(lock); } -static __always_inline int __raw_spin_trylock(arch_spinlock_t *lock) +static __always_inline int arch_spin_trylock(arch_spinlock_t *lock) { return __ticket_spin_trylock(lock); } -static __always_inline void __raw_spin_unlock(arch_spinlock_t *lock) +static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) { __ticket_spin_unlock(lock); } -static __always_inline void __raw_spin_lock_flags(arch_spinlock_t *lock, +static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { - __raw_spin_lock(lock); + arch_spin_lock(lock); } #endif /* CONFIG_PARAVIRT_SPINLOCKS */ -static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock) +static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) { - while (__raw_spin_is_locked(lock)) + while (arch_spin_is_locked(lock)) cpu_relax(); } @@ -298,9 +298,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() /* The {read|write|spin}_lock() on x86 are full memory barriers. */ static inline void smp_mb__after_lock(void) { } diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 5b75afac8a38..0a0aa1cec8f1 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -207,11 +207,11 @@ unsigned __kprobes long oops_begin(void) /* racy, but better than risking deadlock. */ raw_local_irq_save(flags); cpu = smp_processor_id(); - if (!__raw_spin_trylock(&die_lock)) { + if (!arch_spin_trylock(&die_lock)) { if (cpu == die_owner) /* nested oops. should stop eventually */; else - __raw_spin_lock(&die_lock); + arch_spin_lock(&die_lock); } die_nest_count++; die_owner = cpu; @@ -231,7 +231,7 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr) die_nest_count--; if (!die_nest_count) /* Nest count reaches zero, release the lock. 
*/ - __raw_spin_unlock(&die_lock); + arch_spin_unlock(&die_lock); raw_local_irq_restore(flags); oops_exit(); diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c index a0f39e090684..676b8c77a976 100644 --- a/arch/x86/kernel/paravirt-spinlocks.c +++ b/arch/x86/kernel/paravirt-spinlocks.c @@ -10,7 +10,7 @@ static inline void default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { - __raw_spin_lock(lock); + arch_spin_lock(lock); } struct pv_lock_ops pv_lock_ops = { diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index f1714697a09a..0aa5fed8b9e6 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c @@ -62,13 +62,13 @@ static __cpuinit void check_tsc_warp(void) * previous TSC that was measured (possibly on * another CPU) and update the previous TSC timestamp. */ - __raw_spin_lock(&sync_lock); + arch_spin_lock(&sync_lock); prev = last_tsc; rdtsc_barrier(); now = get_cycles(); rdtsc_barrier(); last_tsc = now; - __raw_spin_unlock(&sync_lock); + arch_spin_unlock(&sync_lock); /* * Be nice every now and then (and also check whether @@ -87,10 +87,10 @@ static __cpuinit void check_tsc_warp(void) * we saw a time-warp of the TSC going backwards: */ if (unlikely(prev > now)) { - __raw_spin_lock(&sync_lock); + arch_spin_lock(&sync_lock); max_warp = max(max_warp, prev - now); nr_warps++; - __raw_spin_unlock(&sync_lock); + arch_spin_unlock(&sync_lock); } } WARN(!(now-start), diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h index dcf0afad4a7f..ecc44a8e2b44 100644 --- a/include/asm-generic/bitops/atomic.h +++ b/include/asm-generic/bitops/atomic.h @@ -22,12 +22,12 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; #define _atomic_spin_lock_irqsave(l,f) do { \ arch_spinlock_t *s = ATOMIC_HASH(l); \ local_irq_save(f); \ - __raw_spin_lock(s); \ + arch_spin_lock(s); \ } while(0) #define _atomic_spin_unlock_irqrestore(l,f) do { \ arch_spinlock_t *s = ATOMIC_HASH(l); \ - __raw_spin_unlock(s); \ + arch_spin_unlock(s); \ local_irq_restore(f); \ } while(0) diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 5ef7a4c060b5..de3a022489c6 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -14,7 +14,7 @@ * linux/spinlock_types.h: * defines the generic type and initializers * - * asm/spinlock.h: contains the __raw_spin_*()/etc. lowlevel + * asm/spinlock.h: contains the arch_spin_*()/etc. lowlevel * implementations, mostly inline assembly code * * (also included on UP-debug builds:) @@ -34,7 +34,7 @@ * defines the generic type and initializers * * linux/spinlock_up.h: - * contains the __raw_spin_*()/etc. version of UP + * contains the arch_spin_*()/etc. version of UP * builds. (which are NOPs on non-debug, non-preempt * builds) * @@ -103,17 +103,17 @@ do { \ do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0) #endif -#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) +#define spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock) #ifdef CONFIG_GENERIC_LOCKBREAK #define spin_is_contended(lock) ((lock)->break_lock) #else -#ifdef __raw_spin_is_contended -#define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock) +#ifdef arch_spin_is_contended +#define spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock) #else #define spin_is_contended(lock) (((void)(lock), 0)) -#endif /*__raw_spin_is_contended*/ +#endif /*arch_spin_is_contended*/ #endif /* The lock does not imply full memory barrier. 
*/ @@ -125,7 +125,7 @@ static inline void smp_mb__after_lock(void) { smp_mb(); } * spin_unlock_wait - wait until the spinlock gets unlocked * @lock: the spinlock in question. */ -#define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock) +#define spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock) #ifdef CONFIG_DEBUG_SPINLOCK extern void _raw_spin_lock(spinlock_t *lock); @@ -133,11 +133,11 @@ static inline void smp_mb__after_lock(void) { smp_mb(); } extern int _raw_spin_trylock(spinlock_t *lock); extern void _raw_spin_unlock(spinlock_t *lock); #else -# define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock) +# define _raw_spin_lock(lock) arch_spin_lock(&(lock)->raw_lock) # define _raw_spin_lock_flags(lock, flags) \ - __raw_spin_lock_flags(&(lock)->raw_lock, *(flags)) -# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock) -# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) + arch_spin_lock_flags(&(lock)->raw_lock, *(flags)) +# define _raw_spin_trylock(lock) arch_spin_trylock(&(lock)->raw_lock) +# define _raw_spin_unlock(lock) arch_spin_unlock(&(lock)->raw_lock) #endif /* diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h index 8ee2ac1bf636..1d3bcc3cf7c6 100644 --- a/include/linux/spinlock_up.h +++ b/include/linux/spinlock_up.h @@ -18,21 +18,21 @@ */ #ifdef CONFIG_DEBUG_SPINLOCK -#define __raw_spin_is_locked(x) ((x)->slock == 0) +#define arch_spin_is_locked(x) ((x)->slock == 0) -static inline void __raw_spin_lock(arch_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { lock->slock = 0; } static inline void -__raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) +arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { local_irq_save(flags); lock->slock = 0; } -static inline int __raw_spin_trylock(arch_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { char oldval = lock->slock; @@ -41,7 +41,7 @@ static inline int __raw_spin_trylock(arch_spinlock_t *lock) return oldval > 0; } -static inline void __raw_spin_unlock(arch_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { lock->slock = 1; } @@ -57,20 +57,20 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock) #define __raw_write_unlock(lock) do { (void)(lock); } while (0) #else /* DEBUG_SPINLOCK */ -#define __raw_spin_is_locked(lock) ((void)(lock), 0) +#define arch_spin_is_locked(lock) ((void)(lock), 0) /* for sched.c and kernel_lock.c: */ -# define __raw_spin_lock(lock) do { (void)(lock); } while (0) -# define __raw_spin_lock_flags(lock, flags) do { (void)(lock); } while (0) -# define __raw_spin_unlock(lock) do { (void)(lock); } while (0) -# define __raw_spin_trylock(lock) ({ (void)(lock); 1; }) +# define arch_spin_lock(lock) do { (void)(lock); } while (0) +# define arch_spin_lock_flags(lock, flags) do { (void)(lock); } while (0) +# define arch_spin_unlock(lock) do { (void)(lock); } while (0) +# define arch_spin_trylock(lock) ({ (void)(lock); 1; }) #endif /* DEBUG_SPINLOCK */ -#define __raw_spin_is_contended(lock) (((void)(lock), 0)) +#define arch_spin_is_contended(lock) (((void)(lock), 0)) #define __raw_read_can_lock(lock) (((void)(lock), 1)) #define __raw_write_can_lock(lock) (((void)(lock), 1)) -#define __raw_spin_unlock_wait(lock) \ - do { cpu_relax(); } while (__raw_spin_is_locked(lock)) +#define arch_spin_unlock_wait(lock) \ + do { cpu_relax(); } while (arch_spin_is_locked(lock)) #endif /* __LINUX_SPINLOCK_UP_H */ diff --git 
a/kernel/lockdep.c b/kernel/lockdep.c index 2389e3f85cf6..5feaddcdbe49 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -77,7 +77,7 @@ static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED static int graph_lock(void) { - __raw_spin_lock(&lockdep_lock); + arch_spin_lock(&lockdep_lock); /* * Make sure that if another CPU detected a bug while * walking the graph we dont change it (while the other @@ -85,7 +85,7 @@ static int graph_lock(void) * dropped already) */ if (!debug_locks) { - __raw_spin_unlock(&lockdep_lock); + arch_spin_unlock(&lockdep_lock); return 0; } /* prevent any recursions within lockdep from causing deadlocks */ @@ -95,11 +95,11 @@ static int graph_lock(void) static inline int graph_unlock(void) { - if (debug_locks && !__raw_spin_is_locked(&lockdep_lock)) + if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) return DEBUG_LOCKS_WARN_ON(1); current->lockdep_recursion--; - __raw_spin_unlock(&lockdep_lock); + arch_spin_unlock(&lockdep_lock); return 0; } @@ -111,7 +111,7 @@ static inline int debug_locks_off_graph_unlock(void) { int ret = debug_locks_off(); - __raw_spin_unlock(&lockdep_lock); + arch_spin_unlock(&lockdep_lock); return ret; } @@ -1170,9 +1170,9 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class) this.class = class; local_irq_save(flags); - __raw_spin_lock(&lockdep_lock); + arch_spin_lock(&lockdep_lock); ret = __lockdep_count_forward_deps(&this); - __raw_spin_unlock(&lockdep_lock); + arch_spin_unlock(&lockdep_lock); local_irq_restore(flags); return ret; @@ -1197,9 +1197,9 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class) this.class = class; local_irq_save(flags); - __raw_spin_lock(&lockdep_lock); + arch_spin_lock(&lockdep_lock); ret = __lockdep_count_backward_deps(&this); - __raw_spin_unlock(&lockdep_lock); + arch_spin_unlock(&lockdep_lock); local_irq_restore(flags); return ret; diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h index 6b2d735846a5..7bebbd15b342 100644 --- a/kernel/mutex-debug.h +++ b/kernel/mutex-debug.h @@ -43,13 +43,13 @@ static inline void mutex_clear_owner(struct mutex *lock) \ DEBUG_LOCKS_WARN_ON(in_interrupt()); \ local_irq_save(flags); \ - __raw_spin_lock(&(lock)->raw_lock); \ + arch_spin_lock(&(lock)->raw_lock); \ DEBUG_LOCKS_WARN_ON(l->magic != l); \ } while (0) #define spin_unlock_mutex(lock, flags) \ do { \ - __raw_spin_unlock(&(lock)->raw_lock); \ + arch_spin_unlock(&(lock)->raw_lock); \ local_irq_restore(flags); \ preempt_check_resched(); \ } while (0) diff --git a/kernel/spinlock.c b/kernel/spinlock.c index e6e136318437..fbb5f8b78357 100644 --- a/kernel/spinlock.c +++ b/kernel/spinlock.c @@ -53,7 +53,7 @@ void __lockfunc __##op##_lock(locktype##_t *lock) \ if (!(lock)->break_lock) \ (lock)->break_lock = 1; \ while (!op##_can_lock(lock) && (lock)->break_lock) \ - _raw_##op##_relax(&lock->raw_lock); \ + arch_##op##_relax(&lock->raw_lock); \ } \ (lock)->break_lock = 0; \ } \ @@ -73,7 +73,7 @@ unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock) \ if (!(lock)->break_lock) \ (lock)->break_lock = 1; \ while (!op##_can_lock(lock) && (lock)->break_lock) \ - _raw_##op##_relax(&lock->raw_lock); \ + arch_##op##_relax(&lock->raw_lock); \ } \ (lock)->break_lock = 0; \ return flags; \ diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index fb7a0fa508b9..f58c9ad15830 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -2834,7 +2834,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) int ret; 
local_irq_save(flags); - __raw_spin_lock(&cpu_buffer->lock); + arch_spin_lock(&cpu_buffer->lock); again: /* @@ -2923,7 +2923,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) goto again; out: - __raw_spin_unlock(&cpu_buffer->lock); + arch_spin_unlock(&cpu_buffer->lock); local_irq_restore(flags); return reader; @@ -3286,9 +3286,9 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu) synchronize_sched(); spin_lock_irqsave(&cpu_buffer->reader_lock, flags); - __raw_spin_lock(&cpu_buffer->lock); + arch_spin_lock(&cpu_buffer->lock); rb_iter_reset(iter); - __raw_spin_unlock(&cpu_buffer->lock); + arch_spin_unlock(&cpu_buffer->lock); spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); return iter; @@ -3408,11 +3408,11 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) goto out; - __raw_spin_lock(&cpu_buffer->lock); + arch_spin_lock(&cpu_buffer->lock); rb_reset_cpu(cpu_buffer); - __raw_spin_unlock(&cpu_buffer->lock); + arch_spin_unlock(&cpu_buffer->lock); out: spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 63bc1cc38219..bb6b5e7fa2a2 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -555,13 +555,13 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) return; WARN_ON_ONCE(!irqs_disabled()); - __raw_spin_lock(&ftrace_max_lock); + arch_spin_lock(&ftrace_max_lock); tr->buffer = max_tr.buffer; max_tr.buffer = buf; __update_max_tr(tr, tsk, cpu); - __raw_spin_unlock(&ftrace_max_lock); + arch_spin_unlock(&ftrace_max_lock); } /** @@ -581,7 +581,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) return; WARN_ON_ONCE(!irqs_disabled()); - __raw_spin_lock(&ftrace_max_lock); + arch_spin_lock(&ftrace_max_lock); ftrace_disable_cpu(); @@ -603,7 +603,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); __update_max_tr(tr, tsk, cpu); - __raw_spin_unlock(&ftrace_max_lock); + arch_spin_unlock(&ftrace_max_lock); } #endif /* CONFIG_TRACER_MAX_TRACE */ @@ -915,7 +915,7 @@ static void trace_save_cmdline(struct task_struct *tsk) * nor do we want to disable interrupts, * so if we miss here, then better luck next time. 
*/ - if (!__raw_spin_trylock(&trace_cmdline_lock)) + if (!arch_spin_trylock(&trace_cmdline_lock)) return; idx = map_pid_to_cmdline[tsk->pid]; @@ -940,7 +940,7 @@ static void trace_save_cmdline(struct task_struct *tsk) memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN); - __raw_spin_unlock(&trace_cmdline_lock); + arch_spin_unlock(&trace_cmdline_lock); } void trace_find_cmdline(int pid, char comm[]) @@ -958,14 +958,14 @@ void trace_find_cmdline(int pid, char comm[]) } preempt_disable(); - __raw_spin_lock(&trace_cmdline_lock); + arch_spin_lock(&trace_cmdline_lock); map = map_pid_to_cmdline[pid]; if (map != NO_CMDLINE_MAP) strcpy(comm, saved_cmdlines[map]); else strcpy(comm, "<...>"); - __raw_spin_unlock(&trace_cmdline_lock); + arch_spin_unlock(&trace_cmdline_lock); preempt_enable(); } @@ -1283,7 +1283,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) /* Lockdep uses trace_printk for lock tracing */ local_irq_save(flags); - __raw_spin_lock(&trace_buf_lock); + arch_spin_lock(&trace_buf_lock); len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args); if (len > TRACE_BUF_SIZE || len < 0) @@ -1304,7 +1304,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) ring_buffer_unlock_commit(buffer, event); out_unlock: - __raw_spin_unlock(&trace_buf_lock); + arch_spin_unlock(&trace_buf_lock); local_irq_restore(flags); out: @@ -1360,7 +1360,7 @@ int trace_array_vprintk(struct trace_array *tr, pause_graph_tracing(); raw_local_irq_save(irq_flags); - __raw_spin_lock(&trace_buf_lock); + arch_spin_lock(&trace_buf_lock); len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); size = sizeof(*entry) + len + 1; @@ -1378,7 +1378,7 @@ int trace_array_vprintk(struct trace_array *tr, ring_buffer_unlock_commit(buffer, event); out_unlock: - __raw_spin_unlock(&trace_buf_lock); + arch_spin_unlock(&trace_buf_lock); raw_local_irq_restore(irq_flags); unpause_graph_tracing(); out: @@ -2279,7 +2279,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, mutex_lock(&tracing_cpumask_update_lock); local_irq_disable(); - __raw_spin_lock(&ftrace_max_lock); + arch_spin_lock(&ftrace_max_lock); for_each_tracing_cpu(cpu) { /* * Increase/decrease the disabled counter if we are @@ -2294,7 +2294,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, atomic_dec(&global_trace.data[cpu]->disabled); } } - __raw_spin_unlock(&ftrace_max_lock); + arch_spin_unlock(&ftrace_max_lock); local_irq_enable(); cpumask_copy(tracing_cpumask, tracing_cpumask_new); @@ -4318,7 +4318,7 @@ static void __ftrace_dump(bool disable_tracing) /* only one dump */ local_irq_save(flags); - __raw_spin_lock(&ftrace_dump_lock); + arch_spin_lock(&ftrace_dump_lock); if (dump_ran) goto out; @@ -4393,7 +4393,7 @@ static void __ftrace_dump(bool disable_tracing) } out: - __raw_spin_unlock(&ftrace_dump_lock); + arch_spin_unlock(&ftrace_dump_lock); local_irq_restore(flags); } diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c index 433e2eda2d01..84a3a7ba072a 100644 --- a/kernel/trace/trace_clock.c +++ b/kernel/trace/trace_clock.c @@ -94,7 +94,7 @@ u64 notrace trace_clock_global(void) if (unlikely(in_nmi())) goto out; - __raw_spin_lock(&trace_clock_struct.lock); + arch_spin_lock(&trace_clock_struct.lock); /* * TODO: if this happens often then maybe we should reset @@ -106,7 +106,7 @@ u64 notrace trace_clock_global(void) trace_clock_struct.prev_time = now; - __raw_spin_unlock(&trace_clock_struct.lock); + arch_spin_unlock(&trace_clock_struct.lock); out: raw_local_irq_restore(flags); diff --git 
a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index e347853564e9..0271742abb8d 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -143,7 +143,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev, goto out; local_irq_save(flags); - __raw_spin_lock(&wakeup_lock); + arch_spin_lock(&wakeup_lock); /* We could race with grabbing wakeup_lock */ if (unlikely(!tracer_enabled || next != wakeup_task)) @@ -169,7 +169,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev, out_unlock: __wakeup_reset(wakeup_trace); - __raw_spin_unlock(&wakeup_lock); + arch_spin_unlock(&wakeup_lock); local_irq_restore(flags); out: atomic_dec(&wakeup_trace->data[cpu]->disabled); @@ -193,9 +193,9 @@ static void wakeup_reset(struct trace_array *tr) tracing_reset_online_cpus(tr); local_irq_save(flags); - __raw_spin_lock(&wakeup_lock); + arch_spin_lock(&wakeup_lock); __wakeup_reset(tr); - __raw_spin_unlock(&wakeup_lock); + arch_spin_unlock(&wakeup_lock); local_irq_restore(flags); } @@ -225,7 +225,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success) goto out; /* interrupts should be off from try_to_wake_up */ - __raw_spin_lock(&wakeup_lock); + arch_spin_lock(&wakeup_lock); /* check for races. */ if (!tracer_enabled || p->prio >= wakeup_prio) @@ -255,7 +255,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success) trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc); out_locked: - __raw_spin_unlock(&wakeup_lock); + arch_spin_unlock(&wakeup_lock); out: atomic_dec(&wakeup_trace->data[cpu]->disabled); } diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index dc98309e839a..280fea470d67 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c @@ -67,7 +67,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count) /* Don't allow flipping of max traces now */ local_irq_save(flags); - __raw_spin_lock(&ftrace_max_lock); + arch_spin_lock(&ftrace_max_lock); cnt = ring_buffer_entries(tr->buffer); @@ -85,7 +85,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count) break; } tracing_on(); - __raw_spin_unlock(&ftrace_max_lock); + arch_spin_unlock(&ftrace_max_lock); local_irq_restore(flags); if (count) diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 728c35221483..678a5120ee30 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c @@ -54,7 +54,7 @@ static inline void check_stack(void) return; local_irq_save(flags); - __raw_spin_lock(&max_stack_lock); + arch_spin_lock(&max_stack_lock); /* a race could have already updated it */ if (this_size <= max_stack_size) @@ -103,7 +103,7 @@ static inline void check_stack(void) } out: - __raw_spin_unlock(&max_stack_lock); + arch_spin_unlock(&max_stack_lock); local_irq_restore(flags); } @@ -171,9 +171,9 @@ stack_max_size_write(struct file *filp, const char __user *ubuf, return ret; local_irq_save(flags); - __raw_spin_lock(&max_stack_lock); + arch_spin_lock(&max_stack_lock); *ptr = val; - __raw_spin_unlock(&max_stack_lock); + arch_spin_unlock(&max_stack_lock); local_irq_restore(flags); return count; @@ -207,7 +207,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos) static void *t_start(struct seq_file *m, loff_t *pos) { local_irq_disable(); - __raw_spin_lock(&max_stack_lock); + arch_spin_lock(&max_stack_lock); if (*pos == 0) return SEQ_START_TOKEN; @@ -217,7 +217,7 @@ static void *t_start(struct seq_file *m, loff_t *pos) static void 
t_stop(struct seq_file *m, void *p) { - __raw_spin_unlock(&max_stack_lock); + arch_spin_unlock(&max_stack_lock); local_irq_enable(); } diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c index f73004137141..1304fe094546 100644 --- a/lib/spinlock_debug.c +++ b/lib/spinlock_debug.c @@ -106,7 +106,7 @@ static void __spin_lock_debug(spinlock_t *lock) for (;;) { for (i = 0; i < loops; i++) { - if (__raw_spin_trylock(&lock->raw_lock)) + if (arch_spin_trylock(&lock->raw_lock)) return; __delay(1); } @@ -128,14 +128,14 @@ static void __spin_lock_debug(spinlock_t *lock) void _raw_spin_lock(spinlock_t *lock) { debug_spin_lock_before(lock); - if (unlikely(!__raw_spin_trylock(&lock->raw_lock))) + if (unlikely(!arch_spin_trylock(&lock->raw_lock))) __spin_lock_debug(lock); debug_spin_lock_after(lock); } int _raw_spin_trylock(spinlock_t *lock) { - int ret = __raw_spin_trylock(&lock->raw_lock); + int ret = arch_spin_trylock(&lock->raw_lock); if (ret) debug_spin_lock_after(lock); @@ -151,7 +151,7 @@ int _raw_spin_trylock(spinlock_t *lock) void _raw_spin_unlock(spinlock_t *lock) { debug_spin_unlock(lock); - __raw_spin_unlock(&lock->raw_lock); + arch_spin_unlock(&lock->raw_lock); } static void rwlock_bug(rwlock_t *lock, const char *msg) -- cgit v1.2.3 From fb3a6bbc912b12347614e5742c7c61416cdb0ca0 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 3 Dec 2009 20:01:19 +0100 Subject: locking: Convert raw_rwlock to arch_rwlock Not strictly necessary for -rt as -rt does not have non sleeping rwlocks, but it's odd to not have a consistent naming convention. No functional change. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Acked-by: David S. Miller Acked-by: Ingo Molnar Cc: linux-arch@vger.kernel.org --- arch/alpha/include/asm/spinlock.h | 16 ++++++++-------- arch/alpha/include/asm/spinlock_types.h | 4 ++-- arch/arm/include/asm/spinlock.h | 12 ++++++------ arch/arm/include/asm/spinlock_types.h | 4 ++-- arch/blackfin/include/asm/spinlock.h | 16 ++++++++-------- arch/blackfin/include/asm/spinlock_types.h | 4 ++-- arch/cris/include/arch-v32/arch/spinlock.h | 16 ++++++++-------- arch/ia64/include/asm/spinlock.h | 16 ++++++++-------- arch/ia64/include/asm/spinlock_types.h | 4 ++-- arch/m32r/include/asm/spinlock.h | 12 ++++++------ arch/m32r/include/asm/spinlock_types.h | 4 ++-- arch/mips/include/asm/spinlock.h | 12 ++++++------ arch/mips/include/asm/spinlock_types.h | 4 ++-- arch/parisc/include/asm/spinlock.h | 16 ++++++++-------- arch/parisc/include/asm/spinlock_types.h | 4 ++-- arch/powerpc/include/asm/spinlock.h | 18 +++++++++--------- arch/powerpc/include/asm/spinlock_types.h | 4 ++-- arch/powerpc/lib/locks.c | 2 +- arch/s390/include/asm/spinlock.h | 30 +++++++++++++++--------------- arch/s390/include/asm/spinlock_types.h | 4 ++-- arch/s390/lib/spinlock.c | 12 ++++++------ arch/sh/include/asm/spinlock.h | 12 ++++++------ arch/sh/include/asm/spinlock_types.h | 4 ++-- arch/sparc/include/asm/spinlock_32.h | 20 ++++++++++---------- arch/sparc/include/asm/spinlock_64.h | 12 ++++++------ arch/sparc/include/asm/spinlock_types.h | 4 ++-- arch/x86/include/asm/spinlock.h | 16 ++++++++-------- arch/x86/include/asm/spinlock_types.h | 4 ++-- include/linux/rwlock_types.h | 6 +++--- include/linux/spinlock.h | 4 ++-- include/linux/spinlock_types_up.h | 4 ++-- lib/spinlock_debug.c | 2 +- 32 files changed, 151 insertions(+), 151 deletions(-) (limited to 'include') diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h index 4dac79f504c3..e8b2970f037b 100644 --- 
a/arch/alpha/include/asm/spinlock.h +++ b/arch/alpha/include/asm/spinlock.h @@ -50,17 +50,17 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) /***********************************************************/ -static inline int __raw_read_can_lock(raw_rwlock_t *lock) +static inline int __raw_read_can_lock(arch_rwlock_t *lock) { return (lock->lock & 1) == 0; } -static inline int __raw_write_can_lock(raw_rwlock_t *lock) +static inline int __raw_write_can_lock(arch_rwlock_t *lock) { return lock->lock == 0; } -static inline void __raw_read_lock(raw_rwlock_t *lock) +static inline void __raw_read_lock(arch_rwlock_t *lock) { long regx; @@ -80,7 +80,7 @@ static inline void __raw_read_lock(raw_rwlock_t *lock) : "m" (*lock) : "memory"); } -static inline void __raw_write_lock(raw_rwlock_t *lock) +static inline void __raw_write_lock(arch_rwlock_t *lock) { long regx; @@ -100,7 +100,7 @@ static inline void __raw_write_lock(raw_rwlock_t *lock) : "m" (*lock) : "memory"); } -static inline int __raw_read_trylock(raw_rwlock_t * lock) +static inline int __raw_read_trylock(arch_rwlock_t * lock) { long regx; int success; @@ -122,7 +122,7 @@ static inline int __raw_read_trylock(raw_rwlock_t * lock) return success; } -static inline int __raw_write_trylock(raw_rwlock_t * lock) +static inline int __raw_write_trylock(arch_rwlock_t * lock) { long regx; int success; @@ -144,7 +144,7 @@ static inline int __raw_write_trylock(raw_rwlock_t * lock) return success; } -static inline void __raw_read_unlock(raw_rwlock_t * lock) +static inline void __raw_read_unlock(arch_rwlock_t * lock) { long regx; __asm__ __volatile__( @@ -160,7 +160,7 @@ static inline void __raw_read_unlock(raw_rwlock_t * lock) : "m" (*lock) : "memory"); } -static inline void __raw_write_unlock(raw_rwlock_t * lock) +static inline void __raw_write_unlock(arch_rwlock_t * lock) { mb(); lock->lock = 0; diff --git a/arch/alpha/include/asm/spinlock_types.h b/arch/alpha/include/asm/spinlock_types.h index 08975ee0a100..54c2afce0a1d 100644 --- a/arch/alpha/include/asm/spinlock_types.h +++ b/arch/alpha/include/asm/spinlock_types.h @@ -13,8 +13,8 @@ typedef struct { typedef struct { volatile unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { 0 } +#define __ARCH_RW_LOCK_UNLOCKED { 0 } #endif diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index de62eb098f68..a8671d8bc7d4 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -86,7 +86,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) * just write zero since the lock is exclusively held. */ -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void __raw_write_lock(arch_rwlock_t *rw) { unsigned long tmp; @@ -106,7 +106,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) smp_mb(); } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int __raw_write_trylock(arch_rwlock_t *rw) { unsigned long tmp; @@ -126,7 +126,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) } } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void __raw_write_unlock(arch_rwlock_t *rw) { smp_mb(); @@ -156,7 +156,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) * currently active. However, we know we won't have any write * locks. 
*/ -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void __raw_read_lock(arch_rwlock_t *rw) { unsigned long tmp, tmp2; @@ -176,7 +176,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) smp_mb(); } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void __raw_read_unlock(arch_rwlock_t *rw) { unsigned long tmp, tmp2; @@ -198,7 +198,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) : "cc"); } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int __raw_read_trylock(arch_rwlock_t *rw) { unsigned long tmp, tmp2 = 1; diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h index 9622e126a8de..d14d197ae04a 100644 --- a/arch/arm/include/asm/spinlock_types.h +++ b/arch/arm/include/asm/spinlock_types.h @@ -13,8 +13,8 @@ typedef struct { typedef struct { volatile unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { 0 } +#define __ARCH_RW_LOCK_UNLOCKED { 0 } #endif diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h index 62d49540e02b..7e1c56b0a571 100644 --- a/arch/blackfin/include/asm/spinlock.h +++ b/arch/blackfin/include/asm/spinlock.h @@ -52,42 +52,42 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) cpu_relax(); } -static inline int __raw_read_can_lock(raw_rwlock_t *rw) +static inline int __raw_read_can_lock(arch_rwlock_t *rw) { return __raw_uncached_fetch_asm(&rw->lock) > 0; } -static inline int __raw_write_can_lock(raw_rwlock_t *rw) +static inline int __raw_write_can_lock(arch_rwlock_t *rw) { return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS; } -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void __raw_read_lock(arch_rwlock_t *rw) { __raw_read_lock_asm(&rw->lock); } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int __raw_read_trylock(arch_rwlock_t *rw) { return __raw_read_trylock_asm(&rw->lock); } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void __raw_read_unlock(arch_rwlock_t *rw) { __raw_read_unlock_asm(&rw->lock); } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void __raw_write_lock(arch_rwlock_t *rw) { __raw_write_lock_asm(&rw->lock); } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int __raw_write_trylock(arch_rwlock_t *rw) { return __raw_write_trylock_asm(&rw->lock); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void __raw_write_unlock(arch_rwlock_t *rw) { __raw_write_unlock_asm(&rw->lock); } diff --git a/arch/blackfin/include/asm/spinlock_types.h b/arch/blackfin/include/asm/spinlock_types.h index c8a3928a58c5..1a33608c958b 100644 --- a/arch/blackfin/include/asm/spinlock_types.h +++ b/arch/blackfin/include/asm/spinlock_types.h @@ -21,8 +21,8 @@ typedef struct { typedef struct { volatile unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } +#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } #endif diff --git a/arch/cris/include/arch-v32/arch/spinlock.h b/arch/cris/include/arch-v32/arch/spinlock.h index a2e8a394d555..1d7d3a8046cb 100644 --- a/arch/cris/include/arch-v32/arch/spinlock.h +++ b/arch/cris/include/arch-v32/arch/spinlock.h @@ -56,17 +56,17 @@ arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) * */ -static inline int __raw_read_can_lock(raw_rwlock_t *x) +static inline int __raw_read_can_lock(arch_rwlock_t *x) { return (int)(x)->lock > 0; } -static inline int 
__raw_write_can_lock(raw_rwlock_t *x) +static inline int __raw_write_can_lock(arch_rwlock_t *x) { return (x)->lock == RW_LOCK_BIAS; } -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void __raw_read_lock(arch_rwlock_t *rw) { arch_spin_lock(&rw->slock); while (rw->lock == 0); @@ -74,7 +74,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) arch_spin_unlock(&rw->slock); } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void __raw_write_lock(arch_rwlock_t *rw) { arch_spin_lock(&rw->slock); while (rw->lock != RW_LOCK_BIAS); @@ -82,14 +82,14 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) arch_spin_unlock(&rw->slock); } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void __raw_read_unlock(arch_rwlock_t *rw) { arch_spin_lock(&rw->slock); rw->lock++; arch_spin_unlock(&rw->slock); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void __raw_write_unlock(arch_rwlock_t *rw) { arch_spin_lock(&rw->slock); while (rw->lock != RW_LOCK_BIAS); @@ -97,7 +97,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) arch_spin_unlock(&rw->slock); } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int __raw_read_trylock(arch_rwlock_t *rw) { int ret = 0; arch_spin_lock(&rw->slock); @@ -109,7 +109,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) return ret; } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int __raw_write_trylock(arch_rwlock_t *rw) { int ret = 0; arch_spin_lock(&rw->slock); diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h index b06165f6352f..6715b6a8ebc3 100644 --- a/arch/ia64/include/asm/spinlock.h +++ b/arch/ia64/include/asm/spinlock.h @@ -146,7 +146,7 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) #ifdef ASM_SUPPORTED static __always_inline void -__raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags) +__raw_read_lock_flags(arch_rwlock_t *lock, unsigned long flags) { __asm__ __volatile__ ( "tbit.nz p6, p0 = %1,%2\n" @@ -177,7 +177,7 @@ __raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags) #define __raw_read_lock(rw) \ do { \ - raw_rwlock_t *__read_lock_ptr = (rw); \ + arch_rwlock_t *__read_lock_ptr = (rw); \ \ while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \ ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ @@ -190,14 +190,14 @@ do { \ #define __raw_read_unlock(rw) \ do { \ - raw_rwlock_t *__read_lock_ptr = (rw); \ + arch_rwlock_t *__read_lock_ptr = (rw); \ ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ } while (0) #ifdef ASM_SUPPORTED static __always_inline void -__raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags) +__raw_write_lock_flags(arch_rwlock_t *lock, unsigned long flags) { __asm__ __volatile__ ( "tbit.nz p6, p0 = %1, %2\n" @@ -235,7 +235,7 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags) (result == 0); \ }) -static inline void __raw_write_unlock(raw_rwlock_t *x) +static inline void __raw_write_unlock(arch_rwlock_t *x) { u8 *y = (u8 *)x; barrier(); @@ -265,7 +265,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x) (ia64_val == 0); \ }) -static inline void __raw_write_unlock(raw_rwlock_t *x) +static inline void __raw_write_unlock(arch_rwlock_t *x) { barrier(); x->write_lock = 0; @@ -273,10 +273,10 @@ static inline void __raw_write_unlock(raw_rwlock_t *x) #endif /* !ASM_SUPPORTED */ -static inline int __raw_read_trylock(raw_rwlock_t *x) +static inline int __raw_read_trylock(arch_rwlock_t *x) { 
union { - raw_rwlock_t lock; + arch_rwlock_t lock; __u32 word; } old, new; old.lock = new.lock = *x; diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h index 6a11b65fa66d..e2b42a52a6d3 100644 --- a/arch/ia64/include/asm/spinlock_types.h +++ b/arch/ia64/include/asm/spinlock_types.h @@ -14,8 +14,8 @@ typedef struct { typedef struct { volatile unsigned int read_counter : 31; volatile unsigned int write_lock : 1; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { 0, 0 } +#define __ARCH_RW_LOCK_UNLOCKED { 0, 0 } #endif diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h index 8acac950a43c..1c76af8c8e1b 100644 --- a/arch/m32r/include/asm/spinlock.h +++ b/arch/m32r/include/asm/spinlock.h @@ -148,7 +148,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) */ #define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void __raw_read_lock(arch_rwlock_t *rw) { unsigned long tmp0, tmp1; @@ -199,7 +199,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) ); } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void __raw_write_lock(arch_rwlock_t *rw) { unsigned long tmp0, tmp1, tmp2; @@ -252,7 +252,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) ); } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void __raw_read_unlock(arch_rwlock_t *rw) { unsigned long tmp0, tmp1; @@ -274,7 +274,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) ); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void __raw_write_unlock(arch_rwlock_t *rw) { unsigned long tmp0, tmp1, tmp2; @@ -298,7 +298,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) ); } -static inline int __raw_read_trylock(raw_rwlock_t *lock) +static inline int __raw_read_trylock(arch_rwlock_t *lock) { atomic_t *count = (atomic_t*)lock; if (atomic_dec_return(count) >= 0) @@ -307,7 +307,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock) return 0; } -static inline int __raw_write_trylock(raw_rwlock_t *lock) +static inline int __raw_write_trylock(arch_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; if (atomic_sub_and_test(RW_LOCK_BIAS, count)) diff --git a/arch/m32r/include/asm/spinlock_types.h b/arch/m32r/include/asm/spinlock_types.h index 5873a8701107..92e27672661f 100644 --- a/arch/m32r/include/asm/spinlock_types.h +++ b/arch/m32r/include/asm/spinlock_types.h @@ -13,11 +13,11 @@ typedef struct { typedef struct { volatile int lock; -} raw_rwlock_t; +} arch_rwlock_t; #define RW_LOCK_BIAS 0x01000000 #define RW_LOCK_BIAS_STR "0x01000000" -#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } +#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } #endif /* _ASM_M32R_SPINLOCK_TYPES_H */ diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h index 95edebaaf22a..7bf27c8a3364 100644 --- a/arch/mips/include/asm/spinlock.h +++ b/arch/mips/include/asm/spinlock.h @@ -256,7 +256,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) */ #define __raw_write_can_lock(rw) (!(rw)->lock) -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void __raw_read_lock(arch_rwlock_t *rw) { unsigned int tmp; @@ -301,7 +301,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) /* Note the use of sub, not subu which will make the kernel die with an overflow exception if we ever try to unlock an rwlock that is already unlocked or is being held by a writer. 
*/ -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void __raw_read_unlock(arch_rwlock_t *rw) { unsigned int tmp; @@ -335,7 +335,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) } } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void __raw_write_lock(arch_rwlock_t *rw) { unsigned int tmp; @@ -377,7 +377,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) smp_llsc_mb(); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void __raw_write_unlock(arch_rwlock_t *rw) { smp_mb(); @@ -389,7 +389,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) : "memory"); } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int __raw_read_trylock(arch_rwlock_t *rw) { unsigned int tmp; int ret; @@ -433,7 +433,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) return ret; } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int __raw_write_trylock(arch_rwlock_t *rw) { unsigned int tmp; int ret; diff --git a/arch/mips/include/asm/spinlock_types.h b/arch/mips/include/asm/spinlock_types.h index b4c5efaadb9c..ee197c2f9c98 100644 --- a/arch/mips/include/asm/spinlock_types.h +++ b/arch/mips/include/asm/spinlock_types.h @@ -18,8 +18,8 @@ typedef struct { typedef struct { volatile unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { 0 } +#define __ARCH_RW_LOCK_UNLOCKED { 0 } #endif diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h index 235e7e386e2a..1ff3a0a94a43 100644 --- a/arch/parisc/include/asm/spinlock.h +++ b/arch/parisc/include/asm/spinlock.h @@ -69,7 +69,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *x) /* Note that we have to ensure interrupts are disabled in case we're * interrupted by some other code that wants to grab the same read lock */ -static __inline__ void __raw_read_lock(raw_rwlock_t *rw) +static __inline__ void __raw_read_lock(arch_rwlock_t *rw) { unsigned long flags; local_irq_save(flags); @@ -81,7 +81,7 @@ static __inline__ void __raw_read_lock(raw_rwlock_t *rw) /* Note that we have to ensure interrupts are disabled in case we're * interrupted by some other code that wants to grab the same read lock */ -static __inline__ void __raw_read_unlock(raw_rwlock_t *rw) +static __inline__ void __raw_read_unlock(arch_rwlock_t *rw) { unsigned long flags; local_irq_save(flags); @@ -93,7 +93,7 @@ static __inline__ void __raw_read_unlock(raw_rwlock_t *rw) /* Note that we have to ensure interrupts are disabled in case we're * interrupted by some other code that wants to grab the same read lock */ -static __inline__ int __raw_read_trylock(raw_rwlock_t *rw) +static __inline__ int __raw_read_trylock(arch_rwlock_t *rw) { unsigned long flags; retry: @@ -119,7 +119,7 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw) /* Note that we have to ensure interrupts are disabled in case we're * interrupted by some other code that wants to read_trylock() this lock */ -static __inline__ void __raw_write_lock(raw_rwlock_t *rw) +static __inline__ void __raw_write_lock(arch_rwlock_t *rw) { unsigned long flags; retry: @@ -141,7 +141,7 @@ retry: local_irq_restore(flags); } -static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) +static __inline__ void __raw_write_unlock(arch_rwlock_t *rw) { rw->counter = 0; arch_spin_unlock(&rw->lock); @@ -149,7 +149,7 @@ static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) /* Note that we have to ensure interrupts are disabled in case we're * 
interrupted by some other code that wants to read_trylock() this lock */ -static __inline__ int __raw_write_trylock(raw_rwlock_t *rw) +static __inline__ int __raw_write_trylock(arch_rwlock_t *rw) { unsigned long flags; int result = 0; @@ -173,7 +173,7 @@ static __inline__ int __raw_write_trylock(raw_rwlock_t *rw) * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw) +static __inline__ int __raw_read_can_lock(arch_rwlock_t *rw) { return rw->counter >= 0; } @@ -182,7 +182,7 @@ static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw) * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. */ -static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw) +static __inline__ int __raw_write_can_lock(arch_rwlock_t *rw) { return !rw->counter; } diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h index 396d2746ca57..8c373aa28a86 100644 --- a/arch/parisc/include/asm/spinlock_types.h +++ b/arch/parisc/include/asm/spinlock_types.h @@ -14,8 +14,8 @@ typedef struct { typedef struct { arch_spinlock_t lock; volatile int counter; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 } +#define __ARCH_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 } #endif diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index cdcaf6b97087..2fad2c07c593 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -97,7 +97,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) /* We only yield to the hypervisor if we are in shared processor mode */ #define SHARED_PROCESSOR (get_lppaca()->shared_proc) extern void __spin_yield(arch_spinlock_t *lock); -extern void __rw_yield(raw_rwlock_t *lock); +extern void __rw_yield(arch_rwlock_t *lock); #else /* SPLPAR || ISERIES */ #define __spin_yield(x) barrier() #define __rw_yield(x) barrier() @@ -181,7 +181,7 @@ extern void arch_spin_unlock_wait(arch_spinlock_t *lock); * This returns the old value in the lock + 1, * so we got a read lock if the return value is > 0. */ -static inline long arch_read_trylock(raw_rwlock_t *rw) +static inline long arch_read_trylock(arch_rwlock_t *rw) { long tmp; @@ -205,7 +205,7 @@ static inline long arch_read_trylock(raw_rwlock_t *rw) * This returns the old value in the lock, * so we got the write lock if the return value is 0. 
*/ -static inline long arch_write_trylock(raw_rwlock_t *rw) +static inline long arch_write_trylock(arch_rwlock_t *rw) { long tmp, token; @@ -225,7 +225,7 @@ static inline long arch_write_trylock(raw_rwlock_t *rw) return tmp; } -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void __raw_read_lock(arch_rwlock_t *rw) { while (1) { if (likely(arch_read_trylock(rw) > 0)) @@ -239,7 +239,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) } } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void __raw_write_lock(arch_rwlock_t *rw) { while (1) { if (likely(arch_write_trylock(rw) == 0)) @@ -253,17 +253,17 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) } } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int __raw_read_trylock(arch_rwlock_t *rw) { return arch_read_trylock(rw) > 0; } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int __raw_write_trylock(arch_rwlock_t *rw) { return arch_write_trylock(rw) == 0; } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void __raw_read_unlock(arch_rwlock_t *rw) { long tmp; @@ -280,7 +280,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) : "cr0", "xer", "memory"); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void __raw_write_unlock(arch_rwlock_t *rw) { __asm__ __volatile__("# write_unlock\n\t" LWSYNC_ON_SMP: : :"memory"); diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h index f5f39d82711f..2351adc4fdc4 100644 --- a/arch/powerpc/include/asm/spinlock_types.h +++ b/arch/powerpc/include/asm/spinlock_types.h @@ -13,8 +13,8 @@ typedef struct { typedef struct { volatile signed int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { 0 } +#define __ARCH_RW_LOCK_UNLOCKED { 0 } #endif diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c index ee395e392115..58e14fba11b1 100644 --- a/arch/powerpc/lib/locks.c +++ b/arch/powerpc/lib/locks.c @@ -55,7 +55,7 @@ void __spin_yield(arch_spinlock_t *lock) * This turns out to be the same for read and write locks, since * we only know the holder if it is write-locked. 
*/ -void __rw_yield(raw_rwlock_t *rw) +void __rw_yield(arch_rwlock_t *rw) { int lock_value; unsigned int holder_cpu, yield_count; diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index a94c146657a9..7f98f0e48acb 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h @@ -121,14 +121,14 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp) */ #define __raw_write_can_lock(x) ((x)->lock == 0) -extern void _raw_read_lock_wait(raw_rwlock_t *lp); -extern void _raw_read_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags); -extern int _raw_read_trylock_retry(raw_rwlock_t *lp); -extern void _raw_write_lock_wait(raw_rwlock_t *lp); -extern void _raw_write_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags); -extern int _raw_write_trylock_retry(raw_rwlock_t *lp); - -static inline void __raw_read_lock(raw_rwlock_t *rw) +extern void _raw_read_lock_wait(arch_rwlock_t *lp); +extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags); +extern int _raw_read_trylock_retry(arch_rwlock_t *lp); +extern void _raw_write_lock_wait(arch_rwlock_t *lp); +extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags); +extern int _raw_write_trylock_retry(arch_rwlock_t *lp); + +static inline void __raw_read_lock(arch_rwlock_t *rw) { unsigned int old; old = rw->lock & 0x7fffffffU; @@ -136,7 +136,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) _raw_read_lock_wait(rw); } -static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags) +static inline void __raw_read_lock_flags(arch_rwlock_t *rw, unsigned long flags) { unsigned int old; old = rw->lock & 0x7fffffffU; @@ -144,7 +144,7 @@ static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags) _raw_read_lock_wait_flags(rw, flags); } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void __raw_read_unlock(arch_rwlock_t *rw) { unsigned int old, cmp; @@ -155,24 +155,24 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) } while (cmp != old); } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void __raw_write_lock(arch_rwlock_t *rw) { if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) _raw_write_lock_wait(rw); } -static inline void __raw_write_lock_flags(raw_rwlock_t *rw, unsigned long flags) +static inline void __raw_write_lock_flags(arch_rwlock_t *rw, unsigned long flags) { if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) _raw_write_lock_wait_flags(rw, flags); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void __raw_write_unlock(arch_rwlock_t *rw) { _raw_compare_and_swap(&rw->lock, 0x80000000, 0); } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int __raw_read_trylock(arch_rwlock_t *rw) { unsigned int old; old = rw->lock & 0x7fffffffU; @@ -181,7 +181,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) return _raw_read_trylock_retry(rw); } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int __raw_write_trylock(arch_rwlock_t *rw) { if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)) return 1; diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h index e25c0370f6cd..9c76656a0af0 100644 --- a/arch/s390/include/asm/spinlock_types.h +++ b/arch/s390/include/asm/spinlock_types.h @@ -13,8 +13,8 @@ typedef struct { typedef struct { volatile unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define 
__RAW_RW_LOCK_UNLOCKED { 0 } +#define __ARCH_RW_LOCK_UNLOCKED { 0 } #endif diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c index f4596452f072..09fee9a1aa15 100644 --- a/arch/s390/lib/spinlock.c +++ b/arch/s390/lib/spinlock.c @@ -105,7 +105,7 @@ void arch_spin_relax(arch_spinlock_t *lock) } EXPORT_SYMBOL(arch_spin_relax); -void _raw_read_lock_wait(raw_rwlock_t *rw) +void _raw_read_lock_wait(arch_rwlock_t *rw) { unsigned int old; int count = spin_retry; @@ -124,7 +124,7 @@ void _raw_read_lock_wait(raw_rwlock_t *rw) } EXPORT_SYMBOL(_raw_read_lock_wait); -void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) +void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) { unsigned int old; int count = spin_retry; @@ -145,7 +145,7 @@ void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) } EXPORT_SYMBOL(_raw_read_lock_wait_flags); -int _raw_read_trylock_retry(raw_rwlock_t *rw) +int _raw_read_trylock_retry(arch_rwlock_t *rw) { unsigned int old; int count = spin_retry; @@ -161,7 +161,7 @@ int _raw_read_trylock_retry(raw_rwlock_t *rw) } EXPORT_SYMBOL(_raw_read_trylock_retry); -void _raw_write_lock_wait(raw_rwlock_t *rw) +void _raw_write_lock_wait(arch_rwlock_t *rw) { int count = spin_retry; @@ -178,7 +178,7 @@ void _raw_write_lock_wait(raw_rwlock_t *rw) } EXPORT_SYMBOL(_raw_write_lock_wait); -void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) +void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) { int count = spin_retry; @@ -197,7 +197,7 @@ void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) } EXPORT_SYMBOL(_raw_write_lock_wait_flags); -int _raw_write_trylock_retry(raw_rwlock_t *rw) +int _raw_write_trylock_retry(arch_rwlock_t *rw) { int count = spin_retry; diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h index da1c6491ed4b..7f3626aac869 100644 --- a/arch/sh/include/asm/spinlock.h +++ b/arch/sh/include/asm/spinlock.h @@ -108,7 +108,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) */ #define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void __raw_read_lock(arch_rwlock_t *rw) { unsigned long tmp; @@ -126,7 +126,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) ); } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void __raw_read_unlock(arch_rwlock_t *rw) { unsigned long tmp; @@ -142,7 +142,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) ); } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void __raw_write_lock(arch_rwlock_t *rw) { unsigned long tmp; @@ -160,7 +160,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) ); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void __raw_write_unlock(arch_rwlock_t *rw) { __asm__ __volatile__ ( "mov.l %1, @%0 ! 
__raw_write_unlock \n\t" @@ -170,7 +170,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) ); } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int __raw_read_trylock(arch_rwlock_t *rw) { unsigned long tmp, oldval; @@ -193,7 +193,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) return (oldval > 0); } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int __raw_write_trylock(arch_rwlock_t *rw) { unsigned long tmp, oldval; diff --git a/arch/sh/include/asm/spinlock_types.h b/arch/sh/include/asm/spinlock_types.h index a3be2db960ed..9b7560db06ca 100644 --- a/arch/sh/include/asm/spinlock_types.h +++ b/arch/sh/include/asm/spinlock_types.h @@ -13,9 +13,9 @@ typedef struct { typedef struct { volatile unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; #define RW_LOCK_BIAS 0x01000000 -#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } +#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } #endif diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h index 9b0f2f53c81c..06d37e588fde 100644 --- a/arch/sparc/include/asm/spinlock_32.h +++ b/arch/sparc/include/asm/spinlock_32.h @@ -65,7 +65,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) * Sort of like atomic_t's on Sparc, but even more clever. * * ------------------------------------ - * | 24-bit counter | wlock | raw_rwlock_t + * | 24-bit counter | wlock | arch_rwlock_t * ------------------------------------ * 31 8 7 0 * @@ -76,9 +76,9 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) * * Unfortunately this scheme limits us to ~16,000,000 cpus. */ -static inline void arch_read_lock(raw_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { - register raw_rwlock_t *lp asm("g1"); + register arch_rwlock_t *lp asm("g1"); lp = rw; __asm__ __volatile__( "mov %%o7, %%g4\n\t" @@ -96,9 +96,9 @@ do { unsigned long flags; \ local_irq_restore(flags); \ } while(0) -static inline void arch_read_unlock(raw_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { - register raw_rwlock_t *lp asm("g1"); + register arch_rwlock_t *lp asm("g1"); lp = rw; __asm__ __volatile__( "mov %%o7, %%g4\n\t" @@ -116,9 +116,9 @@ do { unsigned long flags; \ local_irq_restore(flags); \ } while(0) -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void __raw_write_lock(arch_rwlock_t *rw) { - register raw_rwlock_t *lp asm("g1"); + register arch_rwlock_t *lp asm("g1"); lp = rw; __asm__ __volatile__( "mov %%o7, %%g4\n\t" @@ -130,7 +130,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) *(volatile __u32 *)&lp->lock = ~0U; } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int __raw_write_trylock(arch_rwlock_t *rw) { unsigned int val; @@ -150,9 +150,9 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) return (val == 0); } -static inline int arch_read_trylock(raw_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { - register raw_rwlock_t *lp asm("g1"); + register arch_rwlock_t *lp asm("g1"); register int res asm("o0"); lp = rw; __asm__ __volatile__( diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h index 7cf58a2fcda4..2b22d7f2c2fb 100644 --- a/arch/sparc/include/asm/spinlock_64.h +++ b/arch/sparc/include/asm/spinlock_64.h @@ -92,7 +92,7 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... 
*/ -static void inline arch_read_lock(raw_rwlock_t *lock) +static void inline arch_read_lock(arch_rwlock_t *lock) { unsigned long tmp1, tmp2; @@ -115,7 +115,7 @@ static void inline arch_read_lock(raw_rwlock_t *lock) : "memory"); } -static int inline arch_read_trylock(raw_rwlock_t *lock) +static int inline arch_read_trylock(arch_rwlock_t *lock) { int tmp1, tmp2; @@ -136,7 +136,7 @@ static int inline arch_read_trylock(raw_rwlock_t *lock) return tmp1; } -static void inline arch_read_unlock(raw_rwlock_t *lock) +static void inline arch_read_unlock(arch_rwlock_t *lock) { unsigned long tmp1, tmp2; @@ -152,7 +152,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock) : "memory"); } -static void inline arch_write_lock(raw_rwlock_t *lock) +static void inline arch_write_lock(arch_rwlock_t *lock) { unsigned long mask, tmp1, tmp2; @@ -177,7 +177,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock) : "memory"); } -static void inline arch_write_unlock(raw_rwlock_t *lock) +static void inline arch_write_unlock(arch_rwlock_t *lock) { __asm__ __volatile__( " stw %%g0, [%0]" @@ -186,7 +186,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock) : "memory"); } -static int inline arch_write_trylock(raw_rwlock_t *lock) +static int inline arch_write_trylock(arch_rwlock_t *lock) { unsigned long mask, tmp1, tmp2, result; diff --git a/arch/sparc/include/asm/spinlock_types.h b/arch/sparc/include/asm/spinlock_types.h index c145e63a5d66..9c454fdeaad8 100644 --- a/arch/sparc/include/asm/spinlock_types.h +++ b/arch/sparc/include/asm/spinlock_types.h @@ -13,8 +13,8 @@ typedef struct { typedef struct { volatile unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { 0 } +#define __ARCH_RW_LOCK_UNLOCKED { 0 } #endif diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index ab9055fd57d9..99cb86e843a0 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h @@ -232,7 +232,7 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -static inline int __raw_read_can_lock(raw_rwlock_t *lock) +static inline int __raw_read_can_lock(arch_rwlock_t *lock) { return (int)(lock)->lock > 0; } @@ -241,12 +241,12 @@ static inline int __raw_read_can_lock(raw_rwlock_t *lock) * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. 
*/ -static inline int __raw_write_can_lock(raw_rwlock_t *lock) +static inline int __raw_write_can_lock(arch_rwlock_t *lock) { return (lock)->lock == RW_LOCK_BIAS; } -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void __raw_read_lock(arch_rwlock_t *rw) { asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" "jns 1f\n" @@ -255,7 +255,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) ::LOCK_PTR_REG (rw) : "memory"); } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void __raw_write_lock(arch_rwlock_t *rw) { asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t" "jz 1f\n" @@ -264,7 +264,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory"); } -static inline int __raw_read_trylock(raw_rwlock_t *lock) +static inline int __raw_read_trylock(arch_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; @@ -274,7 +274,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock) return 0; } -static inline int __raw_write_trylock(raw_rwlock_t *lock) +static inline int __raw_write_trylock(arch_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; @@ -284,12 +284,12 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock) return 0; } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void __raw_read_unlock(arch_rwlock_t *rw) { asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory"); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void __raw_write_unlock(arch_rwlock_t *rw) { asm volatile(LOCK_PREFIX "addl %1, %0" : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory"); diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h index 696f8364a4f3..dcb48b2edc11 100644 --- a/arch/x86/include/asm/spinlock_types.h +++ b/arch/x86/include/asm/spinlock_types.h @@ -13,8 +13,8 @@ typedef struct arch_spinlock { typedef struct { unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } +#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } #endif /* _ASM_X86_SPINLOCK_TYPES_H */ diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h index f8c935206a41..bd31808c7d8e 100644 --- a/include/linux/rwlock_types.h +++ b/include/linux/rwlock_types.h @@ -9,7 +9,7 @@ * Released under the General Public License (GPL). 
*/ typedef struct { - raw_rwlock_t raw_lock; + arch_rwlock_t raw_lock; #ifdef CONFIG_GENERIC_LOCKBREAK unsigned int break_lock; #endif @@ -32,14 +32,14 @@ typedef struct { #ifdef CONFIG_DEBUG_SPINLOCK #define __RW_LOCK_UNLOCKED(lockname) \ - (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ + (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \ .magic = RWLOCK_MAGIC, \ .owner = SPINLOCK_OWNER_INIT, \ .owner_cpu = -1, \ RW_DEP_MAP_INIT(lockname) } #else #define __RW_LOCK_UNLOCKED(lockname) \ - (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ + (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \ RW_DEP_MAP_INIT(lockname) } #endif diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index de3a022489c6..53bc2213b414 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -8,7 +8,7 @@ * * on SMP builds: * - * asm/spinlock_types.h: contains the arch_spinlock_t/raw_rwlock_t and the + * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the * initializers * * linux/spinlock_types.h: @@ -75,7 +75,7 @@ #define __lockfunc __attribute__((section(".spinlock.text"))) /* - * Pull the arch_spinlock_t and raw_rwlock_t definitions: + * Pull the arch_spinlock_t and arch_rwlock_t definitions: */ #include diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h index 10db021f4875..c09b6407ae1b 100644 --- a/include/linux/spinlock_types_up.h +++ b/include/linux/spinlock_types_up.h @@ -30,8 +30,8 @@ typedef struct { } arch_spinlock_t; typedef struct { /* no debug version on UP */ -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { } +#define __ARCH_RW_LOCK_UNLOCKED { } #endif /* __LINUX_SPINLOCK_TYPES_UP_H */ diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c index 1304fe094546..3f72f10d9cb0 100644 --- a/lib/spinlock_debug.c +++ b/lib/spinlock_debug.c @@ -41,7 +41,7 @@ void __rwlock_init(rwlock_t *lock, const char *name, debug_check_no_locks_freed((void *)lock, sizeof(*lock)); lockdep_init_map(&lock->dep_map, name, key, 0); #endif - lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED; + lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED; lock->magic = RWLOCK_MAGIC; lock->owner = SPINLOCK_OWNER_INIT; lock->owner_cpu = -1; -- cgit v1.2.3 From e5931943d02bf751b1ec849c0d2ade23d76a8d41 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 3 Dec 2009 20:08:46 +0100 Subject: locking: Convert raw_rwlock functions to arch_rwlock Name space cleanup for rwlock functions. No functional change. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Acked-by: David S. 
Miller Acked-by: Ingo Molnar Cc: linux-arch@vger.kernel.org --- arch/alpha/include/asm/spinlock.h | 20 +++++++------- arch/arm/include/asm/spinlock.h | 20 +++++++------- arch/blackfin/include/asm/spinlock.h | 40 ++++++++++++++-------------- arch/cris/include/arch-v32/arch/spinlock.h | 16 ++++++------ arch/ia64/include/asm/spinlock.h | 32 +++++++++++------------ arch/m32r/include/asm/spinlock.h | 20 +++++++------- arch/mips/include/asm/spinlock.h | 42 +++++++++++++++--------------- arch/parisc/include/asm/spinlock.h | 20 +++++++------- arch/powerpc/include/asm/spinlock.h | 32 +++++++++++------------ arch/s390/include/asm/spinlock.h | 20 +++++++------- arch/s390/lib/spinlock.c | 12 ++++----- arch/sh/include/asm/spinlock.h | 32 +++++++++++------------ arch/sparc/include/asm/spinlock_32.h | 32 +++++++++++------------ arch/sparc/include/asm/spinlock_64.h | 22 ++++++++-------- arch/x86/include/asm/spinlock.h | 20 +++++++------- include/linux/rwlock.h | 20 +++++++------- include/linux/spinlock_up.h | 16 ++++++------ lib/spinlock_debug.c | 16 ++++++------ 18 files changed, 216 insertions(+), 216 deletions(-) (limited to 'include') diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h index e8b2970f037b..d0faca1e992d 100644 --- a/arch/alpha/include/asm/spinlock.h +++ b/arch/alpha/include/asm/spinlock.h @@ -50,17 +50,17 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) /***********************************************************/ -static inline int __raw_read_can_lock(arch_rwlock_t *lock) +static inline int arch_read_can_lock(arch_rwlock_t *lock) { return (lock->lock & 1) == 0; } -static inline int __raw_write_can_lock(arch_rwlock_t *lock) +static inline int arch_write_can_lock(arch_rwlock_t *lock) { return lock->lock == 0; } -static inline void __raw_read_lock(arch_rwlock_t *lock) +static inline void arch_read_lock(arch_rwlock_t *lock) { long regx; @@ -80,7 +80,7 @@ static inline void __raw_read_lock(arch_rwlock_t *lock) : "m" (*lock) : "memory"); } -static inline void __raw_write_lock(arch_rwlock_t *lock) +static inline void arch_write_lock(arch_rwlock_t *lock) { long regx; @@ -100,7 +100,7 @@ static inline void __raw_write_lock(arch_rwlock_t *lock) : "m" (*lock) : "memory"); } -static inline int __raw_read_trylock(arch_rwlock_t * lock) +static inline int arch_read_trylock(arch_rwlock_t * lock) { long regx; int success; @@ -122,7 +122,7 @@ static inline int __raw_read_trylock(arch_rwlock_t * lock) return success; } -static inline int __raw_write_trylock(arch_rwlock_t * lock) +static inline int arch_write_trylock(arch_rwlock_t * lock) { long regx; int success; @@ -144,7 +144,7 @@ static inline int __raw_write_trylock(arch_rwlock_t * lock) return success; } -static inline void __raw_read_unlock(arch_rwlock_t * lock) +static inline void arch_read_unlock(arch_rwlock_t * lock) { long regx; __asm__ __volatile__( @@ -160,14 +160,14 @@ static inline void __raw_read_unlock(arch_rwlock_t * lock) : "m" (*lock) : "memory"); } -static inline void __raw_write_unlock(arch_rwlock_t * lock) +static inline void arch_write_unlock(arch_rwlock_t * lock) { mb(); lock->lock = 0; } -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) #define arch_spin_relax(lock) cpu_relax() #define arch_read_relax(lock) cpu_relax() diff --git a/arch/arm/include/asm/spinlock.h 
b/arch/arm/include/asm/spinlock.h index a8671d8bc7d4..c91c64cab922 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -86,7 +86,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) * just write zero since the lock is exclusively held. */ -static inline void __raw_write_lock(arch_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { unsigned long tmp; @@ -106,7 +106,7 @@ static inline void __raw_write_lock(arch_rwlock_t *rw) smp_mb(); } -static inline int __raw_write_trylock(arch_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { unsigned long tmp; @@ -126,7 +126,7 @@ static inline int __raw_write_trylock(arch_rwlock_t *rw) } } -static inline void __raw_write_unlock(arch_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { smp_mb(); @@ -142,7 +142,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *rw) } /* write_can_lock - would write_trylock() succeed? */ -#define __raw_write_can_lock(x) ((x)->lock == 0) +#define arch_write_can_lock(x) ((x)->lock == 0) /* * Read locks are a bit more hairy: @@ -156,7 +156,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *rw) * currently active. However, we know we won't have any write * locks. */ -static inline void __raw_read_lock(arch_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { unsigned long tmp, tmp2; @@ -176,7 +176,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw) smp_mb(); } -static inline void __raw_read_unlock(arch_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { unsigned long tmp, tmp2; @@ -198,7 +198,7 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw) : "cc"); } -static inline int __raw_read_trylock(arch_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { unsigned long tmp, tmp2 = 1; @@ -215,10 +215,10 @@ static inline int __raw_read_trylock(arch_rwlock_t *rw) } /* read_can_lock - would read_trylock() succeed? 
*/ -#define __raw_read_can_lock(x) ((x)->lock < 0x80000000) +#define arch_read_can_lock(x) ((x)->lock < 0x80000000) -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) #define arch_spin_relax(lock) cpu_relax() #define arch_read_relax(lock) cpu_relax() diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h index 7e1c56b0a571..1942ccfedbe0 100644 --- a/arch/blackfin/include/asm/spinlock.h +++ b/arch/blackfin/include/asm/spinlock.h @@ -17,12 +17,12 @@ asmlinkage int __raw_spin_is_locked_asm(volatile int *ptr); asmlinkage void __raw_spin_lock_asm(volatile int *ptr); asmlinkage int __raw_spin_trylock_asm(volatile int *ptr); asmlinkage void __raw_spin_unlock_asm(volatile int *ptr); -asmlinkage void __raw_read_lock_asm(volatile int *ptr); -asmlinkage int __raw_read_trylock_asm(volatile int *ptr); -asmlinkage void __raw_read_unlock_asm(volatile int *ptr); -asmlinkage void __raw_write_lock_asm(volatile int *ptr); -asmlinkage int __raw_write_trylock_asm(volatile int *ptr); -asmlinkage void __raw_write_unlock_asm(volatile int *ptr); +asmlinkage void arch_read_lock_asm(volatile int *ptr); +asmlinkage int arch_read_trylock_asm(volatile int *ptr); +asmlinkage void arch_read_unlock_asm(volatile int *ptr); +asmlinkage void arch_write_lock_asm(volatile int *ptr); +asmlinkage int arch_write_trylock_asm(volatile int *ptr); +asmlinkage void arch_write_unlock_asm(volatile int *ptr); static inline int arch_spin_is_locked(arch_spinlock_t *lock) { @@ -52,44 +52,44 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) cpu_relax(); } -static inline int __raw_read_can_lock(arch_rwlock_t *rw) +static inline int arch_read_can_lock(arch_rwlock_t *rw) { return __raw_uncached_fetch_asm(&rw->lock) > 0; } -static inline int __raw_write_can_lock(arch_rwlock_t *rw) +static inline int arch_write_can_lock(arch_rwlock_t *rw) { return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS; } -static inline void __raw_read_lock(arch_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { - __raw_read_lock_asm(&rw->lock); + arch_read_lock_asm(&rw->lock); } -static inline int __raw_read_trylock(arch_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { - return __raw_read_trylock_asm(&rw->lock); + return arch_read_trylock_asm(&rw->lock); } -static inline void __raw_read_unlock(arch_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { - __raw_read_unlock_asm(&rw->lock); + arch_read_unlock_asm(&rw->lock); } -static inline void __raw_write_lock(arch_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { - __raw_write_lock_asm(&rw->lock); + arch_write_lock_asm(&rw->lock); } -static inline int __raw_write_trylock(arch_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { - return __raw_write_trylock_asm(&rw->lock); + return arch_write_trylock_asm(&rw->lock); } -static inline void __raw_write_unlock(arch_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { - __raw_write_unlock_asm(&rw->lock); + arch_write_unlock_asm(&rw->lock); } #define arch_spin_relax(lock) cpu_relax() diff --git a/arch/cris/include/arch-v32/arch/spinlock.h b/arch/cris/include/arch-v32/arch/spinlock.h index 1d7d3a8046cb..f171a6600fbc 100644 --- a/arch/cris/include/arch-v32/arch/spinlock.h +++ 
b/arch/cris/include/arch-v32/arch/spinlock.h @@ -56,17 +56,17 @@ arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) * */ -static inline int __raw_read_can_lock(arch_rwlock_t *x) +static inline int arch_read_can_lock(arch_rwlock_t *x) { return (int)(x)->lock > 0; } -static inline int __raw_write_can_lock(arch_rwlock_t *x) +static inline int arch_write_can_lock(arch_rwlock_t *x) { return (x)->lock == RW_LOCK_BIAS; } -static inline void __raw_read_lock(arch_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { arch_spin_lock(&rw->slock); while (rw->lock == 0); @@ -74,7 +74,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw) arch_spin_unlock(&rw->slock); } -static inline void __raw_write_lock(arch_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { arch_spin_lock(&rw->slock); while (rw->lock != RW_LOCK_BIAS); @@ -82,14 +82,14 @@ static inline void __raw_write_lock(arch_rwlock_t *rw) arch_spin_unlock(&rw->slock); } -static inline void __raw_read_unlock(arch_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { arch_spin_lock(&rw->slock); rw->lock++; arch_spin_unlock(&rw->slock); } -static inline void __raw_write_unlock(arch_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { arch_spin_lock(&rw->slock); while (rw->lock != RW_LOCK_BIAS); @@ -97,7 +97,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *rw) arch_spin_unlock(&rw->slock); } -static inline int __raw_read_trylock(arch_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { int ret = 0; arch_spin_lock(&rw->slock); @@ -109,7 +109,7 @@ static inline int __raw_read_trylock(arch_rwlock_t *rw) return ret; } -static inline int __raw_write_trylock(arch_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { int ret = 0; arch_spin_lock(&rw->slock); diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h index 6715b6a8ebc3..1a91c9121d17 100644 --- a/arch/ia64/include/asm/spinlock.h +++ b/arch/ia64/include/asm/spinlock.h @@ -140,13 +140,13 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) __ticket_spin_unlock_wait(lock); } -#define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0) -#define __raw_write_can_lock(rw) (*(volatile int *)(rw) == 0) +#define arch_read_can_lock(rw) (*(volatile int *)(rw) >= 0) +#define arch_write_can_lock(rw) (*(volatile int *)(rw) == 0) #ifdef ASM_SUPPORTED static __always_inline void -__raw_read_lock_flags(arch_rwlock_t *lock, unsigned long flags) +arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags) { __asm__ __volatile__ ( "tbit.nz p6, p0 = %1,%2\n" @@ -169,13 +169,13 @@ __raw_read_lock_flags(arch_rwlock_t *lock, unsigned long flags) : "p6", "p7", "r2", "memory"); } -#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0) +#define arch_read_lock(lock) arch_read_lock_flags(lock, 0) #else /* !ASM_SUPPORTED */ -#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw) +#define arch_read_lock_flags(rw, flags) arch_read_lock(rw) -#define __raw_read_lock(rw) \ +#define arch_read_lock(rw) \ do { \ arch_rwlock_t *__read_lock_ptr = (rw); \ \ @@ -188,7 +188,7 @@ do { \ #endif /* !ASM_SUPPORTED */ -#define __raw_read_unlock(rw) \ +#define arch_read_unlock(rw) \ do { \ arch_rwlock_t *__read_lock_ptr = (rw); \ ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ @@ -197,7 +197,7 @@ do { \ #ifdef ASM_SUPPORTED static __always_inline void -__raw_write_lock_flags(arch_rwlock_t *lock, unsigned long flags) +arch_write_lock_flags(arch_rwlock_t 
*lock, unsigned long flags) { __asm__ __volatile__ ( "tbit.nz p6, p0 = %1, %2\n" @@ -221,9 +221,9 @@ __raw_write_lock_flags(arch_rwlock_t *lock, unsigned long flags) : "ar.ccv", "p6", "p7", "r2", "r29", "memory"); } -#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0) +#define arch_write_lock(rw) arch_write_lock_flags(rw, 0) -#define __raw_write_trylock(rw) \ +#define arch_write_trylock(rw) \ ({ \ register long result; \ \ @@ -235,7 +235,7 @@ __raw_write_lock_flags(arch_rwlock_t *lock, unsigned long flags) (result == 0); \ }) -static inline void __raw_write_unlock(arch_rwlock_t *x) +static inline void arch_write_unlock(arch_rwlock_t *x) { u8 *y = (u8 *)x; barrier(); @@ -244,9 +244,9 @@ static inline void __raw_write_unlock(arch_rwlock_t *x) #else /* !ASM_SUPPORTED */ -#define __raw_write_lock_flags(l, flags) __raw_write_lock(l) +#define arch_write_lock_flags(l, flags) arch_write_lock(l) -#define __raw_write_lock(l) \ +#define arch_write_lock(l) \ ({ \ __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \ __u32 *ia64_write_lock_ptr = (__u32 *) (l); \ @@ -257,7 +257,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *x) } while (ia64_val); \ }) -#define __raw_write_trylock(rw) \ +#define arch_write_trylock(rw) \ ({ \ __u64 ia64_val; \ __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \ @@ -265,7 +265,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *x) (ia64_val == 0); \ }) -static inline void __raw_write_unlock(arch_rwlock_t *x) +static inline void arch_write_unlock(arch_rwlock_t *x) { barrier(); x->write_lock = 0; @@ -273,7 +273,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *x) #endif /* !ASM_SUPPORTED */ -static inline int __raw_read_trylock(arch_rwlock_t *x) +static inline int arch_read_trylock(arch_rwlock_t *x) { union { arch_rwlock_t lock; diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h index 1c76af8c8e1b..179a06489b10 100644 --- a/arch/m32r/include/asm/spinlock.h +++ b/arch/m32r/include/asm/spinlock.h @@ -140,15 +140,15 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -#define __raw_read_can_lock(x) ((int)(x)->lock > 0) +#define arch_read_can_lock(x) ((int)(x)->lock > 0) /** * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. 
*/ -#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) +#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) -static inline void __raw_read_lock(arch_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { unsigned long tmp0, tmp1; @@ -199,7 +199,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw) ); } -static inline void __raw_write_lock(arch_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { unsigned long tmp0, tmp1, tmp2; @@ -252,7 +252,7 @@ static inline void __raw_write_lock(arch_rwlock_t *rw) ); } -static inline void __raw_read_unlock(arch_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { unsigned long tmp0, tmp1; @@ -274,7 +274,7 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw) ); } -static inline void __raw_write_unlock(arch_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { unsigned long tmp0, tmp1, tmp2; @@ -298,7 +298,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *rw) ); } -static inline int __raw_read_trylock(arch_rwlock_t *lock) +static inline int arch_read_trylock(arch_rwlock_t *lock) { atomic_t *count = (atomic_t*)lock; if (atomic_dec_return(count) >= 0) @@ -307,7 +307,7 @@ static inline int __raw_read_trylock(arch_rwlock_t *lock) return 0; } -static inline int __raw_write_trylock(arch_rwlock_t *lock) +static inline int arch_write_trylock(arch_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; if (atomic_sub_and_test(RW_LOCK_BIAS, count)) @@ -316,8 +316,8 @@ static inline int __raw_write_trylock(arch_rwlock_t *lock) return 0; } -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) #define arch_spin_relax(lock) cpu_relax() #define arch_read_relax(lock) cpu_relax() diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h index 7bf27c8a3364..21ef9efbde43 100644 --- a/arch/mips/include/asm/spinlock.h +++ b/arch/mips/include/asm/spinlock.h @@ -248,21 +248,21 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -#define __raw_read_can_lock(rw) ((rw)->lock >= 0) +#define arch_read_can_lock(rw) ((rw)->lock >= 0) /* * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. */ -#define __raw_write_can_lock(rw) (!(rw)->lock) +#define arch_write_can_lock(rw) (!(rw)->lock) -static inline void __raw_read_lock(arch_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { unsigned int tmp; if (R10000_LLSC_WAR) { __asm__ __volatile__( - " .set noreorder # __raw_read_lock \n" + " .set noreorder # arch_read_lock \n" "1: ll %1, %2 \n" " bltz %1, 1b \n" " addu %1, 1 \n" @@ -275,7 +275,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw) : "memory"); } else { __asm__ __volatile__( - " .set noreorder # __raw_read_lock \n" + " .set noreorder # arch_read_lock \n" "1: ll %1, %2 \n" " bltz %1, 2f \n" " addu %1, 1 \n" @@ -301,7 +301,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw) /* Note the use of sub, not subu which will make the kernel die with an overflow exception if we ever try to unlock an rwlock that is already unlocked or is being held by a writer. 
*/ -static inline void __raw_read_unlock(arch_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { unsigned int tmp; @@ -309,7 +309,7 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw) if (R10000_LLSC_WAR) { __asm__ __volatile__( - "1: ll %1, %2 # __raw_read_unlock \n" + "1: ll %1, %2 # arch_read_unlock \n" " sub %1, 1 \n" " sc %1, %0 \n" " beqzl %1, 1b \n" @@ -318,7 +318,7 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw) : "memory"); } else { __asm__ __volatile__( - " .set noreorder # __raw_read_unlock \n" + " .set noreorder # arch_read_unlock \n" "1: ll %1, %2 \n" " sub %1, 1 \n" " sc %1, %0 \n" @@ -335,13 +335,13 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw) } } -static inline void __raw_write_lock(arch_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { unsigned int tmp; if (R10000_LLSC_WAR) { __asm__ __volatile__( - " .set noreorder # __raw_write_lock \n" + " .set noreorder # arch_write_lock \n" "1: ll %1, %2 \n" " bnez %1, 1b \n" " lui %1, 0x8000 \n" @@ -354,7 +354,7 @@ static inline void __raw_write_lock(arch_rwlock_t *rw) : "memory"); } else { __asm__ __volatile__( - " .set noreorder # __raw_write_lock \n" + " .set noreorder # arch_write_lock \n" "1: ll %1, %2 \n" " bnez %1, 2f \n" " lui %1, 0x8000 \n" @@ -377,26 +377,26 @@ static inline void __raw_write_lock(arch_rwlock_t *rw) smp_llsc_mb(); } -static inline void __raw_write_unlock(arch_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { smp_mb(); __asm__ __volatile__( - " # __raw_write_unlock \n" + " # arch_write_unlock \n" " sw $0, %0 \n" : "=m" (rw->lock) : "m" (rw->lock) : "memory"); } -static inline int __raw_read_trylock(arch_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { unsigned int tmp; int ret; if (R10000_LLSC_WAR) { __asm__ __volatile__( - " .set noreorder # __raw_read_trylock \n" + " .set noreorder # arch_read_trylock \n" " li %2, 0 \n" "1: ll %1, %3 \n" " bltz %1, 2f \n" @@ -413,7 +413,7 @@ static inline int __raw_read_trylock(arch_rwlock_t *rw) : "memory"); } else { __asm__ __volatile__( - " .set noreorder # __raw_read_trylock \n" + " .set noreorder # arch_read_trylock \n" " li %2, 0 \n" "1: ll %1, %3 \n" " bltz %1, 2f \n" @@ -433,14 +433,14 @@ static inline int __raw_read_trylock(arch_rwlock_t *rw) return ret; } -static inline int __raw_write_trylock(arch_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { unsigned int tmp; int ret; if (R10000_LLSC_WAR) { __asm__ __volatile__( - " .set noreorder # __raw_write_trylock \n" + " .set noreorder # arch_write_trylock \n" " li %2, 0 \n" "1: ll %1, %3 \n" " bnez %1, 2f \n" @@ -457,7 +457,7 @@ static inline int __raw_write_trylock(arch_rwlock_t *rw) : "memory"); } else { __asm__ __volatile__( - " .set noreorder # __raw_write_trylock \n" + " .set noreorder # arch_write_trylock \n" " li %2, 0 \n" "1: ll %1, %3 \n" " bnez %1, 2f \n" @@ -480,8 +480,8 @@ static inline int __raw_write_trylock(arch_rwlock_t *rw) return ret; } -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) #define arch_spin_relax(lock) cpu_relax() #define arch_read_relax(lock) cpu_relax() diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h index 1ff3a0a94a43..74036f436a3b 100644 --- a/arch/parisc/include/asm/spinlock.h +++ 
b/arch/parisc/include/asm/spinlock.h @@ -69,7 +69,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *x) /* Note that we have to ensure interrupts are disabled in case we're * interrupted by some other code that wants to grab the same read lock */ -static __inline__ void __raw_read_lock(arch_rwlock_t *rw) +static __inline__ void arch_read_lock(arch_rwlock_t *rw) { unsigned long flags; local_irq_save(flags); @@ -81,7 +81,7 @@ static __inline__ void __raw_read_lock(arch_rwlock_t *rw) /* Note that we have to ensure interrupts are disabled in case we're * interrupted by some other code that wants to grab the same read lock */ -static __inline__ void __raw_read_unlock(arch_rwlock_t *rw) +static __inline__ void arch_read_unlock(arch_rwlock_t *rw) { unsigned long flags; local_irq_save(flags); @@ -93,7 +93,7 @@ static __inline__ void __raw_read_unlock(arch_rwlock_t *rw) /* Note that we have to ensure interrupts are disabled in case we're * interrupted by some other code that wants to grab the same read lock */ -static __inline__ int __raw_read_trylock(arch_rwlock_t *rw) +static __inline__ int arch_read_trylock(arch_rwlock_t *rw) { unsigned long flags; retry: @@ -119,7 +119,7 @@ static __inline__ int __raw_read_trylock(arch_rwlock_t *rw) /* Note that we have to ensure interrupts are disabled in case we're * interrupted by some other code that wants to read_trylock() this lock */ -static __inline__ void __raw_write_lock(arch_rwlock_t *rw) +static __inline__ void arch_write_lock(arch_rwlock_t *rw) { unsigned long flags; retry: @@ -141,7 +141,7 @@ retry: local_irq_restore(flags); } -static __inline__ void __raw_write_unlock(arch_rwlock_t *rw) +static __inline__ void arch_write_unlock(arch_rwlock_t *rw) { rw->counter = 0; arch_spin_unlock(&rw->lock); @@ -149,7 +149,7 @@ static __inline__ void __raw_write_unlock(arch_rwlock_t *rw) /* Note that we have to ensure interrupts are disabled in case we're * interrupted by some other code that wants to read_trylock() this lock */ -static __inline__ int __raw_write_trylock(arch_rwlock_t *rw) +static __inline__ int arch_write_trylock(arch_rwlock_t *rw) { unsigned long flags; int result = 0; @@ -173,7 +173,7 @@ static __inline__ int __raw_write_trylock(arch_rwlock_t *rw) * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -static __inline__ int __raw_read_can_lock(arch_rwlock_t *rw) +static __inline__ int arch_read_can_lock(arch_rwlock_t *rw) { return rw->counter >= 0; } @@ -182,13 +182,13 @@ static __inline__ int __raw_read_can_lock(arch_rwlock_t *rw) * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. */ -static __inline__ int __raw_write_can_lock(arch_rwlock_t *rw) +static __inline__ int arch_write_can_lock(arch_rwlock_t *rw) { return !rw->counter; } -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) #define arch_spin_relax(lock) cpu_relax() #define arch_read_relax(lock) cpu_relax() diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 2fad2c07c593..764094cff681 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -166,8 +166,8 @@ extern void arch_spin_unlock_wait(arch_spinlock_t *lock); * read-locks. 
*/ -#define __raw_read_can_lock(rw) ((rw)->lock >= 0) -#define __raw_write_can_lock(rw) (!(rw)->lock) +#define arch_read_can_lock(rw) ((rw)->lock >= 0) +#define arch_write_can_lock(rw) (!(rw)->lock) #ifdef CONFIG_PPC64 #define __DO_SIGN_EXTEND "extsw %0,%0\n" @@ -181,7 +181,7 @@ extern void arch_spin_unlock_wait(arch_spinlock_t *lock); * This returns the old value in the lock + 1, * so we got a read lock if the return value is > 0. */ -static inline long arch_read_trylock(arch_rwlock_t *rw) +static inline long __arch_read_trylock(arch_rwlock_t *rw) { long tmp; @@ -205,7 +205,7 @@ static inline long arch_read_trylock(arch_rwlock_t *rw) * This returns the old value in the lock, * so we got the write lock if the return value is 0. */ -static inline long arch_write_trylock(arch_rwlock_t *rw) +static inline long __arch_write_trylock(arch_rwlock_t *rw) { long tmp, token; @@ -225,10 +225,10 @@ static inline long arch_write_trylock(arch_rwlock_t *rw) return tmp; } -static inline void __raw_read_lock(arch_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { while (1) { - if (likely(arch_read_trylock(rw) > 0)) + if (likely(__arch_read_trylock(rw) > 0)) break; do { HMT_low(); @@ -239,10 +239,10 @@ static inline void __raw_read_lock(arch_rwlock_t *rw) } } -static inline void __raw_write_lock(arch_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { while (1) { - if (likely(arch_write_trylock(rw) == 0)) + if (likely(__arch_write_trylock(rw) == 0)) break; do { HMT_low(); @@ -253,17 +253,17 @@ static inline void __raw_write_lock(arch_rwlock_t *rw) } } -static inline int __raw_read_trylock(arch_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { - return arch_read_trylock(rw) > 0; + return __arch_read_trylock(rw) > 0; } -static inline int __raw_write_trylock(arch_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { - return arch_write_trylock(rw) == 0; + return __arch_write_trylock(rw) == 0; } -static inline void __raw_read_unlock(arch_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { long tmp; @@ -280,15 +280,15 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw) : "cr0", "xer", "memory"); } -static inline void __raw_write_unlock(arch_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { __asm__ __volatile__("# write_unlock\n\t" LWSYNC_ON_SMP: : :"memory"); rw->lock = 0; } -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) #define arch_spin_relax(lock) __spin_yield(lock) #define arch_read_relax(lock) __rw_yield(lock) diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index 7f98f0e48acb..a587907d77f3 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h @@ -113,13 +113,13 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp) * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -#define __raw_read_can_lock(x) ((int)(x)->lock >= 0) +#define arch_read_can_lock(x) ((int)(x)->lock >= 0) /** * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. 
*/ -#define __raw_write_can_lock(x) ((x)->lock == 0) +#define arch_write_can_lock(x) ((x)->lock == 0) extern void _raw_read_lock_wait(arch_rwlock_t *lp); extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags); @@ -128,7 +128,7 @@ extern void _raw_write_lock_wait(arch_rwlock_t *lp); extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags); extern int _raw_write_trylock_retry(arch_rwlock_t *lp); -static inline void __raw_read_lock(arch_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { unsigned int old; old = rw->lock & 0x7fffffffU; @@ -136,7 +136,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw) _raw_read_lock_wait(rw); } -static inline void __raw_read_lock_flags(arch_rwlock_t *rw, unsigned long flags) +static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags) { unsigned int old; old = rw->lock & 0x7fffffffU; @@ -144,7 +144,7 @@ static inline void __raw_read_lock_flags(arch_rwlock_t *rw, unsigned long flags) _raw_read_lock_wait_flags(rw, flags); } -static inline void __raw_read_unlock(arch_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { unsigned int old, cmp; @@ -155,24 +155,24 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw) } while (cmp != old); } -static inline void __raw_write_lock(arch_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) _raw_write_lock_wait(rw); } -static inline void __raw_write_lock_flags(arch_rwlock_t *rw, unsigned long flags) +static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags) { if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) _raw_write_lock_wait_flags(rw, flags); } -static inline void __raw_write_unlock(arch_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { _raw_compare_and_swap(&rw->lock, 0x80000000, 0); } -static inline int __raw_read_trylock(arch_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { unsigned int old; old = rw->lock & 0x7fffffffU; @@ -181,7 +181,7 @@ static inline int __raw_read_trylock(arch_rwlock_t *rw) return _raw_read_trylock_retry(rw); } -static inline int __raw_write_trylock(arch_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)) return 1; diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c index 09fee9a1aa15..10754a375668 100644 --- a/arch/s390/lib/spinlock.c +++ b/arch/s390/lib/spinlock.c @@ -115,7 +115,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw) _raw_yield(); count = spin_retry; } - if (!__raw_read_can_lock(rw)) + if (!arch_read_can_lock(rw)) continue; old = rw->lock & 0x7fffffffU; if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) @@ -135,7 +135,7 @@ void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) _raw_yield(); count = spin_retry; } - if (!__raw_read_can_lock(rw)) + if (!arch_read_can_lock(rw)) continue; old = rw->lock & 0x7fffffffU; local_irq_disable(); @@ -151,7 +151,7 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw) int count = spin_retry; while (count-- > 0) { - if (!__raw_read_can_lock(rw)) + if (!arch_read_can_lock(rw)) continue; old = rw->lock & 0x7fffffffU; if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) @@ -170,7 +170,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw) _raw_yield(); count = spin_retry; } - if (!__raw_write_can_lock(rw)) + if (!arch_write_can_lock(rw)) continue; if 
(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) return; @@ -188,7 +188,7 @@ void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) _raw_yield(); count = spin_retry; } - if (!__raw_write_can_lock(rw)) + if (!arch_write_can_lock(rw)) continue; local_irq_disable(); if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) @@ -202,7 +202,7 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw) int count = spin_retry; while (count-- > 0) { - if (!__raw_write_can_lock(rw)) + if (!arch_write_can_lock(rw)) continue; if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) return 1; diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h index 7f3626aac869..bdc0f3b6c56a 100644 --- a/arch/sh/include/asm/spinlock.h +++ b/arch/sh/include/asm/spinlock.h @@ -100,21 +100,21 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -#define __raw_read_can_lock(x) ((x)->lock > 0) +#define arch_read_can_lock(x) ((x)->lock > 0) /** * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. */ -#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) +#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) -static inline void __raw_read_lock(arch_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { unsigned long tmp; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%1, %0 ! __raw_read_lock \n\t" + "movli.l @%1, %0 ! arch_read_lock \n\t" "cmp/pl %0 \n\t" "bf 1b \n\t" "add #-1, %0 \n\t" @@ -126,13 +126,13 @@ static inline void __raw_read_lock(arch_rwlock_t *rw) ); } -static inline void __raw_read_unlock(arch_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { unsigned long tmp; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%1, %0 ! __raw_read_unlock \n\t" + "movli.l @%1, %0 ! arch_read_unlock \n\t" "add #1, %0 \n\t" "movco.l %0, @%1 \n\t" "bf 1b \n\t" @@ -142,13 +142,13 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw) ); } -static inline void __raw_write_lock(arch_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { unsigned long tmp; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%1, %0 ! __raw_write_lock \n\t" + "movli.l @%1, %0 ! arch_write_lock \n\t" "cmp/hs %2, %0 \n\t" "bf 1b \n\t" "sub %2, %0 \n\t" @@ -160,23 +160,23 @@ static inline void __raw_write_lock(arch_rwlock_t *rw) ); } -static inline void __raw_write_unlock(arch_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { __asm__ __volatile__ ( - "mov.l %1, @%0 ! __raw_write_unlock \n\t" + "mov.l %1, @%0 ! arch_write_unlock \n\t" : : "r" (&rw->lock), "r" (RW_LOCK_BIAS) : "t", "memory" ); } -static inline int __raw_read_trylock(arch_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { unsigned long tmp, oldval; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%2, %0 ! __raw_read_trylock \n\t" + "movli.l @%2, %0 ! arch_read_trylock \n\t" "mov %0, %1 \n\t" "cmp/pl %0 \n\t" "bf 2f \n\t" @@ -193,13 +193,13 @@ static inline int __raw_read_trylock(arch_rwlock_t *rw) return (oldval > 0); } -static inline int __raw_write_trylock(arch_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { unsigned long tmp, oldval; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%2, %0 ! __raw_write_trylock \n\t" + "movli.l @%2, %0 ! 
arch_write_trylock \n\t" "mov %0, %1 \n\t" "cmp/hs %3, %0 \n\t" "bf 2f \n\t" @@ -216,8 +216,8 @@ static inline int __raw_write_trylock(arch_rwlock_t *rw) return (oldval > (RW_LOCK_BIAS - 1)); } -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) #define arch_spin_relax(lock) cpu_relax() #define arch_read_relax(lock) cpu_relax() diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h index 06d37e588fde..7f9b9dba38a6 100644 --- a/arch/sparc/include/asm/spinlock_32.h +++ b/arch/sparc/include/asm/spinlock_32.h @@ -76,7 +76,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) * * Unfortunately this scheme limits us to ~16,000,000 cpus. */ -static inline void arch_read_lock(arch_rwlock_t *rw) +static inline void __arch_read_lock(arch_rwlock_t *rw) { register arch_rwlock_t *lp asm("g1"); lp = rw; @@ -89,14 +89,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw) : "g2", "g4", "memory", "cc"); } -#define __raw_read_lock(lock) \ +#define arch_read_lock(lock) \ do { unsigned long flags; \ local_irq_save(flags); \ - arch_read_lock(lock); \ + __arch_read_lock(lock); \ local_irq_restore(flags); \ } while(0) -static inline void arch_read_unlock(arch_rwlock_t *rw) +static inline void __arch_read_unlock(arch_rwlock_t *rw) { register arch_rwlock_t *lp asm("g1"); lp = rw; @@ -109,14 +109,14 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) : "g2", "g4", "memory", "cc"); } -#define __raw_read_unlock(lock) \ +#define arch_read_unlock(lock) \ do { unsigned long flags; \ local_irq_save(flags); \ - arch_read_unlock(lock); \ + __arch_read_unlock(lock); \ local_irq_restore(flags); \ } while(0) -static inline void __raw_write_lock(arch_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { register arch_rwlock_t *lp asm("g1"); lp = rw; @@ -130,7 +130,7 @@ static inline void __raw_write_lock(arch_rwlock_t *rw) *(volatile __u32 *)&lp->lock = ~0U; } -static inline int __raw_write_trylock(arch_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { unsigned int val; @@ -150,7 +150,7 @@ static inline int __raw_write_trylock(arch_rwlock_t *rw) return (val == 0); } -static inline int arch_read_trylock(arch_rwlock_t *rw) +static inline int __arch_read_trylock(arch_rwlock_t *rw) { register arch_rwlock_t *lp asm("g1"); register int res asm("o0"); @@ -165,27 +165,27 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) return res; } -#define __raw_read_trylock(lock) \ +#define arch_read_trylock(lock) \ ({ unsigned long flags; \ int res; \ local_irq_save(flags); \ - res = arch_read_trylock(lock); \ + res = __arch_read_trylock(lock); \ local_irq_restore(flags); \ res; \ }) -#define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0) +#define arch_write_unlock(rw) do { (rw)->lock = 0; } while(0) #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) -#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw) -#define __raw_write_lock_flags(rw, flags) __raw_write_lock(rw) +#define arch_read_lock_flags(rw, flags) arch_read_lock(rw) +#define arch_write_lock_flags(rw, flags) arch_write_lock(rw) #define arch_spin_relax(lock) cpu_relax() #define arch_read_relax(lock) cpu_relax() #define arch_write_relax(lock) cpu_relax() -#define __raw_read_can_lock(rw) (!((rw)->lock & 0xff)) -#define __raw_write_can_lock(rw) 
(!(rw)->lock) +#define arch_read_can_lock(rw) (!((rw)->lock & 0xff)) +#define arch_write_can_lock(rw) (!(rw)->lock) #endif /* !(__ASSEMBLY__) */ diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h index 2b22d7f2c2fb..073936a8b275 100644 --- a/arch/sparc/include/asm/spinlock_64.h +++ b/arch/sparc/include/asm/spinlock_64.h @@ -210,17 +210,17 @@ static int inline arch_write_trylock(arch_rwlock_t *lock) return result; } -#define __raw_read_lock(p) arch_read_lock(p) -#define __raw_read_lock_flags(p, f) arch_read_lock(p) -#define __raw_read_trylock(p) arch_read_trylock(p) -#define __raw_read_unlock(p) arch_read_unlock(p) -#define __raw_write_lock(p) arch_write_lock(p) -#define __raw_write_lock_flags(p, f) arch_write_lock(p) -#define __raw_write_unlock(p) arch_write_unlock(p) -#define __raw_write_trylock(p) arch_write_trylock(p) - -#define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) -#define __raw_write_can_lock(rw) (!(rw)->lock) +#define arch_read_lock(p) arch_read_lock(p) +#define arch_read_lock_flags(p, f) arch_read_lock(p) +#define arch_read_trylock(p) arch_read_trylock(p) +#define arch_read_unlock(p) arch_read_unlock(p) +#define arch_write_lock(p) arch_write_lock(p) +#define arch_write_lock_flags(p, f) arch_write_lock(p) +#define arch_write_unlock(p) arch_write_unlock(p) +#define arch_write_trylock(p) arch_write_trylock(p) + +#define arch_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) +#define arch_write_can_lock(rw) (!(rw)->lock) #define arch_spin_relax(lock) cpu_relax() #define arch_read_relax(lock) cpu_relax() diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index 99cb86e843a0..3089f70c0c52 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h @@ -232,7 +232,7 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -static inline int __raw_read_can_lock(arch_rwlock_t *lock) +static inline int arch_read_can_lock(arch_rwlock_t *lock) { return (int)(lock)->lock > 0; } @@ -241,12 +241,12 @@ static inline int __raw_read_can_lock(arch_rwlock_t *lock) * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. 
*/ -static inline int __raw_write_can_lock(arch_rwlock_t *lock) +static inline int arch_write_can_lock(arch_rwlock_t *lock) { return (lock)->lock == RW_LOCK_BIAS; } -static inline void __raw_read_lock(arch_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" "jns 1f\n" @@ -255,7 +255,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw) ::LOCK_PTR_REG (rw) : "memory"); } -static inline void __raw_write_lock(arch_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t" "jz 1f\n" @@ -264,7 +264,7 @@ static inline void __raw_write_lock(arch_rwlock_t *rw) ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory"); } -static inline int __raw_read_trylock(arch_rwlock_t *lock) +static inline int arch_read_trylock(arch_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; @@ -274,7 +274,7 @@ static inline int __raw_read_trylock(arch_rwlock_t *lock) return 0; } -static inline int __raw_write_trylock(arch_rwlock_t *lock) +static inline int arch_write_trylock(arch_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; @@ -284,19 +284,19 @@ static inline int __raw_write_trylock(arch_rwlock_t *lock) return 0; } -static inline void __raw_read_unlock(arch_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory"); } -static inline void __raw_write_unlock(arch_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { asm volatile(LOCK_PREFIX "addl %1, %0" : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory"); } -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) #define arch_spin_relax(lock) cpu_relax() #define arch_read_relax(lock) cpu_relax() diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h index 73785b0bd6b9..5725b034defe 100644 --- a/include/linux/rwlock.h +++ b/include/linux/rwlock.h @@ -38,20 +38,20 @@ do { \ extern int _raw_write_trylock(rwlock_t *lock); extern void _raw_write_unlock(rwlock_t *lock); #else -# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock) +# define _raw_read_lock(rwlock) arch_read_lock(&(rwlock)->raw_lock) # define _raw_read_lock_flags(lock, flags) \ - __raw_read_lock_flags(&(lock)->raw_lock, *(flags)) -# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock) -# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock) -# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock) + arch_read_lock_flags(&(lock)->raw_lock, *(flags)) +# define _raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock) +# define _raw_read_unlock(rwlock) arch_read_unlock(&(rwlock)->raw_lock) +# define _raw_write_lock(rwlock) arch_write_lock(&(rwlock)->raw_lock) # define _raw_write_lock_flags(lock, flags) \ - __raw_write_lock_flags(&(lock)->raw_lock, *(flags)) -# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock) -# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock) + arch_write_lock_flags(&(lock)->raw_lock, *(flags)) +# define _raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock) +# define _raw_write_unlock(rwlock) arch_write_unlock(&(rwlock)->raw_lock) #endif -#define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock) -#define 
write_can_lock(rwlock) __raw_write_can_lock(&(rwlock)->raw_lock) +#define read_can_lock(rwlock) arch_read_can_lock(&(rwlock)->raw_lock) +#define write_can_lock(rwlock) arch_write_can_lock(&(rwlock)->raw_lock) /* * Define the various rw_lock methods. Note we define these diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h index 1d3bcc3cf7c6..b14f6a91e19f 100644 --- a/include/linux/spinlock_up.h +++ b/include/linux/spinlock_up.h @@ -49,12 +49,12 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) /* * Read-write spinlocks. No debug version. */ -#define __raw_read_lock(lock) do { (void)(lock); } while (0) -#define __raw_write_lock(lock) do { (void)(lock); } while (0) -#define __raw_read_trylock(lock) ({ (void)(lock); 1; }) -#define __raw_write_trylock(lock) ({ (void)(lock); 1; }) -#define __raw_read_unlock(lock) do { (void)(lock); } while (0) -#define __raw_write_unlock(lock) do { (void)(lock); } while (0) +#define arch_read_lock(lock) do { (void)(lock); } while (0) +#define arch_write_lock(lock) do { (void)(lock); } while (0) +#define arch_read_trylock(lock) ({ (void)(lock); 1; }) +#define arch_write_trylock(lock) ({ (void)(lock); 1; }) +#define arch_read_unlock(lock) do { (void)(lock); } while (0) +#define arch_write_unlock(lock) do { (void)(lock); } while (0) #else /* DEBUG_SPINLOCK */ #define arch_spin_is_locked(lock) ((void)(lock), 0) @@ -67,8 +67,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) #define arch_spin_is_contended(lock) (((void)(lock), 0)) -#define __raw_read_can_lock(lock) (((void)(lock), 1)) -#define __raw_write_can_lock(lock) (((void)(lock), 1)) +#define arch_read_can_lock(lock) (((void)(lock), 1)) +#define arch_write_can_lock(lock) (((void)(lock), 1)) #define arch_spin_unlock_wait(lock) \ do { cpu_relax(); } while (arch_spin_is_locked(lock)) diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c index 3f72f10d9cb0..0cea0bf6114e 100644 --- a/lib/spinlock_debug.c +++ b/lib/spinlock_debug.c @@ -176,7 +176,7 @@ static void __read_lock_debug(rwlock_t *lock) for (;;) { for (i = 0; i < loops; i++) { - if (__raw_read_trylock(&lock->raw_lock)) + if (arch_read_trylock(&lock->raw_lock)) return; __delay(1); } @@ -196,12 +196,12 @@ static void __read_lock_debug(rwlock_t *lock) void _raw_read_lock(rwlock_t *lock) { RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); - __raw_read_lock(&lock->raw_lock); + arch_read_lock(&lock->raw_lock); } int _raw_read_trylock(rwlock_t *lock) { - int ret = __raw_read_trylock(&lock->raw_lock); + int ret = arch_read_trylock(&lock->raw_lock); #ifndef CONFIG_SMP /* @@ -215,7 +215,7 @@ int _raw_read_trylock(rwlock_t *lock) void _raw_read_unlock(rwlock_t *lock) { RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); - __raw_read_unlock(&lock->raw_lock); + arch_read_unlock(&lock->raw_lock); } static inline void debug_write_lock_before(rwlock_t *lock) @@ -251,7 +251,7 @@ static void __write_lock_debug(rwlock_t *lock) for (;;) { for (i = 0; i < loops; i++) { - if (__raw_write_trylock(&lock->raw_lock)) + if (arch_write_trylock(&lock->raw_lock)) return; __delay(1); } @@ -271,13 +271,13 @@ static void __write_lock_debug(rwlock_t *lock) void _raw_write_lock(rwlock_t *lock) { debug_write_lock_before(lock); - __raw_write_lock(&lock->raw_lock); + arch_write_lock(&lock->raw_lock); debug_write_lock_after(lock); } int _raw_write_trylock(rwlock_t *lock) { - int ret = __raw_write_trylock(&lock->raw_lock); + int ret = arch_write_trylock(&lock->raw_lock); if (ret) debug_write_lock_after(lock); @@ -293,5 +293,5 
@@ int _raw_write_trylock(rwlock_t *lock) void _raw_write_unlock(rwlock_t *lock) { debug_write_unlock(lock); - __raw_write_unlock(&lock->raw_lock); + arch_write_unlock(&lock->raw_lock); } -- cgit v1.2.3 From c2f21ce2e31286a0a32f8da0a7856e9ca1122ef3 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 2 Dec 2009 20:02:59 +0100 Subject: locking: Implement new raw_spinlock Now that the raw_spin name space is freed up, we can implement raw_spinlock and the related functions which are used to annotate the locks which are not converted to sleeping spinlocks in preempt-rt. A side effect is that only such locks can be used with the low level lock fsunctions which circumvent lockdep. For !rt spin_* functions are mapped to the raw_spin* implementations. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Acked-by: Ingo Molnar --- include/linux/spinlock.h | 258 +++++++++++++++++++++++++++++---------- include/linux/spinlock_api_smp.h | 51 ++++---- include/linux/spinlock_api_up.h | 2 +- include/linux/spinlock_types.h | 49 ++++++-- kernel/mutex-debug.h | 12 +- kernel/sched.c | 2 +- kernel/spinlock.c | 34 +++--- lib/spinlock_debug.c | 24 ++-- 8 files changed, 297 insertions(+), 135 deletions(-) (limited to 'include') diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 53bc2213b414..ef5a55d96b9b 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -80,7 +80,7 @@ #include /* - * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them): + * Pull the arch_spin*() functions/declarations (UP-nondebug doesnt need them): */ #ifdef CONFIG_SMP # include @@ -89,30 +89,30 @@ #endif #ifdef CONFIG_DEBUG_SPINLOCK - extern void __spin_lock_init(spinlock_t *lock, const char *name, - struct lock_class_key *key); -# define spin_lock_init(lock) \ + extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, + struct lock_class_key *key); +# define raw_spin_lock_init(lock) \ do { \ static struct lock_class_key __key; \ \ - __spin_lock_init((lock), #lock, &__key); \ + __raw_spin_lock_init((lock), #lock, &__key); \ } while (0) #else -# define spin_lock_init(lock) \ - do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0) +# define raw_spin_lock_init(lock) \ + do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0) #endif -#define spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock) +#define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock) #ifdef CONFIG_GENERIC_LOCKBREAK -#define spin_is_contended(lock) ((lock)->break_lock) +#define raw_spin_is_contended(lock) ((lock)->break_lock) #else #ifdef arch_spin_is_contended -#define spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock) +#define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock) #else -#define spin_is_contended(lock) (((void)(lock), 0)) +#define raw_spin_is_contended(lock) (((void)(lock), 0)) #endif /*arch_spin_is_contended*/ #endif @@ -122,22 +122,37 @@ static inline void smp_mb__after_lock(void) { smp_mb(); } #endif /** - * spin_unlock_wait - wait until the spinlock gets unlocked + * raw_spin_unlock_wait - wait until the spinlock gets unlocked * @lock: the spinlock in question. 
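/*
 * A minimal usage sketch of the two lock flavours this patch introduces,
 * assuming the !rt mapping described in the commit message: raw_spinlock_t
 * stays a true spinning lock even on preempt-rt, while spinlock_t keeps the
 * familiar spin_* API, which on !rt is simply forwarded to the raw_spin_*
 * implementation.  The lock names and the example function below are
 * hypothetical; the API calls are the ones defined by this patch.
 */
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(hw_lock);    /* must never become a sleeping lock */
static DEFINE_SPINLOCK(data_lock);      /* ordinary lock, convertible on -rt */

static void example_update(void)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&hw_lock, flags);
        /* low-level critical section, e.g. hardware register access */
        raw_spin_unlock_irqrestore(&hw_lock, flags);

        spin_lock(&data_lock);
        /* ordinary critical section */
        spin_unlock(&data_lock);
}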
*/ -#define spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock) +#define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock) #ifdef CONFIG_DEBUG_SPINLOCK - extern void _raw_spin_lock(spinlock_t *lock); + extern void _raw_spin_lock(raw_spinlock_t *lock); #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) - extern int _raw_spin_trylock(spinlock_t *lock); - extern void _raw_spin_unlock(spinlock_t *lock); + extern int _raw_spin_trylock(raw_spinlock_t *lock); + extern void _raw_spin_unlock(raw_spinlock_t *lock); #else -# define _raw_spin_lock(lock) arch_spin_lock(&(lock)->raw_lock) -# define _raw_spin_lock_flags(lock, flags) \ - arch_spin_lock_flags(&(lock)->raw_lock, *(flags)) -# define _raw_spin_trylock(lock) arch_spin_trylock(&(lock)->raw_lock) -# define _raw_spin_unlock(lock) arch_spin_unlock(&(lock)->raw_lock) +static inline void _raw_spin_lock(raw_spinlock_t *lock) +{ + arch_spin_lock(&lock->raw_lock); +} + +static inline void +_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) +{ + arch_spin_lock_flags(&lock->raw_lock, *flags); +} + +static inline int _raw_spin_trylock(raw_spinlock_t *lock) +{ + return arch_spin_trylock(&(lock)->raw_lock); +} + +static inline void _raw_spin_unlock(raw_spinlock_t *lock) +{ + arch_spin_unlock(&lock->raw_lock); +} #endif /* @@ -146,38 +161,38 @@ static inline void smp_mb__after_lock(void) { smp_mb(); } * various methods are defined as nops in the case they are not * required. */ -#define spin_trylock(lock) __cond_lock(lock, _spin_trylock(lock)) +#define raw_spin_trylock(lock) __cond_lock(lock, _spin_trylock(lock)) -#define spin_lock(lock) _spin_lock(lock) +#define raw_spin_lock(lock) _spin_lock(lock) #ifdef CONFIG_DEBUG_LOCK_ALLOC -# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass) -# define spin_lock_nest_lock(lock, nest_lock) \ +# define raw_spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass) +# define raw_spin_lock_nest_lock(lock, nest_lock) \ do { \ typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\ _spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ } while (0) #else -# define spin_lock_nested(lock, subclass) _spin_lock(lock) -# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock) +# define raw_spin_lock_nested(lock, subclass) _spin_lock(lock) +# define raw_spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock) #endif #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) -#define spin_lock_irqsave(lock, flags) \ +#define raw_spin_lock_irqsave(lock, flags) \ do { \ typecheck(unsigned long, flags); \ flags = _spin_lock_irqsave(lock); \ } while (0) #ifdef CONFIG_DEBUG_LOCK_ALLOC -#define spin_lock_irqsave_nested(lock, flags, subclass) \ +#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ do { \ typecheck(unsigned long, flags); \ flags = _spin_lock_irqsave_nested(lock, subclass); \ } while (0) #else -#define spin_lock_irqsave_nested(lock, flags, subclass) \ +#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ do { \ typecheck(unsigned long, flags); \ flags = _spin_lock_irqsave(lock); \ @@ -186,45 +201,178 @@ static inline void smp_mb__after_lock(void) { smp_mb(); } #else -#define spin_lock_irqsave(lock, flags) \ +#define raw_spin_lock_irqsave(lock, flags) \ do { \ typecheck(unsigned long, flags); \ _spin_lock_irqsave(lock, flags); \ } while (0) -#define spin_lock_irqsave_nested(lock, flags, subclass) \ - spin_lock_irqsave(lock, flags) +#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ + raw_spin_lock_irqsave(lock, 
flags) #endif -#define spin_lock_irq(lock) _spin_lock_irq(lock) -#define spin_lock_bh(lock) _spin_lock_bh(lock) -#define spin_unlock(lock) _spin_unlock(lock) -#define spin_unlock_irq(lock) _spin_unlock_irq(lock) +#define raw_spin_lock_irq(lock) _spin_lock_irq(lock) +#define raw_spin_lock_bh(lock) _spin_lock_bh(lock) +#define raw_spin_unlock(lock) _spin_unlock(lock) +#define raw_spin_unlock_irq(lock) _spin_unlock_irq(lock) -#define spin_unlock_irqrestore(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ +#define raw_spin_unlock_irqrestore(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ _spin_unlock_irqrestore(lock, flags); \ } while (0) -#define spin_unlock_bh(lock) _spin_unlock_bh(lock) +#define raw_spin_unlock_bh(lock) _spin_unlock_bh(lock) -#define spin_trylock_bh(lock) __cond_lock(lock, _spin_trylock_bh(lock)) +#define raw_spin_trylock_bh(lock) __cond_lock(lock, _spin_trylock_bh(lock)) -#define spin_trylock_irq(lock) \ +#define raw_spin_trylock_irq(lock) \ ({ \ local_irq_disable(); \ - spin_trylock(lock) ? \ + raw_spin_trylock(lock) ? \ 1 : ({ local_irq_enable(); 0; }); \ }) -#define spin_trylock_irqsave(lock, flags) \ +#define raw_spin_trylock_irqsave(lock, flags) \ ({ \ local_irq_save(flags); \ - spin_trylock(lock) ? \ + raw_spin_trylock(lock) ? \ 1 : ({ local_irq_restore(flags); 0; }); \ }) +/** + * raw_spin_can_lock - would raw_spin_trylock() succeed? + * @lock: the spinlock in question. + */ +#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock)) + +/* Include rwlock functions */ +#include + +/* + * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: + */ +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) +# include +#else +# include +#endif + +/* + * Map the spin_lock functions to the raw variants for PREEMPT_RT=n + */ + +static inline raw_spinlock_t *spinlock_check(spinlock_t *lock) +{ + return &lock->rlock; +} + +#define spin_lock_init(_lock) \ +do { \ + spinlock_check(_lock); \ + raw_spin_lock_init(&(_lock)->rlock); \ +} while (0) + +static inline void spin_lock(spinlock_t *lock) +{ + raw_spin_lock(&lock->rlock); +} + +static inline void spin_lock_bh(spinlock_t *lock) +{ + raw_spin_lock_bh(&lock->rlock); +} + +static inline int spin_trylock(spinlock_t *lock) +{ + return raw_spin_trylock(&lock->rlock); +} + +#define spin_lock_nested(lock, subclass) \ +do { \ + raw_spin_lock_nested(spinlock_check(lock), subclass); \ +} while (0) + +#define spin_lock_nest_lock(lock, nest_lock) \ +do { \ + raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \ +} while (0) + +static inline void spin_lock_irq(spinlock_t *lock) +{ + raw_spin_lock_irq(&lock->rlock); +} + +#define spin_lock_irqsave(lock, flags) \ +do { \ + raw_spin_lock_irqsave(spinlock_check(lock), flags); \ +} while (0) + +#define spin_lock_irqsave_nested(lock, flags, subclass) \ +do { \ + raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \ +} while (0) + +static inline void spin_unlock(spinlock_t *lock) +{ + raw_spin_unlock(&lock->rlock); +} + +static inline void spin_unlock_bh(spinlock_t *lock) +{ + raw_spin_unlock_bh(&lock->rlock); +} + +static inline void spin_unlock_irq(spinlock_t *lock) +{ + raw_spin_unlock_irq(&lock->rlock); +} + +static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) +{ + raw_spin_unlock_irqrestore(&lock->rlock, flags); +} + +static inline int spin_trylock_bh(spinlock_t *lock) +{ + return raw_spin_trylock_bh(&lock->rlock); +} + +static inline int spin_trylock_irq(spinlock_t *lock) +{ + return 
raw_spin_trylock_irq(&lock->rlock); +} + +#define spin_trylock_irqsave(lock, flags) \ +({ \ + raw_spin_trylock_irqsave(spinlock_check(lock), flags); \ +}) + +static inline void spin_unlock_wait(spinlock_t *lock) +{ + raw_spin_unlock_wait(&lock->rlock); +} + +static inline int spin_is_locked(spinlock_t *lock) +{ + return raw_spin_is_locked(&lock->rlock); +} + +static inline int spin_is_contended(spinlock_t *lock) +{ + return raw_spin_is_contended(&lock->rlock); +} + +static inline int spin_can_lock(spinlock_t *lock) +{ + return raw_spin_can_lock(&lock->rlock); +} + +static inline void assert_spin_locked(spinlock_t *lock) +{ + assert_raw_spin_locked(&lock->rlock); +} + /* * Pull the atomic_t declaration: * (asm-mips/atomic.h needs above definitions) @@ -242,22 +390,4 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); #define atomic_dec_and_lock(atomic, lock) \ __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) -/** - * spin_can_lock - would spin_trylock() succeed? - * @lock: the spinlock in question. - */ -#define spin_can_lock(lock) (!spin_is_locked(lock)) - -/* Include rwlock functions */ -#include - -/* - * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: - */ -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) -# include -#else -# include -#endif - #endif /* __LINUX_SPINLOCK_H */ diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index a2b2c9df91de..eabe5068d138 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -17,26 +17,29 @@ int in_lock_functions(unsigned long addr); -#define assert_spin_locked(x) BUG_ON(!spin_is_locked(x)) +#define assert_raw_spin_locked(x) BUG_ON(!raw_spin_is_locked(x)) -void __lockfunc _spin_lock(spinlock_t *lock) __acquires(lock); -void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) +void __lockfunc _spin_lock(raw_spinlock_t *lock) __acquires(lock); +void __lockfunc _spin_lock_nested(raw_spinlock_t *lock, int subclass) __acquires(lock); -void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map) +void __lockfunc +_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map) __acquires(lock); -void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock); -void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(lock); +void __lockfunc _spin_lock_bh(raw_spinlock_t *lock) __acquires(lock); +void __lockfunc _spin_lock_irq(raw_spinlock_t *lock) __acquires(lock); -unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) +unsigned long __lockfunc _spin_lock_irqsave(raw_spinlock_t *lock) __acquires(lock); -unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass) +unsigned long __lockfunc +_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass) __acquires(lock); -int __lockfunc _spin_trylock(spinlock_t *lock); -int __lockfunc _spin_trylock_bh(spinlock_t *lock); -void __lockfunc _spin_unlock(spinlock_t *lock) __releases(lock); -void __lockfunc _spin_unlock_bh(spinlock_t *lock) __releases(lock); -void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(lock); -void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) +int __lockfunc _spin_trylock(raw_spinlock_t *lock); +int __lockfunc _spin_trylock_bh(raw_spinlock_t *lock); +void __lockfunc _spin_unlock(raw_spinlock_t *lock) __releases(lock); +void __lockfunc _spin_unlock_bh(raw_spinlock_t *lock) __releases(lock); +void __lockfunc _spin_unlock_irq(raw_spinlock_t *lock) __releases(lock); +void __lockfunc 
+_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) __releases(lock); #ifdef CONFIG_INLINE_SPIN_LOCK @@ -79,7 +82,7 @@ void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) #define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags) #endif -static inline int __spin_trylock(spinlock_t *lock) +static inline int __spin_trylock(raw_spinlock_t *lock) { preempt_disable(); if (_raw_spin_trylock(lock)) { @@ -97,7 +100,7 @@ static inline int __spin_trylock(spinlock_t *lock) */ #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) -static inline unsigned long __spin_lock_irqsave(spinlock_t *lock) +static inline unsigned long __spin_lock_irqsave(raw_spinlock_t *lock) { unsigned long flags; @@ -117,7 +120,7 @@ static inline unsigned long __spin_lock_irqsave(spinlock_t *lock) return flags; } -static inline void __spin_lock_irq(spinlock_t *lock) +static inline void __spin_lock_irq(raw_spinlock_t *lock) { local_irq_disable(); preempt_disable(); @@ -125,7 +128,7 @@ static inline void __spin_lock_irq(spinlock_t *lock) LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); } -static inline void __spin_lock_bh(spinlock_t *lock) +static inline void __spin_lock_bh(raw_spinlock_t *lock) { local_bh_disable(); preempt_disable(); @@ -133,7 +136,7 @@ static inline void __spin_lock_bh(spinlock_t *lock) LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); } -static inline void __spin_lock(spinlock_t *lock) +static inline void __spin_lock(raw_spinlock_t *lock) { preempt_disable(); spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); @@ -142,14 +145,14 @@ static inline void __spin_lock(spinlock_t *lock) #endif /* CONFIG_PREEMPT */ -static inline void __spin_unlock(spinlock_t *lock) +static inline void __spin_unlock(raw_spinlock_t *lock) { spin_release(&lock->dep_map, 1, _RET_IP_); _raw_spin_unlock(lock); preempt_enable(); } -static inline void __spin_unlock_irqrestore(spinlock_t *lock, +static inline void __spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) { spin_release(&lock->dep_map, 1, _RET_IP_); @@ -158,7 +161,7 @@ static inline void __spin_unlock_irqrestore(spinlock_t *lock, preempt_enable(); } -static inline void __spin_unlock_irq(spinlock_t *lock) +static inline void __spin_unlock_irq(raw_spinlock_t *lock) { spin_release(&lock->dep_map, 1, _RET_IP_); _raw_spin_unlock(lock); @@ -166,7 +169,7 @@ static inline void __spin_unlock_irq(spinlock_t *lock) preempt_enable(); } -static inline void __spin_unlock_bh(spinlock_t *lock) +static inline void __spin_unlock_bh(raw_spinlock_t *lock) { spin_release(&lock->dep_map, 1, _RET_IP_); _raw_spin_unlock(lock); @@ -174,7 +177,7 @@ static inline void __spin_unlock_bh(spinlock_t *lock) local_bh_enable_ip((unsigned long)__builtin_return_address(0)); } -static inline int __spin_trylock_bh(spinlock_t *lock) +static inline int __spin_trylock_bh(raw_spinlock_t *lock) { local_bh_disable(); preempt_disable(); diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h index 04e1d3164576..3a9e27adecf9 100644 --- a/include/linux/spinlock_api_up.h +++ b/include/linux/spinlock_api_up.h @@ -16,7 +16,7 @@ #define in_lock_functions(ADDR) 0 -#define assert_spin_locked(lock) do { (void)(lock); } while (0) +#define assert_raw_spin_locked(lock) do { (void)(lock); } while (0) /* * In the UP-nondebug case there's no real locking going on, so the diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index 7dadce303ebf..851b7783720d 100644 --- 
a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -17,7 +17,7 @@ #include -typedef struct { +typedef struct raw_spinlock { arch_spinlock_t raw_lock; #ifdef CONFIG_GENERIC_LOCKBREAK unsigned int break_lock; @@ -29,7 +29,7 @@ typedef struct { #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif -} spinlock_t; +} raw_spinlock_t; #define SPINLOCK_MAGIC 0xdead4ead @@ -42,18 +42,45 @@ typedef struct { #endif #ifdef CONFIG_DEBUG_SPINLOCK -# define __SPIN_LOCK_UNLOCKED(lockname) \ - (spinlock_t) { .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ - .magic = SPINLOCK_MAGIC, \ - .owner = SPINLOCK_OWNER_INIT, \ - .owner_cpu = -1, \ - SPIN_DEP_MAP_INIT(lockname) } +# define SPIN_DEBUG_INIT(lockname) \ + .magic = SPINLOCK_MAGIC, \ + .owner_cpu = -1, \ + .owner = SPINLOCK_OWNER_INIT, #else -# define __SPIN_LOCK_UNLOCKED(lockname) \ - (spinlock_t) { .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ - SPIN_DEP_MAP_INIT(lockname) } +# define SPIN_DEBUG_INIT(lockname) #endif +#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ + { \ + .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ + SPIN_DEBUG_INIT(lockname) \ + SPIN_DEP_MAP_INIT(lockname) } + +#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ + (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) + +#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) + +typedef struct spinlock { + union { + struct raw_spinlock rlock; + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) + struct { + u8 __padding[LOCK_PADSIZE]; + struct lockdep_map dep_map; + }; +#endif + }; +} spinlock_t; + +#define __SPIN_LOCK_INITIALIZER(lockname) \ + { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } + +#define __SPIN_LOCK_UNLOCKED(lockname) \ + (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) + /* * SPIN_LOCK_UNLOCKED defeats lockdep state tracking and is hence * deprecated. diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h index 7bebbd15b342..57d527a16f9d 100644 --- a/kernel/mutex-debug.h +++ b/kernel/mutex-debug.h @@ -43,13 +43,13 @@ static inline void mutex_clear_owner(struct mutex *lock) \ DEBUG_LOCKS_WARN_ON(in_interrupt()); \ local_irq_save(flags); \ - arch_spin_lock(&(lock)->raw_lock); \ + arch_spin_lock(&(lock)->rlock.raw_lock);\ DEBUG_LOCKS_WARN_ON(l->magic != l); \ } while (0) -#define spin_unlock_mutex(lock, flags) \ - do { \ - arch_spin_unlock(&(lock)->raw_lock); \ - local_irq_restore(flags); \ - preempt_check_resched(); \ +#define spin_unlock_mutex(lock, flags) \ + do { \ + arch_spin_unlock(&(lock)->rlock.raw_lock); \ + local_irq_restore(flags); \ + preempt_check_resched(); \ } while (0) diff --git a/kernel/sched.c b/kernel/sched.c index fd05861b2111..e6acf2d7b753 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -884,7 +884,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) { #ifdef CONFIG_DEBUG_SPINLOCK /* this is a valid case when another task releases the spinlock */ - rq->lock.owner = current; + rq->lock.rlock.owner = current; #endif /* * If we are tracking spinlock dependencies then we have to diff --git a/kernel/spinlock.c b/kernel/spinlock.c index fbb5f8b78357..54eb7dd3c608 100644 --- a/kernel/spinlock.c +++ b/kernel/spinlock.c @@ -32,6 +32,8 @@ * include/linux/spinlock_api_smp.h */ #else +#define raw_read_can_lock(l) read_can_lock(l) +#define raw_write_can_lock(l) write_can_lock(l) /* * We build the __lock_function inlines here. 
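/*
 * A hypothetical compile-time check (not taken from this series) that spells
 * out the invariant behind the padded union in spinlock_t above, assuming
 * CONFIG_DEBUG_LOCK_ALLOC=y: the anonymous struct places spinlock_t's dep_map
 * at exactly the offset dep_map has inside the embedded raw_spinlock, so
 * &lock->dep_map and &lock->rlock.dep_map alias and lockdep annotations keep
 * working unchanged on the wrapper type.
 */
static inline void spinlock_layout_check(void)
{
        BUILD_BUG_ON(offsetof(spinlock_t, dep_map) !=
                     offsetof(struct raw_spinlock, dep_map));
}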
They are too large for * inlining all over the place, but here is only one user per function @@ -52,7 +54,7 @@ void __lockfunc __##op##_lock(locktype##_t *lock) \ \ if (!(lock)->break_lock) \ (lock)->break_lock = 1; \ - while (!op##_can_lock(lock) && (lock)->break_lock) \ + while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\ arch_##op##_relax(&lock->raw_lock); \ } \ (lock)->break_lock = 0; \ @@ -72,7 +74,7 @@ unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock) \ \ if (!(lock)->break_lock) \ (lock)->break_lock = 1; \ - while (!op##_can_lock(lock) && (lock)->break_lock) \ + while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\ arch_##op##_relax(&lock->raw_lock); \ } \ (lock)->break_lock = 0; \ @@ -107,14 +109,14 @@ void __lockfunc __##op##_lock_bh(locktype##_t *lock) \ * __[spin|read|write]_lock_irqsave() * __[spin|read|write]_lock_bh() */ -BUILD_LOCK_OPS(spin, spinlock); +BUILD_LOCK_OPS(spin, raw_spinlock); BUILD_LOCK_OPS(read, rwlock); BUILD_LOCK_OPS(write, rwlock); #endif #ifndef CONFIG_INLINE_SPIN_TRYLOCK -int __lockfunc _spin_trylock(spinlock_t *lock) +int __lockfunc _spin_trylock(raw_spinlock_t *lock) { return __spin_trylock(lock); } @@ -122,7 +124,7 @@ EXPORT_SYMBOL(_spin_trylock); #endif #ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH -int __lockfunc _spin_trylock_bh(spinlock_t *lock) +int __lockfunc _spin_trylock_bh(raw_spinlock_t *lock) { return __spin_trylock_bh(lock); } @@ -130,7 +132,7 @@ EXPORT_SYMBOL(_spin_trylock_bh); #endif #ifndef CONFIG_INLINE_SPIN_LOCK -void __lockfunc _spin_lock(spinlock_t *lock) +void __lockfunc _spin_lock(raw_spinlock_t *lock) { __spin_lock(lock); } @@ -138,7 +140,7 @@ EXPORT_SYMBOL(_spin_lock); #endif #ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE -unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) +unsigned long __lockfunc _spin_lock_irqsave(raw_spinlock_t *lock) { return __spin_lock_irqsave(lock); } @@ -146,7 +148,7 @@ EXPORT_SYMBOL(_spin_lock_irqsave); #endif #ifndef CONFIG_INLINE_SPIN_LOCK_IRQ -void __lockfunc _spin_lock_irq(spinlock_t *lock) +void __lockfunc _spin_lock_irq(raw_spinlock_t *lock) { __spin_lock_irq(lock); } @@ -154,7 +156,7 @@ EXPORT_SYMBOL(_spin_lock_irq); #endif #ifndef CONFIG_INLINE_SPIN_LOCK_BH -void __lockfunc _spin_lock_bh(spinlock_t *lock) +void __lockfunc _spin_lock_bh(raw_spinlock_t *lock) { __spin_lock_bh(lock); } @@ -162,7 +164,7 @@ EXPORT_SYMBOL(_spin_lock_bh); #endif #ifndef CONFIG_INLINE_SPIN_UNLOCK -void __lockfunc _spin_unlock(spinlock_t *lock) +void __lockfunc _spin_unlock(raw_spinlock_t *lock) { __spin_unlock(lock); } @@ -170,7 +172,7 @@ EXPORT_SYMBOL(_spin_unlock); #endif #ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE -void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) +void __lockfunc _spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) { __spin_unlock_irqrestore(lock, flags); } @@ -178,7 +180,7 @@ EXPORT_SYMBOL(_spin_unlock_irqrestore); #endif #ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ -void __lockfunc _spin_unlock_irq(spinlock_t *lock) +void __lockfunc _spin_unlock_irq(raw_spinlock_t *lock) { __spin_unlock_irq(lock); } @@ -186,7 +188,7 @@ EXPORT_SYMBOL(_spin_unlock_irq); #endif #ifndef CONFIG_INLINE_SPIN_UNLOCK_BH -void __lockfunc _spin_unlock_bh(spinlock_t *lock) +void __lockfunc _spin_unlock_bh(raw_spinlock_t *lock) { __spin_unlock_bh(lock); } @@ -339,7 +341,7 @@ EXPORT_SYMBOL(_write_unlock_bh); #ifdef CONFIG_DEBUG_LOCK_ALLOC -void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) +void __lockfunc _spin_lock_nested(raw_spinlock_t *lock, int 
subclass) { preempt_disable(); spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); @@ -347,7 +349,7 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) } EXPORT_SYMBOL(_spin_lock_nested); -unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, +unsigned long __lockfunc _spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass) { unsigned long flags; @@ -361,7 +363,7 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, } EXPORT_SYMBOL(_spin_lock_irqsave_nested); -void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, +void __lockfunc _spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *nest_lock) { preempt_disable(); diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c index 0cea0bf6114e..e705848cc33c 100644 --- a/lib/spinlock_debug.c +++ b/lib/spinlock_debug.c @@ -13,8 +13,8 @@ #include #include -void __spin_lock_init(spinlock_t *lock, const char *name, - struct lock_class_key *key) +void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, + struct lock_class_key *key) { #ifdef CONFIG_DEBUG_LOCK_ALLOC /* @@ -29,7 +29,7 @@ void __spin_lock_init(spinlock_t *lock, const char *name, lock->owner_cpu = -1; } -EXPORT_SYMBOL(__spin_lock_init); +EXPORT_SYMBOL(__raw_spin_lock_init); void __rwlock_init(rwlock_t *lock, const char *name, struct lock_class_key *key) @@ -49,7 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name, EXPORT_SYMBOL(__rwlock_init); -static void spin_bug(spinlock_t *lock, const char *msg) +static void spin_bug(raw_spinlock_t *lock, const char *msg) { struct task_struct *owner = NULL; @@ -73,7 +73,7 @@ static void spin_bug(spinlock_t *lock, const char *msg) #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg) static inline void -debug_spin_lock_before(spinlock_t *lock) +debug_spin_lock_before(raw_spinlock_t *lock) { SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); SPIN_BUG_ON(lock->owner == current, lock, "recursion"); @@ -81,16 +81,16 @@ debug_spin_lock_before(spinlock_t *lock) lock, "cpu recursion"); } -static inline void debug_spin_lock_after(spinlock_t *lock) +static inline void debug_spin_lock_after(raw_spinlock_t *lock) { lock->owner_cpu = raw_smp_processor_id(); lock->owner = current; } -static inline void debug_spin_unlock(spinlock_t *lock) +static inline void debug_spin_unlock(raw_spinlock_t *lock) { SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); - SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked"); + SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked"); SPIN_BUG_ON(lock->owner != current, lock, "wrong owner"); SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(), lock, "wrong CPU"); @@ -98,7 +98,7 @@ static inline void debug_spin_unlock(spinlock_t *lock) lock->owner_cpu = -1; } -static void __spin_lock_debug(spinlock_t *lock) +static void __spin_lock_debug(raw_spinlock_t *lock) { u64 i; u64 loops = loops_per_jiffy * HZ; @@ -125,7 +125,7 @@ static void __spin_lock_debug(spinlock_t *lock) } } -void _raw_spin_lock(spinlock_t *lock) +void _raw_spin_lock(raw_spinlock_t *lock) { debug_spin_lock_before(lock); if (unlikely(!arch_spin_trylock(&lock->raw_lock))) @@ -133,7 +133,7 @@ void _raw_spin_lock(spinlock_t *lock) debug_spin_lock_after(lock); } -int _raw_spin_trylock(spinlock_t *lock) +int _raw_spin_trylock(raw_spinlock_t *lock) { int ret = arch_spin_trylock(&lock->raw_lock); @@ -148,7 +148,7 @@ int _raw_spin_trylock(spinlock_t *lock) return ret; } -void _raw_spin_unlock(spinlock_t *lock) +void 
_raw_spin_unlock(raw_spinlock_t *lock) { debug_spin_unlock(lock); arch_spin_unlock(&lock->raw_lock); -- cgit v1.2.3 From 9828ea9d75c38fe3dce05d00566eed61c85732e6 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 3 Dec 2009 20:55:53 +0100 Subject: locking: Further name space cleanups The name space hierarchy for the internal lock functions is now a bit backwards. raw_spin* functions map to _spin* which use __spin*, while we would like to have _raw_spin* and __raw_spin*. _raw_spin* is already used by lock debugging, so rename those funtions to do_raw_spin* to free up the _raw_spin* name space. No functional change. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Acked-by: Ingo Molnar --- include/linux/rwlock.h | 32 ++++++++++++++++---------------- include/linux/rwlock_api_smp.h | 40 ++++++++++++++++++++-------------------- include/linux/spinlock.h | 16 ++++++++-------- include/linux/spinlock_api_smp.h | 24 ++++++++++++------------ kernel/sched.c | 2 +- kernel/spinlock.c | 12 ++++++------ lib/kernel_lock.c | 18 +++++++++--------- lib/spinlock_debug.c | 18 +++++++++--------- 8 files changed, 81 insertions(+), 81 deletions(-) (limited to 'include') diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h index 5725b034defe..bd799bc6d086 100644 --- a/include/linux/rwlock.h +++ b/include/linux/rwlock.h @@ -29,25 +29,25 @@ do { \ #endif #ifdef CONFIG_DEBUG_SPINLOCK - extern void _raw_read_lock(rwlock_t *lock); -#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock) - extern int _raw_read_trylock(rwlock_t *lock); - extern void _raw_read_unlock(rwlock_t *lock); - extern void _raw_write_lock(rwlock_t *lock); -#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock) - extern int _raw_write_trylock(rwlock_t *lock); - extern void _raw_write_unlock(rwlock_t *lock); + extern void do_raw_read_lock(rwlock_t *lock); +#define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock) + extern int do_raw_read_trylock(rwlock_t *lock); + extern void do_raw_read_unlock(rwlock_t *lock); + extern void do_raw_write_lock(rwlock_t *lock); +#define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock) + extern int do_raw_write_trylock(rwlock_t *lock); + extern void do_raw_write_unlock(rwlock_t *lock); #else -# define _raw_read_lock(rwlock) arch_read_lock(&(rwlock)->raw_lock) -# define _raw_read_lock_flags(lock, flags) \ +# define do_raw_read_lock(rwlock) arch_read_lock(&(rwlock)->raw_lock) +# define do_raw_read_lock_flags(lock, flags) \ arch_read_lock_flags(&(lock)->raw_lock, *(flags)) -# define _raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock) -# define _raw_read_unlock(rwlock) arch_read_unlock(&(rwlock)->raw_lock) -# define _raw_write_lock(rwlock) arch_write_lock(&(rwlock)->raw_lock) -# define _raw_write_lock_flags(lock, flags) \ +# define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock) +# define do_raw_read_unlock(rwlock) arch_read_unlock(&(rwlock)->raw_lock) +# define do_raw_write_lock(rwlock) arch_write_lock(&(rwlock)->raw_lock) +# define do_raw_write_lock_flags(lock, flags) \ arch_write_lock_flags(&(lock)->raw_lock, *(flags)) -# define _raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock) -# define _raw_write_unlock(rwlock) arch_write_unlock(&(rwlock)->raw_lock) +# define do_raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock) +# define do_raw_write_unlock(rwlock) arch_write_unlock(&(rwlock)->raw_lock) #endif #define read_can_lock(rwlock) arch_read_can_lock(&(rwlock)->raw_lock) diff --git 
a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h index 090f876f828d..b3ba5ae6a8c4 100644 --- a/include/linux/rwlock_api_smp.h +++ b/include/linux/rwlock_api_smp.h @@ -113,7 +113,7 @@ void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) static inline int __read_trylock(rwlock_t *lock) { preempt_disable(); - if (_raw_read_trylock(lock)) { + if (do_raw_read_trylock(lock)) { rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_); return 1; } @@ -124,7 +124,7 @@ static inline int __read_trylock(rwlock_t *lock) static inline int __write_trylock(rwlock_t *lock) { preempt_disable(); - if (_raw_write_trylock(lock)) { + if (do_raw_write_trylock(lock)) { rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_); return 1; } @@ -143,7 +143,7 @@ static inline void __read_lock(rwlock_t *lock) { preempt_disable(); rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); - LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); + LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); } static inline unsigned long __read_lock_irqsave(rwlock_t *lock) @@ -153,8 +153,8 @@ static inline unsigned long __read_lock_irqsave(rwlock_t *lock) local_irq_save(flags); preempt_disable(); rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); - LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock, - _raw_read_lock_flags, &flags); + LOCK_CONTENDED_FLAGS(lock, do_raw_read_trylock, do_raw_read_lock, + do_raw_read_lock_flags, &flags); return flags; } @@ -163,7 +163,7 @@ static inline void __read_lock_irq(rwlock_t *lock) local_irq_disable(); preempt_disable(); rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); - LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); + LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); } static inline void __read_lock_bh(rwlock_t *lock) @@ -171,7 +171,7 @@ static inline void __read_lock_bh(rwlock_t *lock) local_bh_disable(); preempt_disable(); rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); - LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); + LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); } static inline unsigned long __write_lock_irqsave(rwlock_t *lock) @@ -181,8 +181,8 @@ static inline unsigned long __write_lock_irqsave(rwlock_t *lock) local_irq_save(flags); preempt_disable(); rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); - LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock, - _raw_write_lock_flags, &flags); + LOCK_CONTENDED_FLAGS(lock, do_raw_write_trylock, do_raw_write_lock, + do_raw_write_lock_flags, &flags); return flags; } @@ -191,7 +191,7 @@ static inline void __write_lock_irq(rwlock_t *lock) local_irq_disable(); preempt_disable(); rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); - LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); + LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); } static inline void __write_lock_bh(rwlock_t *lock) @@ -199,14 +199,14 @@ static inline void __write_lock_bh(rwlock_t *lock) local_bh_disable(); preempt_disable(); rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); - LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); + LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); } static inline void __write_lock(rwlock_t *lock) { preempt_disable(); rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); - LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); + LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); } #endif /* CONFIG_PREEMPT */ @@ -214,21 +214,21 @@ static inline void __write_lock(rwlock_t *lock) static inline void 
__write_unlock(rwlock_t *lock) { rwlock_release(&lock->dep_map, 1, _RET_IP_); - _raw_write_unlock(lock); + do_raw_write_unlock(lock); preempt_enable(); } static inline void __read_unlock(rwlock_t *lock) { rwlock_release(&lock->dep_map, 1, _RET_IP_); - _raw_read_unlock(lock); + do_raw_read_unlock(lock); preempt_enable(); } static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) { rwlock_release(&lock->dep_map, 1, _RET_IP_); - _raw_read_unlock(lock); + do_raw_read_unlock(lock); local_irq_restore(flags); preempt_enable(); } @@ -236,7 +236,7 @@ static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) static inline void __read_unlock_irq(rwlock_t *lock) { rwlock_release(&lock->dep_map, 1, _RET_IP_); - _raw_read_unlock(lock); + do_raw_read_unlock(lock); local_irq_enable(); preempt_enable(); } @@ -244,7 +244,7 @@ static inline void __read_unlock_irq(rwlock_t *lock) static inline void __read_unlock_bh(rwlock_t *lock) { rwlock_release(&lock->dep_map, 1, _RET_IP_); - _raw_read_unlock(lock); + do_raw_read_unlock(lock); preempt_enable_no_resched(); local_bh_enable_ip((unsigned long)__builtin_return_address(0)); } @@ -253,7 +253,7 @@ static inline void __write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) { rwlock_release(&lock->dep_map, 1, _RET_IP_); - _raw_write_unlock(lock); + do_raw_write_unlock(lock); local_irq_restore(flags); preempt_enable(); } @@ -261,7 +261,7 @@ static inline void __write_unlock_irqrestore(rwlock_t *lock, static inline void __write_unlock_irq(rwlock_t *lock) { rwlock_release(&lock->dep_map, 1, _RET_IP_); - _raw_write_unlock(lock); + do_raw_write_unlock(lock); local_irq_enable(); preempt_enable(); } @@ -269,7 +269,7 @@ static inline void __write_unlock_irq(rwlock_t *lock) static inline void __write_unlock_bh(rwlock_t *lock) { rwlock_release(&lock->dep_map, 1, _RET_IP_); - _raw_write_unlock(lock); + do_raw_write_unlock(lock); preempt_enable_no_resched(); local_bh_enable_ip((unsigned long)__builtin_return_address(0)); } diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index ef5a55d96b9b..0cbc58acf689 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -128,28 +128,28 @@ static inline void smp_mb__after_lock(void) { smp_mb(); } #define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock) #ifdef CONFIG_DEBUG_SPINLOCK - extern void _raw_spin_lock(raw_spinlock_t *lock); -#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) - extern int _raw_spin_trylock(raw_spinlock_t *lock); - extern void _raw_spin_unlock(raw_spinlock_t *lock); + extern void do_raw_spin_lock(raw_spinlock_t *lock); +#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock) + extern int do_raw_spin_trylock(raw_spinlock_t *lock); + extern void do_raw_spin_unlock(raw_spinlock_t *lock); #else -static inline void _raw_spin_lock(raw_spinlock_t *lock) +static inline void do_raw_spin_lock(raw_spinlock_t *lock) { arch_spin_lock(&lock->raw_lock); } static inline void -_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) +do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) { arch_spin_lock_flags(&lock->raw_lock, *flags); } -static inline int _raw_spin_trylock(raw_spinlock_t *lock) +static inline int do_raw_spin_trylock(raw_spinlock_t *lock) { return arch_spin_trylock(&(lock)->raw_lock); } -static inline void _raw_spin_unlock(raw_spinlock_t *lock) +static inline void do_raw_spin_unlock(raw_spinlock_t *lock) { arch_spin_unlock(&lock->raw_lock); } diff --git 
a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index eabe5068d138..1be1fc57fc4b 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -85,7 +85,7 @@ _spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) static inline int __spin_trylock(raw_spinlock_t *lock) { preempt_disable(); - if (_raw_spin_trylock(lock)) { + if (do_raw_spin_trylock(lock)) { spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); return 1; } @@ -109,13 +109,13 @@ static inline unsigned long __spin_lock_irqsave(raw_spinlock_t *lock) spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); /* * On lockdep we dont want the hand-coded irq-enable of - * _raw_spin_lock_flags() code, because lockdep assumes + * do_raw_spin_lock_flags() code, because lockdep assumes * that interrupts are not re-enabled during lock-acquire: */ #ifdef CONFIG_LOCKDEP - LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); + LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); #else - _raw_spin_lock_flags(lock, &flags); + do_raw_spin_lock_flags(lock, &flags); #endif return flags; } @@ -125,7 +125,7 @@ static inline void __spin_lock_irq(raw_spinlock_t *lock) local_irq_disable(); preempt_disable(); spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); - LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); + LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); } static inline void __spin_lock_bh(raw_spinlock_t *lock) @@ -133,14 +133,14 @@ static inline void __spin_lock_bh(raw_spinlock_t *lock) local_bh_disable(); preempt_disable(); spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); - LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); + LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); } static inline void __spin_lock(raw_spinlock_t *lock) { preempt_disable(); spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); - LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); + LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); } #endif /* CONFIG_PREEMPT */ @@ -148,7 +148,7 @@ static inline void __spin_lock(raw_spinlock_t *lock) static inline void __spin_unlock(raw_spinlock_t *lock) { spin_release(&lock->dep_map, 1, _RET_IP_); - _raw_spin_unlock(lock); + do_raw_spin_unlock(lock); preempt_enable(); } @@ -156,7 +156,7 @@ static inline void __spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) { spin_release(&lock->dep_map, 1, _RET_IP_); - _raw_spin_unlock(lock); + do_raw_spin_unlock(lock); local_irq_restore(flags); preempt_enable(); } @@ -164,7 +164,7 @@ static inline void __spin_unlock_irqrestore(raw_spinlock_t *lock, static inline void __spin_unlock_irq(raw_spinlock_t *lock) { spin_release(&lock->dep_map, 1, _RET_IP_); - _raw_spin_unlock(lock); + do_raw_spin_unlock(lock); local_irq_enable(); preempt_enable(); } @@ -172,7 +172,7 @@ static inline void __spin_unlock_irq(raw_spinlock_t *lock) static inline void __spin_unlock_bh(raw_spinlock_t *lock) { spin_release(&lock->dep_map, 1, _RET_IP_); - _raw_spin_unlock(lock); + do_raw_spin_unlock(lock); preempt_enable_no_resched(); local_bh_enable_ip((unsigned long)__builtin_return_address(0)); } @@ -181,7 +181,7 @@ static inline int __spin_trylock_bh(raw_spinlock_t *lock) { local_bh_disable(); preempt_disable(); - if (_raw_spin_trylock(lock)) { + if (do_raw_spin_trylock(lock)) { spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); return 1; } diff --git a/kernel/sched.c b/kernel/sched.c index e6acf2d7b753..91c65dd91435 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -6684,7 +6684,7 @@ SYSCALL_DEFINE0(sched_yield) */ 
__release(rq->lock); spin_release(&rq->lock.dep_map, 1, _THIS_IP_); - _raw_spin_unlock(&rq->lock); + do_raw_spin_unlock(&rq->lock); preempt_enable_no_resched(); schedule(); diff --git a/kernel/spinlock.c b/kernel/spinlock.c index 54eb7dd3c608..795240b81224 100644 --- a/kernel/spinlock.c +++ b/kernel/spinlock.c @@ -48,7 +48,7 @@ void __lockfunc __##op##_lock(locktype##_t *lock) \ { \ for (;;) { \ preempt_disable(); \ - if (likely(_raw_##op##_trylock(lock))) \ + if (likely(do_raw_##op##_trylock(lock))) \ break; \ preempt_enable(); \ \ @@ -67,7 +67,7 @@ unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock) \ for (;;) { \ preempt_disable(); \ local_irq_save(flags); \ - if (likely(_raw_##op##_trylock(lock))) \ + if (likely(do_raw_##op##_trylock(lock))) \ break; \ local_irq_restore(flags); \ preempt_enable(); \ @@ -345,7 +345,7 @@ void __lockfunc _spin_lock_nested(raw_spinlock_t *lock, int subclass) { preempt_disable(); spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); - LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); + LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); } EXPORT_SYMBOL(_spin_lock_nested); @@ -357,8 +357,8 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(raw_spinlock_t *lock, local_irq_save(flags); preempt_disable(); spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); - LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock, - _raw_spin_lock_flags, &flags); + LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock, + do_raw_spin_lock_flags, &flags); return flags; } EXPORT_SYMBOL(_spin_lock_irqsave_nested); @@ -368,7 +368,7 @@ void __lockfunc _spin_lock_nest_lock(raw_spinlock_t *lock, { preempt_disable(); spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_); - LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); + LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); } EXPORT_SYMBOL(_spin_lock_nest_lock); diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c index 5526b46aba94..fdd23cdb53f3 100644 --- a/lib/kernel_lock.c +++ b/lib/kernel_lock.c @@ -36,12 +36,12 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag); * If it successfully gets the lock, it should increment * the preemption count like any spinlock does. * - * (This works on UP too - _raw_spin_trylock will never + * (This works on UP too - do_raw_spin_trylock will never * return false in that case) */ int __lockfunc __reacquire_kernel_lock(void) { - while (!_raw_spin_trylock(&kernel_flag)) { + while (!do_raw_spin_trylock(&kernel_flag)) { if (need_resched()) return -EAGAIN; cpu_relax(); @@ -52,27 +52,27 @@ int __lockfunc __reacquire_kernel_lock(void) void __lockfunc __release_kernel_lock(void) { - _raw_spin_unlock(&kernel_flag); + do_raw_spin_unlock(&kernel_flag); preempt_enable_no_resched(); } /* * These are the BKL spinlocks - we try to be polite about preemption. * If SMP is not on (ie UP preemption), this all goes away because the - * _raw_spin_trylock() will always succeed. + * do_raw_spin_trylock() will always succeed. */ #ifdef CONFIG_PREEMPT static inline void __lock_kernel(void) { preempt_disable(); - if (unlikely(!_raw_spin_trylock(&kernel_flag))) { + if (unlikely(!do_raw_spin_trylock(&kernel_flag))) { /* * If preemption was disabled even before this * was called, there's nothing we can be polite * about - just spin. 
*/ if (preempt_count() > 1) { - _raw_spin_lock(&kernel_flag); + do_raw_spin_lock(&kernel_flag); return; } @@ -85,7 +85,7 @@ static inline void __lock_kernel(void) while (spin_is_locked(&kernel_flag)) cpu_relax(); preempt_disable(); - } while (!_raw_spin_trylock(&kernel_flag)); + } while (!do_raw_spin_trylock(&kernel_flag)); } } @@ -96,7 +96,7 @@ static inline void __lock_kernel(void) */ static inline void __lock_kernel(void) { - _raw_spin_lock(&kernel_flag); + do_raw_spin_lock(&kernel_flag); } #endif @@ -106,7 +106,7 @@ static inline void __unlock_kernel(void) * the BKL is not covered by lockdep, so we open-code the * unlocking sequence (and thus avoid the dep-chain ops): */ - _raw_spin_unlock(&kernel_flag); + do_raw_spin_unlock(&kernel_flag); preempt_enable(); } diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c index e705848cc33c..4755b98b6dfb 100644 --- a/lib/spinlock_debug.c +++ b/lib/spinlock_debug.c @@ -125,7 +125,7 @@ static void __spin_lock_debug(raw_spinlock_t *lock) } } -void _raw_spin_lock(raw_spinlock_t *lock) +void do_raw_spin_lock(raw_spinlock_t *lock) { debug_spin_lock_before(lock); if (unlikely(!arch_spin_trylock(&lock->raw_lock))) @@ -133,7 +133,7 @@ void _raw_spin_lock(raw_spinlock_t *lock) debug_spin_lock_after(lock); } -int _raw_spin_trylock(raw_spinlock_t *lock) +int do_raw_spin_trylock(raw_spinlock_t *lock) { int ret = arch_spin_trylock(&lock->raw_lock); @@ -148,7 +148,7 @@ int _raw_spin_trylock(raw_spinlock_t *lock) return ret; } -void _raw_spin_unlock(raw_spinlock_t *lock) +void do_raw_spin_unlock(raw_spinlock_t *lock) { debug_spin_unlock(lock); arch_spin_unlock(&lock->raw_lock); @@ -193,13 +193,13 @@ static void __read_lock_debug(rwlock_t *lock) } #endif -void _raw_read_lock(rwlock_t *lock) +void do_raw_read_lock(rwlock_t *lock) { RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); arch_read_lock(&lock->raw_lock); } -int _raw_read_trylock(rwlock_t *lock) +int do_raw_read_trylock(rwlock_t *lock) { int ret = arch_read_trylock(&lock->raw_lock); @@ -212,7 +212,7 @@ int _raw_read_trylock(rwlock_t *lock) return ret; } -void _raw_read_unlock(rwlock_t *lock) +void do_raw_read_unlock(rwlock_t *lock) { RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); arch_read_unlock(&lock->raw_lock); @@ -268,14 +268,14 @@ static void __write_lock_debug(rwlock_t *lock) } #endif -void _raw_write_lock(rwlock_t *lock) +void do_raw_write_lock(rwlock_t *lock) { debug_write_lock_before(lock); arch_write_lock(&lock->raw_lock); debug_write_lock_after(lock); } -int _raw_write_trylock(rwlock_t *lock) +int do_raw_write_trylock(rwlock_t *lock) { int ret = arch_write_trylock(&lock->raw_lock); @@ -290,7 +290,7 @@ int _raw_write_trylock(rwlock_t *lock) return ret; } -void _raw_write_unlock(rwlock_t *lock) +void do_raw_write_unlock(rwlock_t *lock) { debug_write_unlock(lock); arch_write_unlock(&lock->raw_lock); -- cgit v1.2.3 From 9c1721aa4994f6625decbd915241f3a94ee2fe67 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 3 Dec 2009 21:52:18 +0100 Subject: locking: Cleanup the name space completely Make the name space hierarchy of locking functions consistent: raw_spin* -> _raw_spin* -> __raw_spin* No functional change. 
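For orientation, here is a condensed sketch of how the renamed layers stack up for a plain raw_spin_lock() on SMP once this series is applied. It collapses LOCK_CONTENDED() to its non-lockstat form and omits the CONFIG_INLINE_* switches, so it is an illustration distilled from the hunks below rather than additional patch content:

	/* arch-facing level: operates on the hardware lock word */
	static inline void do_raw_spin_lock(raw_spinlock_t *lock)
	{
		arch_spin_lock(&lock->raw_lock);
	}

	/* double-underscore level: preemption and lockdep bookkeeping */
	static inline void __raw_spin_lock(raw_spinlock_t *lock)
	{
		preempt_disable();
		spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
		do_raw_spin_lock(lock);	/* LOCK_CONTENDED() without lockstat */
	}

	/* single-underscore level: out-of-line entry point (kernel/spinlock.c) */
	void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
	{
		__raw_spin_lock(lock);
	}

	/* public API seen by callers (include/linux/spinlock.h) */
	#define raw_spin_lock(lock)	_raw_spin_lock(lock)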
Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Acked-by: Ingo Molnar --- include/linux/rwlock.h | 50 +++++----- include/linux/rwlock_api_smp.h | 113 ++++++++++++----------- include/linux/spinlock.h | 37 ++++---- include/linux/spinlock_api_smp.h | 79 ++++++++-------- include/linux/spinlock_api_up.h | 64 +++++++------ kernel/spinlock.c | 192 +++++++++++++++++++-------------------- 6 files changed, 274 insertions(+), 261 deletions(-) (limited to 'include') diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h index bd799bc6d086..71e0b00b6f2c 100644 --- a/include/linux/rwlock.h +++ b/include/linux/rwlock.h @@ -38,7 +38,7 @@ do { \ extern int do_raw_write_trylock(rwlock_t *lock); extern void do_raw_write_unlock(rwlock_t *lock); #else -# define do_raw_read_lock(rwlock) arch_read_lock(&(rwlock)->raw_lock) +# define do_raw_read_lock(rwlock) arch_read_lock(&(rwlock)->raw_lock) # define do_raw_read_lock_flags(lock, flags) \ arch_read_lock_flags(&(lock)->raw_lock, *(flags)) # define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock) @@ -58,23 +58,23 @@ do { \ * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various * methods are defined as nops in the case they are not required. */ -#define read_trylock(lock) __cond_lock(lock, _read_trylock(lock)) -#define write_trylock(lock) __cond_lock(lock, _write_trylock(lock)) +#define read_trylock(lock) __cond_lock(lock, _raw_read_trylock(lock)) +#define write_trylock(lock) __cond_lock(lock, _raw_write_trylock(lock)) -#define write_lock(lock) _write_lock(lock) -#define read_lock(lock) _read_lock(lock) +#define write_lock(lock) _raw_write_lock(lock) +#define read_lock(lock) _raw_read_lock(lock) #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) #define read_lock_irqsave(lock, flags) \ do { \ typecheck(unsigned long, flags); \ - flags = _read_lock_irqsave(lock); \ + flags = _raw_read_lock_irqsave(lock); \ } while (0) #define write_lock_irqsave(lock, flags) \ do { \ typecheck(unsigned long, flags); \ - flags = _write_lock_irqsave(lock); \ + flags = _raw_write_lock_irqsave(lock); \ } while (0) #else @@ -82,38 +82,38 @@ do { \ #define read_lock_irqsave(lock, flags) \ do { \ typecheck(unsigned long, flags); \ - _read_lock_irqsave(lock, flags); \ + _raw_read_lock_irqsave(lock, flags); \ } while (0) #define write_lock_irqsave(lock, flags) \ do { \ typecheck(unsigned long, flags); \ - _write_lock_irqsave(lock, flags); \ + _raw_write_lock_irqsave(lock, flags); \ } while (0) #endif -#define read_lock_irq(lock) _read_lock_irq(lock) -#define read_lock_bh(lock) _read_lock_bh(lock) -#define write_lock_irq(lock) _write_lock_irq(lock) -#define write_lock_bh(lock) _write_lock_bh(lock) -#define read_unlock(lock) _read_unlock(lock) -#define write_unlock(lock) _write_unlock(lock) -#define read_unlock_irq(lock) _read_unlock_irq(lock) -#define write_unlock_irq(lock) _write_unlock_irq(lock) - -#define read_unlock_irqrestore(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ - _read_unlock_irqrestore(lock, flags); \ +#define read_lock_irq(lock) _raw_read_lock_irq(lock) +#define read_lock_bh(lock) _raw_read_lock_bh(lock) +#define write_lock_irq(lock) _raw_write_lock_irq(lock) +#define write_lock_bh(lock) _raw_write_lock_bh(lock) +#define read_unlock(lock) _raw_read_unlock(lock) +#define write_unlock(lock) _raw_write_unlock(lock) +#define read_unlock_irq(lock) _raw_read_unlock_irq(lock) +#define write_unlock_irq(lock) _raw_write_unlock_irq(lock) + +#define read_unlock_irqrestore(lock, flags) \ + do { \ + typecheck(unsigned 
long, flags); \ + _raw_read_unlock_irqrestore(lock, flags); \ } while (0) -#define read_unlock_bh(lock) _read_unlock_bh(lock) +#define read_unlock_bh(lock) _raw_read_unlock_bh(lock) #define write_unlock_irqrestore(lock, flags) \ do { \ typecheck(unsigned long, flags); \ - _write_unlock_irqrestore(lock, flags); \ + _raw_write_unlock_irqrestore(lock, flags); \ } while (0) -#define write_unlock_bh(lock) _write_unlock_bh(lock) +#define write_unlock_bh(lock) _raw_write_unlock_bh(lock) #define write_trylock_irqsave(lock, flags) \ ({ \ diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h index b3ba5ae6a8c4..9c9f0495d37c 100644 --- a/include/linux/rwlock_api_smp.h +++ b/include/linux/rwlock_api_smp.h @@ -15,102 +15,106 @@ * Released under the General Public License (GPL). */ -void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock); -void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock); -void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(lock); -void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(lock); -void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(lock); -void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(lock); -unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) +void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock); +void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock); +void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock); +void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock); +void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock); +void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock); +unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock) __acquires(lock); -unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) +unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock) __acquires(lock); -int __lockfunc _read_trylock(rwlock_t *lock); -int __lockfunc _write_trylock(rwlock_t *lock); -void __lockfunc _read_unlock(rwlock_t *lock) __releases(lock); -void __lockfunc _write_unlock(rwlock_t *lock) __releases(lock); -void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(lock); -void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(lock); -void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(lock); -void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(lock); -void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) +int __lockfunc _raw_read_trylock(rwlock_t *lock); +int __lockfunc _raw_write_trylock(rwlock_t *lock); +void __lockfunc _raw_read_unlock(rwlock_t *lock) __releases(lock); +void __lockfunc _raw_write_unlock(rwlock_t *lock) __releases(lock); +void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) __releases(lock); +void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) __releases(lock); +void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) __releases(lock); +void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) __releases(lock); +void __lockfunc +_raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) __releases(lock); -void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) +void __lockfunc +_raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) __releases(lock); #ifdef CONFIG_INLINE_READ_LOCK -#define _read_lock(lock) __read_lock(lock) +#define _raw_read_lock(lock) __raw_read_lock(lock) #endif #ifdef CONFIG_INLINE_WRITE_LOCK -#define _write_lock(lock) __write_lock(lock) +#define _raw_write_lock(lock) __raw_write_lock(lock) #endif 
#ifdef CONFIG_INLINE_READ_LOCK_BH -#define _read_lock_bh(lock) __read_lock_bh(lock) +#define _raw_read_lock_bh(lock) __raw_read_lock_bh(lock) #endif #ifdef CONFIG_INLINE_WRITE_LOCK_BH -#define _write_lock_bh(lock) __write_lock_bh(lock) +#define _raw_write_lock_bh(lock) __raw_write_lock_bh(lock) #endif #ifdef CONFIG_INLINE_READ_LOCK_IRQ -#define _read_lock_irq(lock) __read_lock_irq(lock) +#define _raw_read_lock_irq(lock) __raw_read_lock_irq(lock) #endif #ifdef CONFIG_INLINE_WRITE_LOCK_IRQ -#define _write_lock_irq(lock) __write_lock_irq(lock) +#define _raw_write_lock_irq(lock) __raw_write_lock_irq(lock) #endif #ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE -#define _read_lock_irqsave(lock) __read_lock_irqsave(lock) +#define _raw_read_lock_irqsave(lock) __raw_read_lock_irqsave(lock) #endif #ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE -#define _write_lock_irqsave(lock) __write_lock_irqsave(lock) +#define _raw_write_lock_irqsave(lock) __raw_write_lock_irqsave(lock) #endif #ifdef CONFIG_INLINE_READ_TRYLOCK -#define _read_trylock(lock) __read_trylock(lock) +#define _raw_read_trylock(lock) __raw_read_trylock(lock) #endif #ifdef CONFIG_INLINE_WRITE_TRYLOCK -#define _write_trylock(lock) __write_trylock(lock) +#define _raw_write_trylock(lock) __raw_write_trylock(lock) #endif #ifdef CONFIG_INLINE_READ_UNLOCK -#define _read_unlock(lock) __read_unlock(lock) +#define _raw_read_unlock(lock) __raw_read_unlock(lock) #endif #ifdef CONFIG_INLINE_WRITE_UNLOCK -#define _write_unlock(lock) __write_unlock(lock) +#define _raw_write_unlock(lock) __raw_write_unlock(lock) #endif #ifdef CONFIG_INLINE_READ_UNLOCK_BH -#define _read_unlock_bh(lock) __read_unlock_bh(lock) +#define _raw_read_unlock_bh(lock) __raw_read_unlock_bh(lock) #endif #ifdef CONFIG_INLINE_WRITE_UNLOCK_BH -#define _write_unlock_bh(lock) __write_unlock_bh(lock) +#define _raw_write_unlock_bh(lock) __raw_write_unlock_bh(lock) #endif #ifdef CONFIG_INLINE_READ_UNLOCK_IRQ -#define _read_unlock_irq(lock) __read_unlock_irq(lock) +#define _raw_read_unlock_irq(lock) __raw_read_unlock_irq(lock) #endif #ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ -#define _write_unlock_irq(lock) __write_unlock_irq(lock) +#define _raw_write_unlock_irq(lock) __raw_write_unlock_irq(lock) #endif #ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE -#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags) +#define _raw_read_unlock_irqrestore(lock, flags) \ + __raw_read_unlock_irqrestore(lock, flags) #endif #ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE -#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags) +#define _raw_write_unlock_irqrestore(lock, flags) \ + __raw_write_unlock_irqrestore(lock, flags) #endif -static inline int __read_trylock(rwlock_t *lock) +static inline int __raw_read_trylock(rwlock_t *lock) { preempt_disable(); if (do_raw_read_trylock(lock)) { @@ -121,7 +125,7 @@ static inline int __read_trylock(rwlock_t *lock) return 0; } -static inline int __write_trylock(rwlock_t *lock) +static inline int __raw_write_trylock(rwlock_t *lock) { preempt_disable(); if (do_raw_write_trylock(lock)) { @@ -139,14 +143,14 @@ static inline int __write_trylock(rwlock_t *lock) */ #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) -static inline void __read_lock(rwlock_t *lock) +static inline void __raw_read_lock(rwlock_t *lock) { preempt_disable(); rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); } -static inline unsigned long __read_lock_irqsave(rwlock_t *lock) +static inline 
unsigned long __raw_read_lock_irqsave(rwlock_t *lock) { unsigned long flags; @@ -158,7 +162,7 @@ static inline unsigned long __read_lock_irqsave(rwlock_t *lock) return flags; } -static inline void __read_lock_irq(rwlock_t *lock) +static inline void __raw_read_lock_irq(rwlock_t *lock) { local_irq_disable(); preempt_disable(); @@ -166,7 +170,7 @@ static inline void __read_lock_irq(rwlock_t *lock) LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); } -static inline void __read_lock_bh(rwlock_t *lock) +static inline void __raw_read_lock_bh(rwlock_t *lock) { local_bh_disable(); preempt_disable(); @@ -174,7 +178,7 @@ static inline void __read_lock_bh(rwlock_t *lock) LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); } -static inline unsigned long __write_lock_irqsave(rwlock_t *lock) +static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock) { unsigned long flags; @@ -186,7 +190,7 @@ static inline unsigned long __write_lock_irqsave(rwlock_t *lock) return flags; } -static inline void __write_lock_irq(rwlock_t *lock) +static inline void __raw_write_lock_irq(rwlock_t *lock) { local_irq_disable(); preempt_disable(); @@ -194,7 +198,7 @@ static inline void __write_lock_irq(rwlock_t *lock) LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); } -static inline void __write_lock_bh(rwlock_t *lock) +static inline void __raw_write_lock_bh(rwlock_t *lock) { local_bh_disable(); preempt_disable(); @@ -202,7 +206,7 @@ static inline void __write_lock_bh(rwlock_t *lock) LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); } -static inline void __write_lock(rwlock_t *lock) +static inline void __raw_write_lock(rwlock_t *lock) { preempt_disable(); rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); @@ -211,21 +215,22 @@ static inline void __write_lock(rwlock_t *lock) #endif /* CONFIG_PREEMPT */ -static inline void __write_unlock(rwlock_t *lock) +static inline void __raw_write_unlock(rwlock_t *lock) { rwlock_release(&lock->dep_map, 1, _RET_IP_); do_raw_write_unlock(lock); preempt_enable(); } -static inline void __read_unlock(rwlock_t *lock) +static inline void __raw_read_unlock(rwlock_t *lock) { rwlock_release(&lock->dep_map, 1, _RET_IP_); do_raw_read_unlock(lock); preempt_enable(); } -static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) +static inline void +__raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) { rwlock_release(&lock->dep_map, 1, _RET_IP_); do_raw_read_unlock(lock); @@ -233,7 +238,7 @@ static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) preempt_enable(); } -static inline void __read_unlock_irq(rwlock_t *lock) +static inline void __raw_read_unlock_irq(rwlock_t *lock) { rwlock_release(&lock->dep_map, 1, _RET_IP_); do_raw_read_unlock(lock); @@ -241,7 +246,7 @@ static inline void __read_unlock_irq(rwlock_t *lock) preempt_enable(); } -static inline void __read_unlock_bh(rwlock_t *lock) +static inline void __raw_read_unlock_bh(rwlock_t *lock) { rwlock_release(&lock->dep_map, 1, _RET_IP_); do_raw_read_unlock(lock); @@ -249,7 +254,7 @@ static inline void __read_unlock_bh(rwlock_t *lock) local_bh_enable_ip((unsigned long)__builtin_return_address(0)); } -static inline void __write_unlock_irqrestore(rwlock_t *lock, +static inline void __raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) { rwlock_release(&lock->dep_map, 1, _RET_IP_); @@ -258,7 +263,7 @@ static inline void __write_unlock_irqrestore(rwlock_t *lock, preempt_enable(); } -static inline void __write_unlock_irq(rwlock_t *lock) 
+static inline void __raw_write_unlock_irq(rwlock_t *lock) { rwlock_release(&lock->dep_map, 1, _RET_IP_); do_raw_write_unlock(lock); @@ -266,7 +271,7 @@ static inline void __write_unlock_irq(rwlock_t *lock) preempt_enable(); } -static inline void __write_unlock_bh(rwlock_t *lock) +static inline void __raw_write_unlock_bh(rwlock_t *lock) { rwlock_release(&lock->dep_map, 1, _RET_IP_); do_raw_write_unlock(lock); diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 0cbc58acf689..86088213334a 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -161,20 +161,22 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) * various methods are defined as nops in the case they are not * required. */ -#define raw_spin_trylock(lock) __cond_lock(lock, _spin_trylock(lock)) +#define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock)) -#define raw_spin_lock(lock) _spin_lock(lock) +#define raw_spin_lock(lock) _raw_spin_lock(lock) #ifdef CONFIG_DEBUG_LOCK_ALLOC -# define raw_spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass) +# define raw_spin_lock_nested(lock, subclass) \ + _raw_spin_lock_nested(lock, subclass) + # define raw_spin_lock_nest_lock(lock, nest_lock) \ do { \ typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\ - _spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ + _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ } while (0) #else -# define raw_spin_lock_nested(lock, subclass) _spin_lock(lock) -# define raw_spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock) +# define raw_spin_lock_nested(lock, subclass) _raw_spin_lock(lock) +# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock) #endif #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) @@ -182,20 +184,20 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) #define raw_spin_lock_irqsave(lock, flags) \ do { \ typecheck(unsigned long, flags); \ - flags = _spin_lock_irqsave(lock); \ + flags = _raw_spin_lock_irqsave(lock); \ } while (0) #ifdef CONFIG_DEBUG_LOCK_ALLOC #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ do { \ typecheck(unsigned long, flags); \ - flags = _spin_lock_irqsave_nested(lock, subclass); \ + flags = _raw_spin_lock_irqsave_nested(lock, subclass); \ } while (0) #else #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ do { \ typecheck(unsigned long, flags); \ - flags = _spin_lock_irqsave(lock); \ + flags = _raw_spin_lock_irqsave(lock); \ } while (0) #endif @@ -204,7 +206,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) #define raw_spin_lock_irqsave(lock, flags) \ do { \ typecheck(unsigned long, flags); \ - _spin_lock_irqsave(lock, flags); \ + _raw_spin_lock_irqsave(lock, flags); \ } while (0) #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ @@ -212,19 +214,20 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) #endif -#define raw_spin_lock_irq(lock) _spin_lock_irq(lock) -#define raw_spin_lock_bh(lock) _spin_lock_bh(lock) -#define raw_spin_unlock(lock) _spin_unlock(lock) -#define raw_spin_unlock_irq(lock) _spin_unlock_irq(lock) +#define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock) +#define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock) +#define raw_spin_unlock(lock) _raw_spin_unlock(lock) +#define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock) #define raw_spin_unlock_irqrestore(lock, flags) \ do { \ typecheck(unsigned long, flags); \ - _spin_unlock_irqrestore(lock, flags); \ + _raw_spin_unlock_irqrestore(lock, flags); \ } while (0) -#define 
raw_spin_unlock_bh(lock) _spin_unlock_bh(lock) +#define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock) -#define raw_spin_trylock_bh(lock) __cond_lock(lock, _spin_trylock_bh(lock)) +#define raw_spin_trylock_bh(lock) \ + __cond_lock(lock, _raw_spin_trylock_bh(lock)) #define raw_spin_trylock_irq(lock) \ ({ \ diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 1be1fc57fc4b..e253ccd7a604 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -19,70 +19,71 @@ int in_lock_functions(unsigned long addr); #define assert_raw_spin_locked(x) BUG_ON(!raw_spin_is_locked(x)) -void __lockfunc _spin_lock(raw_spinlock_t *lock) __acquires(lock); -void __lockfunc _spin_lock_nested(raw_spinlock_t *lock, int subclass) - __acquires(lock); +void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock); +void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) + __acquires(lock); void __lockfunc -_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map) - __acquires(lock); -void __lockfunc _spin_lock_bh(raw_spinlock_t *lock) __acquires(lock); -void __lockfunc _spin_lock_irq(raw_spinlock_t *lock) __acquires(lock); - -unsigned long __lockfunc _spin_lock_irqsave(raw_spinlock_t *lock) - __acquires(lock); +_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map) + __acquires(lock); +void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock); +void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock) + __acquires(lock); + +unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock) + __acquires(lock); unsigned long __lockfunc -_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass) - __acquires(lock); -int __lockfunc _spin_trylock(raw_spinlock_t *lock); -int __lockfunc _spin_trylock_bh(raw_spinlock_t *lock); -void __lockfunc _spin_unlock(raw_spinlock_t *lock) __releases(lock); -void __lockfunc _spin_unlock_bh(raw_spinlock_t *lock) __releases(lock); -void __lockfunc _spin_unlock_irq(raw_spinlock_t *lock) __releases(lock); +_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass) + __acquires(lock); +int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock); +int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock); +void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) __releases(lock); +void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) __releases(lock); +void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) __releases(lock); void __lockfunc -_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) - __releases(lock); +_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) + __releases(lock); #ifdef CONFIG_INLINE_SPIN_LOCK -#define _spin_lock(lock) __spin_lock(lock) +#define _raw_spin_lock(lock) __raw_spin_lock(lock) #endif #ifdef CONFIG_INLINE_SPIN_LOCK_BH -#define _spin_lock_bh(lock) __spin_lock_bh(lock) +#define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock) #endif #ifdef CONFIG_INLINE_SPIN_LOCK_IRQ -#define _spin_lock_irq(lock) __spin_lock_irq(lock) +#define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock) #endif #ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE -#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock) +#define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock) #endif #ifdef CONFIG_INLINE_SPIN_TRYLOCK -#define _spin_trylock(lock) __spin_trylock(lock) +#define _raw_spin_trylock(lock) __raw_spin_trylock(lock) #endif #ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH -#define _spin_trylock_bh(lock) 
__spin_trylock_bh(lock) +#define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock) #endif #ifdef CONFIG_INLINE_SPIN_UNLOCK -#define _spin_unlock(lock) __spin_unlock(lock) +#define _raw_spin_unlock(lock) __raw_spin_unlock(lock) #endif #ifdef CONFIG_INLINE_SPIN_UNLOCK_BH -#define _spin_unlock_bh(lock) __spin_unlock_bh(lock) +#define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock) #endif #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ -#define _spin_unlock_irq(lock) __spin_unlock_irq(lock) +#define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock) #endif #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE -#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags) +#define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags) #endif -static inline int __spin_trylock(raw_spinlock_t *lock) +static inline int __raw_spin_trylock(raw_spinlock_t *lock) { preempt_disable(); if (do_raw_spin_trylock(lock)) { @@ -100,7 +101,7 @@ static inline int __spin_trylock(raw_spinlock_t *lock) */ #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) -static inline unsigned long __spin_lock_irqsave(raw_spinlock_t *lock) +static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock) { unsigned long flags; @@ -120,7 +121,7 @@ static inline unsigned long __spin_lock_irqsave(raw_spinlock_t *lock) return flags; } -static inline void __spin_lock_irq(raw_spinlock_t *lock) +static inline void __raw_spin_lock_irq(raw_spinlock_t *lock) { local_irq_disable(); preempt_disable(); @@ -128,7 +129,7 @@ static inline void __spin_lock_irq(raw_spinlock_t *lock) LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); } -static inline void __spin_lock_bh(raw_spinlock_t *lock) +static inline void __raw_spin_lock_bh(raw_spinlock_t *lock) { local_bh_disable(); preempt_disable(); @@ -136,7 +137,7 @@ static inline void __spin_lock_bh(raw_spinlock_t *lock) LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); } -static inline void __spin_lock(raw_spinlock_t *lock) +static inline void __raw_spin_lock(raw_spinlock_t *lock) { preempt_disable(); spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); @@ -145,14 +146,14 @@ static inline void __spin_lock(raw_spinlock_t *lock) #endif /* CONFIG_PREEMPT */ -static inline void __spin_unlock(raw_spinlock_t *lock) +static inline void __raw_spin_unlock(raw_spinlock_t *lock) { spin_release(&lock->dep_map, 1, _RET_IP_); do_raw_spin_unlock(lock); preempt_enable(); } -static inline void __spin_unlock_irqrestore(raw_spinlock_t *lock, +static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) { spin_release(&lock->dep_map, 1, _RET_IP_); @@ -161,7 +162,7 @@ static inline void __spin_unlock_irqrestore(raw_spinlock_t *lock, preempt_enable(); } -static inline void __spin_unlock_irq(raw_spinlock_t *lock) +static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock) { spin_release(&lock->dep_map, 1, _RET_IP_); do_raw_spin_unlock(lock); @@ -169,7 +170,7 @@ static inline void __spin_unlock_irq(raw_spinlock_t *lock) preempt_enable(); } -static inline void __spin_unlock_bh(raw_spinlock_t *lock) +static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock) { spin_release(&lock->dep_map, 1, _RET_IP_); do_raw_spin_unlock(lock); @@ -177,7 +178,7 @@ static inline void __spin_unlock_bh(raw_spinlock_t *lock) local_bh_enable_ip((unsigned long)__builtin_return_address(0)); } -static inline int __spin_trylock_bh(raw_spinlock_t *lock) +static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock) { 
local_bh_disable(); preempt_disable(); diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h index 3a9e27adecf9..af1f47229e70 100644 --- a/include/linux/spinlock_api_up.h +++ b/include/linux/spinlock_api_up.h @@ -40,7 +40,8 @@ do { preempt_enable(); __release(lock); (void)(lock); } while (0) #define __UNLOCK_BH(lock) \ - do { preempt_enable_no_resched(); local_bh_enable(); __release(lock); (void)(lock); } while (0) + do { preempt_enable_no_resched(); local_bh_enable(); \ + __release(lock); (void)(lock); } while (0) #define __UNLOCK_IRQ(lock) \ do { local_irq_enable(); __UNLOCK(lock); } while (0) @@ -48,34 +49,37 @@ #define __UNLOCK_IRQRESTORE(lock, flags) \ do { local_irq_restore(flags); __UNLOCK(lock); } while (0) -#define _spin_lock(lock) __LOCK(lock) -#define _spin_lock_nested(lock, subclass) __LOCK(lock) -#define _read_lock(lock) __LOCK(lock) -#define _write_lock(lock) __LOCK(lock) -#define _spin_lock_bh(lock) __LOCK_BH(lock) -#define _read_lock_bh(lock) __LOCK_BH(lock) -#define _write_lock_bh(lock) __LOCK_BH(lock) -#define _spin_lock_irq(lock) __LOCK_IRQ(lock) -#define _read_lock_irq(lock) __LOCK_IRQ(lock) -#define _write_lock_irq(lock) __LOCK_IRQ(lock) -#define _spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) -#define _read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) -#define _write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) -#define _spin_trylock(lock) ({ __LOCK(lock); 1; }) -#define _read_trylock(lock) ({ __LOCK(lock); 1; }) -#define _write_trylock(lock) ({ __LOCK(lock); 1; }) -#define _spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; }) -#define _spin_unlock(lock) __UNLOCK(lock) -#define _read_unlock(lock) __UNLOCK(lock) -#define _write_unlock(lock) __UNLOCK(lock) -#define _spin_unlock_bh(lock) __UNLOCK_BH(lock) -#define _write_unlock_bh(lock) __UNLOCK_BH(lock) -#define _read_unlock_bh(lock) __UNLOCK_BH(lock) -#define _spin_unlock_irq(lock) __UNLOCK_IRQ(lock) -#define _read_unlock_irq(lock) __UNLOCK_IRQ(lock) -#define _write_unlock_irq(lock) __UNLOCK_IRQ(lock) -#define _spin_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) -#define _read_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) -#define _write_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) +#define _raw_spin_lock(lock) __LOCK(lock) +#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock) +#define _raw_read_lock(lock) __LOCK(lock) +#define _raw_write_lock(lock) __LOCK(lock) +#define _raw_spin_lock_bh(lock) __LOCK_BH(lock) +#define _raw_read_lock_bh(lock) __LOCK_BH(lock) +#define _raw_write_lock_bh(lock) __LOCK_BH(lock) +#define _raw_spin_lock_irq(lock) __LOCK_IRQ(lock) +#define _raw_read_lock_irq(lock) __LOCK_IRQ(lock) +#define _raw_write_lock_irq(lock) __LOCK_IRQ(lock) +#define _raw_spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) +#define _raw_read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) +#define _raw_write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) +#define _raw_spin_trylock(lock) ({ __LOCK(lock); 1; }) +#define _raw_read_trylock(lock) ({ __LOCK(lock); 1; }) +#define _raw_write_trylock(lock) ({ __LOCK(lock); 1; }) +#define _raw_spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; }) +#define _raw_spin_unlock(lock) __UNLOCK(lock) +#define _raw_read_unlock(lock) __UNLOCK(lock) +#define _raw_write_unlock(lock) __UNLOCK(lock) +#define _raw_spin_unlock_bh(lock) __UNLOCK_BH(lock) +#define _raw_write_unlock_bh(lock) __UNLOCK_BH(lock) +#define _raw_read_unlock_bh(lock) __UNLOCK_BH(lock) +#define 
_raw_spin_unlock_irq(lock) __UNLOCK_IRQ(lock) +#define _raw_read_unlock_irq(lock) __UNLOCK_IRQ(lock) +#define _raw_write_unlock_irq(lock) __UNLOCK_IRQ(lock) +#define _raw_spin_unlock_irqrestore(lock, flags) \ + __UNLOCK_IRQRESTORE(lock, flags) +#define _raw_read_unlock_irqrestore(lock, flags) \ + __UNLOCK_IRQRESTORE(lock, flags) +#define _raw_write_unlock_irqrestore(lock, flags) \ + __UNLOCK_IRQRESTORE(lock, flags) #endif /* __LINUX_SPINLOCK_API_UP_H */ diff --git a/kernel/spinlock.c b/kernel/spinlock.c index 795240b81224..be6517fb9c14 100644 --- a/kernel/spinlock.c +++ b/kernel/spinlock.c @@ -44,7 +44,7 @@ * towards that other CPU that it should break the lock ASAP. */ #define BUILD_LOCK_OPS(op, locktype) \ -void __lockfunc __##op##_lock(locktype##_t *lock) \ +void __lockfunc __raw_##op##_lock(locktype##_t *lock) \ { \ for (;;) { \ preempt_disable(); \ @@ -60,7 +60,7 @@ void __lockfunc __##op##_lock(locktype##_t *lock) \ (lock)->break_lock = 0; \ } \ \ -unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock) \ +unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \ { \ unsigned long flags; \ \ @@ -81,12 +81,12 @@ unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock) \ return flags; \ } \ \ -void __lockfunc __##op##_lock_irq(locktype##_t *lock) \ +void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock) \ { \ - _##op##_lock_irqsave(lock); \ + _raw_##op##_lock_irqsave(lock); \ } \ \ -void __lockfunc __##op##_lock_bh(locktype##_t *lock) \ +void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \ { \ unsigned long flags; \ \ @@ -95,7 +95,7 @@ void __lockfunc __##op##_lock_bh(locktype##_t *lock) \ /* irq-disabling. We use the generic preemption-aware */ \ /* function: */ \ /**/ \ - flags = _##op##_lock_irqsave(lock); \ + flags = _raw_##op##_lock_irqsave(lock); \ local_bh_disable(); \ local_irq_restore(flags); \ } \ @@ -116,240 +116,240 @@ BUILD_LOCK_OPS(write, rwlock); #endif #ifndef CONFIG_INLINE_SPIN_TRYLOCK -int __lockfunc _spin_trylock(raw_spinlock_t *lock) +int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock) { - return __spin_trylock(lock); + return __raw_spin_trylock(lock); } -EXPORT_SYMBOL(_spin_trylock); +EXPORT_SYMBOL(_raw_spin_trylock); #endif #ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH -int __lockfunc _spin_trylock_bh(raw_spinlock_t *lock) +int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock) { - return __spin_trylock_bh(lock); + return __raw_spin_trylock_bh(lock); } -EXPORT_SYMBOL(_spin_trylock_bh); +EXPORT_SYMBOL(_raw_spin_trylock_bh); #endif #ifndef CONFIG_INLINE_SPIN_LOCK -void __lockfunc _spin_lock(raw_spinlock_t *lock) +void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) { - __spin_lock(lock); + __raw_spin_lock(lock); } -EXPORT_SYMBOL(_spin_lock); +EXPORT_SYMBOL(_raw_spin_lock); #endif #ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE -unsigned long __lockfunc _spin_lock_irqsave(raw_spinlock_t *lock) +unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock) { - return __spin_lock_irqsave(lock); + return __raw_spin_lock_irqsave(lock); } -EXPORT_SYMBOL(_spin_lock_irqsave); +EXPORT_SYMBOL(_raw_spin_lock_irqsave); #endif #ifndef CONFIG_INLINE_SPIN_LOCK_IRQ -void __lockfunc _spin_lock_irq(raw_spinlock_t *lock) +void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock) { - __spin_lock_irq(lock); + __raw_spin_lock_irq(lock); } -EXPORT_SYMBOL(_spin_lock_irq); +EXPORT_SYMBOL(_raw_spin_lock_irq); #endif #ifndef CONFIG_INLINE_SPIN_LOCK_BH -void __lockfunc _spin_lock_bh(raw_spinlock_t *lock) +void __lockfunc 
_raw_spin_lock_bh(raw_spinlock_t *lock) { - __spin_lock_bh(lock); + __raw_spin_lock_bh(lock); } -EXPORT_SYMBOL(_spin_lock_bh); +EXPORT_SYMBOL(_raw_spin_lock_bh); #endif #ifndef CONFIG_INLINE_SPIN_UNLOCK -void __lockfunc _spin_unlock(raw_spinlock_t *lock) +void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) { - __spin_unlock(lock); + __raw_spin_unlock(lock); } -EXPORT_SYMBOL(_spin_unlock); +EXPORT_SYMBOL(_raw_spin_unlock); #endif #ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE -void __lockfunc _spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) +void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) { - __spin_unlock_irqrestore(lock, flags); + __raw_spin_unlock_irqrestore(lock, flags); } -EXPORT_SYMBOL(_spin_unlock_irqrestore); +EXPORT_SYMBOL(_raw_spin_unlock_irqrestore); #endif #ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ -void __lockfunc _spin_unlock_irq(raw_spinlock_t *lock) +void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) { - __spin_unlock_irq(lock); + __raw_spin_unlock_irq(lock); } -EXPORT_SYMBOL(_spin_unlock_irq); +EXPORT_SYMBOL(_raw_spin_unlock_irq); #endif #ifndef CONFIG_INLINE_SPIN_UNLOCK_BH -void __lockfunc _spin_unlock_bh(raw_spinlock_t *lock) +void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) { - __spin_unlock_bh(lock); + __raw_spin_unlock_bh(lock); } -EXPORT_SYMBOL(_spin_unlock_bh); +EXPORT_SYMBOL(_raw_spin_unlock_bh); #endif #ifndef CONFIG_INLINE_READ_TRYLOCK -int __lockfunc _read_trylock(rwlock_t *lock) +int __lockfunc _raw_read_trylock(rwlock_t *lock) { - return __read_trylock(lock); + return __raw_read_trylock(lock); } -EXPORT_SYMBOL(_read_trylock); +EXPORT_SYMBOL(_raw_read_trylock); #endif #ifndef CONFIG_INLINE_READ_LOCK -void __lockfunc _read_lock(rwlock_t *lock) +void __lockfunc _raw_read_lock(rwlock_t *lock) { - __read_lock(lock); + __raw_read_lock(lock); } -EXPORT_SYMBOL(_read_lock); +EXPORT_SYMBOL(_raw_read_lock); #endif #ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE -unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) +unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock) { - return __read_lock_irqsave(lock); + return __raw_read_lock_irqsave(lock); } -EXPORT_SYMBOL(_read_lock_irqsave); +EXPORT_SYMBOL(_raw_read_lock_irqsave); #endif #ifndef CONFIG_INLINE_READ_LOCK_IRQ -void __lockfunc _read_lock_irq(rwlock_t *lock) +void __lockfunc _raw_read_lock_irq(rwlock_t *lock) { - __read_lock_irq(lock); + __raw_read_lock_irq(lock); } -EXPORT_SYMBOL(_read_lock_irq); +EXPORT_SYMBOL(_raw_read_lock_irq); #endif #ifndef CONFIG_INLINE_READ_LOCK_BH -void __lockfunc _read_lock_bh(rwlock_t *lock) +void __lockfunc _raw_read_lock_bh(rwlock_t *lock) { - __read_lock_bh(lock); + __raw_read_lock_bh(lock); } -EXPORT_SYMBOL(_read_lock_bh); +EXPORT_SYMBOL(_raw_read_lock_bh); #endif #ifndef CONFIG_INLINE_READ_UNLOCK -void __lockfunc _read_unlock(rwlock_t *lock) +void __lockfunc _raw_read_unlock(rwlock_t *lock) { - __read_unlock(lock); + __raw_read_unlock(lock); } -EXPORT_SYMBOL(_read_unlock); +EXPORT_SYMBOL(_raw_read_unlock); #endif #ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE -void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) +void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) { - __read_unlock_irqrestore(lock, flags); + __raw_read_unlock_irqrestore(lock, flags); } -EXPORT_SYMBOL(_read_unlock_irqrestore); +EXPORT_SYMBOL(_raw_read_unlock_irqrestore); #endif #ifndef CONFIG_INLINE_READ_UNLOCK_IRQ -void __lockfunc _read_unlock_irq(rwlock_t *lock) +void __lockfunc 
_raw_read_unlock_irq(rwlock_t *lock) { - __read_unlock_irq(lock); + __raw_read_unlock_irq(lock); } -EXPORT_SYMBOL(_read_unlock_irq); +EXPORT_SYMBOL(_raw_read_unlock_irq); #endif #ifndef CONFIG_INLINE_READ_UNLOCK_BH -void __lockfunc _read_unlock_bh(rwlock_t *lock) +void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) { - __read_unlock_bh(lock); + __raw_read_unlock_bh(lock); } -EXPORT_SYMBOL(_read_unlock_bh); +EXPORT_SYMBOL(_raw_read_unlock_bh); #endif #ifndef CONFIG_INLINE_WRITE_TRYLOCK -int __lockfunc _write_trylock(rwlock_t *lock) +int __lockfunc _raw_write_trylock(rwlock_t *lock) { - return __write_trylock(lock); + return __raw_write_trylock(lock); } -EXPORT_SYMBOL(_write_trylock); +EXPORT_SYMBOL(_raw_write_trylock); #endif #ifndef CONFIG_INLINE_WRITE_LOCK -void __lockfunc _write_lock(rwlock_t *lock) +void __lockfunc _raw_write_lock(rwlock_t *lock) { - __write_lock(lock); + __raw_write_lock(lock); } -EXPORT_SYMBOL(_write_lock); +EXPORT_SYMBOL(_raw_write_lock); #endif #ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE -unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) +unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock) { - return __write_lock_irqsave(lock); + return __raw_write_lock_irqsave(lock); } -EXPORT_SYMBOL(_write_lock_irqsave); +EXPORT_SYMBOL(_raw_write_lock_irqsave); #endif #ifndef CONFIG_INLINE_WRITE_LOCK_IRQ -void __lockfunc _write_lock_irq(rwlock_t *lock) +void __lockfunc _raw_write_lock_irq(rwlock_t *lock) { - __write_lock_irq(lock); + __raw_write_lock_irq(lock); } -EXPORT_SYMBOL(_write_lock_irq); +EXPORT_SYMBOL(_raw_write_lock_irq); #endif #ifndef CONFIG_INLINE_WRITE_LOCK_BH -void __lockfunc _write_lock_bh(rwlock_t *lock) +void __lockfunc _raw_write_lock_bh(rwlock_t *lock) { - __write_lock_bh(lock); + __raw_write_lock_bh(lock); } -EXPORT_SYMBOL(_write_lock_bh); +EXPORT_SYMBOL(_raw_write_lock_bh); #endif #ifndef CONFIG_INLINE_WRITE_UNLOCK -void __lockfunc _write_unlock(rwlock_t *lock) +void __lockfunc _raw_write_unlock(rwlock_t *lock) { - __write_unlock(lock); + __raw_write_unlock(lock); } -EXPORT_SYMBOL(_write_unlock); +EXPORT_SYMBOL(_raw_write_unlock); #endif #ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE -void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) +void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) { - __write_unlock_irqrestore(lock, flags); + __raw_write_unlock_irqrestore(lock, flags); } -EXPORT_SYMBOL(_write_unlock_irqrestore); +EXPORT_SYMBOL(_raw_write_unlock_irqrestore); #endif #ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ -void __lockfunc _write_unlock_irq(rwlock_t *lock) +void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) { - __write_unlock_irq(lock); + __raw_write_unlock_irq(lock); } -EXPORT_SYMBOL(_write_unlock_irq); +EXPORT_SYMBOL(_raw_write_unlock_irq); #endif #ifndef CONFIG_INLINE_WRITE_UNLOCK_BH -void __lockfunc _write_unlock_bh(rwlock_t *lock) +void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) { - __write_unlock_bh(lock); + __raw_write_unlock_bh(lock); } -EXPORT_SYMBOL(_write_unlock_bh); +EXPORT_SYMBOL(_raw_write_unlock_bh); #endif #ifdef CONFIG_DEBUG_LOCK_ALLOC -void __lockfunc _spin_lock_nested(raw_spinlock_t *lock, int subclass) +void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) { preempt_disable(); spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); } -EXPORT_SYMBOL(_spin_lock_nested); +EXPORT_SYMBOL(_raw_spin_lock_nested); -unsigned long __lockfunc _spin_lock_irqsave_nested(raw_spinlock_t 
*lock, +unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass) { unsigned long flags; @@ -361,16 +361,16 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(raw_spinlock_t *lock, do_raw_spin_lock_flags, &flags); return flags; } -EXPORT_SYMBOL(_spin_lock_irqsave_nested); +EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested); -void __lockfunc _spin_lock_nest_lock(raw_spinlock_t *lock, +void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *nest_lock) { preempt_disable(); spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_); LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); } -EXPORT_SYMBOL(_spin_lock_nest_lock); +EXPORT_SYMBOL(_raw_spin_lock_nest_lock); #endif -- cgit v1.2.3 From a26724591edba5acc528d41f3906a972590e8f54 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 17 Nov 2009 14:46:14 +0100 Subject: plist: Make plist debugging raw_spinlock aware plists are used with spinlocks and raw_spinlocks. Change the plist debugging to handle both types. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Acked-by: Ingo Molnar --- include/linux/plist.h | 43 +++++++++++++++++++++++++++++++++++++------ kernel/futex.c | 6 +++--- lib/plist.c | 8 +++++--- 3 files changed, 45 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/linux/plist.h b/include/linux/plist.h index 45926d77d6ac..8227f717c70f 100644 --- a/include/linux/plist.h +++ b/include/linux/plist.h @@ -81,7 +81,8 @@ struct plist_head { struct list_head prio_list; struct list_head node_list; #ifdef CONFIG_DEBUG_PI_LIST - spinlock_t *lock; + raw_spinlock_t *rawlock; + spinlock_t *spinlock; #endif }; @@ -91,9 +92,11 @@ struct plist_node { }; #ifdef CONFIG_DEBUG_PI_LIST -# define PLIST_HEAD_LOCK_INIT(_lock) .lock = _lock +# define PLIST_HEAD_LOCK_INIT(_lock) .spinlock = _lock +# define PLIST_HEAD_LOCK_INIT_RAW(_lock) .rawlock = _lock #else # define PLIST_HEAD_LOCK_INIT(_lock) +# define PLIST_HEAD_LOCK_INIT_RAW(_lock) #endif #define _PLIST_HEAD_INIT(head) \ @@ -107,10 +110,21 @@ struct plist_node { */ #define PLIST_HEAD_INIT(head, _lock) \ { \ - _PLIST_HEAD_INIT(head), \ + _PLIST_HEAD_INIT(head), \ PLIST_HEAD_LOCK_INIT(&(_lock)) \ } +/** + * PLIST_HEAD_INIT_RAW - static struct plist_head initializer + * @head: struct plist_head variable name + * @_lock: lock to initialize for this list + */ +#define PLIST_HEAD_INIT_RAW(head, _lock) \ +{ \ + _PLIST_HEAD_INIT(head), \ + PLIST_HEAD_LOCK_INIT_RAW(&(_lock)) \ +} + /** * PLIST_NODE_INIT - static struct plist_node initializer * @node: struct plist_node variable name @@ -119,13 +133,13 @@ struct plist_node { #define PLIST_NODE_INIT(node, __prio) \ { \ .prio = (__prio), \ - .plist = { _PLIST_HEAD_INIT((node).plist) }, \ + .plist = { _PLIST_HEAD_INIT((node).plist) }, \ } /** * plist_head_init - dynamic struct plist_head initializer * @head: &struct plist_head pointer - * @lock: list spinlock, remembered for debugging + * @lock: spinlock protecting the list (debugging) */ static inline void plist_head_init(struct plist_head *head, spinlock_t *lock) @@ -133,7 +147,24 @@ plist_head_init(struct plist_head *head, spinlock_t *lock) INIT_LIST_HEAD(&head->prio_list); INIT_LIST_HEAD(&head->node_list); #ifdef CONFIG_DEBUG_PI_LIST - head->lock = lock; + head->spinlock = lock; + head->rawlock = NULL; +#endif +} + +/** + * plist_head_init_raw - dynamic struct plist_head initializer + * @head: &struct plist_head pointer + * @lock: raw_spinlock protecting the list (debugging) + */ +static inline void 
+plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock) +{ + INIT_LIST_HEAD(&head->prio_list); + INIT_LIST_HEAD(&head->node_list); +#ifdef CONFIG_DEBUG_PI_LIST + head->rawlock = lock; + head->spinlock = NULL; #endif } diff --git a/kernel/futex.c b/kernel/futex.c index d73ef1f3e55d..6af474df17bb 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -1010,7 +1010,7 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1, plist_add(&q->list, &hb2->chain); q->lock_ptr = &hb2->lock; #ifdef CONFIG_DEBUG_PI_LIST - q->list.plist.lock = &hb2->lock; + q->list.plist.spinlock = &hb2->lock; #endif } get_futex_key_refs(key2); @@ -1046,7 +1046,7 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, q->lock_ptr = &hb->lock; #ifdef CONFIG_DEBUG_PI_LIST - q->list.plist.lock = &hb->lock; + q->list.plist.spinlock = &hb->lock; #endif wake_up_state(q->task, TASK_NORMAL); @@ -1394,7 +1394,7 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) plist_node_init(&q->list, prio); #ifdef CONFIG_DEBUG_PI_LIST - q->list.plist.lock = &hb->lock; + q->list.plist.spinlock = &hb->lock; #endif plist_add(&q->list, &hb->chain); q->task = current; diff --git a/lib/plist.c b/lib/plist.c index d6c64a824e1d..1471988d9190 100644 --- a/lib/plist.c +++ b/lib/plist.c @@ -54,9 +54,11 @@ static void plist_check_list(struct list_head *top) static void plist_check_head(struct plist_head *head) { - WARN_ON(!head->lock); - if (head->lock) - WARN_ON_SMP(!spin_is_locked(head->lock)); + WARN_ON(!head->rawlock && !head->spinlock); + if (head->rawlock) + WARN_ON_SMP(!raw_spin_is_locked(head->rawlock)); + if (head->spinlock) + WARN_ON_SMP(!spin_is_locked(head->spinlock)); plist_check_list(&head->prio_list); plist_check_list(&head->node_list); } -- cgit v1.2.3 From 1d615482547584b9a8bb6316a58fed6ce90dd9ff Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 17 Nov 2009 14:54:03 +0100 Subject: sched: Convert pi_lock to raw_spinlock Convert locks which cannot be sleeping locks in preempt-rt to raw_spinlocks. 
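The conversion pattern is mechanical: the lock's type gains the raw_ prefix, every operation on it follows suit, and a plist head guarded by such a lock switches to the raw-aware initializer. A condensed example of the pattern, distilled from the include/linux/sched.h and kernel/fork.c hunks below (unrelated fields elided):

	struct task_struct {
		/* ... */
		raw_spinlock_t pi_lock;		/* was: spinlock_t pi_lock */
		/* ... */
	};

	static void rt_mutex_init_task(struct task_struct *p)
	{
		raw_spin_lock_init(&p->pi_lock);	/* was: spin_lock_init() */
	#ifdef CONFIG_RT_MUTEXES
		/* pi_waiters is now protected by a raw lock: use the raw-aware init */
		plist_head_init_raw(&p->pi_waiters, &p->pi_lock);
		p->pi_blocked_on = NULL;
	#endif
	}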
Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Acked-by: Ingo Molnar --- include/linux/init_task.h | 2 +- include/linux/sched.h | 2 +- kernel/exit.c | 2 +- kernel/fork.c | 4 ++-- kernel/futex.c | 38 +++++++++++++++---------------- kernel/rtmutex-debug.c | 4 ++-- kernel/rtmutex.c | 58 +++++++++++++++++++++++------------------------ kernel/sched.c | 12 +++++----- 8 files changed, 61 insertions(+), 61 deletions(-) (limited to 'include') diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 8d10aa7fd4c9..abec69b63d7e 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -165,7 +165,7 @@ extern struct cred init_cred; .journal_info = NULL, \ .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ .fs_excl = ATOMIC_INIT(0), \ - .pi_lock = __SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ + .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ .timer_slack_ns = 50000, /* 50 usec default slack */ \ .pids = { \ [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ diff --git a/include/linux/sched.h b/include/linux/sched.h index 294eb2f80144..41a9ea322dce 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1409,7 +1409,7 @@ struct task_struct { #endif /* Protection of the PI data structures: */ - spinlock_t pi_lock; + raw_spinlock_t pi_lock; #ifdef CONFIG_RT_MUTEXES /* PI waiters blocked on a rt_mutex held by this task */ diff --git a/kernel/exit.c b/kernel/exit.c index 6f50ef55a6f3..5962d7ccf243 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -933,7 +933,7 @@ NORET_TYPE void do_exit(long code) * an exiting task cleaning up the robust pi futexes. */ smp_mb(); - spin_unlock_wait(&tsk->pi_lock); + raw_spin_unlock_wait(&tsk->pi_lock); if (unlikely(in_atomic())) printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n", diff --git a/kernel/fork.c b/kernel/fork.c index 1415dc4598ae..9bd91447e052 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -939,9 +939,9 @@ SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr) static void rt_mutex_init_task(struct task_struct *p) { - spin_lock_init(&p->pi_lock); + raw_spin_lock_init(&p->pi_lock); #ifdef CONFIG_RT_MUTEXES - plist_head_init(&p->pi_waiters, &p->pi_lock); + plist_head_init_raw(&p->pi_waiters, &p->pi_lock); p->pi_blocked_on = NULL; #endif } diff --git a/kernel/futex.c b/kernel/futex.c index 6af474df17bb..320b369d20b5 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -403,9 +403,9 @@ static void free_pi_state(struct futex_pi_state *pi_state) * and has cleaned up the pi_state already */ if (pi_state->owner) { - spin_lock_irq(&pi_state->owner->pi_lock); + raw_spin_lock_irq(&pi_state->owner->pi_lock); list_del_init(&pi_state->list); - spin_unlock_irq(&pi_state->owner->pi_lock); + raw_spin_unlock_irq(&pi_state->owner->pi_lock); rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner); } @@ -470,18 +470,18 @@ void exit_pi_state_list(struct task_struct *curr) * pi_state_list anymore, but we have to be careful * versus waiters unqueueing themselves: */ - spin_lock_irq(&curr->pi_lock); + raw_spin_lock_irq(&curr->pi_lock); while (!list_empty(head)) { next = head->next; pi_state = list_entry(next, struct futex_pi_state, list); key = pi_state->key; hb = hash_futex(&key); - spin_unlock_irq(&curr->pi_lock); + raw_spin_unlock_irq(&curr->pi_lock); spin_lock(&hb->lock); - spin_lock_irq(&curr->pi_lock); + raw_spin_lock_irq(&curr->pi_lock); /* * We dropped the pi-lock, so re-check whether this * task still owns the PI-state: @@ -495,15 +495,15 @@ void exit_pi_state_list(struct task_struct *curr) 
WARN_ON(list_empty(&pi_state->list)); list_del_init(&pi_state->list); pi_state->owner = NULL; - spin_unlock_irq(&curr->pi_lock); + raw_spin_unlock_irq(&curr->pi_lock); rt_mutex_unlock(&pi_state->pi_mutex); spin_unlock(&hb->lock); - spin_lock_irq(&curr->pi_lock); + raw_spin_lock_irq(&curr->pi_lock); } - spin_unlock_irq(&curr->pi_lock); + raw_spin_unlock_irq(&curr->pi_lock); } static int @@ -558,7 +558,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, * change of the task flags, we do this protected by * p->pi_lock: */ - spin_lock_irq(&p->pi_lock); + raw_spin_lock_irq(&p->pi_lock); if (unlikely(p->flags & PF_EXITING)) { /* * The task is on the way out. When PF_EXITPIDONE is @@ -567,7 +567,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, */ int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN; - spin_unlock_irq(&p->pi_lock); + raw_spin_unlock_irq(&p->pi_lock); put_task_struct(p); return ret; } @@ -586,7 +586,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, WARN_ON(!list_empty(&pi_state->list)); list_add(&pi_state->list, &p->pi_state_list); pi_state->owner = p; - spin_unlock_irq(&p->pi_lock); + raw_spin_unlock_irq(&p->pi_lock); put_task_struct(p); @@ -794,16 +794,16 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) } } - spin_lock_irq(&pi_state->owner->pi_lock); + raw_spin_lock_irq(&pi_state->owner->pi_lock); WARN_ON(list_empty(&pi_state->list)); list_del_init(&pi_state->list); - spin_unlock_irq(&pi_state->owner->pi_lock); + raw_spin_unlock_irq(&pi_state->owner->pi_lock); - spin_lock_irq(&new_owner->pi_lock); + raw_spin_lock_irq(&new_owner->pi_lock); WARN_ON(!list_empty(&pi_state->list)); list_add(&pi_state->list, &new_owner->pi_state_list); pi_state->owner = new_owner; - spin_unlock_irq(&new_owner->pi_lock); + raw_spin_unlock_irq(&new_owner->pi_lock); spin_unlock(&pi_state->pi_mutex.wait_lock); rt_mutex_unlock(&pi_state->pi_mutex); @@ -1529,18 +1529,18 @@ retry: * itself. 
*/ if (pi_state->owner != NULL) { - spin_lock_irq(&pi_state->owner->pi_lock); + raw_spin_lock_irq(&pi_state->owner->pi_lock); WARN_ON(list_empty(&pi_state->list)); list_del_init(&pi_state->list); - spin_unlock_irq(&pi_state->owner->pi_lock); + raw_spin_unlock_irq(&pi_state->owner->pi_lock); } pi_state->owner = newowner; - spin_lock_irq(&newowner->pi_lock); + raw_spin_lock_irq(&newowner->pi_lock); WARN_ON(!list_empty(&pi_state->list)); list_add(&pi_state->list, &newowner->pi_state_list); - spin_unlock_irq(&newowner->pi_lock); + raw_spin_unlock_irq(&newowner->pi_lock); return 0; /* diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c index 5fcb4fe645e2..ddabb54bb5c8 100644 --- a/kernel/rtmutex-debug.c +++ b/kernel/rtmutex-debug.c @@ -37,8 +37,8 @@ do { \ if (rt_trace_on) { \ rt_trace_on = 0; \ console_verbose(); \ - if (spin_is_locked(¤t->pi_lock)) \ - spin_unlock(¤t->pi_lock); \ + if (raw_spin_is_locked(¤t->pi_lock)) \ + raw_spin_unlock(¤t->pi_lock); \ } \ } while (0) diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index 29bd4baf9e75..d33da470f9da 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c @@ -138,9 +138,9 @@ static void rt_mutex_adjust_prio(struct task_struct *task) { unsigned long flags; - spin_lock_irqsave(&task->pi_lock, flags); + raw_spin_lock_irqsave(&task->pi_lock, flags); __rt_mutex_adjust_prio(task); - spin_unlock_irqrestore(&task->pi_lock, flags); + raw_spin_unlock_irqrestore(&task->pi_lock, flags); } /* @@ -195,7 +195,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, /* * Task can not go away as we did a get_task() before ! */ - spin_lock_irqsave(&task->pi_lock, flags); + raw_spin_lock_irqsave(&task->pi_lock, flags); waiter = task->pi_blocked_on; /* @@ -232,7 +232,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, lock = waiter->lock; if (!spin_trylock(&lock->wait_lock)) { - spin_unlock_irqrestore(&task->pi_lock, flags); + raw_spin_unlock_irqrestore(&task->pi_lock, flags); cpu_relax(); goto retry; } @@ -253,13 +253,13 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, plist_add(&waiter->list_entry, &lock->wait_list); /* Release the task */ - spin_unlock_irqrestore(&task->pi_lock, flags); + raw_spin_unlock_irqrestore(&task->pi_lock, flags); put_task_struct(task); /* Grab the next task */ task = rt_mutex_owner(lock); get_task_struct(task); - spin_lock_irqsave(&task->pi_lock, flags); + raw_spin_lock_irqsave(&task->pi_lock, flags); if (waiter == rt_mutex_top_waiter(lock)) { /* Boost the owner */ @@ -277,7 +277,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, __rt_mutex_adjust_prio(task); } - spin_unlock_irqrestore(&task->pi_lock, flags); + raw_spin_unlock_irqrestore(&task->pi_lock, flags); top_waiter = rt_mutex_top_waiter(lock); spin_unlock(&lock->wait_lock); @@ -288,7 +288,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, goto again; out_unlock_pi: - spin_unlock_irqrestore(&task->pi_lock, flags); + raw_spin_unlock_irqrestore(&task->pi_lock, flags); out_put_task: put_task_struct(task); @@ -313,9 +313,9 @@ static inline int try_to_steal_lock(struct rt_mutex *lock, if (pendowner == task) return 1; - spin_lock_irqsave(&pendowner->pi_lock, flags); + raw_spin_lock_irqsave(&pendowner->pi_lock, flags); if (task->prio >= pendowner->prio) { - spin_unlock_irqrestore(&pendowner->pi_lock, flags); + raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags); return 0; } @@ -325,7 +325,7 @@ static inline int try_to_steal_lock(struct rt_mutex *lock, * priority. 
*/ if (likely(!rt_mutex_has_waiters(lock))) { - spin_unlock_irqrestore(&pendowner->pi_lock, flags); + raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags); return 1; } @@ -333,7 +333,7 @@ static inline int try_to_steal_lock(struct rt_mutex *lock, next = rt_mutex_top_waiter(lock); plist_del(&next->pi_list_entry, &pendowner->pi_waiters); __rt_mutex_adjust_prio(pendowner); - spin_unlock_irqrestore(&pendowner->pi_lock, flags); + raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags); /* * We are going to steal the lock and a waiter was @@ -350,10 +350,10 @@ static inline int try_to_steal_lock(struct rt_mutex *lock, * might be task: */ if (likely(next->task != task)) { - spin_lock_irqsave(&task->pi_lock, flags); + raw_spin_lock_irqsave(&task->pi_lock, flags); plist_add(&next->pi_list_entry, &task->pi_waiters); __rt_mutex_adjust_prio(task); - spin_unlock_irqrestore(&task->pi_lock, flags); + raw_spin_unlock_irqrestore(&task->pi_lock, flags); } return 1; } @@ -420,7 +420,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, unsigned long flags; int chain_walk = 0, res; - spin_lock_irqsave(&task->pi_lock, flags); + raw_spin_lock_irqsave(&task->pi_lock, flags); __rt_mutex_adjust_prio(task); waiter->task = task; waiter->lock = lock; @@ -434,17 +434,17 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, task->pi_blocked_on = waiter; - spin_unlock_irqrestore(&task->pi_lock, flags); + raw_spin_unlock_irqrestore(&task->pi_lock, flags); if (waiter == rt_mutex_top_waiter(lock)) { - spin_lock_irqsave(&owner->pi_lock, flags); + raw_spin_lock_irqsave(&owner->pi_lock, flags); plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters); plist_add(&waiter->pi_list_entry, &owner->pi_waiters); __rt_mutex_adjust_prio(owner); if (owner->pi_blocked_on) chain_walk = 1; - spin_unlock_irqrestore(&owner->pi_lock, flags); + raw_spin_unlock_irqrestore(&owner->pi_lock, flags); } else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) chain_walk = 1; @@ -483,7 +483,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock) struct task_struct *pendowner; unsigned long flags; - spin_lock_irqsave(¤t->pi_lock, flags); + raw_spin_lock_irqsave(¤t->pi_lock, flags); waiter = rt_mutex_top_waiter(lock); plist_del(&waiter->list_entry, &lock->wait_list); @@ -500,7 +500,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock) rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING); - spin_unlock_irqrestore(¤t->pi_lock, flags); + raw_spin_unlock_irqrestore(¤t->pi_lock, flags); /* * Clear the pi_blocked_on variable and enqueue a possible @@ -509,7 +509,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock) * waiter with higher priority than pending-owner->normal_prio * is blocked on the unboosted (pending) owner. 
*/ - spin_lock_irqsave(&pendowner->pi_lock, flags); + raw_spin_lock_irqsave(&pendowner->pi_lock, flags); WARN_ON(!pendowner->pi_blocked_on); WARN_ON(pendowner->pi_blocked_on != waiter); @@ -523,7 +523,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock) next = rt_mutex_top_waiter(lock); plist_add(&next->pi_list_entry, &pendowner->pi_waiters); } - spin_unlock_irqrestore(&pendowner->pi_lock, flags); + raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags); wake_up_process(pendowner); } @@ -541,15 +541,15 @@ static void remove_waiter(struct rt_mutex *lock, unsigned long flags; int chain_walk = 0; - spin_lock_irqsave(&current->pi_lock, flags); + raw_spin_lock_irqsave(&current->pi_lock, flags); plist_del(&waiter->list_entry, &lock->wait_list); waiter->task = NULL; current->pi_blocked_on = NULL; - spin_unlock_irqrestore(&current->pi_lock, flags); + raw_spin_unlock_irqrestore(&current->pi_lock, flags); if (first && owner != current) { - spin_lock_irqsave(&owner->pi_lock, flags); + raw_spin_lock_irqsave(&owner->pi_lock, flags); plist_del(&waiter->pi_list_entry, &owner->pi_waiters); @@ -564,7 +564,7 @@ static void remove_waiter(struct rt_mutex *lock, if (owner->pi_blocked_on) chain_walk = 1; - spin_unlock_irqrestore(&owner->pi_lock, flags); + raw_spin_unlock_irqrestore(&owner->pi_lock, flags); } WARN_ON(!plist_node_empty(&waiter->pi_list_entry)); @@ -592,15 +592,15 @@ void rt_mutex_adjust_pi(struct task_struct *task) struct rt_mutex_waiter *waiter; unsigned long flags; - spin_lock_irqsave(&task->pi_lock, flags); + raw_spin_lock_irqsave(&task->pi_lock, flags); waiter = task->pi_blocked_on; if (!waiter || waiter->list_entry.prio == task->prio) { - spin_unlock_irqrestore(&task->pi_lock, flags); + raw_spin_unlock_irqrestore(&task->pi_lock, flags); return; } - spin_unlock_irqrestore(&task->pi_lock, flags); + raw_spin_unlock_irqrestore(&task->pi_lock, flags); /* gets dropped in rt_mutex_adjust_prio_chain()! */ get_task_struct(task); diff --git a/kernel/sched.c b/kernel/sched.c index 01c5016e57f1..18cceeecce35 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -6323,7 +6323,7 @@ recheck: * make sure no PI-waiters arrive (or leave) while we are * changing the priority of the task: */ - spin_lock_irqsave(&p->pi_lock, flags); + raw_spin_lock_irqsave(&p->pi_lock, flags); /* * To be able to change p->policy safely, the apropriate * runqueue lock must be held.
@@ -6333,7 +6333,7 @@ recheck: if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { policy = oldpolicy = -1; __task_rq_unlock(rq); - spin_unlock_irqrestore(&p->pi_lock, flags); + raw_spin_unlock_irqrestore(&p->pi_lock, flags); goto recheck; } update_rq_clock(rq); @@ -6357,7 +6357,7 @@ recheck: check_class_changed(rq, p, prev_class, oldprio, running); } __task_rq_unlock(rq); - spin_unlock_irqrestore(&p->pi_lock, flags); + raw_spin_unlock_irqrestore(&p->pi_lock, flags); rt_mutex_adjust_pi(p); @@ -9624,7 +9624,7 @@ void __init sched_init(void) #endif #ifdef CONFIG_RT_MUTEXES - plist_head_init(&init_task.pi_waiters, &init_task.pi_lock); + plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock); #endif /* @@ -9749,13 +9749,13 @@ void normalize_rt_tasks(void) continue; } - spin_lock(&p->pi_lock); + raw_spin_lock(&p->pi_lock); rq = __task_rq_lock(p); normalize_task(rq, p); __task_rq_unlock(rq); - spin_unlock(&p->pi_lock); + raw_spin_unlock(&p->pi_lock); } while_each_thread(g, p); read_unlock_irqrestore(&tasklist_lock, flags); -- cgit v1.2.3 From d209d74d52ab39dc071656533cac095294f70de7 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 17 Nov 2009 18:22:11 +0100 Subject: rtmutes: Convert rtmutex.lock to raw_spinlock Convert locks which cannot be sleeping locks in preempt-rt to raw_spinlocks. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Acked-by: Ingo Molnar --- include/linux/rtmutex.h | 6 +++--- kernel/futex.c | 6 +++--- kernel/rtmutex.c | 48 ++++++++++++++++++++++++------------------------ 3 files changed, 30 insertions(+), 30 deletions(-) (limited to 'include') diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h index f19b00b7d530..281d8fd775e8 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h @@ -24,7 +24,7 @@ * @owner: the mutex owner */ struct rt_mutex { - spinlock_t wait_lock; + raw_spinlock_t wait_lock; struct plist_head wait_list; struct task_struct *owner; #ifdef CONFIG_DEBUG_RT_MUTEXES @@ -63,8 +63,8 @@ struct hrtimer_sleeper; #endif #define __RT_MUTEX_INITIALIZER(mutexname) \ - { .wait_lock = __SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ - , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list, mutexname.wait_lock) \ + { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ + , .wait_list = PLIST_HEAD_INIT_RAW(mutexname.wait_list, mutexname.wait_lock) \ , .owner = NULL \ __DEBUG_RT_MUTEX_INITIALIZER(mutexname)} diff --git a/kernel/futex.c b/kernel/futex.c index 320b369d20b5..8e3c3ffe1b9a 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -760,7 +760,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) if (!pi_state) return -EINVAL; - spin_lock(&pi_state->pi_mutex.wait_lock); + raw_spin_lock(&pi_state->pi_mutex.wait_lock); new_owner = rt_mutex_next_owner(&pi_state->pi_mutex); /* @@ -789,7 +789,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) else if (curval != uval) ret = -EINVAL; if (ret) { - spin_unlock(&pi_state->pi_mutex.wait_lock); + raw_spin_unlock(&pi_state->pi_mutex.wait_lock); return ret; } } @@ -805,7 +805,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) pi_state->owner = new_owner; raw_spin_unlock_irq(&new_owner->pi_lock); - spin_unlock(&pi_state->pi_mutex.wait_lock); + raw_spin_unlock(&pi_state->pi_mutex.wait_lock); rt_mutex_unlock(&pi_state->pi_mutex); return 0; diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index d33da470f9da..a9604815786a 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c @@ -231,7 +231,7 @@ static 
int rt_mutex_adjust_prio_chain(struct task_struct *task, goto out_unlock_pi; lock = waiter->lock; - if (!spin_trylock(&lock->wait_lock)) { + if (!raw_spin_trylock(&lock->wait_lock)) { raw_spin_unlock_irqrestore(&task->pi_lock, flags); cpu_relax(); goto retry; @@ -240,7 +240,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, /* Deadlock detection */ if (lock == orig_lock || rt_mutex_owner(lock) == top_task) { debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock); - spin_unlock(&lock->wait_lock); + raw_spin_unlock(&lock->wait_lock); ret = deadlock_detect ? -EDEADLK : 0; goto out_unlock_pi; } @@ -280,7 +280,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, raw_spin_unlock_irqrestore(&task->pi_lock, flags); top_waiter = rt_mutex_top_waiter(lock); - spin_unlock(&lock->wait_lock); + raw_spin_unlock(&lock->wait_lock); if (!detect_deadlock && waiter != top_waiter) goto out_put_task; @@ -459,12 +459,12 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, */ get_task_struct(owner); - spin_unlock(&lock->wait_lock); + raw_spin_unlock(&lock->wait_lock); res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter, task); - spin_lock(&lock->wait_lock); + raw_spin_lock(&lock->wait_lock); return res; } @@ -575,11 +575,11 @@ static void remove_waiter(struct rt_mutex *lock, /* gets dropped in rt_mutex_adjust_prio_chain()! */ get_task_struct(owner); - spin_unlock(&lock->wait_lock); + raw_spin_unlock(&lock->wait_lock); rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current); - spin_lock(&lock->wait_lock); + raw_spin_lock(&lock->wait_lock); } /* @@ -672,14 +672,14 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state, break; } - spin_unlock(&lock->wait_lock); + raw_spin_unlock(&lock->wait_lock); debug_rt_mutex_print_deadlock(waiter); if (waiter->task) schedule_rt_mutex(lock); - spin_lock(&lock->wait_lock); + raw_spin_lock(&lock->wait_lock); set_current_state(state); } @@ -700,11 +700,11 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, debug_rt_mutex_init_waiter(&waiter); waiter.task = NULL; - spin_lock(&lock->wait_lock); + raw_spin_lock(&lock->wait_lock); /* Try to acquire the lock again: */ if (try_to_take_rt_mutex(lock)) { - spin_unlock(&lock->wait_lock); + raw_spin_unlock(&lock->wait_lock); return 0; } @@ -731,7 +731,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, */ fixup_rt_mutex_waiters(lock); - spin_unlock(&lock->wait_lock); + raw_spin_unlock(&lock->wait_lock); /* Remove pending timer: */ if (unlikely(timeout)) @@ -758,7 +758,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock) { int ret = 0; - spin_lock(&lock->wait_lock); + raw_spin_lock(&lock->wait_lock); if (likely(rt_mutex_owner(lock) != current)) { @@ -770,7 +770,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock) fixup_rt_mutex_waiters(lock); } - spin_unlock(&lock->wait_lock); + raw_spin_unlock(&lock->wait_lock); return ret; } @@ -781,7 +781,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock) static void __sched rt_mutex_slowunlock(struct rt_mutex *lock) { - spin_lock(&lock->wait_lock); + raw_spin_lock(&lock->wait_lock); debug_rt_mutex_unlock(lock); @@ -789,13 +789,13 @@ rt_mutex_slowunlock(struct rt_mutex *lock) if (!rt_mutex_has_waiters(lock)) { lock->owner = NULL; - spin_unlock(&lock->wait_lock); + raw_spin_unlock(&lock->wait_lock); return; } wakeup_next_waiter(lock); - spin_unlock(&lock->wait_lock); + raw_spin_unlock(&lock->wait_lock); /* Undo pi boosting if necessary: */ rt_mutex_adjust_prio(current); @@ -970,8 +970,8 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy); void 
__rt_mutex_init(struct rt_mutex *lock, const char *name) { lock->owner = NULL; - spin_lock_init(&lock->wait_lock); - plist_head_init(&lock->wait_list, &lock->wait_lock); + raw_spin_lock_init(&lock->wait_lock); + plist_head_init_raw(&lock->wait_list, &lock->wait_lock); debug_rt_mutex_init(lock, name); } @@ -1032,7 +1032,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock, { int ret; - spin_lock(&lock->wait_lock); + raw_spin_lock(&lock->wait_lock); mark_rt_mutex_waiters(lock); @@ -1040,7 +1040,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock, /* We got the lock for task. */ debug_rt_mutex_lock(lock); rt_mutex_set_owner(lock, task, 0); - spin_unlock(&lock->wait_lock); + raw_spin_unlock(&lock->wait_lock); rt_mutex_deadlock_account_lock(lock, task); return 1; } @@ -1056,7 +1056,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock, */ ret = 0; } - spin_unlock(&lock->wait_lock); + raw_spin_unlock(&lock->wait_lock); debug_rt_mutex_print_deadlock(waiter); @@ -1106,7 +1106,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock, { int ret; - spin_lock(&lock->wait_lock); + raw_spin_lock(&lock->wait_lock); set_current_state(TASK_INTERRUPTIBLE); @@ -1124,7 +1124,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock, */ fixup_rt_mutex_waiters(lock); - spin_unlock(&lock->wait_lock); + raw_spin_unlock(&lock->wait_lock); /* * Readjust priority, when we did not get the lock. We might have been -- cgit v1.2.3 From 239007b8440abff689632f50cdf0f2b9e895b534 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 17 Nov 2009 16:46:45 +0100 Subject: genirq: Convert irq_desc.lock to raw_spinlock Convert locks which cannot be sleeping locks in preempt-rt to raw_spinlocks. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Acked-by: Ingo Molnar --- arch/alpha/kernel/irq.c | 4 +- arch/arm/include/asm/mach/irq.h | 4 +- arch/arm/kernel/irq.c | 12 ++--- arch/arm/mach-ns9xxx/irq.c | 8 +-- arch/avr32/kernel/irq.c | 4 +- arch/blackfin/kernel/irqchip.c | 6 +-- arch/blackfin/kernel/traps.c | 4 +- arch/cris/kernel/irq.c | 4 +- arch/frv/kernel/irq.c | 4 +- arch/h8300/kernel/irq.c | 4 +- arch/ia64/kernel/iosapic.c | 6 +-- arch/ia64/kernel/irq.c | 4 +- arch/ia64/kernel/irq_ia64.c | 4 +- arch/m32r/kernel/irq.c | 4 +- arch/microblaze/kernel/irq.c | 4 +- arch/mips/kernel/irq.c | 4 +- arch/mips/vr41xx/common/icu.c | 92 ++++++++++++++++----------------- arch/mn10300/kernel/irq.c | 4 +- arch/parisc/kernel/irq.c | 4 +- arch/powerpc/kernel/irq.c | 8 +-- arch/powerpc/platforms/52xx/media5200.c | 8 +-- arch/powerpc/platforms/cell/interrupt.c | 8 +-- arch/powerpc/platforms/iseries/irq.c | 4 +- arch/powerpc/platforms/pseries/xics.c | 4 +- arch/powerpc/sysdev/fsl_msi.c | 4 +- arch/powerpc/sysdev/uic.c | 8 +-- arch/sh/kernel/irq.c | 4 +- arch/sparc/kernel/irq_64.c | 8 +-- arch/um/kernel/irq.c | 4 +- arch/x86/kernel/apic/io_apic.c | 4 +- arch/x86/kernel/irq.c | 14 ++--- arch/xtensa/kernel/irq.c | 4 +- include/linux/irq.h | 2 +- kernel/irq/autoprobe.c | 20 +++---- kernel/irq/chip.c | 86 +++++++++++++++--------------- kernel/irq/handle.c | 22 ++++---- kernel/irq/internals.h | 2 +- kernel/irq/manage.c | 50 +++++++++--------- kernel/irq/migration.c | 2 +- kernel/irq/numa_migrate.c | 8 +-- kernel/irq/pm.c | 8 +-- kernel/irq/proc.c | 4 +- kernel/irq/spurious.c | 14 ++--- 43 files changed, 240 insertions(+), 240 deletions(-) (limited to 'include') diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c index c0de072b8305..5f2cf23c4648 100644 --- a/arch/alpha/kernel/irq.c +++ b/arch/alpha/kernel/irq.c @@ -81,7 
+81,7 @@ show_interrupts(struct seq_file *p, void *v) #endif if (irq < ACTUAL_NR_IRQS) { - spin_lock_irqsave(&irq_desc[irq].lock, flags); + raw_spin_lock_irqsave(&irq_desc[irq].lock, flags); action = irq_desc[irq].action; if (!action) goto unlock; @@ -105,7 +105,7 @@ show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); unlock: - spin_unlock_irqrestore(&irq_desc[irq].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags); } else if (irq == ACTUAL_NR_IRQS) { #ifdef CONFIG_SMP seq_puts(p, "IPI: "); diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h index acac5302e4ea..8920b2d6e3b8 100644 --- a/arch/arm/include/asm/mach/irq.h +++ b/arch/arm/include/asm/mach/irq.h @@ -26,9 +26,9 @@ extern int show_fiq_list(struct seq_file *, void *); */ #define do_bad_IRQ(irq,desc) \ do { \ - spin_lock(&desc->lock); \ + raw_spin_lock(&desc->lock); \ handle_bad_irq(irq, desc); \ - spin_unlock(&desc->lock); \ + raw_spin_unlock(&desc->lock); \ } while(0) #endif diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index c9a8619f3856..b7cb45bb91e8 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -69,7 +69,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto unlock; @@ -84,7 +84,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); unlock: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) { #ifdef CONFIG_FIQ show_fiq_list(p, v); @@ -139,7 +139,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags) } desc = irq_desc + irq; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; if (iflags & IRQF_VALID) desc->status &= ~IRQ_NOREQUEST; @@ -147,7 +147,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags) desc->status &= ~IRQ_NOPROBE; if (!(iflags & IRQF_NOAUTOEN)) desc->status &= ~IRQ_NOAUTOEN; - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } void __init init_IRQ(void) @@ -166,9 +166,9 @@ static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu) { pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->node, cpu); - spin_lock_irq(&desc->lock); + raw_spin_lock_irq(&desc->lock); desc->chip->set_affinity(irq, cpumask_of(cpu)); - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); } /* diff --git a/arch/arm/mach-ns9xxx/irq.c b/arch/arm/mach-ns9xxx/irq.c index feb0e54a91de..038f24d47023 100644 --- a/arch/arm/mach-ns9xxx/irq.c +++ b/arch/arm/mach-ns9xxx/irq.c @@ -66,7 +66,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc) struct irqaction *action; irqreturn_t action_ret; - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); BUG_ON(desc->status & IRQ_INPROGRESS); @@ -78,7 +78,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc) goto out_mask; desc->status |= IRQ_INPROGRESS; - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); action_ret = handle_IRQ_event(irq, action); @@ -87,7 +87,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc) * Maybe this function should go to kernel/irq/chip.c? 
*/ note_interrupt(irq, desc, action_ret); - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); desc->status &= ~IRQ_INPROGRESS; if (desc->status & IRQ_DISABLED) @@ -97,7 +97,7 @@ out_mask: /* ack unconditionally to unmask lower prio irqs */ desc->chip->ack(irq); - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); } #define handle_irq handle_prio_irq #endif diff --git a/arch/avr32/kernel/irq.c b/arch/avr32/kernel/irq.c index 9f572229d318..c8ab63e3a675 100644 --- a/arch/avr32/kernel/irq.c +++ b/arch/avr32/kernel/irq.c @@ -51,7 +51,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto unlock; @@ -66,7 +66,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); unlock: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } return 0; diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c index db9f9c91f11f..64cff54a8a58 100644 --- a/arch/blackfin/kernel/irqchip.c +++ b/arch/blackfin/kernel/irqchip.c @@ -23,7 +23,7 @@ void ack_bad_irq(unsigned int irq) static struct irq_desc bad_irq_desc = { .handle_irq = handle_bad_irq, - .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock), + .lock = __RAW_SPIN_LOCK_UNLOCKED(bad_irq_desc.lock), }; #ifdef CONFIG_CPUMASK_OFFSTACK @@ -39,7 +39,7 @@ int show_interrupts(struct seq_file *p, void *v) unsigned long flags; if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -53,7 +53,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) { seq_printf(p, "NMI: "); for_each_online_cpu(j) diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c index 78cb3d38f899..9636bace00e8 100644 --- a/arch/blackfin/kernel/traps.c +++ b/arch/blackfin/kernel/traps.c @@ -1140,7 +1140,7 @@ void show_regs(struct pt_regs *fp) if (fp->ipend & ~0x3F) { for (i = 0; i < (NR_IRQS - 1); i++) { if (!in_atomic) - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) @@ -1155,7 +1155,7 @@ void show_regs(struct pt_regs *fp) verbose_printk("\n"); unlock: if (!in_atomic) - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } } diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c index 0ca7d9892cc6..b5ce0724a88f 100644 --- a/arch/cris/kernel/irq.c +++ b/arch/cris/kernel/irq.c @@ -52,7 +52,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } return 0; } diff --git a/arch/frv/kernel/irq.c b/arch/frv/kernel/irq.c index af3e824b91b3..62d1aba615dc 100644 --- a/arch/frv/kernel/irq.c +++ b/arch/frv/kernel/irq.c @@ -69,7 +69,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + 
raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (action) { seq_printf(p, "%3d: ", i); @@ -85,7 +85,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); } - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) { seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count)); } diff --git a/arch/h8300/kernel/irq.c b/arch/h8300/kernel/irq.c index 5c913d472119..c25dc2c2b1da 100644 --- a/arch/h8300/kernel/irq.c +++ b/arch/h8300/kernel/irq.c @@ -186,7 +186,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_puts(p, " CPU0"); if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto unlock; @@ -200,7 +200,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_printf(p, ", %s", action->name); seq_putc(p, '\n'); unlock: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } return 0; } diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index dab4d393908c..95ac77aeae9b 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c @@ -793,12 +793,12 @@ iosapic_register_intr (unsigned int gsi, goto unlock_iosapic_lock; } - spin_lock(&irq_desc[irq].lock); + raw_spin_lock(&irq_desc[irq].lock); dest = get_target_cpu(gsi, irq); dmode = choose_dmode(); err = register_intr(gsi, irq, dmode, polarity, trigger); if (err < 0) { - spin_unlock(&irq_desc[irq].lock); + raw_spin_unlock(&irq_desc[irq].lock); irq = err; goto unlock_iosapic_lock; } @@ -817,7 +817,7 @@ iosapic_register_intr (unsigned int gsi, (polarity == IOSAPIC_POL_HIGH ? "high" : "low"), cpu_logical_id(dest), dest, irq_to_vector(irq)); - spin_unlock(&irq_desc[irq].lock); + raw_spin_unlock(&irq_desc[irq].lock); unlock_iosapic_lock: spin_unlock_irqrestore(&iosapic_lock, flags); return irq; diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c index 7d8951229e7c..94ee9d067cbd 100644 --- a/arch/ia64/kernel/irq.c +++ b/arch/ia64/kernel/irq.c @@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -91,7 +91,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); return 0; diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index dd9d7b54f1a1..70e4bad23432 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c @@ -345,7 +345,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id) desc = irq_desc + irq; cfg = irq_cfg + irq; - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); if (!cfg->move_cleanup_count) goto unlock; @@ -358,7 +358,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id) spin_unlock_irqrestore(&vector_lock, flags); cfg->move_cleanup_count--; unlock: - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); } return IRQ_HANDLED; } diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c index 8dfd31e87c4c..3c71f776872c 100644 --- a/arch/m32r/kernel/irq.c +++ b/arch/m32r/kernel/irq.c @@ -40,7 +40,7 @@ int show_interrupts(struct 
seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -59,7 +59,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } return 0; } diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c index 7d5ddd62d4d2..0f06034d1fe0 100644 --- a/arch/microblaze/kernel/irq.c +++ b/arch/microblaze/kernel/irq.c @@ -68,7 +68,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < nr_irq) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -89,7 +89,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } return 0; } diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index 7b845ba9dff4..8b0b4181219f 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c @@ -99,7 +99,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -118,7 +118,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) { seq_putc(p, '\n'); seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c index 6d39e222b170..6153b6a05ccf 100644 --- a/arch/mips/vr41xx/common/icu.c +++ b/arch/mips/vr41xx/common/icu.c @@ -159,9 +159,9 @@ void vr41xx_enable_piuint(uint16_t mask) if (current_cpu_type() == CPU_VR4111 || current_cpu_type() == CPU_VR4121) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu1_set(MPIUINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -174,9 +174,9 @@ void vr41xx_disable_piuint(uint16_t mask) if (current_cpu_type() == CPU_VR4111 || current_cpu_type() == CPU_VR4121) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu1_clear(MPIUINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -189,9 +189,9 @@ void vr41xx_enable_aiuint(uint16_t mask) if (current_cpu_type() == CPU_VR4111 || current_cpu_type() == CPU_VR4121) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu1_set(MAIUINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -204,9 +204,9 @@ void vr41xx_disable_aiuint(uint16_t mask) if (current_cpu_type() == CPU_VR4111 || current_cpu_type() == CPU_VR4121) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu1_clear(MAIUINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -219,9 +219,9 @@ void vr41xx_enable_kiuint(uint16_t mask) if (current_cpu_type() == CPU_VR4111 || current_cpu_type() == CPU_VR4121) { - spin_lock_irqsave(&desc->lock, flags); + 
raw_spin_lock_irqsave(&desc->lock, flags); icu1_set(MKIUINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -234,9 +234,9 @@ void vr41xx_disable_kiuint(uint16_t mask) if (current_cpu_type() == CPU_VR4111 || current_cpu_type() == CPU_VR4121) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu1_clear(MKIUINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -247,9 +247,9 @@ void vr41xx_enable_macint(uint16_t mask) struct irq_desc *desc = irq_desc + ETHERNET_IRQ; unsigned long flags; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu1_set(MMACINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } EXPORT_SYMBOL(vr41xx_enable_macint); @@ -259,9 +259,9 @@ void vr41xx_disable_macint(uint16_t mask) struct irq_desc *desc = irq_desc + ETHERNET_IRQ; unsigned long flags; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu1_clear(MMACINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } EXPORT_SYMBOL(vr41xx_disable_macint); @@ -271,9 +271,9 @@ void vr41xx_enable_dsiuint(uint16_t mask) struct irq_desc *desc = irq_desc + DSIU_IRQ; unsigned long flags; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu1_set(MDSIUINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } EXPORT_SYMBOL(vr41xx_enable_dsiuint); @@ -283,9 +283,9 @@ void vr41xx_disable_dsiuint(uint16_t mask) struct irq_desc *desc = irq_desc + DSIU_IRQ; unsigned long flags; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu1_clear(MDSIUINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } EXPORT_SYMBOL(vr41xx_disable_dsiuint); @@ -295,9 +295,9 @@ void vr41xx_enable_firint(uint16_t mask) struct irq_desc *desc = irq_desc + FIR_IRQ; unsigned long flags; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu2_set(MFIRINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } EXPORT_SYMBOL(vr41xx_enable_firint); @@ -307,9 +307,9 @@ void vr41xx_disable_firint(uint16_t mask) struct irq_desc *desc = irq_desc + FIR_IRQ; unsigned long flags; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu2_clear(MFIRINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } EXPORT_SYMBOL(vr41xx_disable_firint); @@ -322,9 +322,9 @@ void vr41xx_enable_pciint(void) if (current_cpu_type() == CPU_VR4122 || current_cpu_type() == CPU_VR4131 || current_cpu_type() == CPU_VR4133) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu2_write(MPCIINTREG, PCIINT0); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -338,9 +338,9 @@ void vr41xx_disable_pciint(void) if (current_cpu_type() == CPU_VR4122 || current_cpu_type() == CPU_VR4131 || current_cpu_type() == CPU_VR4133) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu2_write(MPCIINTREG, 0); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, 
flags); } } @@ -354,9 +354,9 @@ void vr41xx_enable_scuint(void) if (current_cpu_type() == CPU_VR4122 || current_cpu_type() == CPU_VR4131 || current_cpu_type() == CPU_VR4133) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu2_write(MSCUINTREG, SCUINT0); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -370,9 +370,9 @@ void vr41xx_disable_scuint(void) if (current_cpu_type() == CPU_VR4122 || current_cpu_type() == CPU_VR4131 || current_cpu_type() == CPU_VR4133) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu2_write(MSCUINTREG, 0); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -386,9 +386,9 @@ void vr41xx_enable_csiint(uint16_t mask) if (current_cpu_type() == CPU_VR4122 || current_cpu_type() == CPU_VR4131 || current_cpu_type() == CPU_VR4133) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu2_set(MCSIINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -402,9 +402,9 @@ void vr41xx_disable_csiint(uint16_t mask) if (current_cpu_type() == CPU_VR4122 || current_cpu_type() == CPU_VR4131 || current_cpu_type() == CPU_VR4133) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu2_clear(MCSIINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -418,9 +418,9 @@ void vr41xx_enable_bcuint(void) if (current_cpu_type() == CPU_VR4122 || current_cpu_type() == CPU_VR4131 || current_cpu_type() == CPU_VR4133) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu2_write(MBCUINTREG, BCUINTR); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -434,9 +434,9 @@ void vr41xx_disable_bcuint(void) if (current_cpu_type() == CPU_VR4122 || current_cpu_type() == CPU_VR4131 || current_cpu_type() == CPU_VR4133) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu2_write(MBCUINTREG, 0); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -486,7 +486,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign) pin = SYSINT1_IRQ_TO_PIN(irq); - spin_lock_irq(&desc->lock); + raw_spin_lock_irq(&desc->lock); intassign0 = icu1_read(INTASSIGN0); intassign1 = icu1_read(INTASSIGN1); @@ -525,7 +525,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign) intassign1 |= (uint16_t)assign << 9; break; default: - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); return -EINVAL; } @@ -533,7 +533,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign) icu1_write(INTASSIGN0, intassign0); icu1_write(INTASSIGN1, intassign1); - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); return 0; } @@ -546,7 +546,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign) pin = SYSINT2_IRQ_TO_PIN(irq); - spin_lock_irq(&desc->lock); + raw_spin_lock_irq(&desc->lock); intassign2 = icu1_read(INTASSIGN2); intassign3 = icu1_read(INTASSIGN3); @@ -593,7 +593,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign) intassign3 |= (uint16_t)assign << 12; break; default: - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); return 
-EINVAL; } @@ -601,7 +601,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign) icu1_write(INTASSIGN2, intassign2); icu1_write(INTASSIGN3, intassign3); - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); return 0; } diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c index 4c3c58ef5cda..e2d5ed891f37 100644 --- a/arch/mn10300/kernel/irq.c +++ b/arch/mn10300/kernel/irq.c @@ -215,7 +215,7 @@ int show_interrupts(struct seq_file *p, void *v) /* display information rows, one per active CPU */ case 1 ... NR_IRQS - 1: - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (action) { @@ -235,7 +235,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); } - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); break; /* polish off with NMI and error counters */ diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index 2e7610cb33d5..f47465e8d040 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c @@ -180,7 +180,7 @@ int show_interrupts(struct seq_file *p, void *v) if (i < NR_IRQS) { struct irqaction *action; - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -224,7 +224,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } return 0; diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index f6dca4f4b295..9040330b0530 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -210,7 +210,7 @@ int show_interrupts(struct seq_file *p, void *v) if (!desc) return 0; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); action = desc->action; if (!action || !action->handler) @@ -237,7 +237,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); return 0; } @@ -1112,7 +1112,7 @@ static int virq_debug_show(struct seq_file *m, void *private) if (!desc) continue; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); if (desc->action && desc->action->handler) { seq_printf(m, "%5d ", i); @@ -1131,7 +1131,7 @@ static int virq_debug_show(struct seq_file *m, void *private) seq_printf(m, "%s\n", p); } - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } return 0; diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c index cc0c854291d7..0bac3a3dbecf 100644 --- a/arch/powerpc/platforms/52xx/media5200.c +++ b/arch/powerpc/platforms/52xx/media5200.c @@ -86,9 +86,9 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc) u32 status, enable; /* Mask off the cascaded IRQ */ - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); desc->chip->mask(virq); - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); /* Ask the FPGA for IRQ status. If 'val' is 0, then no irqs * are pending. 
'ffs()' is 1 based */ @@ -104,11 +104,11 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc) } /* Processing done; can reenable the cascade now */ - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); desc->chip->ack(virq); if (!(desc->status & IRQ_DISABLED)) desc->chip->unmask(virq); - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); } static int media5200_irq_map(struct irq_host *h, unsigned int virq, diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index 7267effc8078..6829cf7e2bda 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c @@ -237,7 +237,7 @@ extern int noirqdebug; static void handle_iic_irq(unsigned int irq, struct irq_desc *desc) { - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); @@ -265,18 +265,18 @@ static void handle_iic_irq(unsigned int irq, struct irq_desc *desc) goto out_eoi; desc->status &= ~IRQ_PENDING; - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); action_ret = handle_IRQ_event(irq, action); if (!noirqdebug) note_interrupt(irq, desc, action_ret); - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING); desc->status &= ~IRQ_INPROGRESS; out_eoi: desc->chip->eoi(irq); - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); } static int iic_host_map(struct irq_host *h, unsigned int virq, diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c index 07762259c60a..86c4b29eea89 100644 --- a/arch/powerpc/platforms/iseries/irq.c +++ b/arch/powerpc/platforms/iseries/irq.c @@ -217,9 +217,9 @@ void __init iSeries_activate_IRQs() struct irq_desc *desc = irq_to_desc(irq); if (desc && desc->chip && desc->chip->startup) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); desc->chip->startup(irq); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } } diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index 7d01b58f3989..b9b9e11609ec 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c @@ -906,7 +906,7 @@ void xics_migrate_irqs_away(void) || desc->chip->set_affinity == NULL) continue; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); if (status) { @@ -930,7 +930,7 @@ void xics_migrate_irqs_away(void) cpumask_setall(irq_to_desc(virq)->affinity); desc->chip->set_affinity(virq, cpu_all_mask); unlock: - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } #endif diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index 62e50258cdef..c6e11b077108 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c @@ -173,7 +173,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc) u32 intr_index; u32 have_shift = 0; - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) { if (desc->chip->mask_ack) desc->chip->mask_ack(irq); @@ -225,7 +225,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc) break; } unlock: - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); } static int __devinit fsl_of_msi_probe(struct of_device *dev, diff --git 
a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c index 7d10074b3304..6f220a913e42 100644 --- a/arch/powerpc/sysdev/uic.c +++ b/arch/powerpc/sysdev/uic.c @@ -225,12 +225,12 @@ void uic_irq_cascade(unsigned int virq, struct irq_desc *desc) int src; int subvirq; - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); if (desc->status & IRQ_LEVEL) desc->chip->mask(virq); else desc->chip->mask_ack(virq); - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); msr = mfdcr(uic->dcrbase + UIC_MSR); if (!msr) /* spurious interrupt */ @@ -242,12 +242,12 @@ void uic_irq_cascade(unsigned int virq, struct irq_desc *desc) generic_handle_irq(subvirq); uic_irq_ret: - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); if (desc->status & IRQ_LEVEL) desc->chip->ack(virq); if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask) desc->chip->unmask(virq); - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); } static struct uic * __init uic_init_one(struct device_node *node) diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index e1913f28f418..d2d41d046657 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c @@ -76,7 +76,7 @@ int show_interrupts(struct seq_file *p, void *v) if (!desc) return 0; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); for_each_online_cpu(j) any_count |= kstat_irqs_cpu(i, j); action = desc->action; @@ -97,7 +97,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); out: - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); return 0; } #endif diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index ce996f97855f..8d6882bb480a 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c @@ -176,7 +176,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -195,7 +195,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) { seq_printf(p, "NMI: "); for_each_online_cpu(j) @@ -785,14 +785,14 @@ void fixup_irqs(void) for (irq = 0; irq < NR_IRQS; irq++) { unsigned long flags; - spin_lock_irqsave(&irq_desc[irq].lock, flags); + raw_spin_lock_irqsave(&irq_desc[irq].lock, flags); if (irq_desc[irq].action && !(irq_desc[irq].status & IRQ_PER_CPU)) { if (irq_desc[irq].chip->set_affinity) irq_desc[irq].chip->set_affinity(irq, irq_desc[irq].affinity); } - spin_unlock_irqrestore(&irq_desc[irq].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags); } tick_ops->disable_irq(); diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c index 039270b9b73b..89474ba0741e 100644 --- a/arch/um/kernel/irq.c +++ b/arch/um/kernel/irq.c @@ -34,7 +34,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -53,7 +53,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) seq_putc(p, '\n'); diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index d5d498fbee4b..11a5851f1f50 
100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -2431,7 +2431,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void) continue; cfg = irq_cfg(irq); - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) goto unlock; @@ -2450,7 +2450,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void) } __get_cpu_var(vector_irq)[vector] = -1; unlock: - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); } irq_exit(); diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 664bcb7384ac..91fd0c70a18a 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -149,7 +149,7 @@ int show_interrupts(struct seq_file *p, void *v) if (!desc) return 0; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); for_each_online_cpu(j) any_count |= kstat_irqs_cpu(i, j); action = desc->action; @@ -170,7 +170,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); out: - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); return 0; } @@ -294,12 +294,12 @@ void fixup_irqs(void) continue; /* interrupt's are disabled at this point */ - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); affinity = desc->affinity; if (!irq_has_action(irq) || cpumask_equal(affinity, cpu_online_mask)) { - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); continue; } @@ -326,7 +326,7 @@ void fixup_irqs(void) if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask) desc->chip->unmask(irq); - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); if (break_affinity && set_affinity) printk("Broke affinity for irq %i\n", irq); @@ -356,10 +356,10 @@ void fixup_irqs(void) irq = __get_cpu_var(vector_irq)[vector]; desc = irq_to_desc(irq); - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); if (desc->chip->retrigger) desc->chip->retrigger(irq); - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); } } } diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c index a1badb32fcda..8cd38484e130 100644 --- a/arch/xtensa/kernel/irq.c +++ b/arch/xtensa/kernel/irq.c @@ -90,7 +90,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -109,7 +109,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) { seq_printf(p, "NMI: "); for_each_online_cpu(j) diff --git a/include/linux/irq.h b/include/linux/irq.h index a287cfc0b1a6..451481c082b5 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -192,7 +192,7 @@ struct irq_desc { unsigned int irq_count; /* For detecting broken IRQs */ unsigned long last_unhandled; /* Aging timer for unhandled count */ unsigned int irqs_unhandled; - spinlock_t lock; + raw_spinlock_t lock; #ifdef CONFIG_SMP cpumask_var_t affinity; unsigned int node; diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c index 1de9700f416e..2295a31ef110 100644 --- a/kernel/irq/autoprobe.c +++ b/kernel/irq/autoprobe.c @@ -45,7 +45,7 @@ unsigned long probe_irq_on(void) * flush such a longstanding irq before considering it as spurious. 
*/ for_each_irq_desc_reverse(i, desc) { - spin_lock_irq(&desc->lock); + raw_spin_lock_irq(&desc->lock); if (!desc->action && !(desc->status & IRQ_NOPROBE)) { /* * An old-style architecture might still have @@ -61,7 +61,7 @@ unsigned long probe_irq_on(void) desc->chip->set_type(i, IRQ_TYPE_PROBE); desc->chip->startup(i); } - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); } /* Wait for longstanding interrupts to trigger. */ @@ -73,13 +73,13 @@ unsigned long probe_irq_on(void) * happened in the previous stage, it may have masked itself) */ for_each_irq_desc_reverse(i, desc) { - spin_lock_irq(&desc->lock); + raw_spin_lock_irq(&desc->lock); if (!desc->action && !(desc->status & IRQ_NOPROBE)) { desc->status |= IRQ_AUTODETECT | IRQ_WAITING; if (desc->chip->startup(i)) desc->status |= IRQ_PENDING; } - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); } /* @@ -91,7 +91,7 @@ unsigned long probe_irq_on(void) * Now filter out any obviously spurious interrupts */ for_each_irq_desc(i, desc) { - spin_lock_irq(&desc->lock); + raw_spin_lock_irq(&desc->lock); status = desc->status; if (status & IRQ_AUTODETECT) { @@ -103,7 +103,7 @@ unsigned long probe_irq_on(void) if (i < 32) mask |= 1 << i; } - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); } return mask; @@ -129,7 +129,7 @@ unsigned int probe_irq_mask(unsigned long val) int i; for_each_irq_desc(i, desc) { - spin_lock_irq(&desc->lock); + raw_spin_lock_irq(&desc->lock); status = desc->status; if (status & IRQ_AUTODETECT) { @@ -139,7 +139,7 @@ unsigned int probe_irq_mask(unsigned long val) desc->status = status & ~IRQ_AUTODETECT; desc->chip->shutdown(i); } - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); } mutex_unlock(&probing_active); @@ -171,7 +171,7 @@ int probe_irq_off(unsigned long val) unsigned int status; for_each_irq_desc(i, desc) { - spin_lock_irq(&desc->lock); + raw_spin_lock_irq(&desc->lock); status = desc->status; if (status & IRQ_AUTODETECT) { @@ -183,7 +183,7 @@ int probe_irq_off(unsigned long val) desc->status = status & ~IRQ_AUTODETECT; desc->chip->shutdown(i); } - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); } mutex_unlock(&probing_active); diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index ba566c261adc..ecc3fa28f666 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -34,7 +34,7 @@ void dynamic_irq_init(unsigned int irq) } /* Ensure we don't have left over values from a previous use of this irq */ - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); desc->status = IRQ_DISABLED; desc->chip = &no_irq_chip; desc->handle_irq = handle_bad_irq; @@ -51,7 +51,7 @@ void dynamic_irq_init(unsigned int irq) cpumask_clear(desc->pending_mask); #endif #endif - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } /** @@ -68,9 +68,9 @@ void dynamic_irq_cleanup(unsigned int irq) return; } - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); if (desc->action) { - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n", irq); return; @@ -82,7 +82,7 @@ void dynamic_irq_cleanup(unsigned int irq) desc->chip = &no_irq_chip; desc->name = NULL; clear_kstat_irqs(desc); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } @@ -104,10 +104,10 @@ int set_irq_chip(unsigned int irq, struct irq_chip *chip) if (!chip) 
chip = &no_irq_chip; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); irq_chip_set_defaults(chip); desc->chip = chip; - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); return 0; } @@ -133,9 +133,9 @@ int set_irq_type(unsigned int irq, unsigned int type) if (type == IRQ_TYPE_NONE) return 0; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); ret = __irq_set_trigger(desc, irq, type); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); return ret; } EXPORT_SYMBOL(set_irq_type); @@ -158,9 +158,9 @@ int set_irq_data(unsigned int irq, void *data) return -EINVAL; } - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); desc->handler_data = data; - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); return 0; } EXPORT_SYMBOL(set_irq_data); @@ -183,11 +183,11 @@ int set_irq_msi(unsigned int irq, struct msi_desc *entry) return -EINVAL; } - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); desc->msi_desc = entry; if (entry) entry->irq = irq; - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); return 0; } @@ -214,9 +214,9 @@ int set_irq_chip_data(unsigned int irq, void *data) return -EINVAL; } - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); desc->chip_data = data; - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); return 0; } @@ -241,12 +241,12 @@ void set_irq_nested_thread(unsigned int irq, int nest) if (!desc) return; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); if (nest) desc->status |= IRQ_NESTED_THREAD; else desc->status &= ~IRQ_NESTED_THREAD; - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } EXPORT_SYMBOL_GPL(set_irq_nested_thread); @@ -343,7 +343,7 @@ void handle_nested_irq(unsigned int irq) might_sleep(); - spin_lock_irq(&desc->lock); + raw_spin_lock_irq(&desc->lock); kstat_incr_irqs_this_cpu(irq, desc); @@ -352,17 +352,17 @@ void handle_nested_irq(unsigned int irq) goto out_unlock; desc->status |= IRQ_INPROGRESS; - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); action_ret = action->thread_fn(action->irq, action->dev_id); if (!noirqdebug) note_interrupt(irq, desc, action_ret); - spin_lock_irq(&desc->lock); + raw_spin_lock_irq(&desc->lock); desc->status &= ~IRQ_INPROGRESS; out_unlock: - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); } EXPORT_SYMBOL_GPL(handle_nested_irq); @@ -384,7 +384,7 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc) struct irqaction *action; irqreturn_t action_ret; - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); if (unlikely(desc->status & IRQ_INPROGRESS)) goto out_unlock; @@ -396,16 +396,16 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc) goto out_unlock; desc->status |= IRQ_INPROGRESS; - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); action_ret = handle_IRQ_event(irq, action); if (!noirqdebug) note_interrupt(irq, desc, action_ret); - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); desc->status &= ~IRQ_INPROGRESS; out_unlock: - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); } /** @@ -424,7 +424,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc) struct irqaction *action; irqreturn_t 
action_ret; - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); mask_ack_irq(desc, irq); if (unlikely(desc->status & IRQ_INPROGRESS)) @@ -441,13 +441,13 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc) goto out_unlock; desc->status |= IRQ_INPROGRESS; - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); action_ret = handle_IRQ_event(irq, action); if (!noirqdebug) note_interrupt(irq, desc, action_ret); - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); desc->status &= ~IRQ_INPROGRESS; if (unlikely(desc->status & IRQ_ONESHOT)) @@ -455,7 +455,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc) else if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask) desc->chip->unmask(irq); out_unlock: - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); } EXPORT_SYMBOL_GPL(handle_level_irq); @@ -475,7 +475,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) struct irqaction *action; irqreturn_t action_ret; - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); if (unlikely(desc->status & IRQ_INPROGRESS)) goto out; @@ -497,18 +497,18 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) desc->status |= IRQ_INPROGRESS; desc->status &= ~IRQ_PENDING; - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); action_ret = handle_IRQ_event(irq, action); if (!noirqdebug) note_interrupt(irq, desc, action_ret); - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); desc->status &= ~IRQ_INPROGRESS; out: desc->chip->eoi(irq); - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); } /** @@ -530,7 +530,7 @@ out: void handle_edge_irq(unsigned int irq, struct irq_desc *desc) { - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); @@ -576,17 +576,17 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc) } desc->status &= ~IRQ_PENDING; - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); action_ret = handle_IRQ_event(irq, action); if (!noirqdebug) note_interrupt(irq, desc, action_ret); - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING); desc->status &= ~IRQ_INPROGRESS; out_unlock: - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); } /** @@ -643,7 +643,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, } chip_bus_lock(irq, desc); - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); /* Uninstall? 
*/ if (handle == handle_bad_irq) { @@ -661,7 +661,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, desc->depth = 0; desc->chip->startup(irq); } - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); chip_bus_sync_unlock(irq, desc); } EXPORT_SYMBOL_GPL(__set_irq_handler); @@ -692,9 +692,9 @@ void __init set_irq_noprobe(unsigned int irq) return; } - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); desc->status |= IRQ_NOPROBE; - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } void __init set_irq_probe(unsigned int irq) @@ -707,7 +707,7 @@ void __init set_irq_probe(unsigned int irq) return; } - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); desc->status &= ~IRQ_NOPROBE; - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 17c71bb565c6..814940e7f485 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -80,7 +80,7 @@ static struct irq_desc irq_desc_init = { .chip = &no_irq_chip, .handle_irq = handle_bad_irq, .depth = 1, - .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock), + .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock), }; void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr) @@ -108,7 +108,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int node) { memcpy(desc, &irq_desc_init, sizeof(struct irq_desc)); - spin_lock_init(&desc->lock); + raw_spin_lock_init(&desc->lock); desc->irq = irq; #ifdef CONFIG_SMP desc->node = node; @@ -130,7 +130,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int node) /* * Protect the sparse_irqs: */ -DEFINE_SPINLOCK(sparse_irq_lock); +DEFINE_RAW_SPINLOCK(sparse_irq_lock); struct irq_desc **irq_desc_ptrs __read_mostly; @@ -141,7 +141,7 @@ static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_sm .chip = &no_irq_chip, .handle_irq = handle_bad_irq, .depth = 1, - .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock), + .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock), } }; @@ -212,7 +212,7 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node) if (desc) return desc; - spin_lock_irqsave(&sparse_irq_lock, flags); + raw_spin_lock_irqsave(&sparse_irq_lock, flags); /* We have to check it to avoid races with another CPU */ desc = irq_desc_ptrs[irq]; @@ -234,7 +234,7 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node) irq_desc_ptrs[irq] = desc; out_unlock: - spin_unlock_irqrestore(&sparse_irq_lock, flags); + raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); return desc; } @@ -247,7 +247,7 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { .chip = &no_irq_chip, .handle_irq = handle_bad_irq, .depth = 1, - .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock), + .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), } }; @@ -473,7 +473,7 @@ unsigned int __do_IRQ(unsigned int irq) return 1; } - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); if (desc->chip->ack) desc->chip->ack(irq); /* @@ -517,13 +517,13 @@ unsigned int __do_IRQ(unsigned int irq) for (;;) { irqreturn_t action_ret; - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); action_ret = handle_IRQ_event(irq, action); if (!noirqdebug) note_interrupt(irq, desc, action_ret); - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); if (likely(!(desc->status & IRQ_PENDING))) 
break; desc->status &= ~IRQ_PENDING; @@ -536,7 +536,7 @@ out: * disabled while the handler was running. */ desc->chip->end(irq); - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); return 1; } diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 1b5d742c6a77..b2821f070a3d 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -18,7 +18,7 @@ extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume); extern struct lock_class_key irq_desc_lock_class; extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); extern void clear_kstat_irqs(struct irq_desc *desc); -extern spinlock_t sparse_irq_lock; +extern raw_spinlock_t sparse_irq_lock; #ifdef CONFIG_SPARSE_IRQ /* irq_desc_ptrs allocated at boot time */ diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 7305b297d1eb..eb6078ca60c7 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -46,9 +46,9 @@ void synchronize_irq(unsigned int irq) cpu_relax(); /* Ok, that indicated we're done: double-check carefully. */ - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); status = desc->status; - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); /* Oops, that failed? */ } while (status & IRQ_INPROGRESS); @@ -114,7 +114,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) if (!desc->chip->set_affinity) return -EINVAL; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); #ifdef CONFIG_GENERIC_PENDING_IRQ if (desc->status & IRQ_MOVE_PCNTXT) { @@ -134,7 +134,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) } #endif desc->status |= IRQ_AFFINITY_SET; - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); return 0; } @@ -181,11 +181,11 @@ int irq_select_affinity_usr(unsigned int irq) unsigned long flags; int ret; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); ret = setup_affinity(irq, desc); if (!ret) irq_set_thread_affinity(desc); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); return ret; } @@ -231,9 +231,9 @@ void disable_irq_nosync(unsigned int irq) return; chip_bus_lock(irq, desc); - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); __disable_irq(desc, irq, false); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); chip_bus_sync_unlock(irq, desc); } EXPORT_SYMBOL(disable_irq_nosync); @@ -308,9 +308,9 @@ void enable_irq(unsigned int irq) return; chip_bus_lock(irq, desc); - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); __enable_irq(desc, irq, false); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); chip_bus_sync_unlock(irq, desc); } EXPORT_SYMBOL(enable_irq); @@ -347,7 +347,7 @@ int set_irq_wake(unsigned int irq, unsigned int on) /* wakeup-capable irqs can be shared between drivers that * don't need to have the same sleep mode behaviors. 
*/ - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); if (on) { if (desc->wake_depth++ == 0) { ret = set_irq_wake_real(irq, on); @@ -368,7 +368,7 @@ int set_irq_wake(unsigned int irq, unsigned int on) } } - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); return ret; } EXPORT_SYMBOL(set_irq_wake); @@ -484,12 +484,12 @@ static int irq_wait_for_interrupt(struct irqaction *action) static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc) { chip_bus_lock(irq, desc); - spin_lock_irq(&desc->lock); + raw_spin_lock_irq(&desc->lock); if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) { desc->status &= ~IRQ_MASKED; desc->chip->unmask(irq); } - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); chip_bus_sync_unlock(irq, desc); } @@ -514,9 +514,9 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) return; } - spin_lock_irq(&desc->lock); + raw_spin_lock_irq(&desc->lock); cpumask_copy(mask, desc->affinity); - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); set_cpus_allowed_ptr(current, mask); free_cpumask_var(mask); @@ -545,7 +545,7 @@ static int irq_thread(void *data) atomic_inc(&desc->threads_active); - spin_lock_irq(&desc->lock); + raw_spin_lock_irq(&desc->lock); if (unlikely(desc->status & IRQ_DISABLED)) { /* * CHECKME: We might need a dedicated @@ -555,9 +555,9 @@ static int irq_thread(void *data) * retriggers the interrupt itself --- tglx */ desc->status |= IRQ_PENDING; - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); } else { - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); action->thread_fn(action->irq, action->dev_id); @@ -679,7 +679,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) /* * The following block of code has to be executed atomically */ - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); old_ptr = &desc->action; old = *old_ptr; if (old) { @@ -775,7 +775,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) __enable_irq(desc, irq, false); } - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); /* * Strictly no need to wake it up, but hung_task complains @@ -802,7 +802,7 @@ mismatch: ret = -EBUSY; out_thread: - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); if (new->thread) { struct task_struct *t = new->thread; @@ -844,7 +844,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) if (!desc) return NULL; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); /* * There can be multiple actions per IRQ descriptor, find the right @@ -856,7 +856,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) if (!action) { WARN(1, "Trying to free already-free IRQ %d\n", irq); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); return NULL; } @@ -884,7 +884,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) desc->chip->disable(irq); } - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); unregister_handler_proc(irq, action); diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c index fcb6c96f2627..241962280836 100644 --- a/kernel/irq/migration.c +++ b/kernel/irq/migration.c @@ -27,7 +27,7 @@ void move_masked_irq(int irq) if 
(!desc->chip->set_affinity) return; - assert_spin_locked(&desc->lock); + assert_raw_spin_locked(&desc->lock); /* * If there was a valid mask to work with, please diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c index 3fd30197da2e..26bac9d8f860 100644 --- a/kernel/irq/numa_migrate.c +++ b/kernel/irq/numa_migrate.c @@ -42,7 +42,7 @@ static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc, "for migration.\n", irq); return false; } - spin_lock_init(&desc->lock); + raw_spin_lock_init(&desc->lock); desc->node = node; lockdep_set_class(&desc->lock, &irq_desc_lock_class); init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids); @@ -67,7 +67,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, irq = old_desc->irq; - spin_lock_irqsave(&sparse_irq_lock, flags); + raw_spin_lock_irqsave(&sparse_irq_lock, flags); /* We have to check it to avoid races with another CPU */ desc = irq_desc_ptrs[irq]; @@ -91,7 +91,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, } irq_desc_ptrs[irq] = desc; - spin_unlock_irqrestore(&sparse_irq_lock, flags); + raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); /* free the old one */ free_one_irq_desc(old_desc, desc); @@ -100,7 +100,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, return desc; out_unlock: - spin_unlock_irqrestore(&sparse_irq_lock, flags); + raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); return desc; } diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c index a0bb09e79867..0d4005d85b03 100644 --- a/kernel/irq/pm.c +++ b/kernel/irq/pm.c @@ -28,9 +28,9 @@ void suspend_device_irqs(void) for_each_irq_desc(irq, desc) { unsigned long flags; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); __disable_irq(desc, irq, true); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } for_each_irq_desc(irq, desc) @@ -56,9 +56,9 @@ void resume_device_irqs(void) if (!(desc->status & IRQ_SUSPENDED)) continue; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); __enable_irq(desc, irq, true); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } EXPORT_SYMBOL_GPL(resume_device_irqs); diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index 0832145fea97..6f50eccc79c0 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c @@ -179,7 +179,7 @@ static int name_unique(unsigned int irq, struct irqaction *new_action) unsigned long flags; int ret = 1; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); for (action = desc->action ; action; action = action->next) { if ((action != new_action) && action->name && !strcmp(new_action->name, action->name)) { @@ -187,7 +187,7 @@ static int name_unique(unsigned int irq, struct irqaction *new_action) break; } } - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); return ret; } diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index e49ea1c5232d..89fb90ae534f 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c @@ -28,7 +28,7 @@ static int try_one_irq(int irq, struct irq_desc *desc) struct irqaction *action; int ok = 0, work = 0; - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); /* Already running on another processor */ if (desc->status & IRQ_INPROGRESS) { /* @@ -37,13 +37,13 @@ static int try_one_irq(int irq, struct irq_desc *desc) */ if (desc->action && (desc->action->flags & 
IRQF_SHARED)) desc->status |= IRQ_PENDING; - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); return ok; } /* Honour the normal IRQ locking */ desc->status |= IRQ_INPROGRESS; action = desc->action; - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); while (action) { /* Only shared IRQ handlers are safe to call */ @@ -56,7 +56,7 @@ static int try_one_irq(int irq, struct irq_desc *desc) } local_irq_disable(); /* Now clean up the flags */ - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); action = desc->action; /* @@ -68,9 +68,9 @@ static int try_one_irq(int irq, struct irq_desc *desc) * Perform real IRQ processing for the IRQ we deferred */ work = 1; - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); handle_IRQ_event(irq, action); - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); desc->status &= ~IRQ_PENDING; } desc->status &= ~IRQ_INPROGRESS; @@ -80,7 +80,7 @@ static int try_one_irq(int irq, struct irq_desc *desc) */ if (work && desc->chip && desc->chip->end) desc->chip->end(irq); - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); return ok; } -- cgit v1.2.3 From ecb49d1a639acbacfc3771cae5ec07bed5df3847 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 17 Nov 2009 16:36:54 +0100 Subject: hrtimers: Convert to raw_spinlocks Convert locks which cannot be sleeping locks in preempt-rt to raw_spinlocks. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Acked-by: Ingo Molnar --- include/linux/hrtimer.h | 2 +- kernel/hrtimer.c | 50 +++++++++++++++++++++++------------------------ kernel/time/timer_list.c | 6 +++--- kernel/time/timer_stats.c | 17 ++++++++-------- 4 files changed, 38 insertions(+), 37 deletions(-) (limited to 'include') diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index af634e95871d..5d86fb2309d2 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -169,7 +169,7 @@ struct hrtimer_clock_base { * @max_hang_time: Maximum time spent in hrtimer_interrupt */ struct hrtimer_cpu_base { - spinlock_t lock; + raw_spinlock_t lock; struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; #ifdef CONFIG_HIGH_RES_TIMERS ktime_t expires_next; diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index d2f9239dc6ba..0086628b6e97 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -127,11 +127,11 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer, for (;;) { base = timer->base; if (likely(base != NULL)) { - spin_lock_irqsave(&base->cpu_base->lock, *flags); + raw_spin_lock_irqsave(&base->cpu_base->lock, *flags); if (likely(base == timer->base)) return base; /* The timer has migrated to another CPU: */ - spin_unlock_irqrestore(&base->cpu_base->lock, *flags); + raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags); } cpu_relax(); } @@ -208,13 +208,13 @@ again: /* See the comment in lock_timer_base() */ timer->base = NULL; - spin_unlock(&base->cpu_base->lock); - spin_lock(&new_base->cpu_base->lock); + raw_spin_unlock(&base->cpu_base->lock); + raw_spin_lock(&new_base->cpu_base->lock); if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) { cpu = this_cpu; - spin_unlock(&new_base->cpu_base->lock); - spin_lock(&base->cpu_base->lock); + raw_spin_unlock(&new_base->cpu_base->lock); + raw_spin_lock(&base->cpu_base->lock); timer->base = base; goto again; } @@ -230,7 +230,7 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) { struct hrtimer_clock_base *base = timer->base; - spin_lock_irqsave(&base->cpu_base->lock, *flags); + 
raw_spin_lock_irqsave(&base->cpu_base->lock, *flags); return base; } @@ -628,12 +628,12 @@ static void retrigger_next_event(void *arg) base = &__get_cpu_var(hrtimer_bases); /* Adjust CLOCK_REALTIME offset */ - spin_lock(&base->lock); + raw_spin_lock(&base->lock); base->clock_base[CLOCK_REALTIME].offset = timespec_to_ktime(realtime_offset); hrtimer_force_reprogram(base, 0); - spin_unlock(&base->lock); + raw_spin_unlock(&base->lock); } /* @@ -694,9 +694,9 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, { if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) { if (wakeup) { - spin_unlock(&base->cpu_base->lock); + raw_spin_unlock(&base->cpu_base->lock); raise_softirq_irqoff(HRTIMER_SOFTIRQ); - spin_lock(&base->cpu_base->lock); + raw_spin_lock(&base->cpu_base->lock); } else __raise_softirq_irqoff(HRTIMER_SOFTIRQ); @@ -790,7 +790,7 @@ static inline void timer_stats_account_hrtimer(struct hrtimer *timer) static inline void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) { - spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags); + raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags); } /** @@ -1123,7 +1123,7 @@ ktime_t hrtimer_get_next_event(void) unsigned long flags; int i; - spin_lock_irqsave(&cpu_base->lock, flags); + raw_spin_lock_irqsave(&cpu_base->lock, flags); if (!hrtimer_hres_active()) { for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) { @@ -1140,7 +1140,7 @@ ktime_t hrtimer_get_next_event(void) } } - spin_unlock_irqrestore(&cpu_base->lock, flags); + raw_spin_unlock_irqrestore(&cpu_base->lock, flags); if (mindelta.tv64 < 0) mindelta.tv64 = 0; @@ -1222,11 +1222,11 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now) * they get migrated to another cpu, therefore its safe to unlock * the timer base. */ - spin_unlock(&cpu_base->lock); + raw_spin_unlock(&cpu_base->lock); trace_hrtimer_expire_entry(timer, now); restart = fn(timer); trace_hrtimer_expire_exit(timer); - spin_lock(&cpu_base->lock); + raw_spin_lock(&cpu_base->lock); /* * Note: We clear the CALLBACK bit after enqueue_hrtimer and @@ -1261,7 +1261,7 @@ void hrtimer_interrupt(struct clock_event_device *dev) retry: expires_next.tv64 = KTIME_MAX; - spin_lock(&cpu_base->lock); + raw_spin_lock(&cpu_base->lock); /* * We set expires_next to KTIME_MAX here with cpu_base->lock * held to prevent that a timer is enqueued in our queue via @@ -1317,7 +1317,7 @@ retry: * against it. */ cpu_base->expires_next = expires_next; - spin_unlock(&cpu_base->lock); + raw_spin_unlock(&cpu_base->lock); /* Reprogramming necessary ? */ if (expires_next.tv64 == KTIME_MAX || @@ -1457,7 +1457,7 @@ void hrtimer_run_queues(void) gettime = 0; } - spin_lock(&cpu_base->lock); + raw_spin_lock(&cpu_base->lock); while ((node = base->first)) { struct hrtimer *timer; @@ -1469,7 +1469,7 @@ void hrtimer_run_queues(void) __run_hrtimer(timer, &base->softirq_time); } - spin_unlock(&cpu_base->lock); + raw_spin_unlock(&cpu_base->lock); } } @@ -1625,7 +1625,7 @@ static void __cpuinit init_hrtimers_cpu(int cpu) struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); int i; - spin_lock_init(&cpu_base->lock); + raw_spin_lock_init(&cpu_base->lock); for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) cpu_base->clock_base[i].cpu_base = cpu_base; @@ -1683,16 +1683,16 @@ static void migrate_hrtimers(int scpu) * The caller is globally serialized and nobody else * takes two locks at once, deadlock is not possible. 
*/ - spin_lock(&new_base->lock); - spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); + raw_spin_lock(&new_base->lock); + raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { migrate_hrtimer_list(&old_base->clock_base[i], &new_base->clock_base[i]); } - spin_unlock(&old_base->lock); - spin_unlock(&new_base->lock); + raw_spin_unlock(&old_base->lock); + raw_spin_unlock(&new_base->lock); /* Check, if we got expired work to do */ __hrtimer_peek_ahead_timers(); diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c index 9d80db4747d4..28265636b6c2 100644 --- a/kernel/time/timer_list.c +++ b/kernel/time/timer_list.c @@ -84,7 +84,7 @@ print_active_timers(struct seq_file *m, struct hrtimer_clock_base *base, next_one: i = 0; - spin_lock_irqsave(&base->cpu_base->lock, flags); + raw_spin_lock_irqsave(&base->cpu_base->lock, flags); curr = base->first; /* @@ -100,13 +100,13 @@ next_one: timer = rb_entry(curr, struct hrtimer, node); tmp = *timer; - spin_unlock_irqrestore(&base->cpu_base->lock, flags); + raw_spin_unlock_irqrestore(&base->cpu_base->lock, flags); print_timer(m, timer, &tmp, i, now); next++; goto next_one; } - spin_unlock_irqrestore(&base->cpu_base->lock, flags); + raw_spin_unlock_irqrestore(&base->cpu_base->lock, flags); } static void diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c index 63b117e9eba1..2f3b585b8d7d 100644 --- a/kernel/time/timer_stats.c +++ b/kernel/time/timer_stats.c @@ -86,7 +86,7 @@ static DEFINE_SPINLOCK(table_lock); /* * Per-CPU lookup locks for fast hash lookup: */ -static DEFINE_PER_CPU(spinlock_t, tstats_lookup_lock); +static DEFINE_PER_CPU(raw_spinlock_t, tstats_lookup_lock); /* * Mutex to serialize state changes with show-stats activities: @@ -238,7 +238,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf, /* * It doesnt matter which lock we take: */ - spinlock_t *lock; + raw_spinlock_t *lock; struct entry *entry, input; unsigned long flags; @@ -253,7 +253,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf, input.pid = pid; input.timer_flag = timer_flag; - spin_lock_irqsave(lock, flags); + raw_spin_lock_irqsave(lock, flags); if (!timer_stats_active) goto out_unlock; @@ -264,7 +264,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf, atomic_inc(&overflow_count); out_unlock: - spin_unlock_irqrestore(lock, flags); + raw_spin_unlock_irqrestore(lock, flags); } static void print_name_offset(struct seq_file *m, unsigned long addr) @@ -348,10 +348,11 @@ static void sync_access(void) int cpu; for_each_online_cpu(cpu) { - spinlock_t *lock = &per_cpu(tstats_lookup_lock, cpu); - spin_lock_irqsave(lock, flags); + raw_spinlock_t *lock = &per_cpu(tstats_lookup_lock, cpu); + + raw_spin_lock_irqsave(lock, flags); /* nothing */ - spin_unlock_irqrestore(lock, flags); + raw_spin_unlock_irqrestore(lock, flags); } } @@ -409,7 +410,7 @@ void __init init_timer_stats(void) int cpu; for_each_possible_cpu(cpu) - spin_lock_init(&per_cpu(tstats_lookup_lock, cpu)); + raw_spin_lock_init(&per_cpu(tstats_lookup_lock, cpu)); } static int __init init_tstats_procfs(void) -- cgit v1.2.3 From e625cce1b73fb38b74e5387226534f7bcbfc36fe Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 17 Nov 2009 18:02:06 +0100 Subject: perf_event: Convert to raw_spinlock Convert locks which cannot be sleeping locks in preempt-rt to raw_spinlocks. 
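The conversion applied here and in the preceding hrtimers commit is mechanical: the lock declaration changes from spinlock_t to raw_spinlock_t and every locking call gains a raw_ prefix, while the calling conventions (IRQ-safe variants, flags save/restore, lockdep nesting) stay the same; a raw spinlock remains a busy-waiting lock even on a preempt-rt kernel, which is why locks taken in these non-sleeping paths are converted. A minimal before/after sketch; the my_lock, my_raw_lock and my_data names are illustrative only and do not come from these patches:

    #include <linux/spinlock.h>

    /* before: an ordinary spinlock, which preempt-rt turns into a sleeping lock */
    static DEFINE_SPINLOCK(my_lock);
    /* after: a raw spinlock, which always spins and stays valid in non-sleeping context */
    static DEFINE_RAW_SPINLOCK(my_raw_lock);

    static int my_data;

    static void set_my_data_old(int v)
    {
            unsigned long flags;

            spin_lock_irqsave(&my_lock, flags);
            my_data = v;
            spin_unlock_irqrestore(&my_lock, flags);
    }

    static void set_my_data_new(int v)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&my_raw_lock, flags);
            my_data = v;
            raw_spin_unlock_irqrestore(&my_raw_lock, flags);
    }

Static initializers change the same way, as the hunks above show: __SPIN_LOCK_UNLOCKED() becomes __RAW_SPIN_LOCK_UNLOCKED(), spin_lock_init() becomes raw_spin_lock_init(), and DEFINE_SPINLOCK() becomes DEFINE_RAW_SPINLOCK().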
Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Acked-by: Ingo Molnar --- include/linux/perf_event.h | 2 +- kernel/hw_breakpoint.c | 4 +- kernel/perf_event.c | 106 ++++++++++++++++++++++----------------------- 3 files changed, 56 insertions(+), 56 deletions(-) (limited to 'include') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 64a53f74c9a9..da7bdc23f279 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -681,7 +681,7 @@ struct perf_event_context { * Protect the states of the events in the list, * nr_active, and the list: */ - spinlock_t lock; + raw_spinlock_t lock; /* * Protect the list of events. Locking either mutex or lock * is sufficient to ensure the list doesn't change; to change diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c index 366eedf949c0..dbcbf6a33a08 100644 --- a/kernel/hw_breakpoint.c +++ b/kernel/hw_breakpoint.c @@ -96,7 +96,7 @@ static int task_bp_pinned(struct task_struct *tsk) list = &ctx->event_list; - spin_lock_irqsave(&ctx->lock, flags); + raw_spin_lock_irqsave(&ctx->lock, flags); /* * The current breakpoint counter is not included in the list @@ -107,7 +107,7 @@ static int task_bp_pinned(struct task_struct *tsk) count++; } - spin_unlock_irqrestore(&ctx->lock, flags); + raw_spin_unlock_irqrestore(&ctx->lock, flags); return count; } diff --git a/kernel/perf_event.c b/kernel/perf_event.c index e73e53c7582f..9052d6c8c9fd 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -203,14 +203,14 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags) * if so. If we locked the right context, then it * can't get swapped on us any more. */ - spin_lock_irqsave(&ctx->lock, *flags); + raw_spin_lock_irqsave(&ctx->lock, *flags); if (ctx != rcu_dereference(task->perf_event_ctxp)) { - spin_unlock_irqrestore(&ctx->lock, *flags); + raw_spin_unlock_irqrestore(&ctx->lock, *flags); goto retry; } if (!atomic_inc_not_zero(&ctx->refcount)) { - spin_unlock_irqrestore(&ctx->lock, *flags); + raw_spin_unlock_irqrestore(&ctx->lock, *flags); ctx = NULL; } } @@ -231,7 +231,7 @@ static struct perf_event_context *perf_pin_task_context(struct task_struct *task ctx = perf_lock_task_context(task, &flags); if (ctx) { ++ctx->pin_count; - spin_unlock_irqrestore(&ctx->lock, flags); + raw_spin_unlock_irqrestore(&ctx->lock, flags); } return ctx; } @@ -240,9 +240,9 @@ static void perf_unpin_context(struct perf_event_context *ctx) { unsigned long flags; - spin_lock_irqsave(&ctx->lock, flags); + raw_spin_lock_irqsave(&ctx->lock, flags); --ctx->pin_count; - spin_unlock_irqrestore(&ctx->lock, flags); + raw_spin_unlock_irqrestore(&ctx->lock, flags); put_ctx(ctx); } @@ -427,7 +427,7 @@ static void __perf_event_remove_from_context(void *info) if (ctx->task && cpuctx->task_ctx != ctx) return; - spin_lock(&ctx->lock); + raw_spin_lock(&ctx->lock); /* * Protect the list operation against NMI by disabling the * events on a global level. @@ -449,7 +449,7 @@ static void __perf_event_remove_from_context(void *info) } perf_enable(); - spin_unlock(&ctx->lock); + raw_spin_unlock(&ctx->lock); } @@ -488,12 +488,12 @@ retry: task_oncpu_function_call(task, __perf_event_remove_from_context, event); - spin_lock_irq(&ctx->lock); + raw_spin_lock_irq(&ctx->lock); /* * If the context is active we need to retry the smp call. 
*/ if (ctx->nr_active && !list_empty(&event->group_entry)) { - spin_unlock_irq(&ctx->lock); + raw_spin_unlock_irq(&ctx->lock); goto retry; } @@ -504,7 +504,7 @@ retry: */ if (!list_empty(&event->group_entry)) list_del_event(event, ctx); - spin_unlock_irq(&ctx->lock); + raw_spin_unlock_irq(&ctx->lock); } /* @@ -535,7 +535,7 @@ static void __perf_event_disable(void *info) if (ctx->task && cpuctx->task_ctx != ctx) return; - spin_lock(&ctx->lock); + raw_spin_lock(&ctx->lock); /* * If the event is on, turn it off. @@ -551,7 +551,7 @@ static void __perf_event_disable(void *info) event->state = PERF_EVENT_STATE_OFF; } - spin_unlock(&ctx->lock); + raw_spin_unlock(&ctx->lock); } /* @@ -584,12 +584,12 @@ void perf_event_disable(struct perf_event *event) retry: task_oncpu_function_call(task, __perf_event_disable, event); - spin_lock_irq(&ctx->lock); + raw_spin_lock_irq(&ctx->lock); /* * If the event is still active, we need to retry the cross-call. */ if (event->state == PERF_EVENT_STATE_ACTIVE) { - spin_unlock_irq(&ctx->lock); + raw_spin_unlock_irq(&ctx->lock); goto retry; } @@ -602,7 +602,7 @@ void perf_event_disable(struct perf_event *event) event->state = PERF_EVENT_STATE_OFF; } - spin_unlock_irq(&ctx->lock); + raw_spin_unlock_irq(&ctx->lock); } static int @@ -770,7 +770,7 @@ static void __perf_install_in_context(void *info) cpuctx->task_ctx = ctx; } - spin_lock(&ctx->lock); + raw_spin_lock(&ctx->lock); ctx->is_active = 1; update_context_time(ctx); @@ -820,7 +820,7 @@ static void __perf_install_in_context(void *info) unlock: perf_enable(); - spin_unlock(&ctx->lock); + raw_spin_unlock(&ctx->lock); } /* @@ -856,12 +856,12 @@ retry: task_oncpu_function_call(task, __perf_install_in_context, event); - spin_lock_irq(&ctx->lock); + raw_spin_lock_irq(&ctx->lock); /* * we need to retry the smp call. 
*/ if (ctx->is_active && list_empty(&event->group_entry)) { - spin_unlock_irq(&ctx->lock); + raw_spin_unlock_irq(&ctx->lock); goto retry; } @@ -872,7 +872,7 @@ retry: */ if (list_empty(&event->group_entry)) add_event_to_ctx(event, ctx); - spin_unlock_irq(&ctx->lock); + raw_spin_unlock_irq(&ctx->lock); } /* @@ -917,7 +917,7 @@ static void __perf_event_enable(void *info) cpuctx->task_ctx = ctx; } - spin_lock(&ctx->lock); + raw_spin_lock(&ctx->lock); ctx->is_active = 1; update_context_time(ctx); @@ -959,7 +959,7 @@ static void __perf_event_enable(void *info) } unlock: - spin_unlock(&ctx->lock); + raw_spin_unlock(&ctx->lock); } /* @@ -985,7 +985,7 @@ void perf_event_enable(struct perf_event *event) return; } - spin_lock_irq(&ctx->lock); + raw_spin_lock_irq(&ctx->lock); if (event->state >= PERF_EVENT_STATE_INACTIVE) goto out; @@ -1000,10 +1000,10 @@ void perf_event_enable(struct perf_event *event) event->state = PERF_EVENT_STATE_OFF; retry: - spin_unlock_irq(&ctx->lock); + raw_spin_unlock_irq(&ctx->lock); task_oncpu_function_call(task, __perf_event_enable, event); - spin_lock_irq(&ctx->lock); + raw_spin_lock_irq(&ctx->lock); /* * If the context is active and the event is still off, @@ -1020,7 +1020,7 @@ void perf_event_enable(struct perf_event *event) __perf_event_mark_enabled(event, ctx); out: - spin_unlock_irq(&ctx->lock); + raw_spin_unlock_irq(&ctx->lock); } static int perf_event_refresh(struct perf_event *event, int refresh) @@ -1042,7 +1042,7 @@ void __perf_event_sched_out(struct perf_event_context *ctx, { struct perf_event *event; - spin_lock(&ctx->lock); + raw_spin_lock(&ctx->lock); ctx->is_active = 0; if (likely(!ctx->nr_events)) goto out; @@ -1055,7 +1055,7 @@ void __perf_event_sched_out(struct perf_event_context *ctx, } perf_enable(); out: - spin_unlock(&ctx->lock); + raw_spin_unlock(&ctx->lock); } /* @@ -1193,8 +1193,8 @@ void perf_event_task_sched_out(struct task_struct *task, * order we take the locks because no other cpu could * be trying to lock both of these tasks. 
*/ - spin_lock(&ctx->lock); - spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); + raw_spin_lock(&ctx->lock); + raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); if (context_equiv(ctx, next_ctx)) { /* * XXX do we need a memory barrier of sorts @@ -1208,8 +1208,8 @@ void perf_event_task_sched_out(struct task_struct *task, perf_event_sync_stat(ctx, next_ctx); } - spin_unlock(&next_ctx->lock); - spin_unlock(&ctx->lock); + raw_spin_unlock(&next_ctx->lock); + raw_spin_unlock(&ctx->lock); } rcu_read_unlock(); @@ -1251,7 +1251,7 @@ __perf_event_sched_in(struct perf_event_context *ctx, struct perf_event *event; int can_add_hw = 1; - spin_lock(&ctx->lock); + raw_spin_lock(&ctx->lock); ctx->is_active = 1; if (likely(!ctx->nr_events)) goto out; @@ -1306,7 +1306,7 @@ __perf_event_sched_in(struct perf_event_context *ctx, } perf_enable(); out: - spin_unlock(&ctx->lock); + raw_spin_unlock(&ctx->lock); } /* @@ -1370,7 +1370,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx) struct hw_perf_event *hwc; u64 interrupts, freq; - spin_lock(&ctx->lock); + raw_spin_lock(&ctx->lock); list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { if (event->state != PERF_EVENT_STATE_ACTIVE) continue; @@ -1425,7 +1425,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx) perf_enable(); } } - spin_unlock(&ctx->lock); + raw_spin_unlock(&ctx->lock); } /* @@ -1438,7 +1438,7 @@ static void rotate_ctx(struct perf_event_context *ctx) if (!ctx->nr_events) return; - spin_lock(&ctx->lock); + raw_spin_lock(&ctx->lock); /* * Rotate the first entry last (works just fine for group events too): */ @@ -1449,7 +1449,7 @@ static void rotate_ctx(struct perf_event_context *ctx) } perf_enable(); - spin_unlock(&ctx->lock); + raw_spin_unlock(&ctx->lock); } void perf_event_task_tick(struct task_struct *curr, int cpu) @@ -1498,7 +1498,7 @@ static void perf_event_enable_on_exec(struct task_struct *task) __perf_event_task_sched_out(ctx); - spin_lock(&ctx->lock); + raw_spin_lock(&ctx->lock); list_for_each_entry(event, &ctx->group_list, group_entry) { if (!event->attr.enable_on_exec) @@ -1516,7 +1516,7 @@ static void perf_event_enable_on_exec(struct task_struct *task) if (enabled) unclone_ctx(ctx); - spin_unlock(&ctx->lock); + raw_spin_unlock(&ctx->lock); perf_event_task_sched_in(task, smp_processor_id()); out: @@ -1542,10 +1542,10 @@ static void __perf_event_read(void *info) if (ctx->task && cpuctx->task_ctx != ctx) return; - spin_lock(&ctx->lock); + raw_spin_lock(&ctx->lock); update_context_time(ctx); update_event_times(event); - spin_unlock(&ctx->lock); + raw_spin_unlock(&ctx->lock); event->pmu->read(event); } @@ -1563,10 +1563,10 @@ static u64 perf_event_read(struct perf_event *event) struct perf_event_context *ctx = event->ctx; unsigned long flags; - spin_lock_irqsave(&ctx->lock, flags); + raw_spin_lock_irqsave(&ctx->lock, flags); update_context_time(ctx); update_event_times(event); - spin_unlock_irqrestore(&ctx->lock, flags); + raw_spin_unlock_irqrestore(&ctx->lock, flags); } return atomic64_read(&event->count); @@ -1579,7 +1579,7 @@ static void __perf_event_init_context(struct perf_event_context *ctx, struct task_struct *task) { - spin_lock_init(&ctx->lock); + raw_spin_lock_init(&ctx->lock); mutex_init(&ctx->mutex); INIT_LIST_HEAD(&ctx->group_list); INIT_LIST_HEAD(&ctx->event_list); @@ -1649,7 +1649,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu) ctx = perf_lock_task_context(task, &flags); if (ctx) { unclone_ctx(ctx); - spin_unlock_irqrestore(&ctx->lock, 
flags); + raw_spin_unlock_irqrestore(&ctx->lock, flags); } if (!ctx) { @@ -1987,7 +1987,7 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg) if (!value) return -EINVAL; - spin_lock_irq(&ctx->lock); + raw_spin_lock_irq(&ctx->lock); if (event->attr.freq) { if (value > sysctl_perf_event_sample_rate) { ret = -EINVAL; @@ -2000,7 +2000,7 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg) event->hw.sample_period = value; } unlock: - spin_unlock_irq(&ctx->lock); + raw_spin_unlock_irq(&ctx->lock); return ret; } @@ -4992,7 +4992,7 @@ void perf_event_exit_task(struct task_struct *child) * reading child->perf_event_ctxp, we wait until it has * incremented the context's refcount before we do put_ctx below. */ - spin_lock(&child_ctx->lock); + raw_spin_lock(&child_ctx->lock); child->perf_event_ctxp = NULL; /* * If this context is a clone; unclone it so it can't get @@ -5001,7 +5001,7 @@ void perf_event_exit_task(struct task_struct *child) */ unclone_ctx(child_ctx); update_context_time(child_ctx); - spin_unlock_irqrestore(&child_ctx->lock, flags); + raw_spin_unlock_irqrestore(&child_ctx->lock, flags); /* * Report the task dead after unscheduling the events so that we @@ -5292,11 +5292,11 @@ perf_set_reserve_percpu(struct sysdev_class *class, perf_reserved_percpu = val; for_each_online_cpu(cpu) { cpuctx = &per_cpu(perf_cpu_context, cpu); - spin_lock_irq(&cpuctx->ctx.lock); + raw_spin_lock_irq(&cpuctx->ctx.lock); mpt = min(perf_max_events - cpuctx->ctx.nr_events, perf_max_events - perf_reserved_percpu); cpuctx->max_pertask = mpt; - spin_unlock_irq(&cpuctx->ctx.lock); + raw_spin_unlock_irq(&cpuctx->ctx.lock); } spin_unlock(&perf_resource_lock); -- cgit v1.2.3 From 4056c9a344d60ee96471a5f3b0a3c8a90371c8fd Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Thu, 3 Dec 2009 20:28:04 +0200 Subject: nfsd: Remove unused dprintk This doesn't appear to be useful. Signed-off-by: Boaz Harrosh Signed-off-by: J. Bruce Fields --- include/linux/nfsd/nfsfh.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'include') diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h index 8f641c908450..2973e1135343 100644 --- a/include/linux/nfsd/nfsfh.h +++ b/include/linux/nfsd/nfsfh.h @@ -20,7 +20,6 @@ # include #endif #include -#include /* * This is the old "dentry style" Linux NFSv2 file handle. @@ -329,9 +328,6 @@ fh_lock_nested(struct svc_fh *fhp, unsigned int subclass) struct dentry *dentry = fhp->fh_dentry; struct inode *inode; - dfprintk(FILEOP, "nfsd: fh_lock(%s) locked = %d\n", - SVCFH_fmt(fhp), fhp->fh_locked); - BUG_ON(!dentry); if (fhp->fh_locked) { -- cgit v1.2.3 From a600ffcbb3743cf1296bee2a41d4824c719d7181 Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Thu, 3 Dec 2009 20:28:35 +0200 Subject: sunrpc: Clean never used include files Remove include of two headers never used by this file. Doing so exposed a missing #include in include/linux/sunrpc/rpc_rdma.h. I did not see any other users dependency but if exist they should be fixed since these headers are totally irrelevant to here. Signed-off-by: Boaz Harrosh Signed-off-by: J. 
Bruce Fields --- include/linux/sunrpc/debug.h | 3 --- include/linux/sunrpc/rpc_rdma.h | 2 ++ 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h index 10709cbe96fd..c2786f20016f 100644 --- a/include/linux/sunrpc/debug.h +++ b/include/linux/sunrpc/debug.h @@ -28,9 +28,6 @@ #ifdef __KERNEL__ -#include -#include - /* * Enable RPC debugging/profiling. */ diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h index 87b895d5c786..b78f16b1dea3 100644 --- a/include/linux/sunrpc/rpc_rdma.h +++ b/include/linux/sunrpc/rpc_rdma.h @@ -40,6 +40,8 @@ #ifndef _LINUX_SUNRPC_RPC_RDMA_H #define _LINUX_SUNRPC_RPC_RDMA_H +#include + struct rpcrdma_segment { __be32 rs_handle; /* Registered memory handle */ __be32 rs_length; /* Length of the chunk in bytes */ -- cgit v1.2.3 From d703158229329af7152d159753f849aa7bd55ee6 Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Thu, 3 Dec 2009 20:28:47 +0200 Subject: nfsd: Fix independence of a few nfsd related headers An header should be compilation independent, .i.e pull in any header who's declarations are directly used by this header. And not let users re-include all it's dependencies all over again. [At the end of the day what's the use of a header if it does not have more then one user?] Signed-off-by: Boaz Harrosh Signed-off-by: J. Bruce Fields --- include/linux/nfs_xdr.h | 1 + include/linux/nfsacl.h | 1 + 2 files changed, 2 insertions(+) (limited to 'include') diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 62f63fb0c4c8..00a0c8170816 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -2,6 +2,7 @@ #define _LINUX_NFS_XDR_H #include +#include /* * To change the maximum rsize and wsize supported by the NFS client, adjust diff --git a/include/linux/nfsacl.h b/include/linux/nfsacl.h index 43011b69297c..f321b578edeb 100644 --- a/include/linux/nfsacl.h +++ b/include/linux/nfsacl.h @@ -29,6 +29,7 @@ #ifdef __KERNEL__ #include +#include /* Maximum number of ACL entries over NFS */ #define NFS_ACL_MAX_ENTRIES 1024 -- cgit v1.2.3 From 72579ac9cd68081108277c31781b127d0f420d61 Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Thu, 3 Dec 2009 20:28:59 +0200 Subject: nfsd: Headers Independence and include cleanups * Add includes that are directly used by headers * Remove includes that are not needed These are the changes made: [xdr.h] struct nfsd_readdirres has an embedded struct readdir_cd from nfsd.h fixing that we can drop other includes [xdr4.h] embedded types defined both at state.h and nfsd.h [syscall.h] After export.h fix none of these stuff is needed. fix extra space in # include <> statement [stats.h] does not need but was export to user-mode so I don't touch it [state.h] embedded types from nfsfh.h like struct knfsd_fh. bringing that eliminates the need for all other includes [nfsfh.h] directly manipulating types from sunrpc/svc.h. Removed Other unused headers. [nfsd.h] removed unused headers include [export.h] lots of sunrpc/svc.h types and a single prototype declaration with pointer from nfsfh.h, but all users of export.h do need nfsfh.h any way. remove now un-needed include. [const.h] Unfixed (not independent) [cache.h] could do with a forward declaration of "struct svc_rqst;" from sunrpc/svc.h but all users absolutely will need sunrpc/svc.h it is easier overall this way. Signed-off-by: Boaz Harrosh Signed-off-by: J. 
Bruce Fields --- include/linux/nfsd/cache.h | 3 +-- include/linux/nfsd/export.h | 2 +- include/linux/nfsd/nfsd.h | 4 ---- include/linux/nfsd/nfsfh.h | 3 +-- include/linux/nfsd/state.h | 4 +--- include/linux/nfsd/syscall.h | 8 +------- include/linux/nfsd/xdr.h | 3 +-- include/linux/nfsd/xdr4.h | 3 ++- 8 files changed, 8 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/include/linux/nfsd/cache.h b/include/linux/nfsd/cache.h index 3a3f58934f5e..a165425dea41 100644 --- a/include/linux/nfsd/cache.h +++ b/include/linux/nfsd/cache.h @@ -10,8 +10,7 @@ #ifndef NFSCACHE_H #define NFSCACHE_H -#include -#include +#include /* * Representation of a reply cache entry. diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h index a6d9ef2bb34a..ef3d416fcf67 100644 --- a/include/linux/nfsd/export.h +++ b/include/linux/nfsd/export.h @@ -12,7 +12,7 @@ # include #ifdef __KERNEL__ -# include +# include #endif /* diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h index e4518d090a8c..74f67c2aca34 100644 --- a/include/linux/nfsd/nfsd.h +++ b/include/linux/nfsd/nfsd.h @@ -11,13 +11,9 @@ #define LINUX_NFSD_NFSD_H #include -#include -#include -#include #include #include -#include #include #include /* diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h index 2973e1135343..49523edbc510 100644 --- a/include/linux/nfsd/nfsfh.h +++ b/include/linux/nfsd/nfsfh.h @@ -16,8 +16,7 @@ # include #ifdef __KERNEL__ -# include -# include +# include #endif #include diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h index 5aadf8aa3a97..2af75686e0d3 100644 --- a/include/linux/nfsd/state.h +++ b/include/linux/nfsd/state.h @@ -37,9 +37,7 @@ #ifndef _NFSD4_STATE_H #define _NFSD4_STATE_H -#include -#include -#include +#include typedef struct { u32 cl_boot; diff --git a/include/linux/nfsd/syscall.h b/include/linux/nfsd/syscall.h index 7a3b565b898f..812bc1e160dc 100644 --- a/include/linux/nfsd/syscall.h +++ b/include/linux/nfsd/syscall.h @@ -9,14 +9,8 @@ #ifndef NFSD_SYSCALL_H #define NFSD_SYSCALL_H -# include -#ifdef __KERNEL__ -# include -#endif -#include -#include +#include #include -#include /* * Version of the syscall interface diff --git a/include/linux/nfsd/xdr.h b/include/linux/nfsd/xdr.h index a0132ef58f21..58f824d854c2 100644 --- a/include/linux/nfsd/xdr.h +++ b/include/linux/nfsd/xdr.h @@ -7,9 +7,8 @@ #ifndef LINUX_NFSD_H #define LINUX_NFSD_H -#include #include -#include +#include struct nfsd_fhandle { struct svc_fh fh; diff --git a/include/linux/nfsd/xdr4.h b/include/linux/nfsd/xdr4.h index 73164c2b3d29..1bf266239c7e 100644 --- a/include/linux/nfsd/xdr4.h +++ b/include/linux/nfsd/xdr4.h @@ -39,7 +39,8 @@ #ifndef _LINUX_NFSD_XDR4_H #define _LINUX_NFSD_XDR4_H -#include +#include +#include #define NFSD4_MAX_TAGLEN 128 #define XDR_LEN(n) (((n) + 3) & ~3) -- cgit v1.2.3 From 9a74af21330c8d46efa977d088a62cc1bfa954e9 Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Thu, 3 Dec 2009 20:30:56 +0200 Subject: nfsd: Move private headers to source directory Lots of include/linux/nfsd/* headers are only used by nfsd module. Move them to the source directory Signed-off-by: Boaz Harrosh Signed-off-by: J. 
Bruce Fields --- fs/nfsd/auth.c | 2 +- fs/nfsd/cache.h | 85 +++++++ fs/nfsd/export.c | 3 +- fs/nfsd/lockd.c | 2 +- fs/nfsd/nfs2acl.c | 7 +- fs/nfsd/nfs3acl.c | 7 +- fs/nfsd/nfs3proc.c | 4 +- fs/nfsd/nfs3xdr.c | 2 +- fs/nfsd/nfs4callback.c | 4 +- fs/nfsd/nfs4proc.c | 4 +- fs/nfsd/nfs4recover.c | 5 +- fs/nfsd/nfs4state.c | 2 +- fs/nfsd/nfs4xdr.c | 3 +- fs/nfsd/nfscache.c | 4 +- fs/nfsd/nfsctl.c | 5 +- fs/nfsd/nfsd.h | 335 +++++++++++++++++++++++++++ fs/nfsd/nfsfh.c | 2 +- fs/nfsd/nfsproc.c | 4 +- fs/nfsd/nfssvc.c | 4 +- fs/nfsd/nfsxdr.c | 2 +- fs/nfsd/state.h | 409 ++++++++++++++++++++++++++++++++ fs/nfsd/stats.c | 4 +- fs/nfsd/vfs.c | 18 +- fs/nfsd/xdr.h | 176 ++++++++++++++ fs/nfsd/xdr3.h | 346 +++++++++++++++++++++++++++ fs/nfsd/xdr4.h | 564 +++++++++++++++++++++++++++++++++++++++++++++ include/linux/nfsd/cache.h | 85 ------- include/linux/nfsd/nfsd.h | 335 --------------------------- include/linux/nfsd/state.h | 409 -------------------------------- include/linux/nfsd/xdr.h | 176 -------------- include/linux/nfsd/xdr3.h | 346 --------------------------- include/linux/nfsd/xdr4.h | 564 --------------------------------------------- 32 files changed, 1963 insertions(+), 1955 deletions(-) create mode 100644 fs/nfsd/cache.h create mode 100644 fs/nfsd/nfsd.h create mode 100644 fs/nfsd/state.h create mode 100644 fs/nfsd/xdr.h create mode 100644 fs/nfsd/xdr3.h create mode 100644 fs/nfsd/xdr4.h delete mode 100644 include/linux/nfsd/cache.h delete mode 100644 include/linux/nfsd/nfsd.h delete mode 100644 include/linux/nfsd/state.h delete mode 100644 include/linux/nfsd/xdr.h delete mode 100644 include/linux/nfsd/xdr3.h delete mode 100644 include/linux/nfsd/xdr4.h (limited to 'include') diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c index ad354d284cf8..71209d4993d0 100644 --- a/fs/nfsd/auth.c +++ b/fs/nfsd/auth.c @@ -5,7 +5,7 @@ */ #include -#include +#include "nfsd.h" #include "auth.h" int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp) diff --git a/fs/nfsd/cache.h b/fs/nfsd/cache.h new file mode 100644 index 000000000000..a165425dea41 --- /dev/null +++ b/fs/nfsd/cache.h @@ -0,0 +1,85 @@ +/* + * include/linux/nfsd/cache.h + * + * Request reply cache. This was heavily inspired by the + * implementation in 4.3BSD/4.4BSD. + * + * Copyright (C) 1995, 1996 Olaf Kirch + */ + +#ifndef NFSCACHE_H +#define NFSCACHE_H + +#include + +/* + * Representation of a reply cache entry. + */ +struct svc_cacherep { + struct hlist_node c_hash; + struct list_head c_lru; + + unsigned char c_state, /* unused, inprog, done */ + c_type, /* status, buffer */ + c_secure : 1; /* req came from port < 1024 */ + struct sockaddr_in c_addr; + __be32 c_xid; + u32 c_prot; + u32 c_proc; + u32 c_vers; + unsigned long c_timestamp; + union { + struct kvec u_vec; + __be32 u_status; + } c_u; +}; + +#define c_replvec c_u.u_vec +#define c_replstat c_u.u_status + +/* cache entry states */ +enum { + RC_UNUSED, + RC_INPROG, + RC_DONE +}; + +/* return values */ +enum { + RC_DROPIT, + RC_REPLY, + RC_DOIT, + RC_INTR +}; + +/* + * Cache types. + * We may want to add more types one day, e.g. for diropres and + * attrstat replies. Using cache entries with fixed length instead + * of buffer pointers may be more efficient. + */ +enum { + RC_NOCACHE, + RC_REPLSTAT, + RC_REPLBUFF, +}; + +/* + * If requests are retransmitted within this interval, they're dropped. 
+ */ +#define RC_DELAY (HZ/5) + +int nfsd_reply_cache_init(void); +void nfsd_reply_cache_shutdown(void); +int nfsd_cache_lookup(struct svc_rqst *, int); +void nfsd_cache_update(struct svc_rqst *, int, __be32 *); + +#ifdef CONFIG_NFSD_V4 +void nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp); +#else /* CONFIG_NFSD_V4 */ +static inline void nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp) +{ +} +#endif /* CONFIG_NFSD_V4 */ + +#endif /* NFSCACHE_H */ diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c index 68e63f441444..cb3dae2fcd86 100644 --- a/fs/nfsd/export.c +++ b/fs/nfsd/export.c @@ -18,10 +18,11 @@ #include #include -#include #include #include +#include "nfsd.h" + #define NFSDDBG_FACILITY NFSDDBG_EXPORT typedef struct auth_domain svc_client; diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c index 801ef7104ff4..6f12777ed227 100644 --- a/fs/nfsd/lockd.c +++ b/fs/nfsd/lockd.c @@ -9,8 +9,8 @@ */ #include -#include #include +#include "nfsd.h" #include "vfs.h" #define NFSDDBG_FACILITY NFSDDBG_LOCKD diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c index a54628de7715..874e2a94bf4f 100644 --- a/fs/nfsd/nfs2acl.c +++ b/fs/nfsd/nfs2acl.c @@ -6,10 +6,11 @@ * Copyright (C) 2002-2003 Andreas Gruenbacher */ -#include -#include -#include +#include "nfsd.h" +/* FIXME: nfsacl.h is a broken header */ #include +#include "cache.h" +#include "xdr3.h" #include "vfs.h" #define NFSDDBG_FACILITY NFSDDBG_PROC diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c index 2f5c61bea908..c6011ddbadc0 100644 --- a/fs/nfsd/nfs3acl.c +++ b/fs/nfsd/nfs3acl.c @@ -6,10 +6,11 @@ * Copyright (C) 2002-2003 Andreas Gruenbacher */ -#include -#include -#include +#include "nfsd.h" +/* FIXME: nfsacl.h is a broken header */ #include +#include "cache.h" +#include "xdr3.h" #include "vfs.h" #define RETURN_STATUS(st) { resp->status = (st); return (st); } diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c index b694b4304544..90b19ca75b34 100644 --- a/fs/nfsd/nfs3proc.c +++ b/fs/nfsd/nfs3proc.c @@ -10,8 +10,8 @@ #include #include -#include -#include +#include "cache.h" +#include "xdr3.h" #include "vfs.h" #define NFSDDBG_FACILITY NFSDDBG_PROC diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c index 623e13aa6259..c523bb88c10b 100644 --- a/fs/nfsd/nfs3xdr.c +++ b/fs/nfsd/nfs3xdr.c @@ -9,7 +9,7 @@ */ #include -#include +#include "xdr3.h" #include "auth.h" #define NFSDDBG_FACILITY NFSDDBG_XDR diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 4fe396071b61..f7a315827638 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -34,8 +34,8 @@ */ #include -#include -#include +#include "nfsd.h" +#include "state.h" #define NFSDDBG_FACILITY NFSDDBG_PROC diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 61f682c77e7f..e2b5666f25d1 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -36,8 +36,8 @@ */ #include -#include -#include +#include "cache.h" +#include "xdr4.h" #include "vfs.h" #define NFSDDBG_FACILITY NFSDDBG_PROC diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index 48742f243c25..6744e7f2da0e 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -33,12 +33,13 @@ * */ -#include -#include #include #include #include #include + +#include "nfsd.h" +#include "state.h" #include "vfs.h" #define NFSDDBG_FACILITY NFSDDBG_PROC diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 1fe6e29fd500..2923e6c1da18 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -36,11 +36,11 @@ #include #include -#include #include #include #include #include +#include "xdr4.h" #include 
"vfs.h" #define NFSDDBG_FACILITY NFSDDBG_PROC diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 2fa96821f5b5..cab978031100 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -43,10 +43,11 @@ #include #include #include -#include #include #include #include + +#include "xdr4.h" #include "vfs.h" #define NFSDDBG_FACILITY NFSDDBG_XDR diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c index 96694b8345ef..18aa9729a380 100644 --- a/fs/nfsd/nfscache.c +++ b/fs/nfsd/nfscache.c @@ -10,8 +10,8 @@ * Copyright (C) 1995, 1996 Olaf Kirch */ -#include -#include +#include "nfsd.h" +#include "cache.h" /* Size of reply cache. Common values are: * 4.3BSD: 128 diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index e4f49fd6af44..0415680d3f58 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -11,12 +11,13 @@ #include #include -#include -#include #include #include #include +#include "nfsd.h" +#include "cache.h" + /* * We have a single directory with 9 nodes in it. */ diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h new file mode 100644 index 000000000000..74f67c2aca34 --- /dev/null +++ b/fs/nfsd/nfsd.h @@ -0,0 +1,335 @@ +/* + * linux/include/linux/nfsd/nfsd.h + * + * Hodge-podge collection of knfsd-related stuff. + * I will sort this out later. + * + * Copyright (C) 1995-1997 Olaf Kirch + */ + +#ifndef LINUX_NFSD_NFSD_H +#define LINUX_NFSD_NFSD_H + +#include +#include + +#include +#include +#include +/* + * nfsd version + */ +#define NFSD_SUPPORTED_MINOR_VERSION 1 + +struct readdir_cd { + __be32 err; /* 0, nfserr, or nfserr_eof */ +}; + + +extern struct svc_program nfsd_program; +extern struct svc_version nfsd_version2, nfsd_version3, + nfsd_version4; +extern u32 nfsd_supported_minorversion; +extern struct mutex nfsd_mutex; +extern struct svc_serv *nfsd_serv; +extern spinlock_t nfsd_drc_lock; +extern unsigned int nfsd_drc_max_mem; +extern unsigned int nfsd_drc_mem_used; + +extern const struct seq_operations nfs_exports_op; + +/* + * Function prototypes. 
+ */ +int nfsd_svc(unsigned short port, int nrservs); +int nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp); + +int nfsd_nrthreads(void); +int nfsd_nrpools(void); +int nfsd_get_nrthreads(int n, int *); +int nfsd_set_nrthreads(int n, int *); + +#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) +#ifdef CONFIG_NFSD_V2_ACL +extern struct svc_version nfsd_acl_version2; +#else +#define nfsd_acl_version2 NULL +#endif +#ifdef CONFIG_NFSD_V3_ACL +extern struct svc_version nfsd_acl_version3; +#else +#define nfsd_acl_version3 NULL +#endif +#endif + +enum vers_op {NFSD_SET, NFSD_CLEAR, NFSD_TEST, NFSD_AVAIL }; +int nfsd_vers(int vers, enum vers_op change); +int nfsd_minorversion(u32 minorversion, enum vers_op change); +void nfsd_reset_versions(void); +int nfsd_create_serv(void); + +extern int nfsd_max_blksize; + +/* + * NFSv4 State + */ +#ifdef CONFIG_NFSD_V4 +extern unsigned int max_delegations; +int nfs4_state_init(void); +void nfsd4_free_slabs(void); +int nfs4_state_start(void); +void nfs4_state_shutdown(void); +time_t nfs4_lease_time(void); +void nfs4_reset_lease(time_t leasetime); +int nfs4_reset_recoverydir(char *recdir); +#else +static inline int nfs4_state_init(void) { return 0; } +static inline void nfsd4_free_slabs(void) { } +static inline int nfs4_state_start(void) { return 0; } +static inline void nfs4_state_shutdown(void) { } +static inline time_t nfs4_lease_time(void) { return 0; } +static inline void nfs4_reset_lease(time_t leasetime) { } +static inline int nfs4_reset_recoverydir(char *recdir) { return 0; } +#endif + +/* + * lockd binding + */ +void nfsd_lockd_init(void); +void nfsd_lockd_shutdown(void); + + +/* + * These macros provide pre-xdr'ed values for faster operation. + */ +#define nfs_ok cpu_to_be32(NFS_OK) +#define nfserr_perm cpu_to_be32(NFSERR_PERM) +#define nfserr_noent cpu_to_be32(NFSERR_NOENT) +#define nfserr_io cpu_to_be32(NFSERR_IO) +#define nfserr_nxio cpu_to_be32(NFSERR_NXIO) +#define nfserr_eagain cpu_to_be32(NFSERR_EAGAIN) +#define nfserr_acces cpu_to_be32(NFSERR_ACCES) +#define nfserr_exist cpu_to_be32(NFSERR_EXIST) +#define nfserr_xdev cpu_to_be32(NFSERR_XDEV) +#define nfserr_nodev cpu_to_be32(NFSERR_NODEV) +#define nfserr_notdir cpu_to_be32(NFSERR_NOTDIR) +#define nfserr_isdir cpu_to_be32(NFSERR_ISDIR) +#define nfserr_inval cpu_to_be32(NFSERR_INVAL) +#define nfserr_fbig cpu_to_be32(NFSERR_FBIG) +#define nfserr_nospc cpu_to_be32(NFSERR_NOSPC) +#define nfserr_rofs cpu_to_be32(NFSERR_ROFS) +#define nfserr_mlink cpu_to_be32(NFSERR_MLINK) +#define nfserr_opnotsupp cpu_to_be32(NFSERR_OPNOTSUPP) +#define nfserr_nametoolong cpu_to_be32(NFSERR_NAMETOOLONG) +#define nfserr_notempty cpu_to_be32(NFSERR_NOTEMPTY) +#define nfserr_dquot cpu_to_be32(NFSERR_DQUOT) +#define nfserr_stale cpu_to_be32(NFSERR_STALE) +#define nfserr_remote cpu_to_be32(NFSERR_REMOTE) +#define nfserr_wflush cpu_to_be32(NFSERR_WFLUSH) +#define nfserr_badhandle cpu_to_be32(NFSERR_BADHANDLE) +#define nfserr_notsync cpu_to_be32(NFSERR_NOT_SYNC) +#define nfserr_badcookie cpu_to_be32(NFSERR_BAD_COOKIE) +#define nfserr_notsupp cpu_to_be32(NFSERR_NOTSUPP) +#define nfserr_toosmall cpu_to_be32(NFSERR_TOOSMALL) +#define nfserr_serverfault cpu_to_be32(NFSERR_SERVERFAULT) +#define nfserr_badtype cpu_to_be32(NFSERR_BADTYPE) +#define nfserr_jukebox cpu_to_be32(NFSERR_JUKEBOX) +#define nfserr_denied cpu_to_be32(NFSERR_DENIED) +#define nfserr_deadlock cpu_to_be32(NFSERR_DEADLOCK) +#define nfserr_expired cpu_to_be32(NFSERR_EXPIRED) +#define nfserr_bad_cookie cpu_to_be32(NFSERR_BAD_COOKIE) +#define 
nfserr_same cpu_to_be32(NFSERR_SAME) +#define nfserr_clid_inuse cpu_to_be32(NFSERR_CLID_INUSE) +#define nfserr_stale_clientid cpu_to_be32(NFSERR_STALE_CLIENTID) +#define nfserr_resource cpu_to_be32(NFSERR_RESOURCE) +#define nfserr_moved cpu_to_be32(NFSERR_MOVED) +#define nfserr_nofilehandle cpu_to_be32(NFSERR_NOFILEHANDLE) +#define nfserr_minor_vers_mismatch cpu_to_be32(NFSERR_MINOR_VERS_MISMATCH) +#define nfserr_share_denied cpu_to_be32(NFSERR_SHARE_DENIED) +#define nfserr_stale_stateid cpu_to_be32(NFSERR_STALE_STATEID) +#define nfserr_old_stateid cpu_to_be32(NFSERR_OLD_STATEID) +#define nfserr_bad_stateid cpu_to_be32(NFSERR_BAD_STATEID) +#define nfserr_bad_seqid cpu_to_be32(NFSERR_BAD_SEQID) +#define nfserr_symlink cpu_to_be32(NFSERR_SYMLINK) +#define nfserr_not_same cpu_to_be32(NFSERR_NOT_SAME) +#define nfserr_restorefh cpu_to_be32(NFSERR_RESTOREFH) +#define nfserr_attrnotsupp cpu_to_be32(NFSERR_ATTRNOTSUPP) +#define nfserr_bad_xdr cpu_to_be32(NFSERR_BAD_XDR) +#define nfserr_openmode cpu_to_be32(NFSERR_OPENMODE) +#define nfserr_locks_held cpu_to_be32(NFSERR_LOCKS_HELD) +#define nfserr_op_illegal cpu_to_be32(NFSERR_OP_ILLEGAL) +#define nfserr_grace cpu_to_be32(NFSERR_GRACE) +#define nfserr_no_grace cpu_to_be32(NFSERR_NO_GRACE) +#define nfserr_reclaim_bad cpu_to_be32(NFSERR_RECLAIM_BAD) +#define nfserr_badname cpu_to_be32(NFSERR_BADNAME) +#define nfserr_cb_path_down cpu_to_be32(NFSERR_CB_PATH_DOWN) +#define nfserr_locked cpu_to_be32(NFSERR_LOCKED) +#define nfserr_wrongsec cpu_to_be32(NFSERR_WRONGSEC) +#define nfserr_badiomode cpu_to_be32(NFS4ERR_BADIOMODE) +#define nfserr_badlayout cpu_to_be32(NFS4ERR_BADLAYOUT) +#define nfserr_bad_session_digest cpu_to_be32(NFS4ERR_BAD_SESSION_DIGEST) +#define nfserr_badsession cpu_to_be32(NFS4ERR_BADSESSION) +#define nfserr_badslot cpu_to_be32(NFS4ERR_BADSLOT) +#define nfserr_complete_already cpu_to_be32(NFS4ERR_COMPLETE_ALREADY) +#define nfserr_conn_not_bound_to_session cpu_to_be32(NFS4ERR_CONN_NOT_BOUND_TO_SESSION) +#define nfserr_deleg_already_wanted cpu_to_be32(NFS4ERR_DELEG_ALREADY_WANTED) +#define nfserr_back_chan_busy cpu_to_be32(NFS4ERR_BACK_CHAN_BUSY) +#define nfserr_layouttrylater cpu_to_be32(NFS4ERR_LAYOUTTRYLATER) +#define nfserr_layoutunavailable cpu_to_be32(NFS4ERR_LAYOUTUNAVAILABLE) +#define nfserr_nomatching_layout cpu_to_be32(NFS4ERR_NOMATCHING_LAYOUT) +#define nfserr_recallconflict cpu_to_be32(NFS4ERR_RECALLCONFLICT) +#define nfserr_unknown_layouttype cpu_to_be32(NFS4ERR_UNKNOWN_LAYOUTTYPE) +#define nfserr_seq_misordered cpu_to_be32(NFS4ERR_SEQ_MISORDERED) +#define nfserr_sequence_pos cpu_to_be32(NFS4ERR_SEQUENCE_POS) +#define nfserr_req_too_big cpu_to_be32(NFS4ERR_REQ_TOO_BIG) +#define nfserr_rep_too_big cpu_to_be32(NFS4ERR_REP_TOO_BIG) +#define nfserr_rep_too_big_to_cache cpu_to_be32(NFS4ERR_REP_TOO_BIG_TO_CACHE) +#define nfserr_retry_uncached_rep cpu_to_be32(NFS4ERR_RETRY_UNCACHED_REP) +#define nfserr_unsafe_compound cpu_to_be32(NFS4ERR_UNSAFE_COMPOUND) +#define nfserr_too_many_ops cpu_to_be32(NFS4ERR_TOO_MANY_OPS) +#define nfserr_op_not_in_session cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION) +#define nfserr_hash_alg_unsupp cpu_to_be32(NFS4ERR_HASH_ALG_UNSUPP) +#define nfserr_clientid_busy cpu_to_be32(NFS4ERR_CLIENTID_BUSY) +#define nfserr_pnfs_io_hole cpu_to_be32(NFS4ERR_PNFS_IO_HOLE) +#define nfserr_seq_false_retry cpu_to_be32(NFS4ERR_SEQ_FALSE_RETRY) +#define nfserr_bad_high_slot cpu_to_be32(NFS4ERR_BAD_HIGH_SLOT) +#define nfserr_deadsession cpu_to_be32(NFS4ERR_DEADSESSION) +#define nfserr_encr_alg_unsupp 
cpu_to_be32(NFS4ERR_ENCR_ALG_UNSUPP) +#define nfserr_pnfs_no_layout cpu_to_be32(NFS4ERR_PNFS_NO_LAYOUT) +#define nfserr_not_only_op cpu_to_be32(NFS4ERR_NOT_ONLY_OP) +#define nfserr_wrong_cred cpu_to_be32(NFS4ERR_WRONG_CRED) +#define nfserr_wrong_type cpu_to_be32(NFS4ERR_WRONG_TYPE) +#define nfserr_dirdeleg_unavail cpu_to_be32(NFS4ERR_DIRDELEG_UNAVAIL) +#define nfserr_reject_deleg cpu_to_be32(NFS4ERR_REJECT_DELEG) +#define nfserr_returnconflict cpu_to_be32(NFS4ERR_RETURNCONFLICT) +#define nfserr_deleg_revoked cpu_to_be32(NFS4ERR_DELEG_REVOKED) + +/* error codes for internal use */ +/* if a request fails due to kmalloc failure, it gets dropped. + * Client should resend eventually + */ +#define nfserr_dropit cpu_to_be32(30000) +/* end-of-file indicator in readdir */ +#define nfserr_eof cpu_to_be32(30001) +/* replay detected */ +#define nfserr_replay_me cpu_to_be32(11001) +/* nfs41 replay detected */ +#define nfserr_replay_cache cpu_to_be32(11002) + +/* Check for dir entries '.' and '..' */ +#define isdotent(n, l) (l < 3 && n[0] == '.' && (l == 1 || n[1] == '.')) + +/* + * Time of server startup + */ +extern struct timeval nfssvc_boot; + +#ifdef CONFIG_NFSD_V4 + +/* before processing a COMPOUND operation, we have to check that there + * is enough space in the buffer for XDR encode to succeed. otherwise, + * we might process an operation with side effects, and be unable to + * tell the client that the operation succeeded. + * + * COMPOUND_SLACK_SPACE - this is the minimum bytes of buffer space + * needed to encode an "ordinary" _successful_ operation. (GETATTR, + * READ, READDIR, and READLINK have their own buffer checks.) if we + * fall below this level, we fail the next operation with NFS4ERR_RESOURCE. + * + * COMPOUND_ERR_SLACK_SPACE - this is the minimum bytes of buffer space + * needed to encode an operation which has failed with NFS4ERR_RESOURCE. + * care is taken to ensure that we never fall below this level for any + * reason. 
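 *
 * As a rough sketch (not the actual encoder code, and using made-up
 * variable names), the check amounts to:
 *
 *	if (bytes_left_in_reply < COMPOUND_SLACK_SPACE)
 *		op->status = nfserr_resource;
 *
 * so an operation with side effects is only attempted once its
 * successful result is guaranteed to fit in the reply buffer.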
+ */ +#define COMPOUND_SLACK_SPACE 140 /* OP_GETFH */ +#define COMPOUND_ERR_SLACK_SPACE 12 /* OP_SETATTR */ + +#define NFSD_LEASE_TIME (nfs4_lease_time()) +#define NFSD_LAUNDROMAT_MINTIMEOUT 10 /* seconds */ + +/* + * The following attributes are currently not supported by the NFSv4 server: + * ARCHIVE (deprecated anyway) + * HIDDEN (unlikely to be supported any time soon) + * MIMETYPE (unlikely to be supported any time soon) + * QUOTA_* (will be supported in a forthcoming patch) + * SYSTEM (unlikely to be supported any time soon) + * TIME_BACKUP (unlikely to be supported any time soon) + * TIME_CREATE (unlikely to be supported any time soon) + */ +#define NFSD4_SUPPORTED_ATTRS_WORD0 \ +(FATTR4_WORD0_SUPPORTED_ATTRS | FATTR4_WORD0_TYPE | FATTR4_WORD0_FH_EXPIRE_TYPE \ + | FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE | FATTR4_WORD0_LINK_SUPPORT \ + | FATTR4_WORD0_SYMLINK_SUPPORT | FATTR4_WORD0_NAMED_ATTR | FATTR4_WORD0_FSID \ + | FATTR4_WORD0_UNIQUE_HANDLES | FATTR4_WORD0_LEASE_TIME | FATTR4_WORD0_RDATTR_ERROR \ + | FATTR4_WORD0_ACLSUPPORT | FATTR4_WORD0_CANSETTIME | FATTR4_WORD0_CASE_INSENSITIVE \ + | FATTR4_WORD0_CASE_PRESERVING | FATTR4_WORD0_CHOWN_RESTRICTED \ + | FATTR4_WORD0_FILEHANDLE | FATTR4_WORD0_FILEID | FATTR4_WORD0_FILES_AVAIL \ + | FATTR4_WORD0_FILES_FREE | FATTR4_WORD0_FILES_TOTAL | FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_HOMOGENEOUS \ + | FATTR4_WORD0_MAXFILESIZE | FATTR4_WORD0_MAXLINK | FATTR4_WORD0_MAXNAME \ + | FATTR4_WORD0_MAXREAD | FATTR4_WORD0_MAXWRITE | FATTR4_WORD0_ACL) + +#define NFSD4_SUPPORTED_ATTRS_WORD1 \ +(FATTR4_WORD1_MODE | FATTR4_WORD1_NO_TRUNC | FATTR4_WORD1_NUMLINKS \ + | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP | FATTR4_WORD1_RAWDEV \ + | FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE | FATTR4_WORD1_SPACE_TOTAL \ + | FATTR4_WORD1_SPACE_USED | FATTR4_WORD1_TIME_ACCESS | FATTR4_WORD1_TIME_ACCESS_SET \ + | FATTR4_WORD1_TIME_DELTA | FATTR4_WORD1_TIME_METADATA \ + | FATTR4_WORD1_TIME_MODIFY | FATTR4_WORD1_TIME_MODIFY_SET | FATTR4_WORD1_MOUNTED_ON_FILEID) + +#define NFSD4_SUPPORTED_ATTRS_WORD2 0 + +#define NFSD4_1_SUPPORTED_ATTRS_WORD0 \ + NFSD4_SUPPORTED_ATTRS_WORD0 + +#define NFSD4_1_SUPPORTED_ATTRS_WORD1 \ + NFSD4_SUPPORTED_ATTRS_WORD1 + +#define NFSD4_1_SUPPORTED_ATTRS_WORD2 \ + (NFSD4_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SUPPATTR_EXCLCREAT) + +static inline u32 nfsd_suppattrs0(u32 minorversion) +{ + return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD0 + : NFSD4_SUPPORTED_ATTRS_WORD0; +} + +static inline u32 nfsd_suppattrs1(u32 minorversion) +{ + return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD1 + : NFSD4_SUPPORTED_ATTRS_WORD1; +} + +static inline u32 nfsd_suppattrs2(u32 minorversion) +{ + return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD2 + : NFSD4_SUPPORTED_ATTRS_WORD2; +} + +/* These will return ERR_INVAL if specified in GETATTR or READDIR. */ +#define NFSD_WRITEONLY_ATTRS_WORD1 \ +(FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET) + +/* These are the only attrs allowed in CREATE/OPEN/SETATTR. 
*/ +#define NFSD_WRITEABLE_ATTRS_WORD0 \ +(FATTR4_WORD0_SIZE | FATTR4_WORD0_ACL ) +#define NFSD_WRITEABLE_ATTRS_WORD1 \ +(FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP \ + | FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET) +#define NFSD_WRITEABLE_ATTRS_WORD2 0 + +#define NFSD_SUPPATTR_EXCLCREAT_WORD0 \ + NFSD_WRITEABLE_ATTRS_WORD0 +/* + * we currently store the exclusive create verifier in the v_{a,m}time + * attributes so the client can't set these at create time using EXCLUSIVE4_1 + */ +#define NFSD_SUPPATTR_EXCLCREAT_WORD1 \ + (NFSD_WRITEABLE_ATTRS_WORD1 & \ + ~(FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET)) +#define NFSD_SUPPATTR_EXCLCREAT_WORD2 \ + NFSD_WRITEABLE_ATTRS_WORD2 + +#endif /* CONFIG_NFSD_V4 */ + +#endif /* LINUX_NFSD_NFSD_H */ diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c index 739948165034..0eb1c59f5ab8 100644 --- a/fs/nfsd/nfsfh.c +++ b/fs/nfsd/nfsfh.c @@ -12,7 +12,7 @@ #include #include -#include +#include "nfsd.h" #include "vfs.h" #include "auth.h" diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c index b6bd9e0d7cd0..21a5f793c3d1 100644 --- a/fs/nfsd/nfsproc.c +++ b/fs/nfsd/nfsproc.c @@ -9,8 +9,8 @@ #include -#include -#include +#include "cache.h" +#include "xdr.h" #include "vfs.h" typedef struct svc_rqst svc_rqst; diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index b2d7ffac0357..b520ce10bd15 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -15,11 +15,11 @@ #include #include -#include -#include #include #include #include +#include "nfsd.h" +#include "cache.h" #include "vfs.h" #define NFSDDBG_FACILITY NFSDDBG_SVC diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c index 5e0603da39e7..3bec831704af 100644 --- a/fs/nfsd/nfsxdr.c +++ b/fs/nfsd/nfsxdr.c @@ -6,7 +6,7 @@ * Copyright (C) 1995, 1996 Olaf Kirch */ -#include +#include "xdr.h" #include "auth.h" #define NFSDDBG_FACILITY NFSDDBG_XDR diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h new file mode 100644 index 000000000000..2af75686e0d3 --- /dev/null +++ b/fs/nfsd/state.h @@ -0,0 +1,409 @@ +/* + * linux/include/nfsd/state.h + * + * Copyright (c) 2001 The Regents of the University of Michigan. + * All rights reserved. + * + * Kendrick Smith + * Andy Adamson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef _NFSD4_STATE_H +#define _NFSD4_STATE_H + +#include + +typedef struct { + u32 cl_boot; + u32 cl_id; +} clientid_t; + +typedef struct { + u32 so_boot; + u32 so_stateownerid; + u32 so_fileid; +} stateid_opaque_t; + +typedef struct { + u32 si_generation; + stateid_opaque_t si_opaque; +} stateid_t; +#define si_boot si_opaque.so_boot +#define si_stateownerid si_opaque.so_stateownerid +#define si_fileid si_opaque.so_fileid + +#define STATEID_FMT "(%08x/%08x/%08x/%08x)" +#define STATEID_VAL(s) \ + (s)->si_boot, \ + (s)->si_stateownerid, \ + (s)->si_fileid, \ + (s)->si_generation + +struct nfsd4_cb_sequence { + /* args/res */ + u32 cbs_minorversion; + struct nfs4_client *cbs_clp; +}; + +struct nfs4_delegation { + struct list_head dl_perfile; + struct list_head dl_perclnt; + struct list_head dl_recall_lru; /* delegation recalled */ + atomic_t dl_count; /* ref count */ + struct nfs4_client *dl_client; + struct nfs4_file *dl_file; + struct file_lock *dl_flock; + struct file *dl_vfs_file; + u32 dl_type; + time_t dl_time; +/* For recall: */ + u32 dl_ident; + stateid_t dl_stateid; + struct knfsd_fh dl_fh; + int dl_retries; +}; + +/* client delegation callback info */ +struct nfs4_cb_conn { + /* SETCLIENTID info */ + struct sockaddr_storage cb_addr; + size_t cb_addrlen; + u32 cb_prog; + u32 cb_minorversion; + u32 cb_ident; /* minorversion 0 only */ + /* RPC client info */ + atomic_t cb_set; /* successful CB_NULL call */ + struct rpc_clnt * cb_client; +}; + +/* Maximum number of slots per session. 
160 is useful for long haul TCP */ +#define NFSD_MAX_SLOTS_PER_SESSION 160 +/* Maximum number of operations per session compound */ +#define NFSD_MAX_OPS_PER_COMPOUND 16 +/* Maximum session per slot cache size */ +#define NFSD_SLOT_CACHE_SIZE 1024 +/* Maximum number of NFSD_SLOT_CACHE_SIZE slots per session */ +#define NFSD_CACHE_SIZE_SLOTS_PER_SESSION 32 +#define NFSD_MAX_MEM_PER_SESSION \ + (NFSD_CACHE_SIZE_SLOTS_PER_SESSION * NFSD_SLOT_CACHE_SIZE) + +struct nfsd4_slot { + bool sl_inuse; + bool sl_cachethis; + u16 sl_opcnt; + u32 sl_seqid; + __be32 sl_status; + u32 sl_datalen; + char sl_data[]; +}; + +struct nfsd4_channel_attrs { + u32 headerpadsz; + u32 maxreq_sz; + u32 maxresp_sz; + u32 maxresp_cached; + u32 maxops; + u32 maxreqs; + u32 nr_rdma_attrs; + u32 rdma_attrs; +}; + +struct nfsd4_create_session { + clientid_t clientid; + struct nfs4_sessionid sessionid; + u32 seqid; + u32 flags; + struct nfsd4_channel_attrs fore_channel; + struct nfsd4_channel_attrs back_channel; + u32 callback_prog; + u32 uid; + u32 gid; +}; + +/* The single slot clientid cache structure */ +struct nfsd4_clid_slot { + u32 sl_seqid; + __be32 sl_status; + struct nfsd4_create_session sl_cr_ses; +}; + +struct nfsd4_session { + struct kref se_ref; + struct list_head se_hash; /* hash by sessionid */ + struct list_head se_perclnt; + u32 se_flags; + struct nfs4_client *se_client; /* for expire_client */ + struct nfs4_sessionid se_sessionid; + struct nfsd4_channel_attrs se_fchannel; + struct nfsd4_channel_attrs se_bchannel; + struct nfsd4_slot *se_slots[]; /* forward channel slots */ +}; + +static inline void +nfsd4_put_session(struct nfsd4_session *ses) +{ + extern void free_session(struct kref *kref); + kref_put(&ses->se_ref, free_session); +} + +static inline void +nfsd4_get_session(struct nfsd4_session *ses) +{ + kref_get(&ses->se_ref); +} + +/* formatted contents of nfs4_sessionid */ +struct nfsd4_sessionid { + clientid_t clientid; + u32 sequence; + u32 reserved; +}; + +#define HEXDIR_LEN 33 /* hex version of 16 byte md5 of cl_name plus '\0' */ + +/* + * struct nfs4_client - one per client. Clientids live here. + * o Each nfs4_client is hashed by clientid. + * + * o Each nfs4_clients is also hashed by name + * (the opaque quantity initially sent by the client to identify itself). 
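 *    (Roughly, this second hash is what lets a SETCLIENTID from a client
 *    that reboots - same name, fresh verifier - be matched against any
 *    state the server still holds for that client.)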
+ * + * o cl_perclient list is used to ensure no dangling stateowner references + * when we expire the nfs4_client + */ +struct nfs4_client { + struct list_head cl_idhash; /* hash by cl_clientid.id */ + struct list_head cl_strhash; /* hash by cl_name */ + struct list_head cl_openowners; + struct list_head cl_delegations; + struct list_head cl_lru; /* tail queue */ + struct xdr_netobj cl_name; /* id generated by client */ + char cl_recdir[HEXDIR_LEN]; /* recovery dir */ + nfs4_verifier cl_verifier; /* generated by client */ + time_t cl_time; /* time of last lease renewal */ + struct sockaddr_storage cl_addr; /* client ipaddress */ + u32 cl_flavor; /* setclientid pseudoflavor */ + char *cl_principal; /* setclientid principal name */ + struct svc_cred cl_cred; /* setclientid principal */ + clientid_t cl_clientid; /* generated by server */ + nfs4_verifier cl_confirm; /* generated by server */ + struct nfs4_cb_conn cl_cb_conn; /* callback info */ + atomic_t cl_count; /* ref count */ + u32 cl_firststate; /* recovery dir creation */ + + /* for nfs41 */ + struct list_head cl_sessions; + struct nfsd4_clid_slot cl_cs_slot; /* create_session slot */ + u32 cl_exchange_flags; + struct nfs4_sessionid cl_sessionid; + + /* for nfs41 callbacks */ + /* We currently support a single back channel with a single slot */ + unsigned long cl_cb_slot_busy; + u32 cl_cb_seq_nr; + struct svc_xprt *cl_cb_xprt; /* 4.1 callback transport */ + struct rpc_wait_queue cl_cb_waitq; /* backchannel callers may */ + /* wait here for slots */ +}; + +/* struct nfs4_client_reset + * one per old client. Populates reset_str_hashtbl. Filled from conf_id_hashtbl + * upon lease reset, or from upcall to state_daemon (to read in state + * from non-volitile storage) upon reboot. + */ +struct nfs4_client_reclaim { + struct list_head cr_strhash; /* hash by cr_name */ + char cr_recdir[HEXDIR_LEN]; /* recover dir */ +}; + +static inline void +update_stateid(stateid_t *stateid) +{ + stateid->si_generation++; +} + +/* A reasonable value for REPLAY_ISIZE was estimated as follows: + * The OPEN response, typically the largest, requires + * 4(status) + 8(stateid) + 20(changeinfo) + 4(rflags) + 8(verifier) + + * 4(deleg. type) + 8(deleg. stateid) + 4(deleg. recall flag) + + * 20(deleg. space limit) + ~32(deleg. ace) = 112 bytes + */ + +#define NFSD4_REPLAY_ISIZE 112 + +/* + * Replay buffer, where the result of the last seqid-mutating operation + * is cached. + */ +struct nfs4_replay { + __be32 rp_status; + unsigned int rp_buflen; + char *rp_buf; + unsigned intrp_allocated; + struct knfsd_fh rp_openfh; + char rp_ibuf[NFSD4_REPLAY_ISIZE]; +}; + +/* +* nfs4_stateowner can either be an open_owner, or a lock_owner +* +* so_idhash: stateid_hashtbl[] for open owner, lockstateid_hashtbl[] +* for lock_owner +* so_strhash: ownerstr_hashtbl[] for open_owner, lock_ownerstr_hashtbl[] +* for lock_owner +* so_perclient: nfs4_client->cl_perclient entry - used when nfs4_client +* struct is reaped. +* so_perfilestate: heads the list of nfs4_stateid (either open or lock) +* and is used to ensure no dangling nfs4_stateid references when we +* release a stateowner. +* so_perlockowner: (open) nfs4_stateid->st_perlockowner entry - used when +* close is called to reap associated byte-range locks +* so_close_lru: (open) stateowner is placed on this list instead of being +* reaped (when so_perfilestate is empty) to hold the last close replay. +* reaped by laundramat thread after lease period. 
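*    ("laundromat" here is nfsd4's periodic cleanup work, which also
*    expires clients whose leases have gone unrenewed past the lease period.)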
+*/ +struct nfs4_stateowner { + struct kref so_ref; + struct list_head so_idhash; /* hash by so_id */ + struct list_head so_strhash; /* hash by op_name */ + struct list_head so_perclient; + struct list_head so_stateids; + struct list_head so_perstateid; /* for lockowners only */ + struct list_head so_close_lru; /* tail queue */ + time_t so_time; /* time of placement on so_close_lru */ + int so_is_open_owner; /* 1=openowner,0=lockowner */ + u32 so_id; + struct nfs4_client * so_client; + /* after increment in ENCODE_SEQID_OP_TAIL, represents the next + * sequence id expected from the client: */ + u32 so_seqid; + struct xdr_netobj so_owner; /* open owner name */ + int so_confirmed; /* successful OPEN_CONFIRM? */ + struct nfs4_replay so_replay; +}; + +/* +* nfs4_file: a file opened by some number of (open) nfs4_stateowners. +* o fi_perfile list is used to search for conflicting +* share_acces, share_deny on the file. +*/ +struct nfs4_file { + atomic_t fi_ref; + struct list_head fi_hash; /* hash by "struct inode *" */ + struct list_head fi_stateids; + struct list_head fi_delegations; + struct inode *fi_inode; + u32 fi_id; /* used with stateowner->so_id + * for stateid_hashtbl hash */ + bool fi_had_conflict; +}; + +/* +* nfs4_stateid can either be an open stateid or (eventually) a lock stateid +* +* (open)nfs4_stateid: one per (open)nfs4_stateowner, nfs4_file +* +* st_hash: stateid_hashtbl[] entry or lockstateid_hashtbl entry +* st_perfile: file_hashtbl[] entry. +* st_perfile_state: nfs4_stateowner->so_perfilestate +* st_perlockowner: (open stateid) list of lock nfs4_stateowners +* st_access_bmap: used only for open stateid +* st_deny_bmap: used only for open stateid +* st_openstp: open stateid lock stateid was derived from +* +* XXX: open stateids and lock stateids have diverged sufficiently that +* we should consider defining separate structs for the two cases. 
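*
* (Roughly, st_access_bmap and st_deny_bmap record which OPEN share_access
* and share_deny values have been granted through this stateid, so later
* OPENs and OPEN_DOWNGRADE requests can be checked for share reservation
* conflicts.)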
+*/ + +struct nfs4_stateid { + struct list_head st_hash; + struct list_head st_perfile; + struct list_head st_perstateowner; + struct list_head st_lockowners; + struct nfs4_stateowner * st_stateowner; + struct nfs4_file * st_file; + stateid_t st_stateid; + struct file * st_vfs_file; + unsigned long st_access_bmap; + unsigned long st_deny_bmap; + struct nfs4_stateid * st_openstp; +}; + +/* flags for preprocess_seqid_op() */ +#define HAS_SESSION 0x00000001 +#define CONFIRM 0x00000002 +#define OPEN_STATE 0x00000004 +#define LOCK_STATE 0x00000008 +#define RD_STATE 0x00000010 +#define WR_STATE 0x00000020 +#define CLOSE_STATE 0x00000040 + +#define seqid_mutating_err(err) \ + (((err) != nfserr_stale_clientid) && \ + ((err) != nfserr_bad_seqid) && \ + ((err) != nfserr_stale_stateid) && \ + ((err) != nfserr_bad_stateid)) + +struct nfsd4_compound_state; + +extern __be32 nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate, + stateid_t *stateid, int flags, struct file **filp); +extern void nfs4_lock_state(void); +extern void nfs4_unlock_state(void); +extern int nfs4_in_grace(void); +extern __be32 nfs4_check_open_reclaim(clientid_t *clid); +extern void put_nfs4_client(struct nfs4_client *clp); +extern void nfs4_free_stateowner(struct kref *kref); +extern int set_callback_cred(void); +extern void nfsd4_probe_callback(struct nfs4_client *clp); +extern void nfsd4_cb_recall(struct nfs4_delegation *dp); +extern void nfs4_put_delegation(struct nfs4_delegation *dp); +extern __be32 nfs4_make_rec_clidname(char *clidname, struct xdr_netobj *clname); +extern void nfsd4_init_recdir(char *recdir_name); +extern int nfsd4_recdir_load(void); +extern void nfsd4_shutdown_recdir(void); +extern int nfs4_client_to_reclaim(const char *name); +extern int nfs4_has_reclaimed_state(const char *name, bool use_exchange_id); +extern void nfsd4_recdir_purge_old(void); +extern int nfsd4_create_clid_dir(struct nfs4_client *clp); +extern void nfsd4_remove_clid_dir(struct nfs4_client *clp); + +static inline void +nfs4_put_stateowner(struct nfs4_stateowner *so) +{ + kref_put(&so->so_ref, nfs4_free_stateowner); +} + +static inline void +nfs4_get_stateowner(struct nfs4_stateowner *so) +{ + kref_get(&so->so_ref); +} + +#endif /* NFSD4_STATE_H */ diff --git a/fs/nfsd/stats.c b/fs/nfsd/stats.c index e3e411e9fe4a..3fc69dfd3091 100644 --- a/fs/nfsd/stats.c +++ b/fs/nfsd/stats.c @@ -25,11 +25,11 @@ #include #include - #include -#include #include +#include "nfsd.h" + struct nfsd_stats nfsdstats; struct svc_stat nfsd_svcstats = { .program = &nfsd_program, diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 81ce108c114e..04bdba12d21b 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -22,23 +22,25 @@ #include #include #include -#include -#ifdef CONFIG_NFSD_V3 -#include -#endif /* CONFIG_NFSD_V3 */ #include #include #include #include +#include +#include +#include + +#ifdef CONFIG_NFSD_V3 +#include "xdr3.h" +#endif /* CONFIG_NFSD_V3 */ + #ifdef CONFIG_NFSD_V4 #include #include #endif /* CONFIG_NFSD_V4 */ -#include -#include -#include "vfs.h" -#include +#include "nfsd.h" +#include "vfs.h" #define NFSDDBG_FACILITY NFSDDBG_FILEOP diff --git a/fs/nfsd/xdr.h b/fs/nfsd/xdr.h new file mode 100644 index 000000000000..235ee5c3be54 --- /dev/null +++ b/fs/nfsd/xdr.h @@ -0,0 +1,176 @@ +/* + * linux/include/linux/nfsd/xdr.h + * + * XDR types for nfsd. This is mainly a typing exercise. 
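 *
 * Roughly: each NFSv2 procedure gets an args struct that the decode
 * helpers below fill in from the request and a res struct that the
 * encode helpers turn back into XDR; union nfsd_xdrstore sizes the
 * per-request decode scratch space (NFS2_SVC_XDRSIZE).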
+ */ + +#ifndef LINUX_NFSD_H +#define LINUX_NFSD_H + +#include +#include "nfsd.h" + +struct nfsd_fhandle { + struct svc_fh fh; +}; + +struct nfsd_sattrargs { + struct svc_fh fh; + struct iattr attrs; +}; + +struct nfsd_diropargs { + struct svc_fh fh; + char * name; + unsigned int len; +}; + +struct nfsd_readargs { + struct svc_fh fh; + __u32 offset; + __u32 count; + int vlen; +}; + +struct nfsd_writeargs { + svc_fh fh; + __u32 offset; + int len; + int vlen; +}; + +struct nfsd_createargs { + struct svc_fh fh; + char * name; + unsigned int len; + struct iattr attrs; +}; + +struct nfsd_renameargs { + struct svc_fh ffh; + char * fname; + unsigned int flen; + struct svc_fh tfh; + char * tname; + unsigned int tlen; +}; + +struct nfsd_readlinkargs { + struct svc_fh fh; + char * buffer; +}; + +struct nfsd_linkargs { + struct svc_fh ffh; + struct svc_fh tfh; + char * tname; + unsigned int tlen; +}; + +struct nfsd_symlinkargs { + struct svc_fh ffh; + char * fname; + unsigned int flen; + char * tname; + unsigned int tlen; + struct iattr attrs; +}; + +struct nfsd_readdirargs { + struct svc_fh fh; + __u32 cookie; + __u32 count; + __be32 * buffer; +}; + +struct nfsd_attrstat { + struct svc_fh fh; + struct kstat stat; +}; + +struct nfsd_diropres { + struct svc_fh fh; + struct kstat stat; +}; + +struct nfsd_readlinkres { + int len; +}; + +struct nfsd_readres { + struct svc_fh fh; + unsigned long count; + struct kstat stat; +}; + +struct nfsd_readdirres { + int count; + + struct readdir_cd common; + __be32 * buffer; + int buflen; + __be32 * offset; +}; + +struct nfsd_statfsres { + struct kstatfs stats; +}; + +/* + * Storage requirements for XDR arguments and results. + */ +union nfsd_xdrstore { + struct nfsd_sattrargs sattr; + struct nfsd_diropargs dirop; + struct nfsd_readargs read; + struct nfsd_writeargs write; + struct nfsd_createargs create; + struct nfsd_renameargs rename; + struct nfsd_linkargs link; + struct nfsd_symlinkargs symlink; + struct nfsd_readdirargs readdir; +}; + +#define NFS2_SVC_XDRSIZE sizeof(union nfsd_xdrstore) + + +int nfssvc_decode_void(struct svc_rqst *, __be32 *, void *); +int nfssvc_decode_fhandle(struct svc_rqst *, __be32 *, struct nfsd_fhandle *); +int nfssvc_decode_sattrargs(struct svc_rqst *, __be32 *, + struct nfsd_sattrargs *); +int nfssvc_decode_diropargs(struct svc_rqst *, __be32 *, + struct nfsd_diropargs *); +int nfssvc_decode_readargs(struct svc_rqst *, __be32 *, + struct nfsd_readargs *); +int nfssvc_decode_writeargs(struct svc_rqst *, __be32 *, + struct nfsd_writeargs *); +int nfssvc_decode_createargs(struct svc_rqst *, __be32 *, + struct nfsd_createargs *); +int nfssvc_decode_renameargs(struct svc_rqst *, __be32 *, + struct nfsd_renameargs *); +int nfssvc_decode_readlinkargs(struct svc_rqst *, __be32 *, + struct nfsd_readlinkargs *); +int nfssvc_decode_linkargs(struct svc_rqst *, __be32 *, + struct nfsd_linkargs *); +int nfssvc_decode_symlinkargs(struct svc_rqst *, __be32 *, + struct nfsd_symlinkargs *); +int nfssvc_decode_readdirargs(struct svc_rqst *, __be32 *, + struct nfsd_readdirargs *); +int nfssvc_encode_void(struct svc_rqst *, __be32 *, void *); +int nfssvc_encode_attrstat(struct svc_rqst *, __be32 *, struct nfsd_attrstat *); +int nfssvc_encode_diropres(struct svc_rqst *, __be32 *, struct nfsd_diropres *); +int nfssvc_encode_readlinkres(struct svc_rqst *, __be32 *, struct nfsd_readlinkres *); +int nfssvc_encode_readres(struct svc_rqst *, __be32 *, struct nfsd_readres *); +int nfssvc_encode_statfsres(struct svc_rqst *, __be32 *, struct nfsd_statfsres *); 
+int nfssvc_encode_readdirres(struct svc_rqst *, __be32 *, struct nfsd_readdirres *); + +int nfssvc_encode_entry(void *, const char *name, + int namlen, loff_t offset, u64 ino, unsigned int); + +int nfssvc_release_fhandle(struct svc_rqst *, __be32 *, struct nfsd_fhandle *); + +/* Helper functions for NFSv2 ACL code */ +__be32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp); +__be32 *nfs2svc_decode_fh(__be32 *p, struct svc_fh *fhp); + +#endif /* LINUX_NFSD_H */ diff --git a/fs/nfsd/xdr3.h b/fs/nfsd/xdr3.h new file mode 100644 index 000000000000..b330756973cf --- /dev/null +++ b/fs/nfsd/xdr3.h @@ -0,0 +1,346 @@ +/* + * linux/include/linux/nfsd/xdr3.h + * + * XDR types for NFSv3 in nfsd. + * + * Copyright (C) 1996-1998, Olaf Kirch + */ + +#ifndef _LINUX_NFSD_XDR3_H +#define _LINUX_NFSD_XDR3_H + +#include "xdr.h" + +struct nfsd3_sattrargs { + struct svc_fh fh; + struct iattr attrs; + int check_guard; + time_t guardtime; +}; + +struct nfsd3_diropargs { + struct svc_fh fh; + char * name; + unsigned int len; +}; + +struct nfsd3_accessargs { + struct svc_fh fh; + unsigned int access; +}; + +struct nfsd3_readargs { + struct svc_fh fh; + __u64 offset; + __u32 count; + int vlen; +}; + +struct nfsd3_writeargs { + svc_fh fh; + __u64 offset; + __u32 count; + int stable; + __u32 len; + int vlen; +}; + +struct nfsd3_createargs { + struct svc_fh fh; + char * name; + unsigned int len; + int createmode; + struct iattr attrs; + __be32 * verf; +}; + +struct nfsd3_mknodargs { + struct svc_fh fh; + char * name; + unsigned int len; + __u32 ftype; + __u32 major, minor; + struct iattr attrs; +}; + +struct nfsd3_renameargs { + struct svc_fh ffh; + char * fname; + unsigned int flen; + struct svc_fh tfh; + char * tname; + unsigned int tlen; +}; + +struct nfsd3_readlinkargs { + struct svc_fh fh; + char * buffer; +}; + +struct nfsd3_linkargs { + struct svc_fh ffh; + struct svc_fh tfh; + char * tname; + unsigned int tlen; +}; + +struct nfsd3_symlinkargs { + struct svc_fh ffh; + char * fname; + unsigned int flen; + char * tname; + unsigned int tlen; + struct iattr attrs; +}; + +struct nfsd3_readdirargs { + struct svc_fh fh; + __u64 cookie; + __u32 dircount; + __u32 count; + __be32 * verf; + __be32 * buffer; +}; + +struct nfsd3_commitargs { + struct svc_fh fh; + __u64 offset; + __u32 count; +}; + +struct nfsd3_getaclargs { + struct svc_fh fh; + int mask; +}; + +struct posix_acl; +struct nfsd3_setaclargs { + struct svc_fh fh; + int mask; + struct posix_acl *acl_access; + struct posix_acl *acl_default; +}; + +struct nfsd3_attrstat { + __be32 status; + struct svc_fh fh; + struct kstat stat; +}; + +/* LOOKUP, CREATE, MKDIR, SYMLINK, MKNOD */ +struct nfsd3_diropres { + __be32 status; + struct svc_fh dirfh; + struct svc_fh fh; +}; + +struct nfsd3_accessres { + __be32 status; + struct svc_fh fh; + __u32 access; +}; + +struct nfsd3_readlinkres { + __be32 status; + struct svc_fh fh; + __u32 len; +}; + +struct nfsd3_readres { + __be32 status; + struct svc_fh fh; + unsigned long count; + int eof; +}; + +struct nfsd3_writeres { + __be32 status; + struct svc_fh fh; + unsigned long count; + int committed; +}; + +struct nfsd3_renameres { + __be32 status; + struct svc_fh ffh; + struct svc_fh tfh; +}; + +struct nfsd3_linkres { + __be32 status; + struct svc_fh tfh; + struct svc_fh fh; +}; + +struct nfsd3_readdirres { + __be32 status; + struct svc_fh fh; + int count; + __be32 verf[2]; + + struct readdir_cd common; + __be32 * buffer; + int buflen; + __be32 * offset; + __be32 * offset1; + struct svc_rqst * rqstp; + 
+}; + +struct nfsd3_fsstatres { + __be32 status; + struct kstatfs stats; + __u32 invarsec; +}; + +struct nfsd3_fsinfores { + __be32 status; + __u32 f_rtmax; + __u32 f_rtpref; + __u32 f_rtmult; + __u32 f_wtmax; + __u32 f_wtpref; + __u32 f_wtmult; + __u32 f_dtpref; + __u64 f_maxfilesize; + __u32 f_properties; +}; + +struct nfsd3_pathconfres { + __be32 status; + __u32 p_link_max; + __u32 p_name_max; + __u32 p_no_trunc; + __u32 p_chown_restricted; + __u32 p_case_insensitive; + __u32 p_case_preserving; +}; + +struct nfsd3_commitres { + __be32 status; + struct svc_fh fh; +}; + +struct nfsd3_getaclres { + __be32 status; + struct svc_fh fh; + int mask; + struct posix_acl *acl_access; + struct posix_acl *acl_default; +}; + +/* dummy type for release */ +struct nfsd3_fhandle_pair { + __u32 dummy; + struct svc_fh fh1; + struct svc_fh fh2; +}; + +/* + * Storage requirements for XDR arguments and results. + */ +union nfsd3_xdrstore { + struct nfsd3_sattrargs sattrargs; + struct nfsd3_diropargs diropargs; + struct nfsd3_readargs readargs; + struct nfsd3_writeargs writeargs; + struct nfsd3_createargs createargs; + struct nfsd3_renameargs renameargs; + struct nfsd3_linkargs linkargs; + struct nfsd3_symlinkargs symlinkargs; + struct nfsd3_readdirargs readdirargs; + struct nfsd3_diropres diropres; + struct nfsd3_accessres accessres; + struct nfsd3_readlinkres readlinkres; + struct nfsd3_readres readres; + struct nfsd3_writeres writeres; + struct nfsd3_renameres renameres; + struct nfsd3_linkres linkres; + struct nfsd3_readdirres readdirres; + struct nfsd3_fsstatres fsstatres; + struct nfsd3_fsinfores fsinfores; + struct nfsd3_pathconfres pathconfres; + struct nfsd3_commitres commitres; + struct nfsd3_getaclres getaclres; +}; + +#define NFS3_SVC_XDRSIZE sizeof(union nfsd3_xdrstore) + +int nfs3svc_decode_fhandle(struct svc_rqst *, __be32 *, struct nfsd_fhandle *); +int nfs3svc_decode_sattrargs(struct svc_rqst *, __be32 *, + struct nfsd3_sattrargs *); +int nfs3svc_decode_diropargs(struct svc_rqst *, __be32 *, + struct nfsd3_diropargs *); +int nfs3svc_decode_accessargs(struct svc_rqst *, __be32 *, + struct nfsd3_accessargs *); +int nfs3svc_decode_readargs(struct svc_rqst *, __be32 *, + struct nfsd3_readargs *); +int nfs3svc_decode_writeargs(struct svc_rqst *, __be32 *, + struct nfsd3_writeargs *); +int nfs3svc_decode_createargs(struct svc_rqst *, __be32 *, + struct nfsd3_createargs *); +int nfs3svc_decode_mkdirargs(struct svc_rqst *, __be32 *, + struct nfsd3_createargs *); +int nfs3svc_decode_mknodargs(struct svc_rqst *, __be32 *, + struct nfsd3_mknodargs *); +int nfs3svc_decode_renameargs(struct svc_rqst *, __be32 *, + struct nfsd3_renameargs *); +int nfs3svc_decode_readlinkargs(struct svc_rqst *, __be32 *, + struct nfsd3_readlinkargs *); +int nfs3svc_decode_linkargs(struct svc_rqst *, __be32 *, + struct nfsd3_linkargs *); +int nfs3svc_decode_symlinkargs(struct svc_rqst *, __be32 *, + struct nfsd3_symlinkargs *); +int nfs3svc_decode_readdirargs(struct svc_rqst *, __be32 *, + struct nfsd3_readdirargs *); +int nfs3svc_decode_readdirplusargs(struct svc_rqst *, __be32 *, + struct nfsd3_readdirargs *); +int nfs3svc_decode_commitargs(struct svc_rqst *, __be32 *, + struct nfsd3_commitargs *); +int nfs3svc_encode_voidres(struct svc_rqst *, __be32 *, void *); +int nfs3svc_encode_attrstat(struct svc_rqst *, __be32 *, + struct nfsd3_attrstat *); +int nfs3svc_encode_wccstat(struct svc_rqst *, __be32 *, + struct nfsd3_attrstat *); +int nfs3svc_encode_diropres(struct svc_rqst *, __be32 *, + struct nfsd3_diropres *); +int 
nfs3svc_encode_accessres(struct svc_rqst *, __be32 *, + struct nfsd3_accessres *); +int nfs3svc_encode_readlinkres(struct svc_rqst *, __be32 *, + struct nfsd3_readlinkres *); +int nfs3svc_encode_readres(struct svc_rqst *, __be32 *, struct nfsd3_readres *); +int nfs3svc_encode_writeres(struct svc_rqst *, __be32 *, struct nfsd3_writeres *); +int nfs3svc_encode_createres(struct svc_rqst *, __be32 *, + struct nfsd3_diropres *); +int nfs3svc_encode_renameres(struct svc_rqst *, __be32 *, + struct nfsd3_renameres *); +int nfs3svc_encode_linkres(struct svc_rqst *, __be32 *, + struct nfsd3_linkres *); +int nfs3svc_encode_readdirres(struct svc_rqst *, __be32 *, + struct nfsd3_readdirres *); +int nfs3svc_encode_fsstatres(struct svc_rqst *, __be32 *, + struct nfsd3_fsstatres *); +int nfs3svc_encode_fsinfores(struct svc_rqst *, __be32 *, + struct nfsd3_fsinfores *); +int nfs3svc_encode_pathconfres(struct svc_rqst *, __be32 *, + struct nfsd3_pathconfres *); +int nfs3svc_encode_commitres(struct svc_rqst *, __be32 *, + struct nfsd3_commitres *); + +int nfs3svc_release_fhandle(struct svc_rqst *, __be32 *, + struct nfsd3_attrstat *); +int nfs3svc_release_fhandle2(struct svc_rqst *, __be32 *, + struct nfsd3_fhandle_pair *); +int nfs3svc_encode_entry(void *, const char *name, + int namlen, loff_t offset, u64 ino, + unsigned int); +int nfs3svc_encode_entry_plus(void *, const char *name, + int namlen, loff_t offset, u64 ino, + unsigned int); +/* Helper functions for NFSv3 ACL code */ +__be32 *nfs3svc_encode_post_op_attr(struct svc_rqst *rqstp, __be32 *p, + struct svc_fh *fhp); +__be32 *nfs3svc_decode_fh(__be32 *p, struct svc_fh *fhp); + + +#endif /* _LINUX_NFSD_XDR3_H */ diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h new file mode 100644 index 000000000000..83202a1cf07b --- /dev/null +++ b/fs/nfsd/xdr4.h @@ -0,0 +1,564 @@ +/* + * include/linux/nfsd/xdr4.h + * + * Server-side types for NFSv4. + * + * Copyright (c) 2002 The Regents of the University of Michigan. + * All rights reserved. + * + * Kendrick Smith + * Andy Adamson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#ifndef _LINUX_NFSD_XDR4_H +#define _LINUX_NFSD_XDR4_H + +#include "state.h" +#include "nfsd.h" + +#define NFSD4_MAX_TAGLEN 128 +#define XDR_LEN(n) (((n) + 3) & ~3) + +struct nfsd4_compound_state { + struct svc_fh current_fh; + struct svc_fh save_fh; + struct nfs4_stateowner *replay_owner; + /* For sessions DRC */ + struct nfsd4_session *session; + struct nfsd4_slot *slot; + __be32 *datap; + size_t iovlen; + u32 minorversion; + u32 status; +}; + +static inline bool nfsd4_has_session(struct nfsd4_compound_state *cs) +{ + return cs->slot != NULL; +} + +struct nfsd4_change_info { + u32 atomic; + bool change_supported; + u32 before_ctime_sec; + u32 before_ctime_nsec; + u64 before_change; + u32 after_ctime_sec; + u32 after_ctime_nsec; + u64 after_change; +}; + +struct nfsd4_access { + u32 ac_req_access; /* request */ + u32 ac_supported; /* response */ + u32 ac_resp_access; /* response */ +}; + +struct nfsd4_close { + u32 cl_seqid; /* request */ + stateid_t cl_stateid; /* request+response */ + struct nfs4_stateowner * cl_stateowner; /* response */ +}; + +struct nfsd4_commit { + u64 co_offset; /* request */ + u32 co_count; /* request */ + nfs4_verifier co_verf; /* response */ +}; + +struct nfsd4_create { + u32 cr_namelen; /* request */ + char * cr_name; /* request */ + u32 cr_type; /* request */ + union { /* request */ + struct { + u32 namelen; + char *name; + } link; /* NF4LNK */ + struct { + u32 specdata1; + u32 specdata2; + } dev; /* NF4BLK, NF4CHR */ + } u; + u32 cr_bmval[3]; /* request */ + struct iattr cr_iattr; /* request */ + struct nfsd4_change_info cr_cinfo; /* response */ + struct nfs4_acl *cr_acl; +}; +#define cr_linklen u.link.namelen +#define cr_linkname u.link.name +#define cr_specdata1 u.dev.specdata1 +#define cr_specdata2 u.dev.specdata2 + +struct nfsd4_delegreturn { + stateid_t dr_stateid; +}; + +struct nfsd4_getattr { + u32 ga_bmval[3]; /* request */ + struct svc_fh *ga_fhp; /* response */ +}; + +struct nfsd4_link { + u32 li_namelen; /* request */ + char * li_name; /* request */ + struct nfsd4_change_info li_cinfo; /* response */ +}; + +struct nfsd4_lock_denied { + clientid_t ld_clientid; + struct nfs4_stateowner *ld_sop; + u64 ld_start; + u64 ld_length; + u32 ld_type; +}; + +struct nfsd4_lock { + /* request */ + u32 lk_type; + u32 lk_reclaim; /* boolean */ + u64 lk_offset; + u64 lk_length; + u32 lk_is_new; + union { + struct { + u32 open_seqid; + stateid_t open_stateid; + u32 lock_seqid; + clientid_t clientid; + struct xdr_netobj owner; + } new; + struct { + stateid_t lock_stateid; + u32 lock_seqid; + } old; + } v; + + /* response */ + union { + struct { + stateid_t stateid; + } ok; + struct nfsd4_lock_denied denied; + } u; + /* The lk_replay_owner is the open owner in the open_to_lock_owner + * case and the lock owner otherwise: */ + struct nfs4_stateowner *lk_replay_owner; +}; +#define lk_new_open_seqid v.new.open_seqid +#define lk_new_open_stateid v.new.open_stateid +#define lk_new_lock_seqid v.new.lock_seqid +#define lk_new_clientid v.new.clientid +#define lk_new_owner v.new.owner +#define lk_old_lock_stateid v.old.lock_stateid +#define lk_old_lock_seqid v.old.lock_seqid + +#define lk_rflags u.ok.rflags +#define lk_resp_stateid u.ok.stateid +#define lk_denied u.denied + + +struct nfsd4_lockt { + u32 lt_type; + clientid_t lt_clientid; + struct xdr_netobj lt_owner; + u64 lt_offset; + u64 lt_length; + struct nfs4_stateowner * lt_stateowner; + struct nfsd4_lock_denied lt_denied; +}; + + +struct nfsd4_locku { + u32 lu_type; + u32 lu_seqid; + stateid_t lu_stateid; + 
u64 lu_offset; + u64 lu_length; + struct nfs4_stateowner *lu_stateowner; +}; + + +struct nfsd4_lookup { + u32 lo_len; /* request */ + char * lo_name; /* request */ +}; + +struct nfsd4_putfh { + u32 pf_fhlen; /* request */ + char *pf_fhval; /* request */ +}; + +struct nfsd4_open { + u32 op_claim_type; /* request */ + struct xdr_netobj op_fname; /* request - everything but CLAIM_PREV */ + u32 op_delegate_type; /* request - CLAIM_PREV only */ + stateid_t op_delegate_stateid; /* request - response */ + u32 op_create; /* request */ + u32 op_createmode; /* request */ + u32 op_bmval[3]; /* request */ + struct iattr iattr; /* UNCHECKED4, GUARDED4, EXCLUSIVE4_1 */ + nfs4_verifier verf; /* EXCLUSIVE4 */ + clientid_t op_clientid; /* request */ + struct xdr_netobj op_owner; /* request */ + u32 op_seqid; /* request */ + u32 op_share_access; /* request */ + u32 op_share_deny; /* request */ + stateid_t op_stateid; /* response */ + u32 op_recall; /* recall */ + struct nfsd4_change_info op_cinfo; /* response */ + u32 op_rflags; /* response */ + int op_truncate; /* used during processing */ + struct nfs4_stateowner *op_stateowner; /* used during processing */ + struct nfs4_acl *op_acl; +}; +#define op_iattr iattr +#define op_verf verf + +struct nfsd4_open_confirm { + stateid_t oc_req_stateid /* request */; + u32 oc_seqid /* request */; + stateid_t oc_resp_stateid /* response */; + struct nfs4_stateowner * oc_stateowner; /* response */ +}; + +struct nfsd4_open_downgrade { + stateid_t od_stateid; + u32 od_seqid; + u32 od_share_access; + u32 od_share_deny; + struct nfs4_stateowner *od_stateowner; +}; + + +struct nfsd4_read { + stateid_t rd_stateid; /* request */ + u64 rd_offset; /* request */ + u32 rd_length; /* request */ + int rd_vlen; + struct file *rd_filp; + + struct svc_rqst *rd_rqstp; /* response */ + struct svc_fh * rd_fhp; /* response */ +}; + +struct nfsd4_readdir { + u64 rd_cookie; /* request */ + nfs4_verifier rd_verf; /* request */ + u32 rd_dircount; /* request */ + u32 rd_maxcount; /* request */ + u32 rd_bmval[3]; /* request */ + struct svc_rqst *rd_rqstp; /* response */ + struct svc_fh * rd_fhp; /* response */ + + struct readdir_cd common; + __be32 * buffer; + int buflen; + __be32 * offset; +}; + +struct nfsd4_release_lockowner { + clientid_t rl_clientid; + struct xdr_netobj rl_owner; +}; +struct nfsd4_readlink { + struct svc_rqst *rl_rqstp; /* request */ + struct svc_fh * rl_fhp; /* request */ +}; + +struct nfsd4_remove { + u32 rm_namelen; /* request */ + char * rm_name; /* request */ + struct nfsd4_change_info rm_cinfo; /* response */ +}; + +struct nfsd4_rename { + u32 rn_snamelen; /* request */ + char * rn_sname; /* request */ + u32 rn_tnamelen; /* request */ + char * rn_tname; /* request */ + struct nfsd4_change_info rn_sinfo; /* response */ + struct nfsd4_change_info rn_tinfo; /* response */ +}; + +struct nfsd4_secinfo { + u32 si_namelen; /* request */ + char *si_name; /* request */ + struct svc_export *si_exp; /* response */ +}; + +struct nfsd4_setattr { + stateid_t sa_stateid; /* request */ + u32 sa_bmval[3]; /* request */ + struct iattr sa_iattr; /* request */ + struct nfs4_acl *sa_acl; +}; + +struct nfsd4_setclientid { + nfs4_verifier se_verf; /* request */ + u32 se_namelen; /* request */ + char * se_name; /* request */ + u32 se_callback_prog; /* request */ + u32 se_callback_netid_len; /* request */ + char * se_callback_netid_val; /* request */ + u32 se_callback_addr_len; /* request */ + char * se_callback_addr_val; /* request */ + u32 se_callback_ident; /* request */ + clientid_t 
se_clientid; /* response */ + nfs4_verifier se_confirm; /* response */ +}; + +struct nfsd4_setclientid_confirm { + clientid_t sc_clientid; + nfs4_verifier sc_confirm; +}; + +/* also used for NVERIFY */ +struct nfsd4_verify { + u32 ve_bmval[3]; /* request */ + u32 ve_attrlen; /* request */ + char * ve_attrval; /* request */ +}; + +struct nfsd4_write { + stateid_t wr_stateid; /* request */ + u64 wr_offset; /* request */ + u32 wr_stable_how; /* request */ + u32 wr_buflen; /* request */ + int wr_vlen; + + u32 wr_bytes_written; /* response */ + u32 wr_how_written; /* response */ + nfs4_verifier wr_verifier; /* response */ +}; + +struct nfsd4_exchange_id { + nfs4_verifier verifier; + struct xdr_netobj clname; + u32 flags; + clientid_t clientid; + u32 seqid; + int spa_how; +}; + +struct nfsd4_sequence { + struct nfs4_sessionid sessionid; /* request/response */ + u32 seqid; /* request/response */ + u32 slotid; /* request/response */ + u32 maxslots; /* request/response */ + u32 cachethis; /* request */ +#if 0 + u32 target_maxslots; /* response */ + u32 status_flags; /* response */ +#endif /* not yet */ +}; + +struct nfsd4_destroy_session { + struct nfs4_sessionid sessionid; +}; + +struct nfsd4_op { + int opnum; + __be32 status; + union { + struct nfsd4_access access; + struct nfsd4_close close; + struct nfsd4_commit commit; + struct nfsd4_create create; + struct nfsd4_delegreturn delegreturn; + struct nfsd4_getattr getattr; + struct svc_fh * getfh; + struct nfsd4_link link; + struct nfsd4_lock lock; + struct nfsd4_lockt lockt; + struct nfsd4_locku locku; + struct nfsd4_lookup lookup; + struct nfsd4_verify nverify; + struct nfsd4_open open; + struct nfsd4_open_confirm open_confirm; + struct nfsd4_open_downgrade open_downgrade; + struct nfsd4_putfh putfh; + struct nfsd4_read read; + struct nfsd4_readdir readdir; + struct nfsd4_readlink readlink; + struct nfsd4_remove remove; + struct nfsd4_rename rename; + clientid_t renew; + struct nfsd4_secinfo secinfo; + struct nfsd4_setattr setattr; + struct nfsd4_setclientid setclientid; + struct nfsd4_setclientid_confirm setclientid_confirm; + struct nfsd4_verify verify; + struct nfsd4_write write; + struct nfsd4_release_lockowner release_lockowner; + + /* NFSv4.1 */ + struct nfsd4_exchange_id exchange_id; + struct nfsd4_create_session create_session; + struct nfsd4_destroy_session destroy_session; + struct nfsd4_sequence sequence; + } u; + struct nfs4_replay * replay; +}; + +struct nfsd4_compoundargs { + /* scratch variables for XDR decode */ + __be32 * p; + __be32 * end; + struct page ** pagelist; + int pagelen; + __be32 tmp[8]; + __be32 * tmpp; + struct tmpbuf { + struct tmpbuf *next; + void (*release)(const void *); + void *buf; + } *to_free; + + struct svc_rqst *rqstp; + + u32 taglen; + char * tag; + u32 minorversion; + u32 opcnt; + struct nfsd4_op *ops; + struct nfsd4_op iops[8]; +}; + +struct nfsd4_compoundres { + /* scratch variables for XDR encode */ + __be32 * p; + __be32 * end; + struct xdr_buf * xbuf; + struct svc_rqst * rqstp; + + u32 taglen; + char * tag; + u32 opcnt; + __be32 * tagp; /* tag, opcount encode location */ + struct nfsd4_compound_state cstate; +}; + +static inline bool nfsd4_is_solo_sequence(struct nfsd4_compoundres *resp) +{ + struct nfsd4_compoundargs *args = resp->rqstp->rq_argp; + return resp->opcnt == 1 && args->ops[0].opnum == OP_SEQUENCE; +} + +static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp) +{ + return !resp->cstate.slot->sl_cachethis || nfsd4_is_solo_sequence(resp); +} + +#define NFS4_SVC_XDRSIZE 
sizeof(struct nfsd4_compoundargs) + +static inline void +set_change_info(struct nfsd4_change_info *cinfo, struct svc_fh *fhp) +{ + BUG_ON(!fhp->fh_pre_saved || !fhp->fh_post_saved); + cinfo->atomic = 1; + cinfo->change_supported = IS_I_VERSION(fhp->fh_dentry->d_inode); + if (cinfo->change_supported) { + cinfo->before_change = fhp->fh_pre_change; + cinfo->after_change = fhp->fh_post_change; + } else { + cinfo->before_ctime_sec = fhp->fh_pre_ctime.tv_sec; + cinfo->before_ctime_nsec = fhp->fh_pre_ctime.tv_nsec; + cinfo->after_ctime_sec = fhp->fh_post_attr.ctime.tv_sec; + cinfo->after_ctime_nsec = fhp->fh_post_attr.ctime.tv_nsec; + } +} + +int nfs4svc_encode_voidres(struct svc_rqst *, __be32 *, void *); +int nfs4svc_decode_compoundargs(struct svc_rqst *, __be32 *, + struct nfsd4_compoundargs *); +int nfs4svc_encode_compoundres(struct svc_rqst *, __be32 *, + struct nfsd4_compoundres *); +void nfsd4_encode_operation(struct nfsd4_compoundres *, struct nfsd4_op *); +void nfsd4_encode_replay(struct nfsd4_compoundres *resp, struct nfsd4_op *op); +__be32 nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp, + struct dentry *dentry, __be32 *buffer, int *countp, + u32 *bmval, struct svc_rqst *, int ignore_crossmnt); +extern __be32 nfsd4_setclientid(struct svc_rqst *rqstp, + struct nfsd4_compound_state *, + struct nfsd4_setclientid *setclid); +extern __be32 nfsd4_setclientid_confirm(struct svc_rqst *rqstp, + struct nfsd4_compound_state *, + struct nfsd4_setclientid_confirm *setclientid_confirm); +extern void nfsd4_store_cache_entry(struct nfsd4_compoundres *resp); +extern __be32 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp, + struct nfsd4_sequence *seq); +extern __be32 nfsd4_exchange_id(struct svc_rqst *rqstp, + struct nfsd4_compound_state *, +struct nfsd4_exchange_id *); + extern __be32 nfsd4_create_session(struct svc_rqst *, + struct nfsd4_compound_state *, + struct nfsd4_create_session *); +extern __be32 nfsd4_sequence(struct svc_rqst *, + struct nfsd4_compound_state *, + struct nfsd4_sequence *); +extern __be32 nfsd4_destroy_session(struct svc_rqst *, + struct nfsd4_compound_state *, + struct nfsd4_destroy_session *); +extern __be32 nfsd4_process_open1(struct nfsd4_compound_state *, + struct nfsd4_open *open); +extern __be32 nfsd4_process_open2(struct svc_rqst *rqstp, + struct svc_fh *current_fh, struct nfsd4_open *open); +extern __be32 nfsd4_open_confirm(struct svc_rqst *rqstp, + struct nfsd4_compound_state *, struct nfsd4_open_confirm *oc); +extern __be32 nfsd4_close(struct svc_rqst *rqstp, + struct nfsd4_compound_state *, + struct nfsd4_close *close); +extern __be32 nfsd4_open_downgrade(struct svc_rqst *rqstp, + struct nfsd4_compound_state *, + struct nfsd4_open_downgrade *od); +extern __be32 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *, + struct nfsd4_lock *lock); +extern __be32 nfsd4_lockt(struct svc_rqst *rqstp, + struct nfsd4_compound_state *, + struct nfsd4_lockt *lockt); +extern __be32 nfsd4_locku(struct svc_rqst *rqstp, + struct nfsd4_compound_state *, + struct nfsd4_locku *locku); +extern __be32 +nfsd4_release_lockowner(struct svc_rqst *rqstp, + struct nfsd4_compound_state *, + struct nfsd4_release_lockowner *rlockowner); +extern void nfsd4_release_compoundargs(struct nfsd4_compoundargs *); +extern __be32 nfsd4_delegreturn(struct svc_rqst *rqstp, + struct nfsd4_compound_state *, struct nfsd4_delegreturn *dr); +extern __be32 nfsd4_renew(struct svc_rqst *rqstp, + struct nfsd4_compound_state *, clientid_t *clid); +#endif + +/* + * Local variables: + * 
c-basic-offset: 8 + * End: + */ diff --git a/include/linux/nfsd/cache.h b/include/linux/nfsd/cache.h deleted file mode 100644 index a165425dea41..000000000000 --- a/include/linux/nfsd/cache.h +++ /dev/null @@ -1,85 +0,0 @@ -/* - * include/linux/nfsd/cache.h - * - * Request reply cache. This was heavily inspired by the - * implementation in 4.3BSD/4.4BSD. - * - * Copyright (C) 1995, 1996 Olaf Kirch - */ - -#ifndef NFSCACHE_H -#define NFSCACHE_H - -#include - -/* - * Representation of a reply cache entry. - */ -struct svc_cacherep { - struct hlist_node c_hash; - struct list_head c_lru; - - unsigned char c_state, /* unused, inprog, done */ - c_type, /* status, buffer */ - c_secure : 1; /* req came from port < 1024 */ - struct sockaddr_in c_addr; - __be32 c_xid; - u32 c_prot; - u32 c_proc; - u32 c_vers; - unsigned long c_timestamp; - union { - struct kvec u_vec; - __be32 u_status; - } c_u; -}; - -#define c_replvec c_u.u_vec -#define c_replstat c_u.u_status - -/* cache entry states */ -enum { - RC_UNUSED, - RC_INPROG, - RC_DONE -}; - -/* return values */ -enum { - RC_DROPIT, - RC_REPLY, - RC_DOIT, - RC_INTR -}; - -/* - * Cache types. - * We may want to add more types one day, e.g. for diropres and - * attrstat replies. Using cache entries with fixed length instead - * of buffer pointers may be more efficient. - */ -enum { - RC_NOCACHE, - RC_REPLSTAT, - RC_REPLBUFF, -}; - -/* - * If requests are retransmitted within this interval, they're dropped. - */ -#define RC_DELAY (HZ/5) - -int nfsd_reply_cache_init(void); -void nfsd_reply_cache_shutdown(void); -int nfsd_cache_lookup(struct svc_rqst *, int); -void nfsd_cache_update(struct svc_rqst *, int, __be32 *); - -#ifdef CONFIG_NFSD_V4 -void nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp); -#else /* CONFIG_NFSD_V4 */ -static inline void nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp) -{ -} -#endif /* CONFIG_NFSD_V4 */ - -#endif /* NFSCACHE_H */ diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h deleted file mode 100644 index 74f67c2aca34..000000000000 --- a/include/linux/nfsd/nfsd.h +++ /dev/null @@ -1,335 +0,0 @@ -/* - * linux/include/linux/nfsd/nfsd.h - * - * Hodge-podge collection of knfsd-related stuff. - * I will sort this out later. - * - * Copyright (C) 1995-1997 Olaf Kirch - */ - -#ifndef LINUX_NFSD_NFSD_H -#define LINUX_NFSD_NFSD_H - -#include -#include - -#include -#include -#include -/* - * nfsd version - */ -#define NFSD_SUPPORTED_MINOR_VERSION 1 - -struct readdir_cd { - __be32 err; /* 0, nfserr, or nfserr_eof */ -}; - - -extern struct svc_program nfsd_program; -extern struct svc_version nfsd_version2, nfsd_version3, - nfsd_version4; -extern u32 nfsd_supported_minorversion; -extern struct mutex nfsd_mutex; -extern struct svc_serv *nfsd_serv; -extern spinlock_t nfsd_drc_lock; -extern unsigned int nfsd_drc_max_mem; -extern unsigned int nfsd_drc_mem_used; - -extern const struct seq_operations nfs_exports_op; - -/* - * Function prototypes. 
- */ -int nfsd_svc(unsigned short port, int nrservs); -int nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp); - -int nfsd_nrthreads(void); -int nfsd_nrpools(void); -int nfsd_get_nrthreads(int n, int *); -int nfsd_set_nrthreads(int n, int *); - -#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) -#ifdef CONFIG_NFSD_V2_ACL -extern struct svc_version nfsd_acl_version2; -#else -#define nfsd_acl_version2 NULL -#endif -#ifdef CONFIG_NFSD_V3_ACL -extern struct svc_version nfsd_acl_version3; -#else -#define nfsd_acl_version3 NULL -#endif -#endif - -enum vers_op {NFSD_SET, NFSD_CLEAR, NFSD_TEST, NFSD_AVAIL }; -int nfsd_vers(int vers, enum vers_op change); -int nfsd_minorversion(u32 minorversion, enum vers_op change); -void nfsd_reset_versions(void); -int nfsd_create_serv(void); - -extern int nfsd_max_blksize; - -/* - * NFSv4 State - */ -#ifdef CONFIG_NFSD_V4 -extern unsigned int max_delegations; -int nfs4_state_init(void); -void nfsd4_free_slabs(void); -int nfs4_state_start(void); -void nfs4_state_shutdown(void); -time_t nfs4_lease_time(void); -void nfs4_reset_lease(time_t leasetime); -int nfs4_reset_recoverydir(char *recdir); -#else -static inline int nfs4_state_init(void) { return 0; } -static inline void nfsd4_free_slabs(void) { } -static inline int nfs4_state_start(void) { return 0; } -static inline void nfs4_state_shutdown(void) { } -static inline time_t nfs4_lease_time(void) { return 0; } -static inline void nfs4_reset_lease(time_t leasetime) { } -static inline int nfs4_reset_recoverydir(char *recdir) { return 0; } -#endif - -/* - * lockd binding - */ -void nfsd_lockd_init(void); -void nfsd_lockd_shutdown(void); - - -/* - * These macros provide pre-xdr'ed values for faster operation. - */ -#define nfs_ok cpu_to_be32(NFS_OK) -#define nfserr_perm cpu_to_be32(NFSERR_PERM) -#define nfserr_noent cpu_to_be32(NFSERR_NOENT) -#define nfserr_io cpu_to_be32(NFSERR_IO) -#define nfserr_nxio cpu_to_be32(NFSERR_NXIO) -#define nfserr_eagain cpu_to_be32(NFSERR_EAGAIN) -#define nfserr_acces cpu_to_be32(NFSERR_ACCES) -#define nfserr_exist cpu_to_be32(NFSERR_EXIST) -#define nfserr_xdev cpu_to_be32(NFSERR_XDEV) -#define nfserr_nodev cpu_to_be32(NFSERR_NODEV) -#define nfserr_notdir cpu_to_be32(NFSERR_NOTDIR) -#define nfserr_isdir cpu_to_be32(NFSERR_ISDIR) -#define nfserr_inval cpu_to_be32(NFSERR_INVAL) -#define nfserr_fbig cpu_to_be32(NFSERR_FBIG) -#define nfserr_nospc cpu_to_be32(NFSERR_NOSPC) -#define nfserr_rofs cpu_to_be32(NFSERR_ROFS) -#define nfserr_mlink cpu_to_be32(NFSERR_MLINK) -#define nfserr_opnotsupp cpu_to_be32(NFSERR_OPNOTSUPP) -#define nfserr_nametoolong cpu_to_be32(NFSERR_NAMETOOLONG) -#define nfserr_notempty cpu_to_be32(NFSERR_NOTEMPTY) -#define nfserr_dquot cpu_to_be32(NFSERR_DQUOT) -#define nfserr_stale cpu_to_be32(NFSERR_STALE) -#define nfserr_remote cpu_to_be32(NFSERR_REMOTE) -#define nfserr_wflush cpu_to_be32(NFSERR_WFLUSH) -#define nfserr_badhandle cpu_to_be32(NFSERR_BADHANDLE) -#define nfserr_notsync cpu_to_be32(NFSERR_NOT_SYNC) -#define nfserr_badcookie cpu_to_be32(NFSERR_BAD_COOKIE) -#define nfserr_notsupp cpu_to_be32(NFSERR_NOTSUPP) -#define nfserr_toosmall cpu_to_be32(NFSERR_TOOSMALL) -#define nfserr_serverfault cpu_to_be32(NFSERR_SERVERFAULT) -#define nfserr_badtype cpu_to_be32(NFSERR_BADTYPE) -#define nfserr_jukebox cpu_to_be32(NFSERR_JUKEBOX) -#define nfserr_denied cpu_to_be32(NFSERR_DENIED) -#define nfserr_deadlock cpu_to_be32(NFSERR_DEADLOCK) -#define nfserr_expired cpu_to_be32(NFSERR_EXPIRED) -#define nfserr_bad_cookie cpu_to_be32(NFSERR_BAD_COOKIE) -#define 
nfserr_same cpu_to_be32(NFSERR_SAME) -#define nfserr_clid_inuse cpu_to_be32(NFSERR_CLID_INUSE) -#define nfserr_stale_clientid cpu_to_be32(NFSERR_STALE_CLIENTID) -#define nfserr_resource cpu_to_be32(NFSERR_RESOURCE) -#define nfserr_moved cpu_to_be32(NFSERR_MOVED) -#define nfserr_nofilehandle cpu_to_be32(NFSERR_NOFILEHANDLE) -#define nfserr_minor_vers_mismatch cpu_to_be32(NFSERR_MINOR_VERS_MISMATCH) -#define nfserr_share_denied cpu_to_be32(NFSERR_SHARE_DENIED) -#define nfserr_stale_stateid cpu_to_be32(NFSERR_STALE_STATEID) -#define nfserr_old_stateid cpu_to_be32(NFSERR_OLD_STATEID) -#define nfserr_bad_stateid cpu_to_be32(NFSERR_BAD_STATEID) -#define nfserr_bad_seqid cpu_to_be32(NFSERR_BAD_SEQID) -#define nfserr_symlink cpu_to_be32(NFSERR_SYMLINK) -#define nfserr_not_same cpu_to_be32(NFSERR_NOT_SAME) -#define nfserr_restorefh cpu_to_be32(NFSERR_RESTOREFH) -#define nfserr_attrnotsupp cpu_to_be32(NFSERR_ATTRNOTSUPP) -#define nfserr_bad_xdr cpu_to_be32(NFSERR_BAD_XDR) -#define nfserr_openmode cpu_to_be32(NFSERR_OPENMODE) -#define nfserr_locks_held cpu_to_be32(NFSERR_LOCKS_HELD) -#define nfserr_op_illegal cpu_to_be32(NFSERR_OP_ILLEGAL) -#define nfserr_grace cpu_to_be32(NFSERR_GRACE) -#define nfserr_no_grace cpu_to_be32(NFSERR_NO_GRACE) -#define nfserr_reclaim_bad cpu_to_be32(NFSERR_RECLAIM_BAD) -#define nfserr_badname cpu_to_be32(NFSERR_BADNAME) -#define nfserr_cb_path_down cpu_to_be32(NFSERR_CB_PATH_DOWN) -#define nfserr_locked cpu_to_be32(NFSERR_LOCKED) -#define nfserr_wrongsec cpu_to_be32(NFSERR_WRONGSEC) -#define nfserr_badiomode cpu_to_be32(NFS4ERR_BADIOMODE) -#define nfserr_badlayout cpu_to_be32(NFS4ERR_BADLAYOUT) -#define nfserr_bad_session_digest cpu_to_be32(NFS4ERR_BAD_SESSION_DIGEST) -#define nfserr_badsession cpu_to_be32(NFS4ERR_BADSESSION) -#define nfserr_badslot cpu_to_be32(NFS4ERR_BADSLOT) -#define nfserr_complete_already cpu_to_be32(NFS4ERR_COMPLETE_ALREADY) -#define nfserr_conn_not_bound_to_session cpu_to_be32(NFS4ERR_CONN_NOT_BOUND_TO_SESSION) -#define nfserr_deleg_already_wanted cpu_to_be32(NFS4ERR_DELEG_ALREADY_WANTED) -#define nfserr_back_chan_busy cpu_to_be32(NFS4ERR_BACK_CHAN_BUSY) -#define nfserr_layouttrylater cpu_to_be32(NFS4ERR_LAYOUTTRYLATER) -#define nfserr_layoutunavailable cpu_to_be32(NFS4ERR_LAYOUTUNAVAILABLE) -#define nfserr_nomatching_layout cpu_to_be32(NFS4ERR_NOMATCHING_LAYOUT) -#define nfserr_recallconflict cpu_to_be32(NFS4ERR_RECALLCONFLICT) -#define nfserr_unknown_layouttype cpu_to_be32(NFS4ERR_UNKNOWN_LAYOUTTYPE) -#define nfserr_seq_misordered cpu_to_be32(NFS4ERR_SEQ_MISORDERED) -#define nfserr_sequence_pos cpu_to_be32(NFS4ERR_SEQUENCE_POS) -#define nfserr_req_too_big cpu_to_be32(NFS4ERR_REQ_TOO_BIG) -#define nfserr_rep_too_big cpu_to_be32(NFS4ERR_REP_TOO_BIG) -#define nfserr_rep_too_big_to_cache cpu_to_be32(NFS4ERR_REP_TOO_BIG_TO_CACHE) -#define nfserr_retry_uncached_rep cpu_to_be32(NFS4ERR_RETRY_UNCACHED_REP) -#define nfserr_unsafe_compound cpu_to_be32(NFS4ERR_UNSAFE_COMPOUND) -#define nfserr_too_many_ops cpu_to_be32(NFS4ERR_TOO_MANY_OPS) -#define nfserr_op_not_in_session cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION) -#define nfserr_hash_alg_unsupp cpu_to_be32(NFS4ERR_HASH_ALG_UNSUPP) -#define nfserr_clientid_busy cpu_to_be32(NFS4ERR_CLIENTID_BUSY) -#define nfserr_pnfs_io_hole cpu_to_be32(NFS4ERR_PNFS_IO_HOLE) -#define nfserr_seq_false_retry cpu_to_be32(NFS4ERR_SEQ_FALSE_RETRY) -#define nfserr_bad_high_slot cpu_to_be32(NFS4ERR_BAD_HIGH_SLOT) -#define nfserr_deadsession cpu_to_be32(NFS4ERR_DEADSESSION) -#define nfserr_encr_alg_unsupp 
cpu_to_be32(NFS4ERR_ENCR_ALG_UNSUPP) -#define nfserr_pnfs_no_layout cpu_to_be32(NFS4ERR_PNFS_NO_LAYOUT) -#define nfserr_not_only_op cpu_to_be32(NFS4ERR_NOT_ONLY_OP) -#define nfserr_wrong_cred cpu_to_be32(NFS4ERR_WRONG_CRED) -#define nfserr_wrong_type cpu_to_be32(NFS4ERR_WRONG_TYPE) -#define nfserr_dirdeleg_unavail cpu_to_be32(NFS4ERR_DIRDELEG_UNAVAIL) -#define nfserr_reject_deleg cpu_to_be32(NFS4ERR_REJECT_DELEG) -#define nfserr_returnconflict cpu_to_be32(NFS4ERR_RETURNCONFLICT) -#define nfserr_deleg_revoked cpu_to_be32(NFS4ERR_DELEG_REVOKED) - -/* error codes for internal use */ -/* if a request fails due to kmalloc failure, it gets dropped. - * Client should resend eventually - */ -#define nfserr_dropit cpu_to_be32(30000) -/* end-of-file indicator in readdir */ -#define nfserr_eof cpu_to_be32(30001) -/* replay detected */ -#define nfserr_replay_me cpu_to_be32(11001) -/* nfs41 replay detected */ -#define nfserr_replay_cache cpu_to_be32(11002) - -/* Check for dir entries '.' and '..' */ -#define isdotent(n, l) (l < 3 && n[0] == '.' && (l == 1 || n[1] == '.')) - -/* - * Time of server startup - */ -extern struct timeval nfssvc_boot; - -#ifdef CONFIG_NFSD_V4 - -/* before processing a COMPOUND operation, we have to check that there - * is enough space in the buffer for XDR encode to succeed. otherwise, - * we might process an operation with side effects, and be unable to - * tell the client that the operation succeeded. - * - * COMPOUND_SLACK_SPACE - this is the minimum bytes of buffer space - * needed to encode an "ordinary" _successful_ operation. (GETATTR, - * READ, READDIR, and READLINK have their own buffer checks.) if we - * fall below this level, we fail the next operation with NFS4ERR_RESOURCE. - * - * COMPOUND_ERR_SLACK_SPACE - this is the minimum bytes of buffer space - * needed to encode an operation which has failed with NFS4ERR_RESOURCE. - * care is taken to ensure that we never fall below this level for any - * reason. 
- */ -#define COMPOUND_SLACK_SPACE 140 /* OP_GETFH */ -#define COMPOUND_ERR_SLACK_SPACE 12 /* OP_SETATTR */ - -#define NFSD_LEASE_TIME (nfs4_lease_time()) -#define NFSD_LAUNDROMAT_MINTIMEOUT 10 /* seconds */ - -/* - * The following attributes are currently not supported by the NFSv4 server: - * ARCHIVE (deprecated anyway) - * HIDDEN (unlikely to be supported any time soon) - * MIMETYPE (unlikely to be supported any time soon) - * QUOTA_* (will be supported in a forthcoming patch) - * SYSTEM (unlikely to be supported any time soon) - * TIME_BACKUP (unlikely to be supported any time soon) - * TIME_CREATE (unlikely to be supported any time soon) - */ -#define NFSD4_SUPPORTED_ATTRS_WORD0 \ -(FATTR4_WORD0_SUPPORTED_ATTRS | FATTR4_WORD0_TYPE | FATTR4_WORD0_FH_EXPIRE_TYPE \ - | FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE | FATTR4_WORD0_LINK_SUPPORT \ - | FATTR4_WORD0_SYMLINK_SUPPORT | FATTR4_WORD0_NAMED_ATTR | FATTR4_WORD0_FSID \ - | FATTR4_WORD0_UNIQUE_HANDLES | FATTR4_WORD0_LEASE_TIME | FATTR4_WORD0_RDATTR_ERROR \ - | FATTR4_WORD0_ACLSUPPORT | FATTR4_WORD0_CANSETTIME | FATTR4_WORD0_CASE_INSENSITIVE \ - | FATTR4_WORD0_CASE_PRESERVING | FATTR4_WORD0_CHOWN_RESTRICTED \ - | FATTR4_WORD0_FILEHANDLE | FATTR4_WORD0_FILEID | FATTR4_WORD0_FILES_AVAIL \ - | FATTR4_WORD0_FILES_FREE | FATTR4_WORD0_FILES_TOTAL | FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_HOMOGENEOUS \ - | FATTR4_WORD0_MAXFILESIZE | FATTR4_WORD0_MAXLINK | FATTR4_WORD0_MAXNAME \ - | FATTR4_WORD0_MAXREAD | FATTR4_WORD0_MAXWRITE | FATTR4_WORD0_ACL) - -#define NFSD4_SUPPORTED_ATTRS_WORD1 \ -(FATTR4_WORD1_MODE | FATTR4_WORD1_NO_TRUNC | FATTR4_WORD1_NUMLINKS \ - | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP | FATTR4_WORD1_RAWDEV \ - | FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE | FATTR4_WORD1_SPACE_TOTAL \ - | FATTR4_WORD1_SPACE_USED | FATTR4_WORD1_TIME_ACCESS | FATTR4_WORD1_TIME_ACCESS_SET \ - | FATTR4_WORD1_TIME_DELTA | FATTR4_WORD1_TIME_METADATA \ - | FATTR4_WORD1_TIME_MODIFY | FATTR4_WORD1_TIME_MODIFY_SET | FATTR4_WORD1_MOUNTED_ON_FILEID) - -#define NFSD4_SUPPORTED_ATTRS_WORD2 0 - -#define NFSD4_1_SUPPORTED_ATTRS_WORD0 \ - NFSD4_SUPPORTED_ATTRS_WORD0 - -#define NFSD4_1_SUPPORTED_ATTRS_WORD1 \ - NFSD4_SUPPORTED_ATTRS_WORD1 - -#define NFSD4_1_SUPPORTED_ATTRS_WORD2 \ - (NFSD4_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SUPPATTR_EXCLCREAT) - -static inline u32 nfsd_suppattrs0(u32 minorversion) -{ - return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD0 - : NFSD4_SUPPORTED_ATTRS_WORD0; -} - -static inline u32 nfsd_suppattrs1(u32 minorversion) -{ - return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD1 - : NFSD4_SUPPORTED_ATTRS_WORD1; -} - -static inline u32 nfsd_suppattrs2(u32 minorversion) -{ - return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD2 - : NFSD4_SUPPORTED_ATTRS_WORD2; -} - -/* These will return ERR_INVAL if specified in GETATTR or READDIR. */ -#define NFSD_WRITEONLY_ATTRS_WORD1 \ -(FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET) - -/* These are the only attrs allowed in CREATE/OPEN/SETATTR. 
*/ -#define NFSD_WRITEABLE_ATTRS_WORD0 \ -(FATTR4_WORD0_SIZE | FATTR4_WORD0_ACL ) -#define NFSD_WRITEABLE_ATTRS_WORD1 \ -(FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP \ - | FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET) -#define NFSD_WRITEABLE_ATTRS_WORD2 0 - -#define NFSD_SUPPATTR_EXCLCREAT_WORD0 \ - NFSD_WRITEABLE_ATTRS_WORD0 -/* - * we currently store the exclusive create verifier in the v_{a,m}time - * attributes so the client can't set these at create time using EXCLUSIVE4_1 - */ -#define NFSD_SUPPATTR_EXCLCREAT_WORD1 \ - (NFSD_WRITEABLE_ATTRS_WORD1 & \ - ~(FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET)) -#define NFSD_SUPPATTR_EXCLCREAT_WORD2 \ - NFSD_WRITEABLE_ATTRS_WORD2 - -#endif /* CONFIG_NFSD_V4 */ - -#endif /* LINUX_NFSD_NFSD_H */ diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h deleted file mode 100644 index 2af75686e0d3..000000000000 --- a/include/linux/nfsd/state.h +++ /dev/null @@ -1,409 +0,0 @@ -/* - * linux/include/nfsd/state.h - * - * Copyright (c) 2001 The Regents of the University of Michigan. - * All rights reserved. - * - * Kendrick Smith - * Andy Adamson - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - */ - -#ifndef _NFSD4_STATE_H -#define _NFSD4_STATE_H - -#include - -typedef struct { - u32 cl_boot; - u32 cl_id; -} clientid_t; - -typedef struct { - u32 so_boot; - u32 so_stateownerid; - u32 so_fileid; -} stateid_opaque_t; - -typedef struct { - u32 si_generation; - stateid_opaque_t si_opaque; -} stateid_t; -#define si_boot si_opaque.so_boot -#define si_stateownerid si_opaque.so_stateownerid -#define si_fileid si_opaque.so_fileid - -#define STATEID_FMT "(%08x/%08x/%08x/%08x)" -#define STATEID_VAL(s) \ - (s)->si_boot, \ - (s)->si_stateownerid, \ - (s)->si_fileid, \ - (s)->si_generation - -struct nfsd4_cb_sequence { - /* args/res */ - u32 cbs_minorversion; - struct nfs4_client *cbs_clp; -}; - -struct nfs4_delegation { - struct list_head dl_perfile; - struct list_head dl_perclnt; - struct list_head dl_recall_lru; /* delegation recalled */ - atomic_t dl_count; /* ref count */ - struct nfs4_client *dl_client; - struct nfs4_file *dl_file; - struct file_lock *dl_flock; - struct file *dl_vfs_file; - u32 dl_type; - time_t dl_time; -/* For recall: */ - u32 dl_ident; - stateid_t dl_stateid; - struct knfsd_fh dl_fh; - int dl_retries; -}; - -/* client delegation callback info */ -struct nfs4_cb_conn { - /* SETCLIENTID info */ - struct sockaddr_storage cb_addr; - size_t cb_addrlen; - u32 cb_prog; - u32 cb_minorversion; - u32 cb_ident; /* minorversion 0 only */ - /* RPC client info */ - atomic_t cb_set; /* successful CB_NULL call */ - struct rpc_clnt * cb_client; -}; - -/* Maximum number of slots per session. 160 is useful for long haul TCP */ -#define NFSD_MAX_SLOTS_PER_SESSION 160 -/* Maximum number of operations per session compound */ -#define NFSD_MAX_OPS_PER_COMPOUND 16 -/* Maximum session per slot cache size */ -#define NFSD_SLOT_CACHE_SIZE 1024 -/* Maximum number of NFSD_SLOT_CACHE_SIZE slots per session */ -#define NFSD_CACHE_SIZE_SLOTS_PER_SESSION 32 -#define NFSD_MAX_MEM_PER_SESSION \ - (NFSD_CACHE_SIZE_SLOTS_PER_SESSION * NFSD_SLOT_CACHE_SIZE) - -struct nfsd4_slot { - bool sl_inuse; - bool sl_cachethis; - u16 sl_opcnt; - u32 sl_seqid; - __be32 sl_status; - u32 sl_datalen; - char sl_data[]; -}; - -struct nfsd4_channel_attrs { - u32 headerpadsz; - u32 maxreq_sz; - u32 maxresp_sz; - u32 maxresp_cached; - u32 maxops; - u32 maxreqs; - u32 nr_rdma_attrs; - u32 rdma_attrs; -}; - -struct nfsd4_create_session { - clientid_t clientid; - struct nfs4_sessionid sessionid; - u32 seqid; - u32 flags; - struct nfsd4_channel_attrs fore_channel; - struct nfsd4_channel_attrs back_channel; - u32 callback_prog; - u32 uid; - u32 gid; -}; - -/* The single slot clientid cache structure */ -struct nfsd4_clid_slot { - u32 sl_seqid; - __be32 sl_status; - struct nfsd4_create_session sl_cr_ses; -}; - -struct nfsd4_session { - struct kref se_ref; - struct list_head se_hash; /* hash by sessionid */ - struct list_head se_perclnt; - u32 se_flags; - struct nfs4_client *se_client; /* for expire_client */ - struct nfs4_sessionid se_sessionid; - struct nfsd4_channel_attrs se_fchannel; - struct nfsd4_channel_attrs se_bchannel; - struct nfsd4_slot *se_slots[]; /* forward channel slots */ -}; - -static inline void -nfsd4_put_session(struct nfsd4_session *ses) -{ - extern void free_session(struct kref *kref); - kref_put(&ses->se_ref, free_session); -} - -static inline void -nfsd4_get_session(struct nfsd4_session *ses) -{ - kref_get(&ses->se_ref); -} - -/* formatted contents of nfs4_sessionid */ -struct nfsd4_sessionid { - clientid_t clientid; - u32 sequence; - u32 reserved; -}; - -#define HEXDIR_LEN 33 /* hex version of 16 byte 
md5 of cl_name plus '\0' */ - -/* - * struct nfs4_client - one per client. Clientids live here. - * o Each nfs4_client is hashed by clientid. - * - * o Each nfs4_clients is also hashed by name - * (the opaque quantity initially sent by the client to identify itself). - * - * o cl_perclient list is used to ensure no dangling stateowner references - * when we expire the nfs4_client - */ -struct nfs4_client { - struct list_head cl_idhash; /* hash by cl_clientid.id */ - struct list_head cl_strhash; /* hash by cl_name */ - struct list_head cl_openowners; - struct list_head cl_delegations; - struct list_head cl_lru; /* tail queue */ - struct xdr_netobj cl_name; /* id generated by client */ - char cl_recdir[HEXDIR_LEN]; /* recovery dir */ - nfs4_verifier cl_verifier; /* generated by client */ - time_t cl_time; /* time of last lease renewal */ - struct sockaddr_storage cl_addr; /* client ipaddress */ - u32 cl_flavor; /* setclientid pseudoflavor */ - char *cl_principal; /* setclientid principal name */ - struct svc_cred cl_cred; /* setclientid principal */ - clientid_t cl_clientid; /* generated by server */ - nfs4_verifier cl_confirm; /* generated by server */ - struct nfs4_cb_conn cl_cb_conn; /* callback info */ - atomic_t cl_count; /* ref count */ - u32 cl_firststate; /* recovery dir creation */ - - /* for nfs41 */ - struct list_head cl_sessions; - struct nfsd4_clid_slot cl_cs_slot; /* create_session slot */ - u32 cl_exchange_flags; - struct nfs4_sessionid cl_sessionid; - - /* for nfs41 callbacks */ - /* We currently support a single back channel with a single slot */ - unsigned long cl_cb_slot_busy; - u32 cl_cb_seq_nr; - struct svc_xprt *cl_cb_xprt; /* 4.1 callback transport */ - struct rpc_wait_queue cl_cb_waitq; /* backchannel callers may */ - /* wait here for slots */ -}; - -/* struct nfs4_client_reset - * one per old client. Populates reset_str_hashtbl. Filled from conf_id_hashtbl - * upon lease reset, or from upcall to state_daemon (to read in state - * from non-volitile storage) upon reboot. - */ -struct nfs4_client_reclaim { - struct list_head cr_strhash; /* hash by cr_name */ - char cr_recdir[HEXDIR_LEN]; /* recover dir */ -}; - -static inline void -update_stateid(stateid_t *stateid) -{ - stateid->si_generation++; -} - -/* A reasonable value for REPLAY_ISIZE was estimated as follows: - * The OPEN response, typically the largest, requires - * 4(status) + 8(stateid) + 20(changeinfo) + 4(rflags) + 8(verifier) + - * 4(deleg. type) + 8(deleg. stateid) + 4(deleg. recall flag) + - * 20(deleg. space limit) + ~32(deleg. ace) = 112 bytes - */ - -#define NFSD4_REPLAY_ISIZE 112 - -/* - * Replay buffer, where the result of the last seqid-mutating operation - * is cached. - */ -struct nfs4_replay { - __be32 rp_status; - unsigned int rp_buflen; - char *rp_buf; - unsigned intrp_allocated; - struct knfsd_fh rp_openfh; - char rp_ibuf[NFSD4_REPLAY_ISIZE]; -}; - -/* -* nfs4_stateowner can either be an open_owner, or a lock_owner -* -* so_idhash: stateid_hashtbl[] for open owner, lockstateid_hashtbl[] -* for lock_owner -* so_strhash: ownerstr_hashtbl[] for open_owner, lock_ownerstr_hashtbl[] -* for lock_owner -* so_perclient: nfs4_client->cl_perclient entry - used when nfs4_client -* struct is reaped. -* so_perfilestate: heads the list of nfs4_stateid (either open or lock) -* and is used to ensure no dangling nfs4_stateid references when we -* release a stateowner. 
-* so_perlockowner: (open) nfs4_stateid->st_perlockowner entry - used when -* close is called to reap associated byte-range locks -* so_close_lru: (open) stateowner is placed on this list instead of being -* reaped (when so_perfilestate is empty) to hold the last close replay. -* reaped by laundramat thread after lease period. -*/ -struct nfs4_stateowner { - struct kref so_ref; - struct list_head so_idhash; /* hash by so_id */ - struct list_head so_strhash; /* hash by op_name */ - struct list_head so_perclient; - struct list_head so_stateids; - struct list_head so_perstateid; /* for lockowners only */ - struct list_head so_close_lru; /* tail queue */ - time_t so_time; /* time of placement on so_close_lru */ - int so_is_open_owner; /* 1=openowner,0=lockowner */ - u32 so_id; - struct nfs4_client * so_client; - /* after increment in ENCODE_SEQID_OP_TAIL, represents the next - * sequence id expected from the client: */ - u32 so_seqid; - struct xdr_netobj so_owner; /* open owner name */ - int so_confirmed; /* successful OPEN_CONFIRM? */ - struct nfs4_replay so_replay; -}; - -/* -* nfs4_file: a file opened by some number of (open) nfs4_stateowners. -* o fi_perfile list is used to search for conflicting -* share_acces, share_deny on the file. -*/ -struct nfs4_file { - atomic_t fi_ref; - struct list_head fi_hash; /* hash by "struct inode *" */ - struct list_head fi_stateids; - struct list_head fi_delegations; - struct inode *fi_inode; - u32 fi_id; /* used with stateowner->so_id - * for stateid_hashtbl hash */ - bool fi_had_conflict; -}; - -/* -* nfs4_stateid can either be an open stateid or (eventually) a lock stateid -* -* (open)nfs4_stateid: one per (open)nfs4_stateowner, nfs4_file -* -* st_hash: stateid_hashtbl[] entry or lockstateid_hashtbl entry -* st_perfile: file_hashtbl[] entry. -* st_perfile_state: nfs4_stateowner->so_perfilestate -* st_perlockowner: (open stateid) list of lock nfs4_stateowners -* st_access_bmap: used only for open stateid -* st_deny_bmap: used only for open stateid -* st_openstp: open stateid lock stateid was derived from -* -* XXX: open stateids and lock stateids have diverged sufficiently that -* we should consider defining separate structs for the two cases. 
-*/ - -struct nfs4_stateid { - struct list_head st_hash; - struct list_head st_perfile; - struct list_head st_perstateowner; - struct list_head st_lockowners; - struct nfs4_stateowner * st_stateowner; - struct nfs4_file * st_file; - stateid_t st_stateid; - struct file * st_vfs_file; - unsigned long st_access_bmap; - unsigned long st_deny_bmap; - struct nfs4_stateid * st_openstp; -}; - -/* flags for preprocess_seqid_op() */ -#define HAS_SESSION 0x00000001 -#define CONFIRM 0x00000002 -#define OPEN_STATE 0x00000004 -#define LOCK_STATE 0x00000008 -#define RD_STATE 0x00000010 -#define WR_STATE 0x00000020 -#define CLOSE_STATE 0x00000040 - -#define seqid_mutating_err(err) \ - (((err) != nfserr_stale_clientid) && \ - ((err) != nfserr_bad_seqid) && \ - ((err) != nfserr_stale_stateid) && \ - ((err) != nfserr_bad_stateid)) - -struct nfsd4_compound_state; - -extern __be32 nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate, - stateid_t *stateid, int flags, struct file **filp); -extern void nfs4_lock_state(void); -extern void nfs4_unlock_state(void); -extern int nfs4_in_grace(void); -extern __be32 nfs4_check_open_reclaim(clientid_t *clid); -extern void put_nfs4_client(struct nfs4_client *clp); -extern void nfs4_free_stateowner(struct kref *kref); -extern int set_callback_cred(void); -extern void nfsd4_probe_callback(struct nfs4_client *clp); -extern void nfsd4_cb_recall(struct nfs4_delegation *dp); -extern void nfs4_put_delegation(struct nfs4_delegation *dp); -extern __be32 nfs4_make_rec_clidname(char *clidname, struct xdr_netobj *clname); -extern void nfsd4_init_recdir(char *recdir_name); -extern int nfsd4_recdir_load(void); -extern void nfsd4_shutdown_recdir(void); -extern int nfs4_client_to_reclaim(const char *name); -extern int nfs4_has_reclaimed_state(const char *name, bool use_exchange_id); -extern void nfsd4_recdir_purge_old(void); -extern int nfsd4_create_clid_dir(struct nfs4_client *clp); -extern void nfsd4_remove_clid_dir(struct nfs4_client *clp); - -static inline void -nfs4_put_stateowner(struct nfs4_stateowner *so) -{ - kref_put(&so->so_ref, nfs4_free_stateowner); -} - -static inline void -nfs4_get_stateowner(struct nfs4_stateowner *so) -{ - kref_get(&so->so_ref); -} - -#endif /* NFSD4_STATE_H */ diff --git a/include/linux/nfsd/xdr.h b/include/linux/nfsd/xdr.h deleted file mode 100644 index 58f824d854c2..000000000000 --- a/include/linux/nfsd/xdr.h +++ /dev/null @@ -1,176 +0,0 @@ -/* - * linux/include/linux/nfsd/xdr.h - * - * XDR types for nfsd. This is mainly a typing exercise. 
- */ - -#ifndef LINUX_NFSD_H -#define LINUX_NFSD_H - -#include -#include - -struct nfsd_fhandle { - struct svc_fh fh; -}; - -struct nfsd_sattrargs { - struct svc_fh fh; - struct iattr attrs; -}; - -struct nfsd_diropargs { - struct svc_fh fh; - char * name; - unsigned int len; -}; - -struct nfsd_readargs { - struct svc_fh fh; - __u32 offset; - __u32 count; - int vlen; -}; - -struct nfsd_writeargs { - svc_fh fh; - __u32 offset; - int len; - int vlen; -}; - -struct nfsd_createargs { - struct svc_fh fh; - char * name; - unsigned int len; - struct iattr attrs; -}; - -struct nfsd_renameargs { - struct svc_fh ffh; - char * fname; - unsigned int flen; - struct svc_fh tfh; - char * tname; - unsigned int tlen; -}; - -struct nfsd_readlinkargs { - struct svc_fh fh; - char * buffer; -}; - -struct nfsd_linkargs { - struct svc_fh ffh; - struct svc_fh tfh; - char * tname; - unsigned int tlen; -}; - -struct nfsd_symlinkargs { - struct svc_fh ffh; - char * fname; - unsigned int flen; - char * tname; - unsigned int tlen; - struct iattr attrs; -}; - -struct nfsd_readdirargs { - struct svc_fh fh; - __u32 cookie; - __u32 count; - __be32 * buffer; -}; - -struct nfsd_attrstat { - struct svc_fh fh; - struct kstat stat; -}; - -struct nfsd_diropres { - struct svc_fh fh; - struct kstat stat; -}; - -struct nfsd_readlinkres { - int len; -}; - -struct nfsd_readres { - struct svc_fh fh; - unsigned long count; - struct kstat stat; -}; - -struct nfsd_readdirres { - int count; - - struct readdir_cd common; - __be32 * buffer; - int buflen; - __be32 * offset; -}; - -struct nfsd_statfsres { - struct kstatfs stats; -}; - -/* - * Storage requirements for XDR arguments and results. - */ -union nfsd_xdrstore { - struct nfsd_sattrargs sattr; - struct nfsd_diropargs dirop; - struct nfsd_readargs read; - struct nfsd_writeargs write; - struct nfsd_createargs create; - struct nfsd_renameargs rename; - struct nfsd_linkargs link; - struct nfsd_symlinkargs symlink; - struct nfsd_readdirargs readdir; -}; - -#define NFS2_SVC_XDRSIZE sizeof(union nfsd_xdrstore) - - -int nfssvc_decode_void(struct svc_rqst *, __be32 *, void *); -int nfssvc_decode_fhandle(struct svc_rqst *, __be32 *, struct nfsd_fhandle *); -int nfssvc_decode_sattrargs(struct svc_rqst *, __be32 *, - struct nfsd_sattrargs *); -int nfssvc_decode_diropargs(struct svc_rqst *, __be32 *, - struct nfsd_diropargs *); -int nfssvc_decode_readargs(struct svc_rqst *, __be32 *, - struct nfsd_readargs *); -int nfssvc_decode_writeargs(struct svc_rqst *, __be32 *, - struct nfsd_writeargs *); -int nfssvc_decode_createargs(struct svc_rqst *, __be32 *, - struct nfsd_createargs *); -int nfssvc_decode_renameargs(struct svc_rqst *, __be32 *, - struct nfsd_renameargs *); -int nfssvc_decode_readlinkargs(struct svc_rqst *, __be32 *, - struct nfsd_readlinkargs *); -int nfssvc_decode_linkargs(struct svc_rqst *, __be32 *, - struct nfsd_linkargs *); -int nfssvc_decode_symlinkargs(struct svc_rqst *, __be32 *, - struct nfsd_symlinkargs *); -int nfssvc_decode_readdirargs(struct svc_rqst *, __be32 *, - struct nfsd_readdirargs *); -int nfssvc_encode_void(struct svc_rqst *, __be32 *, void *); -int nfssvc_encode_attrstat(struct svc_rqst *, __be32 *, struct nfsd_attrstat *); -int nfssvc_encode_diropres(struct svc_rqst *, __be32 *, struct nfsd_diropres *); -int nfssvc_encode_readlinkres(struct svc_rqst *, __be32 *, struct nfsd_readlinkres *); -int nfssvc_encode_readres(struct svc_rqst *, __be32 *, struct nfsd_readres *); -int nfssvc_encode_statfsres(struct svc_rqst *, __be32 *, struct nfsd_statfsres *); -int 
nfssvc_encode_readdirres(struct svc_rqst *, __be32 *, struct nfsd_readdirres *); - -int nfssvc_encode_entry(void *, const char *name, - int namlen, loff_t offset, u64 ino, unsigned int); - -int nfssvc_release_fhandle(struct svc_rqst *, __be32 *, struct nfsd_fhandle *); - -/* Helper functions for NFSv2 ACL code */ -__be32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp); -__be32 *nfs2svc_decode_fh(__be32 *p, struct svc_fh *fhp); - -#endif /* LINUX_NFSD_H */ diff --git a/include/linux/nfsd/xdr3.h b/include/linux/nfsd/xdr3.h deleted file mode 100644 index 421eddd65a25..000000000000 --- a/include/linux/nfsd/xdr3.h +++ /dev/null @@ -1,346 +0,0 @@ -/* - * linux/include/linux/nfsd/xdr3.h - * - * XDR types for NFSv3 in nfsd. - * - * Copyright (C) 1996-1998, Olaf Kirch - */ - -#ifndef _LINUX_NFSD_XDR3_H -#define _LINUX_NFSD_XDR3_H - -#include - -struct nfsd3_sattrargs { - struct svc_fh fh; - struct iattr attrs; - int check_guard; - time_t guardtime; -}; - -struct nfsd3_diropargs { - struct svc_fh fh; - char * name; - unsigned int len; -}; - -struct nfsd3_accessargs { - struct svc_fh fh; - unsigned int access; -}; - -struct nfsd3_readargs { - struct svc_fh fh; - __u64 offset; - __u32 count; - int vlen; -}; - -struct nfsd3_writeargs { - svc_fh fh; - __u64 offset; - __u32 count; - int stable; - __u32 len; - int vlen; -}; - -struct nfsd3_createargs { - struct svc_fh fh; - char * name; - unsigned int len; - int createmode; - struct iattr attrs; - __be32 * verf; -}; - -struct nfsd3_mknodargs { - struct svc_fh fh; - char * name; - unsigned int len; - __u32 ftype; - __u32 major, minor; - struct iattr attrs; -}; - -struct nfsd3_renameargs { - struct svc_fh ffh; - char * fname; - unsigned int flen; - struct svc_fh tfh; - char * tname; - unsigned int tlen; -}; - -struct nfsd3_readlinkargs { - struct svc_fh fh; - char * buffer; -}; - -struct nfsd3_linkargs { - struct svc_fh ffh; - struct svc_fh tfh; - char * tname; - unsigned int tlen; -}; - -struct nfsd3_symlinkargs { - struct svc_fh ffh; - char * fname; - unsigned int flen; - char * tname; - unsigned int tlen; - struct iattr attrs; -}; - -struct nfsd3_readdirargs { - struct svc_fh fh; - __u64 cookie; - __u32 dircount; - __u32 count; - __be32 * verf; - __be32 * buffer; -}; - -struct nfsd3_commitargs { - struct svc_fh fh; - __u64 offset; - __u32 count; -}; - -struct nfsd3_getaclargs { - struct svc_fh fh; - int mask; -}; - -struct posix_acl; -struct nfsd3_setaclargs { - struct svc_fh fh; - int mask; - struct posix_acl *acl_access; - struct posix_acl *acl_default; -}; - -struct nfsd3_attrstat { - __be32 status; - struct svc_fh fh; - struct kstat stat; -}; - -/* LOOKUP, CREATE, MKDIR, SYMLINK, MKNOD */ -struct nfsd3_diropres { - __be32 status; - struct svc_fh dirfh; - struct svc_fh fh; -}; - -struct nfsd3_accessres { - __be32 status; - struct svc_fh fh; - __u32 access; -}; - -struct nfsd3_readlinkres { - __be32 status; - struct svc_fh fh; - __u32 len; -}; - -struct nfsd3_readres { - __be32 status; - struct svc_fh fh; - unsigned long count; - int eof; -}; - -struct nfsd3_writeres { - __be32 status; - struct svc_fh fh; - unsigned long count; - int committed; -}; - -struct nfsd3_renameres { - __be32 status; - struct svc_fh ffh; - struct svc_fh tfh; -}; - -struct nfsd3_linkres { - __be32 status; - struct svc_fh tfh; - struct svc_fh fh; -}; - -struct nfsd3_readdirres { - __be32 status; - struct svc_fh fh; - int count; - __be32 verf[2]; - - struct readdir_cd common; - __be32 * buffer; - int buflen; - __be32 * offset; - __be32 * offset1; - 
struct svc_rqst * rqstp; - -}; - -struct nfsd3_fsstatres { - __be32 status; - struct kstatfs stats; - __u32 invarsec; -}; - -struct nfsd3_fsinfores { - __be32 status; - __u32 f_rtmax; - __u32 f_rtpref; - __u32 f_rtmult; - __u32 f_wtmax; - __u32 f_wtpref; - __u32 f_wtmult; - __u32 f_dtpref; - __u64 f_maxfilesize; - __u32 f_properties; -}; - -struct nfsd3_pathconfres { - __be32 status; - __u32 p_link_max; - __u32 p_name_max; - __u32 p_no_trunc; - __u32 p_chown_restricted; - __u32 p_case_insensitive; - __u32 p_case_preserving; -}; - -struct nfsd3_commitres { - __be32 status; - struct svc_fh fh; -}; - -struct nfsd3_getaclres { - __be32 status; - struct svc_fh fh; - int mask; - struct posix_acl *acl_access; - struct posix_acl *acl_default; -}; - -/* dummy type for release */ -struct nfsd3_fhandle_pair { - __u32 dummy; - struct svc_fh fh1; - struct svc_fh fh2; -}; - -/* - * Storage requirements for XDR arguments and results. - */ -union nfsd3_xdrstore { - struct nfsd3_sattrargs sattrargs; - struct nfsd3_diropargs diropargs; - struct nfsd3_readargs readargs; - struct nfsd3_writeargs writeargs; - struct nfsd3_createargs createargs; - struct nfsd3_renameargs renameargs; - struct nfsd3_linkargs linkargs; - struct nfsd3_symlinkargs symlinkargs; - struct nfsd3_readdirargs readdirargs; - struct nfsd3_diropres diropres; - struct nfsd3_accessres accessres; - struct nfsd3_readlinkres readlinkres; - struct nfsd3_readres readres; - struct nfsd3_writeres writeres; - struct nfsd3_renameres renameres; - struct nfsd3_linkres linkres; - struct nfsd3_readdirres readdirres; - struct nfsd3_fsstatres fsstatres; - struct nfsd3_fsinfores fsinfores; - struct nfsd3_pathconfres pathconfres; - struct nfsd3_commitres commitres; - struct nfsd3_getaclres getaclres; -}; - -#define NFS3_SVC_XDRSIZE sizeof(union nfsd3_xdrstore) - -int nfs3svc_decode_fhandle(struct svc_rqst *, __be32 *, struct nfsd_fhandle *); -int nfs3svc_decode_sattrargs(struct svc_rqst *, __be32 *, - struct nfsd3_sattrargs *); -int nfs3svc_decode_diropargs(struct svc_rqst *, __be32 *, - struct nfsd3_diropargs *); -int nfs3svc_decode_accessargs(struct svc_rqst *, __be32 *, - struct nfsd3_accessargs *); -int nfs3svc_decode_readargs(struct svc_rqst *, __be32 *, - struct nfsd3_readargs *); -int nfs3svc_decode_writeargs(struct svc_rqst *, __be32 *, - struct nfsd3_writeargs *); -int nfs3svc_decode_createargs(struct svc_rqst *, __be32 *, - struct nfsd3_createargs *); -int nfs3svc_decode_mkdirargs(struct svc_rqst *, __be32 *, - struct nfsd3_createargs *); -int nfs3svc_decode_mknodargs(struct svc_rqst *, __be32 *, - struct nfsd3_mknodargs *); -int nfs3svc_decode_renameargs(struct svc_rqst *, __be32 *, - struct nfsd3_renameargs *); -int nfs3svc_decode_readlinkargs(struct svc_rqst *, __be32 *, - struct nfsd3_readlinkargs *); -int nfs3svc_decode_linkargs(struct svc_rqst *, __be32 *, - struct nfsd3_linkargs *); -int nfs3svc_decode_symlinkargs(struct svc_rqst *, __be32 *, - struct nfsd3_symlinkargs *); -int nfs3svc_decode_readdirargs(struct svc_rqst *, __be32 *, - struct nfsd3_readdirargs *); -int nfs3svc_decode_readdirplusargs(struct svc_rqst *, __be32 *, - struct nfsd3_readdirargs *); -int nfs3svc_decode_commitargs(struct svc_rqst *, __be32 *, - struct nfsd3_commitargs *); -int nfs3svc_encode_voidres(struct svc_rqst *, __be32 *, void *); -int nfs3svc_encode_attrstat(struct svc_rqst *, __be32 *, - struct nfsd3_attrstat *); -int nfs3svc_encode_wccstat(struct svc_rqst *, __be32 *, - struct nfsd3_attrstat *); -int nfs3svc_encode_diropres(struct svc_rqst *, __be32 *, - struct 
nfsd3_diropres *); -int nfs3svc_encode_accessres(struct svc_rqst *, __be32 *, - struct nfsd3_accessres *); -int nfs3svc_encode_readlinkres(struct svc_rqst *, __be32 *, - struct nfsd3_readlinkres *); -int nfs3svc_encode_readres(struct svc_rqst *, __be32 *, struct nfsd3_readres *); -int nfs3svc_encode_writeres(struct svc_rqst *, __be32 *, struct nfsd3_writeres *); -int nfs3svc_encode_createres(struct svc_rqst *, __be32 *, - struct nfsd3_diropres *); -int nfs3svc_encode_renameres(struct svc_rqst *, __be32 *, - struct nfsd3_renameres *); -int nfs3svc_encode_linkres(struct svc_rqst *, __be32 *, - struct nfsd3_linkres *); -int nfs3svc_encode_readdirres(struct svc_rqst *, __be32 *, - struct nfsd3_readdirres *); -int nfs3svc_encode_fsstatres(struct svc_rqst *, __be32 *, - struct nfsd3_fsstatres *); -int nfs3svc_encode_fsinfores(struct svc_rqst *, __be32 *, - struct nfsd3_fsinfores *); -int nfs3svc_encode_pathconfres(struct svc_rqst *, __be32 *, - struct nfsd3_pathconfres *); -int nfs3svc_encode_commitres(struct svc_rqst *, __be32 *, - struct nfsd3_commitres *); - -int nfs3svc_release_fhandle(struct svc_rqst *, __be32 *, - struct nfsd3_attrstat *); -int nfs3svc_release_fhandle2(struct svc_rqst *, __be32 *, - struct nfsd3_fhandle_pair *); -int nfs3svc_encode_entry(void *, const char *name, - int namlen, loff_t offset, u64 ino, - unsigned int); -int nfs3svc_encode_entry_plus(void *, const char *name, - int namlen, loff_t offset, u64 ino, - unsigned int); -/* Helper functions for NFSv3 ACL code */ -__be32 *nfs3svc_encode_post_op_attr(struct svc_rqst *rqstp, __be32 *p, - struct svc_fh *fhp); -__be32 *nfs3svc_decode_fh(__be32 *p, struct svc_fh *fhp); - - -#endif /* _LINUX_NFSD_XDR3_H */ diff --git a/include/linux/nfsd/xdr4.h b/include/linux/nfsd/xdr4.h deleted file mode 100644 index 1bf266239c7e..000000000000 --- a/include/linux/nfsd/xdr4.h +++ /dev/null @@ -1,564 +0,0 @@ -/* - * include/linux/nfsd/xdr4.h - * - * Server-side types for NFSv4. - * - * Copyright (c) 2002 The Regents of the University of Michigan. - * All rights reserved. - * - * Kendrick Smith - * Andy Adamson - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#ifndef _LINUX_NFSD_XDR4_H -#define _LINUX_NFSD_XDR4_H - -#include -#include - -#define NFSD4_MAX_TAGLEN 128 -#define XDR_LEN(n) (((n) + 3) & ~3) - -struct nfsd4_compound_state { - struct svc_fh current_fh; - struct svc_fh save_fh; - struct nfs4_stateowner *replay_owner; - /* For sessions DRC */ - struct nfsd4_session *session; - struct nfsd4_slot *slot; - __be32 *datap; - size_t iovlen; - u32 minorversion; - u32 status; -}; - -static inline bool nfsd4_has_session(struct nfsd4_compound_state *cs) -{ - return cs->slot != NULL; -} - -struct nfsd4_change_info { - u32 atomic; - bool change_supported; - u32 before_ctime_sec; - u32 before_ctime_nsec; - u64 before_change; - u32 after_ctime_sec; - u32 after_ctime_nsec; - u64 after_change; -}; - -struct nfsd4_access { - u32 ac_req_access; /* request */ - u32 ac_supported; /* response */ - u32 ac_resp_access; /* response */ -}; - -struct nfsd4_close { - u32 cl_seqid; /* request */ - stateid_t cl_stateid; /* request+response */ - struct nfs4_stateowner * cl_stateowner; /* response */ -}; - -struct nfsd4_commit { - u64 co_offset; /* request */ - u32 co_count; /* request */ - nfs4_verifier co_verf; /* response */ -}; - -struct nfsd4_create { - u32 cr_namelen; /* request */ - char * cr_name; /* request */ - u32 cr_type; /* request */ - union { /* request */ - struct { - u32 namelen; - char *name; - } link; /* NF4LNK */ - struct { - u32 specdata1; - u32 specdata2; - } dev; /* NF4BLK, NF4CHR */ - } u; - u32 cr_bmval[3]; /* request */ - struct iattr cr_iattr; /* request */ - struct nfsd4_change_info cr_cinfo; /* response */ - struct nfs4_acl *cr_acl; -}; -#define cr_linklen u.link.namelen -#define cr_linkname u.link.name -#define cr_specdata1 u.dev.specdata1 -#define cr_specdata2 u.dev.specdata2 - -struct nfsd4_delegreturn { - stateid_t dr_stateid; -}; - -struct nfsd4_getattr { - u32 ga_bmval[3]; /* request */ - struct svc_fh *ga_fhp; /* response */ -}; - -struct nfsd4_link { - u32 li_namelen; /* request */ - char * li_name; /* request */ - struct nfsd4_change_info li_cinfo; /* response */ -}; - -struct nfsd4_lock_denied { - clientid_t ld_clientid; - struct nfs4_stateowner *ld_sop; - u64 ld_start; - u64 ld_length; - u32 ld_type; -}; - -struct nfsd4_lock { - /* request */ - u32 lk_type; - u32 lk_reclaim; /* boolean */ - u64 lk_offset; - u64 lk_length; - u32 lk_is_new; - union { - struct { - u32 open_seqid; - stateid_t open_stateid; - u32 lock_seqid; - clientid_t clientid; - struct xdr_netobj owner; - } new; - struct { - stateid_t lock_stateid; - u32 lock_seqid; - } old; - } v; - - /* response */ - union { - struct { - stateid_t stateid; - } ok; - struct nfsd4_lock_denied denied; - } u; - /* The lk_replay_owner is the open owner in the open_to_lock_owner - * case and the lock owner otherwise: */ - struct nfs4_stateowner *lk_replay_owner; -}; -#define lk_new_open_seqid v.new.open_seqid -#define lk_new_open_stateid v.new.open_stateid -#define lk_new_lock_seqid v.new.lock_seqid -#define lk_new_clientid v.new.clientid -#define 
lk_new_owner v.new.owner -#define lk_old_lock_stateid v.old.lock_stateid -#define lk_old_lock_seqid v.old.lock_seqid - -#define lk_rflags u.ok.rflags -#define lk_resp_stateid u.ok.stateid -#define lk_denied u.denied - - -struct nfsd4_lockt { - u32 lt_type; - clientid_t lt_clientid; - struct xdr_netobj lt_owner; - u64 lt_offset; - u64 lt_length; - struct nfs4_stateowner * lt_stateowner; - struct nfsd4_lock_denied lt_denied; -}; - - -struct nfsd4_locku { - u32 lu_type; - u32 lu_seqid; - stateid_t lu_stateid; - u64 lu_offset; - u64 lu_length; - struct nfs4_stateowner *lu_stateowner; -}; - - -struct nfsd4_lookup { - u32 lo_len; /* request */ - char * lo_name; /* request */ -}; - -struct nfsd4_putfh { - u32 pf_fhlen; /* request */ - char *pf_fhval; /* request */ -}; - -struct nfsd4_open { - u32 op_claim_type; /* request */ - struct xdr_netobj op_fname; /* request - everything but CLAIM_PREV */ - u32 op_delegate_type; /* request - CLAIM_PREV only */ - stateid_t op_delegate_stateid; /* request - response */ - u32 op_create; /* request */ - u32 op_createmode; /* request */ - u32 op_bmval[3]; /* request */ - struct iattr iattr; /* UNCHECKED4, GUARDED4, EXCLUSIVE4_1 */ - nfs4_verifier verf; /* EXCLUSIVE4 */ - clientid_t op_clientid; /* request */ - struct xdr_netobj op_owner; /* request */ - u32 op_seqid; /* request */ - u32 op_share_access; /* request */ - u32 op_share_deny; /* request */ - stateid_t op_stateid; /* response */ - u32 op_recall; /* recall */ - struct nfsd4_change_info op_cinfo; /* response */ - u32 op_rflags; /* response */ - int op_truncate; /* used during processing */ - struct nfs4_stateowner *op_stateowner; /* used during processing */ - struct nfs4_acl *op_acl; -}; -#define op_iattr iattr -#define op_verf verf - -struct nfsd4_open_confirm { - stateid_t oc_req_stateid /* request */; - u32 oc_seqid /* request */; - stateid_t oc_resp_stateid /* response */; - struct nfs4_stateowner * oc_stateowner; /* response */ -}; - -struct nfsd4_open_downgrade { - stateid_t od_stateid; - u32 od_seqid; - u32 od_share_access; - u32 od_share_deny; - struct nfs4_stateowner *od_stateowner; -}; - - -struct nfsd4_read { - stateid_t rd_stateid; /* request */ - u64 rd_offset; /* request */ - u32 rd_length; /* request */ - int rd_vlen; - struct file *rd_filp; - - struct svc_rqst *rd_rqstp; /* response */ - struct svc_fh * rd_fhp; /* response */ -}; - -struct nfsd4_readdir { - u64 rd_cookie; /* request */ - nfs4_verifier rd_verf; /* request */ - u32 rd_dircount; /* request */ - u32 rd_maxcount; /* request */ - u32 rd_bmval[3]; /* request */ - struct svc_rqst *rd_rqstp; /* response */ - struct svc_fh * rd_fhp; /* response */ - - struct readdir_cd common; - __be32 * buffer; - int buflen; - __be32 * offset; -}; - -struct nfsd4_release_lockowner { - clientid_t rl_clientid; - struct xdr_netobj rl_owner; -}; -struct nfsd4_readlink { - struct svc_rqst *rl_rqstp; /* request */ - struct svc_fh * rl_fhp; /* request */ -}; - -struct nfsd4_remove { - u32 rm_namelen; /* request */ - char * rm_name; /* request */ - struct nfsd4_change_info rm_cinfo; /* response */ -}; - -struct nfsd4_rename { - u32 rn_snamelen; /* request */ - char * rn_sname; /* request */ - u32 rn_tnamelen; /* request */ - char * rn_tname; /* request */ - struct nfsd4_change_info rn_sinfo; /* response */ - struct nfsd4_change_info rn_tinfo; /* response */ -}; - -struct nfsd4_secinfo { - u32 si_namelen; /* request */ - char *si_name; /* request */ - struct svc_export *si_exp; /* response */ -}; - -struct nfsd4_setattr { - stateid_t sa_stateid; /* 
request */ - u32 sa_bmval[3]; /* request */ - struct iattr sa_iattr; /* request */ - struct nfs4_acl *sa_acl; -}; - -struct nfsd4_setclientid { - nfs4_verifier se_verf; /* request */ - u32 se_namelen; /* request */ - char * se_name; /* request */ - u32 se_callback_prog; /* request */ - u32 se_callback_netid_len; /* request */ - char * se_callback_netid_val; /* request */ - u32 se_callback_addr_len; /* request */ - char * se_callback_addr_val; /* request */ - u32 se_callback_ident; /* request */ - clientid_t se_clientid; /* response */ - nfs4_verifier se_confirm; /* response */ -}; - -struct nfsd4_setclientid_confirm { - clientid_t sc_clientid; - nfs4_verifier sc_confirm; -}; - -/* also used for NVERIFY */ -struct nfsd4_verify { - u32 ve_bmval[3]; /* request */ - u32 ve_attrlen; /* request */ - char * ve_attrval; /* request */ -}; - -struct nfsd4_write { - stateid_t wr_stateid; /* request */ - u64 wr_offset; /* request */ - u32 wr_stable_how; /* request */ - u32 wr_buflen; /* request */ - int wr_vlen; - - u32 wr_bytes_written; /* response */ - u32 wr_how_written; /* response */ - nfs4_verifier wr_verifier; /* response */ -}; - -struct nfsd4_exchange_id { - nfs4_verifier verifier; - struct xdr_netobj clname; - u32 flags; - clientid_t clientid; - u32 seqid; - int spa_how; -}; - -struct nfsd4_sequence { - struct nfs4_sessionid sessionid; /* request/response */ - u32 seqid; /* request/response */ - u32 slotid; /* request/response */ - u32 maxslots; /* request/response */ - u32 cachethis; /* request */ -#if 0 - u32 target_maxslots; /* response */ - u32 status_flags; /* response */ -#endif /* not yet */ -}; - -struct nfsd4_destroy_session { - struct nfs4_sessionid sessionid; -}; - -struct nfsd4_op { - int opnum; - __be32 status; - union { - struct nfsd4_access access; - struct nfsd4_close close; - struct nfsd4_commit commit; - struct nfsd4_create create; - struct nfsd4_delegreturn delegreturn; - struct nfsd4_getattr getattr; - struct svc_fh * getfh; - struct nfsd4_link link; - struct nfsd4_lock lock; - struct nfsd4_lockt lockt; - struct nfsd4_locku locku; - struct nfsd4_lookup lookup; - struct nfsd4_verify nverify; - struct nfsd4_open open; - struct nfsd4_open_confirm open_confirm; - struct nfsd4_open_downgrade open_downgrade; - struct nfsd4_putfh putfh; - struct nfsd4_read read; - struct nfsd4_readdir readdir; - struct nfsd4_readlink readlink; - struct nfsd4_remove remove; - struct nfsd4_rename rename; - clientid_t renew; - struct nfsd4_secinfo secinfo; - struct nfsd4_setattr setattr; - struct nfsd4_setclientid setclientid; - struct nfsd4_setclientid_confirm setclientid_confirm; - struct nfsd4_verify verify; - struct nfsd4_write write; - struct nfsd4_release_lockowner release_lockowner; - - /* NFSv4.1 */ - struct nfsd4_exchange_id exchange_id; - struct nfsd4_create_session create_session; - struct nfsd4_destroy_session destroy_session; - struct nfsd4_sequence sequence; - } u; - struct nfs4_replay * replay; -}; - -struct nfsd4_compoundargs { - /* scratch variables for XDR decode */ - __be32 * p; - __be32 * end; - struct page ** pagelist; - int pagelen; - __be32 tmp[8]; - __be32 * tmpp; - struct tmpbuf { - struct tmpbuf *next; - void (*release)(const void *); - void *buf; - } *to_free; - - struct svc_rqst *rqstp; - - u32 taglen; - char * tag; - u32 minorversion; - u32 opcnt; - struct nfsd4_op *ops; - struct nfsd4_op iops[8]; -}; - -struct nfsd4_compoundres { - /* scratch variables for XDR encode */ - __be32 * p; - __be32 * end; - struct xdr_buf * xbuf; - struct svc_rqst * rqstp; - - u32 taglen; - 
char * tag; - u32 opcnt; - __be32 * tagp; /* tag, opcount encode location */ - struct nfsd4_compound_state cstate; -}; - -static inline bool nfsd4_is_solo_sequence(struct nfsd4_compoundres *resp) -{ - struct nfsd4_compoundargs *args = resp->rqstp->rq_argp; - return resp->opcnt == 1 && args->ops[0].opnum == OP_SEQUENCE; -} - -static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp) -{ - return !resp->cstate.slot->sl_cachethis || nfsd4_is_solo_sequence(resp); -} - -#define NFS4_SVC_XDRSIZE sizeof(struct nfsd4_compoundargs) - -static inline void -set_change_info(struct nfsd4_change_info *cinfo, struct svc_fh *fhp) -{ - BUG_ON(!fhp->fh_pre_saved || !fhp->fh_post_saved); - cinfo->atomic = 1; - cinfo->change_supported = IS_I_VERSION(fhp->fh_dentry->d_inode); - if (cinfo->change_supported) { - cinfo->before_change = fhp->fh_pre_change; - cinfo->after_change = fhp->fh_post_change; - } else { - cinfo->before_ctime_sec = fhp->fh_pre_ctime.tv_sec; - cinfo->before_ctime_nsec = fhp->fh_pre_ctime.tv_nsec; - cinfo->after_ctime_sec = fhp->fh_post_attr.ctime.tv_sec; - cinfo->after_ctime_nsec = fhp->fh_post_attr.ctime.tv_nsec; - } -} - -int nfs4svc_encode_voidres(struct svc_rqst *, __be32 *, void *); -int nfs4svc_decode_compoundargs(struct svc_rqst *, __be32 *, - struct nfsd4_compoundargs *); -int nfs4svc_encode_compoundres(struct svc_rqst *, __be32 *, - struct nfsd4_compoundres *); -void nfsd4_encode_operation(struct nfsd4_compoundres *, struct nfsd4_op *); -void nfsd4_encode_replay(struct nfsd4_compoundres *resp, struct nfsd4_op *op); -__be32 nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp, - struct dentry *dentry, __be32 *buffer, int *countp, - u32 *bmval, struct svc_rqst *, int ignore_crossmnt); -extern __be32 nfsd4_setclientid(struct svc_rqst *rqstp, - struct nfsd4_compound_state *, - struct nfsd4_setclientid *setclid); -extern __be32 nfsd4_setclientid_confirm(struct svc_rqst *rqstp, - struct nfsd4_compound_state *, - struct nfsd4_setclientid_confirm *setclientid_confirm); -extern void nfsd4_store_cache_entry(struct nfsd4_compoundres *resp); -extern __be32 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp, - struct nfsd4_sequence *seq); -extern __be32 nfsd4_exchange_id(struct svc_rqst *rqstp, - struct nfsd4_compound_state *, -struct nfsd4_exchange_id *); - extern __be32 nfsd4_create_session(struct svc_rqst *, - struct nfsd4_compound_state *, - struct nfsd4_create_session *); -extern __be32 nfsd4_sequence(struct svc_rqst *, - struct nfsd4_compound_state *, - struct nfsd4_sequence *); -extern __be32 nfsd4_destroy_session(struct svc_rqst *, - struct nfsd4_compound_state *, - struct nfsd4_destroy_session *); -extern __be32 nfsd4_process_open1(struct nfsd4_compound_state *, - struct nfsd4_open *open); -extern __be32 nfsd4_process_open2(struct svc_rqst *rqstp, - struct svc_fh *current_fh, struct nfsd4_open *open); -extern __be32 nfsd4_open_confirm(struct svc_rqst *rqstp, - struct nfsd4_compound_state *, struct nfsd4_open_confirm *oc); -extern __be32 nfsd4_close(struct svc_rqst *rqstp, - struct nfsd4_compound_state *, - struct nfsd4_close *close); -extern __be32 nfsd4_open_downgrade(struct svc_rqst *rqstp, - struct nfsd4_compound_state *, - struct nfsd4_open_downgrade *od); -extern __be32 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *, - struct nfsd4_lock *lock); -extern __be32 nfsd4_lockt(struct svc_rqst *rqstp, - struct nfsd4_compound_state *, - struct nfsd4_lockt *lockt); -extern __be32 nfsd4_locku(struct svc_rqst *rqstp, - struct nfsd4_compound_state *, - 
struct nfsd4_locku *locku); -extern __be32 -nfsd4_release_lockowner(struct svc_rqst *rqstp, - struct nfsd4_compound_state *, - struct nfsd4_release_lockowner *rlockowner); -extern void nfsd4_release_compoundargs(struct nfsd4_compoundargs *); -extern __be32 nfsd4_delegreturn(struct svc_rqst *rqstp, - struct nfsd4_compound_state *, struct nfsd4_delegreturn *dr); -extern __be32 nfsd4_renew(struct svc_rqst *rqstp, - struct nfsd4_compound_state *, clientid_t *clid); -#endif - -/* - * Local variables: - * c-basic-offset: 8 - * End: - */ -- cgit v1.2.3 From e8e8753f7a32ce4f636771126fc8eba0dc4ad817 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Mon, 14 Dec 2009 12:53:32 -0500 Subject: nfsd: new interface to advertise export features Soon we will add the new V4ROOT flag, and allow the INSECURE flag to vary by pseudoflavor. It would be useful for nfs-utils (for example, for improved exportfs error reporting) to be able to know when this happens. Use this new interface for that purpose. Signed-off-by: J. Bruce Fields --- fs/nfsd/nfsctl.c | 21 +++++++++++++++++++++ include/linux/nfsd/export.h | 3 ++- 2 files changed, 23 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index 0415680d3f58..e7051ac4dc73 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -31,6 +31,7 @@ enum { NFSD_Getfd, NFSD_Getfs, NFSD_List, + NFSD_Export_features, NFSD_Fh, NFSD_FO_UnlockIP, NFSD_FO_UnlockFS, @@ -149,6 +150,24 @@ static const struct file_operations exports_operations = { .owner = THIS_MODULE, }; +static int export_features_show(struct seq_file *m, void *v) +{ + seq_printf(m, "0x%x 0x%x\n", NFSEXP_ALLFLAGS, NFSEXP_SECINFO_FLAGS); + return 0; +} + +static int export_features_open(struct inode *inode, struct file *file) +{ + return single_open(file, export_features_show, NULL); +} + +static struct file_operations export_features_operations = { + .open = export_features_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + extern int nfsd_pool_stats_open(struct inode *inode, struct file *file); extern int nfsd_pool_stats_release(struct inode *inode, struct file *file); @@ -1306,6 +1325,8 @@ static int nfsd_fill_super(struct super_block * sb, void * data, int silent) [NFSD_Getfd] = {".getfd", &transaction_ops, S_IWUSR|S_IRUSR}, [NFSD_Getfs] = {".getfs", &transaction_ops, S_IWUSR|S_IRUSR}, [NFSD_List] = {"exports", &exports_operations, S_IRUGO}, + [NFSD_Export_features] = {"export_features", + &export_features_operations, S_IRUGO}, [NFSD_FO_UnlockIP] = {"unlock_ip", &transaction_ops, S_IWUSR|S_IRUSR}, [NFSD_FO_UnlockFS] = {"unlock_filesystem", diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h index ef3d416fcf67..4f1df1d7312c 100644 --- a/include/linux/nfsd/export.h +++ b/include/linux/nfsd/export.h @@ -39,7 +39,8 @@ #define NFSEXP_FSID 0x2000 #define NFSEXP_CROSSMOUNT 0x4000 #define NFSEXP_NOACL 0x8000 /* reserved for possible ACL related use */ -#define NFSEXP_ALLFLAGS 0xFE3F +/* All flags that we claim to support. (Note we don't support NOACL.) */ +#define NFSEXP_ALLFLAGS 0x7E3F /* The flags that may vary depending on security flavor: */ #define NFSEXP_SECINFO_FLAGS (NFSEXP_READONLY | NFSEXP_ROOTSQUASH \ -- cgit v1.2.3 From 12045a6ee9908b38b6d286530c7d816e39071346 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Tue, 8 Dec 2009 18:15:52 -0500 Subject: nfsd: let "insecure" flag vary by pseudoflavor This was an oversight; it should be among the export flags that can be allowed to vary by pseudoflavor. 
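(Illustrative aside, not part of the patch: a minimal C sketch of what "a flag that varies by pseudoflavor" means in practice. The helper and field names below are assumptions for the sketch, not code quoted from the tree; in the hunk that follows, the real lookup is done by nfsexp_flags().)

	/* Sketch: return the export flags that apply to the security flavor of
	 * this request, falling back to the export's global ex_flags when no
	 * per-flavor entry matches.  Field names are illustrative assumptions. */
	static u32 flags_for_flavor(struct svc_export *exp, u32 pseudoflavor)
	{
		int i;

		for (i = 0; i < exp->ex_nflavors; i++) {
			if (exp->ex_flavors[i].pseudoflavor == pseudoflavor)
				return exp->ex_flavors[i].flags;	/* per-flavor override */
		}
		return exp->ex_flags;	/* no override: global export flags apply */
	}
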
This allows an administrator to (for example) allow auth_sys mounts only from low ports, but allow auth_krb5 mounts to use any port. Signed-off-by: J. Bruce Fields --- fs/nfsd/nfsfh.c | 4 +++- include/linux/nfsd/export.h | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c index 0eb1c59f5ab8..951938d6c495 100644 --- a/fs/nfsd/nfsfh.c +++ b/fs/nfsd/nfsfh.c @@ -88,8 +88,10 @@ nfsd_mode_check(struct svc_rqst *rqstp, umode_t mode, int type) static __be32 nfsd_setuser_and_check_port(struct svc_rqst *rqstp, struct svc_export *exp) { + int flags = nfsexp_flags(rqstp, exp); + /* Check if the request originated from a secure port. */ - if (!rqstp->rq_secure && EX_SECURE(exp)) { + if (!rqstp->rq_secure && (flags & NFSEXP_INSECURE_PORT)) { RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]); dprintk(KERN_WARNING "nfsd: request from insecure port %s!\n", diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h index 4f1df1d7312c..4cafbe1255f0 100644 --- a/include/linux/nfsd/export.h +++ b/include/linux/nfsd/export.h @@ -44,7 +44,8 @@ /* The flags that may vary depending on security flavor: */ #define NFSEXP_SECINFO_FLAGS (NFSEXP_READONLY | NFSEXP_ROOTSQUASH \ - | NFSEXP_ALLSQUASH) + | NFSEXP_ALLSQUASH \ + | NFSEXP_INSECURE_PORT) #ifdef __KERNEL__ @@ -109,7 +110,6 @@ struct svc_expkey { struct path ek_path; }; -#define EX_SECURE(exp) (!((exp)->ex_flags & NFSEXP_INSECURE_PORT)) #define EX_ISSYNC(exp) (!((exp)->ex_flags & NFSEXP_ASYNC)) #define EX_NOHIDE(exp) ((exp)->ex_flags & NFSEXP_NOHIDE) #define EX_WGATHER(exp) ((exp)->ex_flags & NFSEXP_GATHERED_WRITES) -- cgit v1.2.3 From 9e1b9b80721661bd63b3662453767b22cd614fe7 Mon Sep 17 00:00:00 2001 From: Alan Jenkins Date: Sat, 7 Nov 2009 21:03:54 +0000 Subject: module: make MODULE_SYMBOL_PREFIX into a CONFIG option The next commit will require the use of MODULE_SYMBOL_PREFIX in .tmp_exports-asm.S. Currently it is mixed in with C structure definitions in "asm/module.h". Move the definition of this arch option into Kconfig, so it can be easily accessed by any code. This also lets modpost.c use the same definition. Previously modpost relied on a hardcoded list of architectures in mk_elfconfig.c. A build test for blackfin, one of the two MODULE_SYMBOL_PREFIX archs, showed the generated code was unchanged. vmlinux was identical save for build ids, and an apparently randomized suffix on a single "__key" symbol in the kallsyms data). 
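(Illustrative aside, not part of the patch: the include/asm-generic/vmlinux.lds.h hunk below relies on standard two-level token pasting so the Kconfig-supplied prefix is attached at preprocessing time. A minimal stand-alone sketch of the same idiom, assuming SYMBOL_PREFIX is `_` as on blackfin/h8300:)

	/* Sketch of the token-pasting idiom used by VMLINUX_SYMBOL().
	 * In the kernel build, SYMBOL_PREFIX comes from CONFIG_SYMBOL_PREFIX
	 * via -DSYMBOL_PREFIX=...; it is defined here only for illustration. */
	#define SYMBOL_PREFIX _
	#define PASTE2(x, y) x##y
	#define PASTE(x, y)  PASTE2(x, y)	/* extra level expands SYMBOL_PREFIX first */
	#define VMLINUX_SYMBOL(sym) PASTE(SYMBOL_PREFIX, sym)

	extern int VMLINUX_SYMBOL(some_symbol);	/* expands to: extern int _some_symbol; */
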
Signed-off-by: Alan Jenkins Acked-by: Mike Frysinger (blackfin) CC: Sam Ravnborg Signed-off-by: Rusty Russell --- arch/blackfin/Kconfig | 4 ++++ arch/blackfin/include/asm/module.h | 2 -- arch/blackfin/kernel/vmlinux.lds.S | 2 -- arch/h8300/Kconfig | 4 ++++ arch/h8300/include/asm/module.h | 2 -- arch/h8300/kernel/vmlinux.lds.S | 1 - include/asm-generic/vmlinux.lds.h | 8 ++++++-- include/linux/module.h | 6 ++++-- scripts/Makefile.lib | 5 +++++ scripts/mod/Makefile | 2 +- scripts/mod/mk_elfconfig.c | 9 --------- scripts/mod/modpost.c | 9 +++++++++ 12 files changed, 33 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index ae6a60f10120..2180433213b7 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig @@ -5,6 +5,10 @@ mainmenu "Blackfin Kernel Configuration" +config SYMBOL_PREFIX + string + default "_" + config MMU def_bool n diff --git a/arch/blackfin/include/asm/module.h b/arch/blackfin/include/asm/module.h index 9c1cfffddd9b..4282b169ead9 100644 --- a/arch/blackfin/include/asm/module.h +++ b/arch/blackfin/include/asm/module.h @@ -7,8 +7,6 @@ #ifndef _ASM_BFIN_MODULE_H #define _ASM_BFIN_MODULE_H -#define MODULE_SYMBOL_PREFIX "_" - #define Elf_Shdr Elf32_Shdr #define Elf_Sym Elf32_Sym #define Elf_Ehdr Elf32_Ehdr diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S index 10e12539000e..f39707c6590d 100644 --- a/arch/blackfin/kernel/vmlinux.lds.S +++ b/arch/blackfin/kernel/vmlinux.lds.S @@ -4,8 +4,6 @@ * Licensed under the GPL-2 or later */ -#define VMLINUX_SYMBOL(_sym_) _##_sym_ - #include #include #include diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig index 9420648352b8..53cc669e6d59 100644 --- a/arch/h8300/Kconfig +++ b/arch/h8300/Kconfig @@ -10,6 +10,10 @@ config H8300 default y select HAVE_IDE +config SYMBOL_PREFIX + string + default "_" + config MMU bool default n diff --git a/arch/h8300/include/asm/module.h b/arch/h8300/include/asm/module.h index de23231f3196..8e46724b7c09 100644 --- a/arch/h8300/include/asm/module.h +++ b/arch/h8300/include/asm/module.h @@ -8,6 +8,4 @@ struct mod_arch_specific { }; #define Elf_Sym Elf32_Sym #define Elf_Ehdr Elf32_Ehdr -#define MODULE_SYMBOL_PREFIX "_" - #endif /* _ASM_H8/300_MODULE_H */ diff --git a/arch/h8300/kernel/vmlinux.lds.S b/arch/h8300/kernel/vmlinux.lds.S index b9e24907e6ea..03d356d96e5d 100644 --- a/arch/h8300/kernel/vmlinux.lds.S +++ b/arch/h8300/kernel/vmlinux.lds.S @@ -1,4 +1,3 @@ -#define VMLINUX_SYMBOL(_sym_) _##_sym_ #include #include diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index b6e818f4b247..67e652068e0e 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -52,8 +52,12 @@ #define LOAD_OFFSET 0 #endif -#ifndef VMLINUX_SYMBOL -#define VMLINUX_SYMBOL(_sym_) _sym_ +#ifndef SYMBOL_PREFIX +#define VMLINUX_SYMBOL(sym) sym +#else +#define PASTE2(x,y) x##y +#define PASTE(x,y) PASTE2(x,y) +#define VMLINUX_SYMBOL(sym) PASTE(SYMBOL_PREFIX, sym) #endif /* Align . to a 8 byte boundary equals to maximum function alignment. */ diff --git a/include/linux/module.h b/include/linux/module.h index 482efc865acf..6cb1a3cab5d3 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -25,8 +25,10 @@ /* Not Yet Implemented */ #define MODULE_SUPPORTED_DEVICE(name) -/* some toolchains uses a `_' prefix for all user symbols */ -#ifndef MODULE_SYMBOL_PREFIX +/* Some toolchains use a `_' prefix for all user symbols. 
*/ +#ifdef CONFIG_SYMBOL_PREFIX +#define MODULE_SYMBOL_PREFIX CONFIG_SYMBOL_PREFIX +#else #define MODULE_SYMBOL_PREFIX "" #endif diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index ffdafb26f539..224d85e72ef1 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -127,6 +127,11 @@ _c_flags += $(if $(patsubst n%,, \ $(CFLAGS_GCOV)) endif +ifdef CONFIG_SYMBOL_PREFIX +_cpp_flags += -DSYMBOL_PREFIX=$(patsubst "%",%,$(CONFIG_SYMBOL_PREFIX)) +endif + + # If building the kernel in a separate objtree expand all occurrences # of -Idir to -I$(srctree)/dir except for absolute paths (starting with '/'). diff --git a/scripts/mod/Makefile b/scripts/mod/Makefile index 11d69c35e5b4..ff954f8168c1 100644 --- a/scripts/mod/Makefile +++ b/scripts/mod/Makefile @@ -8,7 +8,7 @@ modpost-objs := modpost.o file2alias.o sumversion.o $(obj)/modpost.o $(obj)/file2alias.o $(obj)/sumversion.o: $(obj)/elfconfig.h quiet_cmd_elfconfig = MKELF $@ - cmd_elfconfig = $(obj)/mk_elfconfig $(ARCH) < $< > $@ + cmd_elfconfig = $(obj)/mk_elfconfig < $< > $@ $(obj)/elfconfig.h: $(obj)/empty.o $(obj)/mk_elfconfig FORCE $(call if_changed,elfconfig) diff --git a/scripts/mod/mk_elfconfig.c b/scripts/mod/mk_elfconfig.c index 6a96d47bd1e6..639bca7ba559 100644 --- a/scripts/mod/mk_elfconfig.c +++ b/scripts/mod/mk_elfconfig.c @@ -9,9 +9,6 @@ main(int argc, char **argv) unsigned char ei[EI_NIDENT]; union { short s; char c[2]; } endian_test; - if (argc != 2) { - fprintf(stderr, "Error: no arch\n"); - } if (fread(ei, 1, EI_NIDENT, stdin) != EI_NIDENT) { fprintf(stderr, "Error: input truncated\n"); return 1; @@ -55,12 +52,6 @@ main(int argc, char **argv) else exit(1); - if ((strcmp(argv[1], "h8300") == 0) - || (strcmp(argv[1], "blackfin") == 0)) - printf("#define MODULE_SYMBOL_PREFIX \"_\"\n"); - else - printf("#define MODULE_SYMBOL_PREFIX \"\"\n"); - return 0; } diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 801a16a17545..fb0f9b711af3 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -15,8 +15,17 @@ #include #include #include "modpost.h" +#include "../../include/linux/autoconf.h" #include "../../include/linux/license.h" +/* Some toolchains use a `_' prefix for all user symbols. */ +#ifdef CONFIG_SYMBOL_PREFIX +#define MODULE_SYMBOL_PREFIX CONFIG_SYMBOL_PREFIX +#else +#define MODULE_SYMBOL_PREFIX "" +#endif + + /* Are we using CONFIG_MODVERSIONS? */ int modversions = 0; /* Warn about undefined symbols? (do so if we have vmlinux) */ -- cgit v1.2.3 From e36c54582c6f14adc9e10473e2aec2cc4f0acc03 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 14 Dec 2009 15:58:33 -0500 Subject: tracing: Fix return of trace_dump_stack() The trace_dump_stack() returned a value for a void function. Also, added the missing stub for trace_dump_stack() when tracing is not configured. Reported-by: Ingo Molnar LKML-Reference: <20091214162713.GA31060@elte.hu> Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- include/linux/kernel.h | 1 + kernel/trace/trace.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 5ad4199fb073..f1dc752da0d2 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -527,6 +527,7 @@ trace_printk(const char *fmt, ...) 
__attribute__ ((format (printf, 1, 2))); static inline void tracing_start(void) { } static inline void tracing_stop(void) { } static inline void ftrace_off_permanent(void) { } +static inline void trace_dump_stack(void) { } static inline int trace_printk(const char *fmt, ...) { diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index bd7b969a729a..ee61915935d5 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1158,7 +1158,7 @@ void trace_dump_stack(void) unsigned long flags; if (tracing_disabled || tracing_selftest_running) - return 0; + return; local_save_flags(flags); -- cgit v1.2.3 From 0b5ccb2ee250136dd7385b1c7da28417d0d4d32d Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Tue, 15 Dec 2009 16:59:18 +0100 Subject: ipv6: reassembly: use seperate reassembly queues for conntrack and local delivery Currently the same reassembly queue might be used for packets reassembled by conntrack in different positions in the stack (PREROUTING/LOCAL_OUT), as well as local delivery. This can cause "packet jumps" when the fragment completing a reassembled packet is queued from a different position in the stack than the previous ones. Add a "user" identifier to the reassembly queue key to seperate the queues of each caller, similar to what we do for IPv4. Signed-off-by: Patrick McHardy --- include/net/ipv6.h | 7 +++++++ include/net/netfilter/ipv6/nf_conntrack_ipv6.h | 2 +- net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 13 +++++++++++-- net/ipv6/netfilter/nf_conntrack_reasm.c | 7 ++++--- net/ipv6/reassembly.c | 5 ++++- 5 files changed, 27 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 92db8617d188..d6916035bcea 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -350,8 +350,15 @@ static inline int ipv6_prefix_equal(const struct in6_addr *a1, struct inet_frag_queue; +enum ip6_defrag_users { + IP6_DEFRAG_LOCAL_DELIVER, + IP6_DEFRAG_CONNTRACK_IN, + IP6_DEFRAG_CONNTRACK_OUT, +}; + struct ip6_create_arg { __be32 id; + u32 user; struct in6_addr *src; struct in6_addr *dst; }; diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h index abc55ad75c2b..1ee717eb5b09 100644 --- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h +++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h @@ -9,7 +9,7 @@ extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6; extern int nf_ct_frag6_init(void); extern void nf_ct_frag6_cleanup(void); -extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb); +extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user); extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb, struct net_device *in, struct net_device *out, diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c index 5f2ec208a8c3..c0a82fe78321 100644 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c @@ -187,6 +187,16 @@ out: return nf_conntrack_confirm(skb); } +static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum, + struct sk_buff *skb) +{ + if (hooknum == NF_INET_PRE_ROUTING) + return IP6_DEFRAG_CONNTRACK_IN; + else + return IP6_DEFRAG_CONNTRACK_OUT; + +} + static unsigned int ipv6_defrag(unsigned int hooknum, struct sk_buff *skb, const struct net_device *in, @@ -199,8 +209,7 @@ static unsigned int ipv6_defrag(unsigned int hooknum, if (skb->nfct) return NF_ACCEPT; - reasm = nf_ct_frag6_gather(skb); - + reasm = 
nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb)); /* queued */ if (reasm == NULL) return NF_STOLEN; diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index e0b9424fa1b2..312c20adc83f 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -168,13 +168,14 @@ out: /* Creation primitives. */ static __inline__ struct nf_ct_frag6_queue * -fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst) +fq_find(__be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst) { struct inet_frag_queue *q; struct ip6_create_arg arg; unsigned int hash; arg.id = id; + arg.user = user; arg.src = src; arg.dst = dst; @@ -559,7 +560,7 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff) return 0; } -struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb) +struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user) { struct sk_buff *clone; struct net_device *dev = skb->dev; @@ -605,7 +606,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb) if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh) nf_ct_frag6_evictor(); - fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr); + fq = fq_find(fhdr->identification, user, &hdr->saddr, &hdr->daddr); if (fq == NULL) { pr_debug("Can't find and can't create new queue\n"); goto ret_orig; diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 4d98549a6868..3b3a95607125 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -72,6 +72,7 @@ struct frag_queue struct inet_frag_queue q; __be32 id; /* fragment id */ + u32 user; struct in6_addr saddr; struct in6_addr daddr; @@ -141,7 +142,7 @@ int ip6_frag_match(struct inet_frag_queue *q, void *a) struct ip6_create_arg *arg = a; fq = container_of(q, struct frag_queue, q); - return (fq->id == arg->id && + return (fq->id == arg->id && fq->user == arg->user && ipv6_addr_equal(&fq->saddr, arg->src) && ipv6_addr_equal(&fq->daddr, arg->dst)); } @@ -163,6 +164,7 @@ void ip6_frag_init(struct inet_frag_queue *q, void *a) struct ip6_create_arg *arg = a; fq->id = arg->id; + fq->user = arg->user; ipv6_addr_copy(&fq->saddr, arg->src); ipv6_addr_copy(&fq->daddr, arg->dst); } @@ -243,6 +245,7 @@ fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst, unsigned int hash; arg.id = id; + arg.user = IP6_DEFRAG_LOCAL_DELIVER; arg.src = src; arg.dst = dst; -- cgit v1.2.3 From 8fa9ff6849bb86c59cc2ea9faadf3cb2d5223497 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Tue, 15 Dec 2009 16:59:59 +0100 Subject: netfilter: fix crashes in bridge netfilter caused by fragment jumps When fragments from bridge netfilter are passed to IPv4 or IPv6 conntrack and a reassembly queue with the same fragment key already exists from reassembling a similar packet received on a different device (f.i. with multicasted fragments), the reassembled packet might continue on a different codepath than where the head fragment originated. This can cause crashes in bridge netfilter when a fragment received on a non-bridge device (and thus with skb->nf_bridge == NULL) continues through the bridge netfilter code. Add a new reassembly identifier for packets originating from bridge netfilter and use it to put those packets in insolated queues. 
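The IPv4 half of the fix selects the queue identifier roughly as sketched below (the IPv6 conntrack hook gains the same check with the IP6_* identifiers); this is a condensed form of the nf_defrag_ipv4.c hunk that follows:

	static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
						      struct sk_buff *skb)
	{
	#ifdef CONFIG_BRIDGE_NETFILTER
		/* fragments that entered via bridge netfilter get their own queue */
		if (skb->nf_bridge &&
		    skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
			return IP_DEFRAG_CONNTRACK_BRIDGE_IN;
	#endif
		return hooknum == NF_INET_PRE_ROUTING ? IP_DEFRAG_CONNTRACK_IN
						      : IP_DEFRAG_CONNTRACK_OUT;
	}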
Fixes http://bugzilla.kernel.org/show_bug.cgi?id=14805 Reported-and-Tested-by: Chong Qiao Signed-off-by: Patrick McHardy --- include/net/ip.h | 1 + include/net/ipv6.h | 1 + net/ipv4/netfilter/nf_defrag_ipv4.c | 21 +++++++++++++++++---- net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 6 ++++++ 4 files changed, 25 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/net/ip.h b/include/net/ip.h index e6b9d12d5f62..85108cfbb1ae 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -337,6 +337,7 @@ enum ip_defrag_users { IP_DEFRAG_CALL_RA_CHAIN, IP_DEFRAG_CONNTRACK_IN, IP_DEFRAG_CONNTRACK_OUT, + IP_DEFRAG_CONNTRACK_BRIDGE_IN, IP_DEFRAG_VS_IN, IP_DEFRAG_VS_OUT, IP_DEFRAG_VS_FWD diff --git a/include/net/ipv6.h b/include/net/ipv6.h index d6916035bcea..ccab5946c830 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -354,6 +354,7 @@ enum ip6_defrag_users { IP6_DEFRAG_LOCAL_DELIVER, IP6_DEFRAG_CONNTRACK_IN, IP6_DEFRAG_CONNTRACK_OUT, + IP6_DEFRAG_CONNTRACK_BRIDGE_IN, }; struct ip6_create_arg { diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c index fa2d6b6fc3e5..331ead3ebd1b 100644 --- a/net/ipv4/netfilter/nf_defrag_ipv4.c +++ b/net/ipv4/netfilter/nf_defrag_ipv4.c @@ -14,6 +14,7 @@ #include #include +#include #include #include @@ -34,6 +35,20 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user) return err; } +static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum, + struct sk_buff *skb) +{ +#ifdef CONFIG_BRIDGE_NETFILTER + if (skb->nf_bridge && + skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING) + return IP_DEFRAG_CONNTRACK_BRIDGE_IN; +#endif + if (hooknum == NF_INET_PRE_ROUTING) + return IP_DEFRAG_CONNTRACK_IN; + else + return IP_DEFRAG_CONNTRACK_OUT; +} + static unsigned int ipv4_conntrack_defrag(unsigned int hooknum, struct sk_buff *skb, const struct net_device *in, @@ -50,10 +65,8 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum, #endif /* Gather fragments. */ if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { - if (nf_ct_ipv4_gather_frags(skb, - hooknum == NF_INET_PRE_ROUTING ? - IP_DEFRAG_CONNTRACK_IN : - IP_DEFRAG_CONNTRACK_OUT)) + enum ip_defrag_users user = nf_ct_defrag_user(hooknum, skb); + if (nf_ct_ipv4_gather_frags(skb, user)) return NF_STOLEN; } return NF_ACCEPT; diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c index c0a82fe78321..0956ebabbff2 100644 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c @@ -20,6 +20,7 @@ #include #include +#include #include #include #include @@ -190,6 +191,11 @@ out: static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum, struct sk_buff *skb) { +#ifdef CONFIG_BRIDGE_NETFILTER + if (skb->nf_bridge && + skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING) + return IP6_DEFRAG_CONNTRACK_BRIDGE_IN; +#endif if (hooknum == NF_INET_PRE_ROUTING) return IP6_DEFRAG_CONNTRACK_IN; else -- cgit v1.2.3 From 4e7b8a6cef64a4c1f1194f9926f794c2b75ebdd7 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Mon, 14 Dec 2009 17:58:13 -0800 Subject: nodemask: make NODEMASK_ALLOC more general This is a series of patches to provide control over the location of the allocation and freeing of persistent huge pages on a NUMA platform. Please consider for merging into mmotm. 
This series uses two mechanisms to constrain the nodes from which persistent huge pages are allocated: 1) the task NUMA mempolicy of the task modifying a new sysctl "nr_hugepages_mempolicy", based on a suggestion by Mel Gorman; and 2) a subset of the hugepages hstate sysfs attributes have been added [in V4] to each node system device under: /sys/devices/node/node[0-9]*/hugepages The per node attibutes allow direct assignment of a huge page count on a specific node, regardless of the task's mempolicy or cpuset constraints. This patch: NODEMASK_ALLOC(x, m) assumes x is a type of struct, which is unnecessary. It's perfectly reasonable to use this macro to allocate a nodemask_t, which is anonymous, either dynamically or on the stack depending on NODES_SHIFT. Signed-off-by: David Rientjes Signed-off-by: Lee Schermerhorn Acked-by: KAMEZAWA Hiroyuki Cc: Mel Gorman Cc: Randy Dunlap Cc: Nishanth Aravamudan Cc: Andi Kleen Cc: David Rientjes Cc: Adam Litke Cc: Andy Whitcroft Cc: Eric Whitney Cc: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/nodemask.h | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h index b359c4a9ec9e..ca9b489a27f8 100644 --- a/include/linux/nodemask.h +++ b/include/linux/nodemask.h @@ -481,14 +481,14 @@ static inline int num_node_state(enum node_states state) /* * For nodemask scrach area.(See CPUMASK_ALLOC() in cpumask.h) + * NODEMASK_ALLOC(x, m) allocates an object of type 'x' with the name 'm'. */ - #if NODES_SHIFT > 8 /* nodemask_t > 64 bytes */ -#define NODEMASK_ALLOC(x, m) struct x *m = kmalloc(sizeof(*m), GFP_KERNEL) -#define NODEMASK_FREE(m) kfree(m) +#define NODEMASK_ALLOC(x, m) x *m = kmalloc(sizeof(*m), GFP_KERNEL) +#define NODEMASK_FREE(m) kfree(m) #else -#define NODEMASK_ALLOC(x, m) struct x _m, *m = &_m -#define NODEMASK_FREE(m) +#define NODEMASK_ALLOC(x, m) x _m, *m = &_m +#define NODEMASK_FREE(m) do {} while (0) #endif /* A example struture for using NODEMASK_ALLOC, used in mempolicy. */ @@ -497,8 +497,9 @@ struct nodemask_scratch { nodemask_t mask2; }; -#define NODEMASK_SCRATCH(x) NODEMASK_ALLOC(nodemask_scratch, x) -#define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x) +#define NODEMASK_SCRATCH(x) \ + NODEMASK_ALLOC(struct nodemask_scratch, x) +#define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x) #endif /* __LINUX_NODEMASK_H */ -- cgit v1.2.3 From c1e6c8d074ea3621106548654cc244d2edc12ead Mon Sep 17 00:00:00 2001 From: Lee Schermerhorn Date: Mon, 14 Dec 2009 17:58:17 -0800 Subject: hugetlb: factor init_nodemask_of_node() Factor init_nodemask_of_node() out of the nodemask_of_node() macro. This will be used to populate the huge pages "nodes_allowed" nodemask for a single node when basing nodes_allowed on a preferred/local mempolicy or when a persistent huge page pool page count is modified via a per node sysfs attribute. 
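A short usage sketch of the new helper (nid stands for an arbitrary node id; both forms end up with a mask containing only that node):

	nodemask_t mask;

	init_nodemask_of_node(&mask, nid);	/* explicit-mask form, usable anywhere */
	mask = nodemask_of_node(nid);		/* existing expression-macro form */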
Signed-off-by: Lee Schermerhorn Acked-by: Mel Gorman Reviewed-by: Andi Kleen Cc: KAMEZAWA Hiroyuki Cc: Randy Dunlap Cc: Nishanth Aravamudan Acked-by: David Rientjes Cc: Adam Litke Cc: Andy Whitcroft Cc: Eric Whitney Cc: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/nodemask.h | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h index ca9b489a27f8..cbd521a03127 100644 --- a/include/linux/nodemask.h +++ b/include/linux/nodemask.h @@ -245,14 +245,19 @@ static inline int __next_node(int n, const nodemask_t *srcp) return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1)); } +static inline void init_nodemask_of_node(nodemask_t *mask, int node) +{ + nodes_clear(*mask); + node_set(node, *mask); +} + #define nodemask_of_node(node) \ ({ \ typeof(_unused_nodemask_arg_) m; \ if (sizeof(m) == sizeof(unsigned long)) { \ - m.bits[0] = 1UL<<(node); \ + m.bits[0] = 1UL << (node); \ } else { \ - nodes_clear(m); \ - node_set((node), m); \ + init_nodemask_of_node(&m, (node)); \ } \ m; \ }) -- cgit v1.2.3 From 06808b0827e1cd14eedc96bac2655d5b37ac246c Mon Sep 17 00:00:00 2001 From: Lee Schermerhorn Date: Mon, 14 Dec 2009 17:58:21 -0800 Subject: hugetlb: derive huge pages nodes allowed from task mempolicy This patch derives a "nodes_allowed" node mask from the numa mempolicy of the task modifying the number of persistent huge pages to control the allocation, freeing and adjusting of surplus huge pages when the pool page count is modified via the new sysctl or sysfs attribute "nr_hugepages_mempolicy". The nodes_allowed mask is derived as follows: * For "default" [NULL] task mempolicy, a NULL nodemask_t pointer is produced. This will cause the hugetlb subsystem to use node_online_map as the "nodes_allowed". This preserves the behavior before this patch. * For "preferred" mempolicy, including explicit local allocation, a nodemask with the single preferred node will be produced. "local" policy will NOT track any internode migrations of the task adjusting nr_hugepages. * For "bind" and "interleave" policy, the mempolicy's nodemask will be used. * Other than to inform the construction of the nodes_allowed node mask, the actual mempolicy mode is ignored. That is, all modes behave like interleave over the resulting nodes_allowed mask with no "fallback". See the updated documentation [next patch] for more information about the implications of this patch. Examples: Starting with: Node 0 HugePages_Total: 0 Node 1 HugePages_Total: 0 Node 2 HugePages_Total: 0 Node 3 HugePages_Total: 0 Default behavior [with or without this patch] balances persistent hugepage allocation across nodes [with sufficient contiguous memory]: sysctl vm.nr_hugepages[_mempolicy]=32 yields: Node 0 HugePages_Total: 8 Node 1 HugePages_Total: 8 Node 2 HugePages_Total: 8 Node 3 HugePages_Total: 8 Of course, we only have nr_hugepages_mempolicy with the patch, but with default mempolicy, nr_hugepages_mempolicy behaves the same as nr_hugepages. Applying mempolicy--e.g., with numactl [using '-m' a.k.a. '--membind' because it allows multiple nodes to be specified and it's easy to type]--we can allocate huge pages on individual nodes or sets of nodes. 
So, starting from the condition above, with 8 huge pages per node, add 8 more to node 2 using: numactl -m 2 sysctl vm.nr_hugepages_mempolicy=40 This yields: Node 0 HugePages_Total: 8 Node 1 HugePages_Total: 8 Node 2 HugePages_Total: 16 Node 3 HugePages_Total: 8 The incremental 8 huge pages were restricted to node 2 by the specified mempolicy. Similarly, we can use mempolicy to free persistent huge pages from specified nodes: numactl -m 0,1 sysctl vm.nr_hugepages_mempolicy=32 yields: Node 0 HugePages_Total: 4 Node 1 HugePages_Total: 4 Node 2 HugePages_Total: 16 Node 3 HugePages_Total: 8 The 8 huge pages freed were balanced over nodes 0 and 1. [rientjes@google.com: accomodate reworked NODEMASK_ALLOC] Signed-off-by: David Rientjes Signed-off-by: Lee Schermerhorn Acked-by: Mel Gorman Reviewed-by: Andi Kleen Cc: KAMEZAWA Hiroyuki Cc: Randy Dunlap Cc: Nishanth Aravamudan Cc: Adam Litke Cc: Andy Whitcroft Cc: Eric Whitney Cc: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 6 +++ include/linux/mempolicy.h | 3 ++ kernel/sysctl.c | 15 +++++++- mm/hugetlb.c | 97 ++++++++++++++++++++++++++++++++++++++++------- mm/mempolicy.c | 47 +++++++++++++++++++++++ 5 files changed, 153 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 41a59afc70fa..78b4bc64c006 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -23,6 +23,12 @@ void reset_vma_resv_huge_pages(struct vm_area_struct *vma); int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); + +#ifdef CONFIG_NUMA +int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +#endif + int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *); int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 085c903fe0f1..1cc966cd3e5f 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -201,6 +201,7 @@ extern void mpol_fix_fork_child_flag(struct task_struct *p); extern struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol, nodemask_t **nodemask); +extern bool init_nodemask_of_mempolicy(nodemask_t *mask); extern unsigned slab_node(struct mempolicy *policy); extern enum zone_type policy_zone; @@ -328,6 +329,8 @@ static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma, return node_zonelist(0, gfp_flags); } +static inline bool init_nodemask_of_mempolicy(nodemask_t *m) { return false; } + static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags) diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 554ac4894f0f..60fc93131095 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -1051,7 +1051,7 @@ static struct ctl_table vm_table[] = { .extra2 = &one_hundred, }, #ifdef CONFIG_HUGETLB_PAGE - { + { .procname = "nr_hugepages", .data = NULL, .maxlen = sizeof(unsigned long), @@ -1059,7 +1059,18 @@ static struct ctl_table vm_table[] = { .proc_handler = hugetlb_sysctl_handler, .extra1 = (void *)&hugetlb_zero, .extra2 = (void *)&hugetlb_infinity, - }, + }, 
+#ifdef CONFIG_NUMA + { + .procname = "nr_hugepages_mempolicy", + .data = NULL, + .maxlen = sizeof(unsigned long), + .mode = 0644, + .proc_handler = &hugetlb_mempolicy_sysctl_handler, + .extra1 = (void *)&hugetlb_zero, + .extra2 = (void *)&hugetlb_infinity, + }, +#endif { .procname = "hugetlb_shm_group", .data = &sysctl_hugetlb_shm_group, diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 324d1abae876..1125d818ea06 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1330,29 +1330,71 @@ static struct hstate *kobj_to_hstate(struct kobject *kobj) return NULL; } -static ssize_t nr_hugepages_show(struct kobject *kobj, +static ssize_t nr_hugepages_show_common(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct hstate *h = kobj_to_hstate(kobj); return sprintf(buf, "%lu\n", h->nr_huge_pages); } -static ssize_t nr_hugepages_store(struct kobject *kobj, - struct kobj_attribute *attr, const char *buf, size_t count) +static ssize_t nr_hugepages_store_common(bool obey_mempolicy, + struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t len) { int err; - unsigned long input; + unsigned long count; struct hstate *h = kobj_to_hstate(kobj); + NODEMASK_ALLOC(nodemask_t, nodes_allowed); - err = strict_strtoul(buf, 10, &input); + err = strict_strtoul(buf, 10, &count); if (err) return 0; - h->max_huge_pages = set_max_huge_pages(h, input, &node_online_map); + if (!(obey_mempolicy && init_nodemask_of_mempolicy(nodes_allowed))) { + NODEMASK_FREE(nodes_allowed); + nodes_allowed = &node_online_map; + } + h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed); - return count; + if (nodes_allowed != &node_online_map) + NODEMASK_FREE(nodes_allowed); + + return len; +} + +static ssize_t nr_hugepages_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return nr_hugepages_show_common(kobj, attr, buf); +} + +static ssize_t nr_hugepages_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t len) +{ + return nr_hugepages_store_common(false, kobj, attr, buf, len); } HSTATE_ATTR(nr_hugepages); +#ifdef CONFIG_NUMA + +/* + * hstate attribute for optionally mempolicy-based constraint on persistent + * huge page alloc/free. 
+ */ +static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return nr_hugepages_show_common(kobj, attr, buf); +} + +static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t len) +{ + return nr_hugepages_store_common(true, kobj, attr, buf, len); +} +HSTATE_ATTR(nr_hugepages_mempolicy); +#endif + + static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { @@ -1408,6 +1450,9 @@ static struct attribute *hstate_attrs[] = { &free_hugepages_attr.attr, &resv_hugepages_attr.attr, &surplus_hugepages_attr.attr, +#ifdef CONFIG_NUMA + &nr_hugepages_mempolicy_attr.attr, +#endif NULL, }; @@ -1574,9 +1619,9 @@ static unsigned int cpuset_mems_nr(unsigned int *array) } #ifdef CONFIG_SYSCTL -int hugetlb_sysctl_handler(struct ctl_table *table, int write, - void __user *buffer, - size_t *length, loff_t *ppos) +static int hugetlb_sysctl_handler_common(bool obey_mempolicy, + struct ctl_table *table, int write, + void __user *buffer, size_t *length, loff_t *ppos) { struct hstate *h = &default_hstate; unsigned long tmp; @@ -1588,13 +1633,39 @@ int hugetlb_sysctl_handler(struct ctl_table *table, int write, table->maxlen = sizeof(unsigned long); proc_doulongvec_minmax(table, write, buffer, length, ppos); - if (write) - h->max_huge_pages = set_max_huge_pages(h, tmp, - &node_online_map); + if (write) { + NODEMASK_ALLOC(nodemask_t, nodes_allowed); + if (!(obey_mempolicy && + init_nodemask_of_mempolicy(nodes_allowed))) { + NODEMASK_FREE(nodes_allowed); + nodes_allowed = &node_states[N_HIGH_MEMORY]; + } + h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed); + + if (nodes_allowed != &node_states[N_HIGH_MEMORY]) + NODEMASK_FREE(nodes_allowed); + } return 0; } +int hugetlb_sysctl_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *length, loff_t *ppos) +{ + + return hugetlb_sysctl_handler_common(false, table, write, + buffer, length, ppos); +} + +#ifdef CONFIG_NUMA +int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *length, loff_t *ppos) +{ + return hugetlb_sysctl_handler_common(true, table, write, + buffer, length, ppos); +} +#endif /* CONFIG_NUMA */ + int hugetlb_treat_movable_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 0f89eabbaf3e..f11fdad06204 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1568,6 +1568,53 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr, } return zl; } + +/* + * init_nodemask_of_mempolicy + * + * If the current task's mempolicy is "default" [NULL], return 'false' + * to indicate default policy. Otherwise, extract the policy nodemask + * for 'bind' or 'interleave' policy into the argument nodemask, or + * initialize the argument nodemask to contain the single node for + * 'preferred' or 'local' policy and return 'true' to indicate presence + * of non-default mempolicy. + * + * We don't bother with reference counting the mempolicy [mpol_get/put] + * because the current task is examining it's own mempolicy and a task's + * mempolicy is only ever changed by the task itself. + * + * N.B., it is the caller's responsibility to free a returned nodemask. 
+ */ +bool init_nodemask_of_mempolicy(nodemask_t *mask) +{ + struct mempolicy *mempolicy; + int nid; + + if (!(mask && current->mempolicy)) + return false; + + mempolicy = current->mempolicy; + switch (mempolicy->mode) { + case MPOL_PREFERRED: + if (mempolicy->flags & MPOL_F_LOCAL) + nid = numa_node_id(); + else + nid = mempolicy->v.preferred_node; + init_nodemask_of_node(mask, nid); + break; + + case MPOL_BIND: + /* Fall through */ + case MPOL_INTERLEAVE: + *mask = mempolicy->v.nodes; + break; + + default: + BUG(); + } + + return true; +} #endif /* Allocate a page in interleaved policy. -- cgit v1.2.3 From 4e25b2576efda24c02e2d6b9bcb5965a3f865f33 Mon Sep 17 00:00:00 2001 From: Lee Schermerhorn Date: Mon, 14 Dec 2009 17:58:23 -0800 Subject: hugetlb: add generic definition of NUMA_NO_NODE Move definition of NUMA_NO_NODE from ia64 and x86_64 arch specific headers to generic header 'linux/numa.h' for use in generic code. NUMA_NO_NODE replaces bare '-1' where it's used in this series to indicate "no node id specified". Ultimately, it can be used to replace the -1 elsewhere where it is used similarly. Signed-off-by: Lee Schermerhorn Acked-by: David Rientjes Acked-by: Mel Gorman Reviewed-by: Andi Kleen Cc: KAMEZAWA Hiroyuki Cc: Randy Dunlap Cc: Nishanth Aravamudan Cc: Adam Litke Cc: Andy Whitcroft Cc: Eric Whitney Cc: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/include/asm/numa.h | 2 -- arch/x86/include/asm/topology.h | 9 +++++++-- include/linux/numa.h | 2 ++ 3 files changed, 9 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/arch/ia64/include/asm/numa.h b/arch/ia64/include/asm/numa.h index 3499ff57bf42..6a8a27cfae3e 100644 --- a/arch/ia64/include/asm/numa.h +++ b/arch/ia64/include/asm/numa.h @@ -22,8 +22,6 @@ #include -#define NUMA_NO_NODE -1 - extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned; extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned; extern pg_data_t *pgdat_list[MAX_NUMNODES]; diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index 40e37b10c6c0..c5087d796587 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h @@ -35,11 +35,16 @@ # endif #endif -/* Node not present */ -#define NUMA_NO_NODE (-1) +/* + * to preserve the visibility of NUMA_NO_NODE definition, + * moved to there from here. May be used independent of + * CONFIG_NUMA. + */ +#include #ifdef CONFIG_NUMA #include + #include #ifdef CONFIG_X86_32 diff --git a/include/linux/numa.h b/include/linux/numa.h index a31a7301b159..3aaa31603a86 100644 --- a/include/linux/numa.h +++ b/include/linux/numa.h @@ -10,4 +10,6 @@ #define MAX_NUMNODES (1 << NODES_SHIFT) +#define NUMA_NO_NODE (-1) + #endif /* _LINUX_NUMA_H */ -- cgit v1.2.3 From 9a30523066cde73c1442b76224bb540de9f9b0b0 Mon Sep 17 00:00:00 2001 From: Lee Schermerhorn Date: Mon, 14 Dec 2009 17:58:25 -0800 Subject: hugetlb: add per node hstate attributes Add the per huge page size control/query attributes to the per node sysdevs: /sys/devices/system/node/node/hugepages/hugepages-/ nr_hugepages - r/w free_huge_pages - r/o surplus_huge_pages - r/o The patch attempts to re-use/share as much of the existing global hstate attribute initialization and handling, and the "nodes_allowed" constraint processing as possible. Calling set_max_huge_pages() with no node indicates a change to global hstate parameters. In this case, any non-default task mempolicy will be used to generate the nodes_allowed mask. 
A valid node id indicates an update to that node's hstate parameters, and the count argument specifies the target count for the specified node. From this info, we compute the target global count for the hstate and construct a nodes_allowed node mask contain only the specified node. Setting the node specific nr_hugepages via the per node attribute effectively ignores any task mempolicy or cpuset constraints. With this patch: (me):ls /sys/devices/system/node/node0/hugepages/hugepages-2048kB ./ ../ free_hugepages nr_hugepages surplus_hugepages Starting from: Node 0 HugePages_Total: 0 Node 0 HugePages_Free: 0 Node 0 HugePages_Surp: 0 Node 1 HugePages_Total: 0 Node 1 HugePages_Free: 0 Node 1 HugePages_Surp: 0 Node 2 HugePages_Total: 0 Node 2 HugePages_Free: 0 Node 2 HugePages_Surp: 0 Node 3 HugePages_Total: 0 Node 3 HugePages_Free: 0 Node 3 HugePages_Surp: 0 vm.nr_hugepages = 0 Allocate 16 persistent huge pages on node 2: (me):echo 16 >/sys/devices/system/node/node2/hugepages/hugepages-2048kB/nr_hugepages [Note that this is equivalent to: numactl -m 2 hugeadmin --pool-pages-min 2M:+16 ] Yields: Node 0 HugePages_Total: 0 Node 0 HugePages_Free: 0 Node 0 HugePages_Surp: 0 Node 1 HugePages_Total: 0 Node 1 HugePages_Free: 0 Node 1 HugePages_Surp: 0 Node 2 HugePages_Total: 16 Node 2 HugePages_Free: 16 Node 2 HugePages_Surp: 0 Node 3 HugePages_Total: 0 Node 3 HugePages_Free: 0 Node 3 HugePages_Surp: 0 vm.nr_hugepages = 16 Global controls work as expected--reduce pool to 8 persistent huge pages: (me):echo 8 >/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages Node 0 HugePages_Total: 0 Node 0 HugePages_Free: 0 Node 0 HugePages_Surp: 0 Node 1 HugePages_Total: 0 Node 1 HugePages_Free: 0 Node 1 HugePages_Surp: 0 Node 2 HugePages_Total: 8 Node 2 HugePages_Free: 8 Node 2 HugePages_Surp: 0 Node 3 HugePages_Total: 0 Node 3 HugePages_Free: 0 Node 3 HugePages_Surp: 0 Signed-off-by: Lee Schermerhorn Acked-by: Mel Gorman Reviewed-by: Andi Kleen Cc: KAMEZAWA Hiroyuki Cc: Randy Dunlap Cc: Nishanth Aravamudan Cc: David Rientjes Cc: Adam Litke Cc: Andy Whitcroft Cc: Eric Whitney Cc: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/base/node.c | 39 ++++++++ include/linux/node.h | 11 +++ mm/hugetlb.c | 274 ++++++++++++++++++++++++++++++++++++++++++++++----- 3 files changed, 298 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/drivers/base/node.c b/drivers/base/node.c index 1fe5536d404f..f502711d28db 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -173,6 +173,43 @@ static ssize_t node_read_distance(struct sys_device * dev, } static SYSDEV_ATTR(distance, S_IRUGO, node_read_distance, NULL); +#ifdef CONFIG_HUGETLBFS +/* + * hugetlbfs per node attributes registration interface: + * When/if hugetlb[fs] subsystem initializes [sometime after this module], + * it will register its per node attributes for all nodes online at that + * time. It will also call register_hugetlbfs_with_node(), below, to + * register its attribute registration functions with this node driver. + * Once these hooks have been initialized, the node driver will call into + * the hugetlb module to [un]register attributes for hot-plugged nodes. 
+ */ +static node_registration_func_t __hugetlb_register_node; +static node_registration_func_t __hugetlb_unregister_node; + +static inline void hugetlb_register_node(struct node *node) +{ + if (__hugetlb_register_node) + __hugetlb_register_node(node); +} + +static inline void hugetlb_unregister_node(struct node *node) +{ + if (__hugetlb_unregister_node) + __hugetlb_unregister_node(node); +} + +void register_hugetlbfs_with_node(node_registration_func_t doregister, + node_registration_func_t unregister) +{ + __hugetlb_register_node = doregister; + __hugetlb_unregister_node = unregister; +} +#else +static inline void hugetlb_register_node(struct node *node) {} + +static inline void hugetlb_unregister_node(struct node *node) {} +#endif + /* * register_node - Setup a sysfs device for a node. @@ -196,6 +233,7 @@ int register_node(struct node *node, int num, struct node *parent) sysdev_create_file(&node->sysdev, &attr_distance); scan_unevictable_register_node(node); + hugetlb_register_node(node); } return error; } @@ -216,6 +254,7 @@ void unregister_node(struct node *node) sysdev_remove_file(&node->sysdev, &attr_distance); scan_unevictable_unregister_node(node); + hugetlb_unregister_node(node); sysdev_unregister(&node->sysdev); } diff --git a/include/linux/node.h b/include/linux/node.h index 681a697b9a86..dae1521e1f05 100644 --- a/include/linux/node.h +++ b/include/linux/node.h @@ -28,6 +28,7 @@ struct node { struct memory_block; extern struct node node_devices[]; +typedef void (*node_registration_func_t)(struct node *); extern int register_node(struct node *, int, struct node *); extern void unregister_node(struct node *node); @@ -39,6 +40,11 @@ extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid); extern int register_mem_sect_under_node(struct memory_block *mem_blk, int nid); extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk); + +#ifdef CONFIG_HUGETLBFS +extern void register_hugetlbfs_with_node(node_registration_func_t doregister, + node_registration_func_t unregister); +#endif #else static inline int register_one_node(int nid) { @@ -65,6 +71,11 @@ static inline int unregister_mem_sect_under_nodes(struct memory_block *mem_blk) { return 0; } + +static inline void register_hugetlbfs_with_node(node_registration_func_t reg, + node_registration_func_t unreg) +{ +} #endif #define to_node(sys_device) container_of(sys_device, struct node, sysdev) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 1125d818ea06..544f7bcb615e 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -24,6 +24,7 @@ #include #include +#include #include "internal.h" const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL; @@ -1320,39 +1321,71 @@ out: static struct kobject *hugepages_kobj; static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; -static struct hstate *kobj_to_hstate(struct kobject *kobj) +static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp); + +static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp) { int i; + for (i = 0; i < HUGE_MAX_HSTATE; i++) - if (hstate_kobjs[i] == kobj) + if (hstate_kobjs[i] == kobj) { + if (nidp) + *nidp = NUMA_NO_NODE; return &hstates[i]; - BUG(); - return NULL; + } + + return kobj_to_node_hstate(kobj, nidp); } static ssize_t nr_hugepages_show_common(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - struct hstate *h = kobj_to_hstate(kobj); - return sprintf(buf, "%lu\n", h->nr_huge_pages); + struct hstate *h; + unsigned long nr_huge_pages; + int nid; + + h = kobj_to_hstate(kobj, &nid); + if (nid == 
NUMA_NO_NODE) + nr_huge_pages = h->nr_huge_pages; + else + nr_huge_pages = h->nr_huge_pages_node[nid]; + + return sprintf(buf, "%lu\n", nr_huge_pages); } static ssize_t nr_hugepages_store_common(bool obey_mempolicy, struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t len) { int err; + int nid; unsigned long count; - struct hstate *h = kobj_to_hstate(kobj); + struct hstate *h; NODEMASK_ALLOC(nodemask_t, nodes_allowed); err = strict_strtoul(buf, 10, &count); if (err) return 0; - if (!(obey_mempolicy && init_nodemask_of_mempolicy(nodes_allowed))) { - NODEMASK_FREE(nodes_allowed); - nodes_allowed = &node_online_map; - } + h = kobj_to_hstate(kobj, &nid); + if (nid == NUMA_NO_NODE) { + /* + * global hstate attribute + */ + if (!(obey_mempolicy && + init_nodemask_of_mempolicy(nodes_allowed))) { + NODEMASK_FREE(nodes_allowed); + nodes_allowed = &node_states[N_HIGH_MEMORY]; + } + } else if (nodes_allowed) { + /* + * per node hstate attribute: adjust count to global, + * but restrict alloc/free to the specified node. + */ + count += h->nr_huge_pages - h->nr_huge_pages_node[nid]; + init_nodemask_of_node(nodes_allowed, nid); + } else + nodes_allowed = &node_states[N_HIGH_MEMORY]; + h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed); if (nodes_allowed != &node_online_map) @@ -1398,7 +1431,7 @@ HSTATE_ATTR(nr_hugepages_mempolicy); static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - struct hstate *h = kobj_to_hstate(kobj); + struct hstate *h = kobj_to_hstate(kobj, NULL); return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages); } static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, @@ -1406,7 +1439,7 @@ static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, { int err; unsigned long input; - struct hstate *h = kobj_to_hstate(kobj); + struct hstate *h = kobj_to_hstate(kobj, NULL); err = strict_strtoul(buf, 10, &input); if (err) @@ -1423,15 +1456,24 @@ HSTATE_ATTR(nr_overcommit_hugepages); static ssize_t free_hugepages_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - struct hstate *h = kobj_to_hstate(kobj); - return sprintf(buf, "%lu\n", h->free_huge_pages); + struct hstate *h; + unsigned long free_huge_pages; + int nid; + + h = kobj_to_hstate(kobj, &nid); + if (nid == NUMA_NO_NODE) + free_huge_pages = h->free_huge_pages; + else + free_huge_pages = h->free_huge_pages_node[nid]; + + return sprintf(buf, "%lu\n", free_huge_pages); } HSTATE_ATTR_RO(free_hugepages); static ssize_t resv_hugepages_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - struct hstate *h = kobj_to_hstate(kobj); + struct hstate *h = kobj_to_hstate(kobj, NULL); return sprintf(buf, "%lu\n", h->resv_huge_pages); } HSTATE_ATTR_RO(resv_hugepages); @@ -1439,8 +1481,17 @@ HSTATE_ATTR_RO(resv_hugepages); static ssize_t surplus_hugepages_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - struct hstate *h = kobj_to_hstate(kobj); - return sprintf(buf, "%lu\n", h->surplus_huge_pages); + struct hstate *h; + unsigned long surplus_huge_pages; + int nid; + + h = kobj_to_hstate(kobj, &nid); + if (nid == NUMA_NO_NODE) + surplus_huge_pages = h->surplus_huge_pages; + else + surplus_huge_pages = h->surplus_huge_pages_node[nid]; + + return sprintf(buf, "%lu\n", surplus_huge_pages); } HSTATE_ATTR_RO(surplus_hugepages); @@ -1460,19 +1511,21 @@ static struct attribute_group hstate_attr_group = { .attrs = hstate_attrs, }; -static int __init hugetlb_sysfs_add_hstate(struct hstate *h) 
+static int __init hugetlb_sysfs_add_hstate(struct hstate *h, + struct kobject *parent, + struct kobject **hstate_kobjs, + struct attribute_group *hstate_attr_group) { int retval; + int hi = h - hstates; - hstate_kobjs[h - hstates] = kobject_create_and_add(h->name, - hugepages_kobj); - if (!hstate_kobjs[h - hstates]) + hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); + if (!hstate_kobjs[hi]) return -ENOMEM; - retval = sysfs_create_group(hstate_kobjs[h - hstates], - &hstate_attr_group); + retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group); if (retval) - kobject_put(hstate_kobjs[h - hstates]); + kobject_put(hstate_kobjs[hi]); return retval; } @@ -1487,17 +1540,184 @@ static void __init hugetlb_sysfs_init(void) return; for_each_hstate(h) { - err = hugetlb_sysfs_add_hstate(h); + err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, + hstate_kobjs, &hstate_attr_group); if (err) printk(KERN_ERR "Hugetlb: Unable to add hstate %s", h->name); } } +#ifdef CONFIG_NUMA + +/* + * node_hstate/s - associate per node hstate attributes, via their kobjects, + * with node sysdevs in node_devices[] using a parallel array. The array + * index of a node sysdev or _hstate == node id. + * This is here to avoid any static dependency of the node sysdev driver, in + * the base kernel, on the hugetlb module. + */ +struct node_hstate { + struct kobject *hugepages_kobj; + struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; +}; +struct node_hstate node_hstates[MAX_NUMNODES]; + +/* + * A subset of global hstate attributes for node sysdevs + */ +static struct attribute *per_node_hstate_attrs[] = { + &nr_hugepages_attr.attr, + &free_hugepages_attr.attr, + &surplus_hugepages_attr.attr, + NULL, +}; + +static struct attribute_group per_node_hstate_attr_group = { + .attrs = per_node_hstate_attrs, +}; + +/* + * kobj_to_node_hstate - lookup global hstate for node sysdev hstate attr kobj. + * Returns node id via non-NULL nidp. + */ +static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) +{ + int nid; + + for (nid = 0; nid < nr_node_ids; nid++) { + struct node_hstate *nhs = &node_hstates[nid]; + int i; + for (i = 0; i < HUGE_MAX_HSTATE; i++) + if (nhs->hstate_kobjs[i] == kobj) { + if (nidp) + *nidp = nid; + return &hstates[i]; + } + } + + BUG(); + return NULL; +} + +/* + * Unregister hstate attributes from a single node sysdev. + * No-op if no hstate attributes attached. + */ +void hugetlb_unregister_node(struct node *node) +{ + struct hstate *h; + struct node_hstate *nhs = &node_hstates[node->sysdev.id]; + + if (!nhs->hugepages_kobj) + return; + + for_each_hstate(h) + if (nhs->hstate_kobjs[h - hstates]) { + kobject_put(nhs->hstate_kobjs[h - hstates]); + nhs->hstate_kobjs[h - hstates] = NULL; + } + + kobject_put(nhs->hugepages_kobj); + nhs->hugepages_kobj = NULL; +} + +/* + * hugetlb module exit: unregister hstate attributes from node sysdevs + * that have them. + */ +static void hugetlb_unregister_all_nodes(void) +{ + int nid; + + /* + * disable node sysdev registrations. + */ + register_hugetlbfs_with_node(NULL, NULL); + + /* + * remove hstate attributes from any nodes that have them. + */ + for (nid = 0; nid < nr_node_ids; nid++) + hugetlb_unregister_node(&node_devices[nid]); +} + +/* + * Register hstate attributes for a single node sysdev. + * No-op if attributes already registered. 
+ */ +void hugetlb_register_node(struct node *node) +{ + struct hstate *h; + struct node_hstate *nhs = &node_hstates[node->sysdev.id]; + int err; + + if (nhs->hugepages_kobj) + return; /* already allocated */ + + nhs->hugepages_kobj = kobject_create_and_add("hugepages", + &node->sysdev.kobj); + if (!nhs->hugepages_kobj) + return; + + for_each_hstate(h) { + err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, + nhs->hstate_kobjs, + &per_node_hstate_attr_group); + if (err) { + printk(KERN_ERR "Hugetlb: Unable to add hstate %s" + " for node %d\n", + h->name, node->sysdev.id); + hugetlb_unregister_node(node); + break; + } + } +} + +/* + * hugetlb init time: register hstate attributes for all registered + * node sysdevs. All on-line nodes should have registered their + * associated sysdev by the time the hugetlb module initializes. + */ +static void hugetlb_register_all_nodes(void) +{ + int nid; + + for (nid = 0; nid < nr_node_ids; nid++) { + struct node *node = &node_devices[nid]; + if (node->sysdev.id == nid) + hugetlb_register_node(node); + } + + /* + * Let the node sysdev driver know we're here so it can + * [un]register hstate attributes on node hotplug. + */ + register_hugetlbfs_with_node(hugetlb_register_node, + hugetlb_unregister_node); +} +#else /* !CONFIG_NUMA */ + +static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) +{ + BUG(); + if (nidp) + *nidp = -1; + return NULL; +} + +static void hugetlb_unregister_all_nodes(void) { } + +static void hugetlb_register_all_nodes(void) { } + +#endif + static void __exit hugetlb_exit(void) { struct hstate *h; + hugetlb_unregister_all_nodes(); + for_each_hstate(h) { kobject_put(hstate_kobjs[h - hstates]); } @@ -1532,6 +1752,8 @@ static int __init hugetlb_init(void) hugetlb_sysfs_init(); + hugetlb_register_all_nodes(); + return 0; } module_init(hugetlb_init); -- cgit v1.2.3 From 8fe23e057172223fe2048768a4d87ab7de7477bc Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Mon, 14 Dec 2009 17:58:33 -0800 Subject: mm: clear node in N_HIGH_MEMORY and stop kswapd when all memory is offlined When memory is hot-removed, its node must be cleared in N_HIGH_MEMORY if there are no present pages left. In such a situation, kswapd must also be stopped since it has nothing left to do. Signed-off-by: David Rientjes Signed-off-by: Lee Schermerhorn Cc: Christoph Lameter Cc: Yasunori Goto Cc: Mel Gorman Cc: Rafael J. 
Wysocki Cc: Rik van Riel Cc: KAMEZAWA Hiroyuki Cc: Lee Schermerhorn Cc: Mel Gorman Cc: Randy Dunlap Cc: Nishanth Aravamudan Cc: Andi Kleen Cc: David Rientjes Cc: Adam Litke Cc: Andy Whitcroft Cc: Eric Whitney Cc: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/swap.h | 1 + mm/memory_hotplug.c | 4 ++++ mm/vmscan.c | 28 ++++++++++++++++++++++------ 3 files changed, 27 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/linux/swap.h b/include/linux/swap.h index 4ec90019c1a4..abce8a0b2507 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -273,6 +273,7 @@ extern int scan_unevictable_register_node(struct node *node); extern void scan_unevictable_unregister_node(struct node *node); extern int kswapd_run(int nid); +extern void kswapd_stop(int nid); #ifdef CONFIG_MMU /* linux/mm/shmem.c */ diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index e8116f8bdffa..bc5a08138f1e 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -853,6 +853,10 @@ repeat: setup_per_zone_wmarks(); calculate_zone_inactive_ratio(zone); + if (!node_present_pages(node)) { + node_clear_state(node, N_HIGH_MEMORY); + kswapd_stop(node); + } vm_total_pages = nr_free_pagecache_pages(); writeback_set_ratelimit(); diff --git a/mm/vmscan.c b/mm/vmscan.c index 777af57fd8c8..d0a631a428a0 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2173,6 +2173,7 @@ static int kswapd(void *p) order = 0; for ( ; ; ) { unsigned long new_order; + int ret; prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); new_order = pgdat->kswapd_max_order; @@ -2184,19 +2185,23 @@ static int kswapd(void *p) */ order = new_order; } else { - if (!freezing(current)) + if (!freezing(current) && !kthread_should_stop()) schedule(); order = pgdat->kswapd_max_order; } finish_wait(&pgdat->kswapd_wait, &wait); - if (!try_to_freeze()) { - /* We can speed up thawing tasks if we don't call - * balance_pgdat after returning from the refrigerator - */ + ret = try_to_freeze(); + if (kthread_should_stop()) + break; + + /* + * We can speed up thawing tasks if we don't call balance_pgdat + * after returning from the refrigerator + */ + if (!ret) balance_pgdat(pgdat, order); - } } return 0; } @@ -2451,6 +2456,17 @@ int kswapd_run(int nid) return ret; } +/* + * Called by memory hotplug when all memory in a node is offlined. + */ +void kswapd_stop(int nid) +{ + struct task_struct *kswapd = NODE_DATA(nid)->kswapd; + + if (kswapd) + kthread_stop(kswapd); +} + static int __init kswapd_init(void) { int nid; -- cgit v1.2.3 From 39da08cb074cf19cb249832a2a955dfb28837e65 Mon Sep 17 00:00:00 2001 From: Lee Schermerhorn Date: Mon, 14 Dec 2009 17:58:36 -0800 Subject: hugetlb: offload per node attribute registrations Offload the registration and unregistration of per node hstate sysfs attributes to a worker thread rather than attempt the allocation/attachment or detachment/freeing of the attributes in the context of the memory hotplug handler. I don't know that this is absolutely required, but the registration can sleep in allocations and other mem hot plug handlers do it this way. If it turns out this is NOT required, we can drop this patch. N.B., Only tested build, boot, libhugetlbfs regression. i.e., no memory hotplug testing. 
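The deferral itself is the standard workqueue pattern, roughly (condensed from the drivers/base/node.c hunks below):

	static void node_hugetlb_work(struct work_struct *work)
	{
		struct node *node = container_of(work, struct node, node_work);

		/* register if the node has memory now, otherwise unregister */
		if (!hugetlb_register_node(node))
			hugetlb_unregister_node(node);
	}

	/* at node registration time */
	INIT_WORK(&node_devices[nid].node_work, node_hugetlb_work);

	/* from the MEM_ONLINE/MEM_OFFLINE notifier, instead of registering inline */
	schedule_work(&node_devices[nid].node_work);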
Signed-off-by: Lee Schermerhorn Reviewed-by: Andi Kleen Cc: KAMEZAWA Hiroyuki Cc: Lee Schermerhorn Cc: Mel Gorman Cc: Randy Dunlap Cc: Nishanth Aravamudan Cc: David Rientjes Cc: Adam Litke Cc: Andy Whitcroft Cc: Eric Whitney Cc: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/base/node.c | 57 +++++++++++++++++++++++++++++++++++++++++++--------- include/linux/node.h | 5 +++++ 2 files changed, 52 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/drivers/base/node.c b/drivers/base/node.c index 9e218a6d4a5b..54e5d8eaf70e 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -186,11 +186,14 @@ static SYSDEV_ATTR(distance, S_IRUGO, node_read_distance, NULL); static node_registration_func_t __hugetlb_register_node; static node_registration_func_t __hugetlb_unregister_node; -static inline void hugetlb_register_node(struct node *node) +static inline bool hugetlb_register_node(struct node *node) { if (__hugetlb_register_node && - node_state(node->sysdev.id, N_HIGH_MEMORY)) + node_state(node->sysdev.id, N_HIGH_MEMORY)) { __hugetlb_register_node(node); + return true; + } + return false; } static inline void hugetlb_unregister_node(struct node *node) @@ -387,10 +390,31 @@ static int link_mem_sections(int nid) return err; } +#ifdef CONFIG_HUGETLBFS /* * Handle per node hstate attribute [un]registration on transistions * to/from memoryless state. */ +static void node_hugetlb_work(struct work_struct *work) +{ + struct node *node = container_of(work, struct node, node_work); + + /* + * We only get here when a node transitions to/from memoryless state. + * We can detect which transition occurred by examining whether the + * node has memory now. hugetlb_register_node() already check this + * so we try to register the attributes. If that fails, then the + * node has transitioned to memoryless, try to unregister the + * attributes. + */ + if (!hugetlb_register_node(node)) + hugetlb_unregister_node(node); +} + +static void init_node_hugetlb_work(int nid) +{ + INIT_WORK(&node_devices[nid].node_work, node_hugetlb_work); +} static int node_memory_callback(struct notifier_block *self, unsigned long action, void *arg) @@ -399,14 +423,16 @@ static int node_memory_callback(struct notifier_block *self, int nid = mnb->status_change_nid; switch (action) { - case MEM_ONLINE: /* memory successfully brought online */ + case MEM_ONLINE: + case MEM_OFFLINE: + /* + * offload per node hstate [un]registration to a work thread + * when transitioning to/from memoryless state. 
+ */ if (nid != NUMA_NO_NODE) - hugetlb_register_node(&node_devices[nid]); - break; - case MEM_OFFLINE: /* or offline */ - if (nid != NUMA_NO_NODE) - hugetlb_unregister_node(&node_devices[nid]); + schedule_work(&node_devices[nid].node_work); break; + case MEM_GOING_ONLINE: case MEM_GOING_OFFLINE: case MEM_CANCEL_ONLINE: @@ -417,15 +443,23 @@ static int node_memory_callback(struct notifier_block *self, return NOTIFY_OK; } -#else +#endif /* CONFIG_HUGETLBFS */ +#else /* !CONFIG_MEMORY_HOTPLUG_SPARSE */ + static int link_mem_sections(int nid) { return 0; } +#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ +#if !defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || \ + !defined(CONFIG_HUGETLBFS) static inline int node_memory_callback(struct notifier_block *self, unsigned long action, void *arg) { return NOTIFY_OK; } -#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ + +static void init_node_hugetlb_work(int nid) { } + +#endif int register_one_node(int nid) { @@ -449,6 +483,9 @@ int register_one_node(int nid) /* link memory sections under this node */ error = link_mem_sections(nid); + + /* initialize work queue for memory hot plug */ + init_node_hugetlb_work(nid); } return error; diff --git a/include/linux/node.h b/include/linux/node.h index dae1521e1f05..06292dac3eab 100644 --- a/include/linux/node.h +++ b/include/linux/node.h @@ -21,9 +21,14 @@ #include #include +#include struct node { struct sys_device sysdev; + +#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS) + struct work_struct node_work; +#endif }; struct memory_block; -- cgit v1.2.3 From bad44b5be84cf3bb1ff900bec02ee61e1993328c Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Mon, 14 Dec 2009 17:58:38 -0800 Subject: mm: add gfp flags for NODEMASK_ALLOC slab allocations Objects passed to NODEMASK_ALLOC() are relatively small in size and are backed by slab caches that are not of large order, traditionally never greater than PAGE_ALLOC_COSTLY_ORDER. Thus, using GFP_KERNEL for these allocations on large machines when CONFIG_NODES_SHIFT > 8 will cause the page allocator to loop endlessly in the allocation attempt, each time invoking both direct reclaim or the oom killer. This is of particular interest when using NODEMASK_ALLOC() from a mempolicy context (either directly in mm/mempolicy.c or the mempolicy constrained hugetlb allocations) since the oom killer always kills current when allocations are constrained by mempolicies. So for all present use cases in the kernel, current would end up being oom killed when direct reclaim fails. That would allow the NODEMASK_ALLOC() to succeed but current would have sacrificed itself upon returning. This patch adds gfp flags to NODEMASK_ALLOC() to pass to kmalloc() on CONFIG_NODES_SHIFT > 8; this parameter is a nop on other configurations. All current use cases either directly from hugetlb code or indirectly via NODEMASK_SCRATCH() union __GFP_NORETRY to avoid direct reclaim and the oom killer when the slab allocator needs to allocate additional pages. The side-effect of this change is that all current use cases of either NODEMASK_ALLOC() or NODEMASK_SCRATCH() need appropriate -ENOMEM handling when the allocation fails (never for CONFIG_NODES_SHIFT <= 8). All current use cases were audited and do have appropriate error handling at this time. 
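The callers cope with a failed allocation by degrading to the default node mask, roughly as in the mm/hugetlb.c hunks below (obey_mempolicy comes from the earlier patches in this series):

	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);

	/*
	 * init_nodemask_of_mempolicy() returns false for a NULL mask, so an
	 * allocation failure simply falls back to all nodes with memory:
	 */
	if (!(obey_mempolicy && init_nodemask_of_mempolicy(nodes_allowed))) {
		NODEMASK_FREE(nodes_allowed);
		nodes_allowed = &node_states[N_HIGH_MEMORY];
	}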
Signed-off-by: David Rientjes Acked-by: KAMEZAWA Hiroyuki Cc: Lee Schermerhorn Cc: Mel Gorman Cc: Randy Dunlap Cc: Nishanth Aravamudan Cc: Andi Kleen Cc: David Rientjes Cc: Adam Litke Cc: Andy Whitcroft Cc: Eric Whitney Cc: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/nodemask.h | 21 ++++++++++++--------- mm/hugetlb.c | 5 +++-- 2 files changed, 15 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h index cbd521a03127..454997cccbd8 100644 --- a/include/linux/nodemask.h +++ b/include/linux/nodemask.h @@ -485,15 +485,17 @@ static inline int num_node_state(enum node_states state) #define for_each_online_node(node) for_each_node_state(node, N_ONLINE) /* - * For nodemask scrach area.(See CPUMASK_ALLOC() in cpumask.h) - * NODEMASK_ALLOC(x, m) allocates an object of type 'x' with the name 'm'. + * For nodemask scrach area. + * NODEMASK_ALLOC(type, name) allocates an object with a specified type and + * name. */ -#if NODES_SHIFT > 8 /* nodemask_t > 64 bytes */ -#define NODEMASK_ALLOC(x, m) x *m = kmalloc(sizeof(*m), GFP_KERNEL) -#define NODEMASK_FREE(m) kfree(m) +#if NODES_SHIFT > 8 /* nodemask_t > 256 bytes */ +#define NODEMASK_ALLOC(type, name, gfp_flags) \ + type *name = kmalloc(sizeof(*name), gfp_flags) +#define NODEMASK_FREE(m) kfree(m) #else -#define NODEMASK_ALLOC(x, m) x _m, *m = &_m -#define NODEMASK_FREE(m) do {} while (0) +#define NODEMASK_ALLOC(type, name, gfp_flags) type _name, *name = &_name +#define NODEMASK_FREE(m) do {} while (0) #endif /* A example struture for using NODEMASK_ALLOC, used in mempolicy. */ @@ -502,8 +504,9 @@ struct nodemask_scratch { nodemask_t mask2; }; -#define NODEMASK_SCRATCH(x) \ - NODEMASK_ALLOC(struct nodemask_scratch, x) +#define NODEMASK_SCRATCH(x) \ + NODEMASK_ALLOC(struct nodemask_scratch, x, \ + GFP_KERNEL | __GFP_NORETRY) #define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index b4a263512cb7..450493d25572 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1361,7 +1361,7 @@ static ssize_t nr_hugepages_store_common(bool obey_mempolicy, int nid; unsigned long count; struct hstate *h; - NODEMASK_ALLOC(nodemask_t, nodes_allowed); + NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY); err = strict_strtoul(buf, 10, &count); if (err) @@ -1857,7 +1857,8 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy, proc_doulongvec_minmax(table, write, buffer, length, ppos); if (write) { - NODEMASK_ALLOC(nodemask_t, nodes_allowed); + NODEMASK_ALLOC(nodemask_t, nodes_allowed, + GFP_KERNEL | __GFP_NORETRY); if (!(obey_mempolicy && init_nodemask_of_mempolicy(nodes_allowed))) { NODEMASK_FREE(nodes_allowed); -- cgit v1.2.3 From f29ad6a99b596b8169744d107bf088e8be9e8d0d Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 14 Dec 2009 17:58:40 -0800 Subject: swap_info: private to swapfile.c The swap_info_struct is mostly private to mm/swapfile.c, with only one other in-tree user: get_swap_bio(). Adjust its interface to map_swap_page(), so that we can then remove get_swap_info_struct(). But there is a popular user out-of-tree, TuxOnIce: so leave the declaration of swap_info_struct in linux/swap.h. 
Signed-off-by: Hugh Dickins Cc: Nigel Cunningham Cc: KAMEZAWA Hiroyuki Reviewed-by: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/swap.h | 3 +-- mm/page_io.c | 19 +++++++------------ mm/swapfile.c | 29 +++++++++++++++++------------ 3 files changed, 25 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/include/linux/swap.h b/include/linux/swap.h index abce8a0b2507..82aa7e121c05 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -318,9 +318,8 @@ extern void swapcache_free(swp_entry_t, struct page *page); extern int free_swap_and_cache(swp_entry_t); extern int swap_type_of(dev_t, sector_t, struct block_device **); extern unsigned int count_swap_pages(int, int); -extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t); +extern sector_t map_swap_page(swp_entry_t, struct block_device **); extern sector_t swapdev_block(int, pgoff_t); -extern struct swap_info_struct *get_swap_info_struct(unsigned); extern int reuse_swap_page(struct page *); extern int try_to_free_swap(struct page *); struct backing_dev_info; diff --git a/mm/page_io.c b/mm/page_io.c index c6f3e5071de3..afeed89a0a5d 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -19,20 +19,17 @@ #include #include -static struct bio *get_swap_bio(gfp_t gfp_flags, pgoff_t index, +static struct bio *get_swap_bio(gfp_t gfp_flags, struct page *page, bio_end_io_t end_io) { struct bio *bio; bio = bio_alloc(gfp_flags, 1); if (bio) { - struct swap_info_struct *sis; - swp_entry_t entry = { .val = index, }; - - sis = get_swap_info_struct(swp_type(entry)); - bio->bi_sector = map_swap_page(sis, swp_offset(entry)) * - (PAGE_SIZE >> 9); - bio->bi_bdev = sis->bdev; + swp_entry_t entry; + entry.val = page_private(page); + bio->bi_sector = map_swap_page(entry, &bio->bi_bdev); + bio->bi_sector <<= PAGE_SHIFT - 9; bio->bi_io_vec[0].bv_page = page; bio->bi_io_vec[0].bv_len = PAGE_SIZE; bio->bi_io_vec[0].bv_offset = 0; @@ -102,8 +99,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc) unlock_page(page); goto out; } - bio = get_swap_bio(GFP_NOIO, page_private(page), page, - end_swap_bio_write); + bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write); if (bio == NULL) { set_page_dirty(page); unlock_page(page); @@ -127,8 +123,7 @@ int swap_readpage(struct page *page) VM_BUG_ON(!PageLocked(page)); VM_BUG_ON(PageUptodate(page)); - bio = get_swap_bio(GFP_KERNEL, page_private(page), page, - end_swap_bio_read); + bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read); if (bio == NULL) { unlock_page(page); ret = -ENOMEM; diff --git a/mm/swapfile.c b/mm/swapfile.c index 9c590eef7912..f83f1c6f6196 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1283,12 +1283,22 @@ static void drain_mmlist(void) /* * Use this swapdev's extent info to locate the (PAGE_SIZE) block which - * corresponds to page offset `offset'. + * corresponds to page offset `offset'. Note that the type of this function + * is sector_t, but it returns page offset into the bdev, not sector offset. 
*/ -sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset) +sector_t map_swap_page(swp_entry_t entry, struct block_device **bdev) { - struct swap_extent *se = sis->curr_swap_extent; - struct swap_extent *start_se = se; + struct swap_info_struct *sis; + struct swap_extent *start_se; + struct swap_extent *se; + pgoff_t offset; + + sis = swap_info + swp_type(entry); + *bdev = sis->bdev; + + offset = swp_offset(entry); + start_se = sis->curr_swap_extent; + se = start_se; for ( ; ; ) { struct list_head *lh; @@ -1314,12 +1324,14 @@ sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset) sector_t swapdev_block(int swap_type, pgoff_t offset) { struct swap_info_struct *sis; + struct block_device *bdev; if (swap_type >= nr_swapfiles) return 0; sis = swap_info + swap_type; - return (sis->flags & SWP_WRITEOK) ? map_swap_page(sis, offset) : 0; + return (sis->flags & SWP_WRITEOK) ? + map_swap_page(swp_entry(swap_type, offset), &bdev) : 0; } #endif /* CONFIG_HIBERNATION */ @@ -2159,13 +2171,6 @@ int swapcache_prepare(swp_entry_t entry) return __swap_duplicate(entry, SWAP_CACHE); } - -struct swap_info_struct * -get_swap_info_struct(unsigned type) -{ - return &swap_info[type]; -} - /* * swap_lock prevents swap_map being freed. Don't grab an extra * reference on the swaphandle, it doesn't matter if it becomes unused. -- cgit v1.2.3 From efa90a981bbc891efad96db2a75b5487e00852ca Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 14 Dec 2009 17:58:41 -0800 Subject: swap_info: change to array of pointers The swap_info_struct is only 76 or 104 bytes, but it does seem wrong to reserve an array of about 30 of them in bss, when most people will want only one. Change swap_info[] to an array of pointers. That does need a "type" field in the structure: pack it as a char with next type and short prio (aha, char is unsigned by default on PowerPC). Use the (admittedly peculiar) name "type" throughout for this index. /proc/swaps does not take swap_lock: I wouldn't want it to, but do take care with barriers when adding a new item to the array (never removed). Signed-off-by: Hugh Dickins Reviewed-by: KAMEZAWA Hiroyuki Acked-by: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/swap.h | 7 +- mm/swapfile.c | 204 ++++++++++++++++++++++++++++----------------------- 2 files changed, 117 insertions(+), 94 deletions(-) (limited to 'include') diff --git a/include/linux/swap.h b/include/linux/swap.h index 82aa7e121c05..f1c248796fb8 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -159,9 +159,10 @@ enum { * The in-memory structure used to track swap areas. 
*/ struct swap_info_struct { - unsigned long flags; - int prio; /* swap priority */ - int next; /* next entry on swap list */ + unsigned long flags; /* SWP_USED etc: see above */ + signed short prio; /* swap priority of this type */ + signed char type; /* strange name for an index */ + signed char next; /* next type on the swap list */ struct file *swap_file; struct block_device *bdev; struct list_head extent_list; diff --git a/mm/swapfile.c b/mm/swapfile.c index f83f1c6f6196..dc88a7e4257e 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -49,7 +49,7 @@ static const char Unused_offset[] = "Unused swap offset entry "; static struct swap_list_t swap_list = {-1, -1}; -static struct swap_info_struct swap_info[MAX_SWAPFILES]; +static struct swap_info_struct *swap_info[MAX_SWAPFILES]; static DEFINE_MUTEX(swapon_mutex); @@ -79,12 +79,11 @@ static inline unsigned short encode_swapmap(int count, bool has_cache) return ret; } -/* returnes 1 if swap entry is freed */ +/* returns 1 if swap entry is freed */ static int __try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset) { - int type = si - swap_info; - swp_entry_t entry = swp_entry(type, offset); + swp_entry_t entry = swp_entry(si->type, offset); struct page *page; int ret = 0; @@ -120,7 +119,7 @@ void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page) down_read(&swap_unplug_sem); entry.val = page_private(page); if (PageSwapCache(page)) { - struct block_device *bdev = swap_info[swp_type(entry)].bdev; + struct block_device *bdev = swap_info[swp_type(entry)]->bdev; struct backing_dev_info *bdi; /* @@ -467,10 +466,10 @@ swp_entry_t get_swap_page(void) nr_swap_pages--; for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) { - si = swap_info + type; + si = swap_info[type]; next = si->next; if (next < 0 || - (!wrapped && si->prio != swap_info[next].prio)) { + (!wrapped && si->prio != swap_info[next]->prio)) { next = swap_list.head; wrapped++; } @@ -503,8 +502,8 @@ swp_entry_t get_swap_page_of_type(int type) pgoff_t offset; spin_lock(&swap_lock); - si = swap_info + type; - if (si->flags & SWP_WRITEOK) { + si = swap_info[type]; + if (si && (si->flags & SWP_WRITEOK)) { nr_swap_pages--; /* This is called for allocating swap entry, not cache */ offset = scan_swap_map(si, SWAP_MAP); @@ -528,7 +527,7 @@ static struct swap_info_struct * swap_info_get(swp_entry_t entry) type = swp_type(entry); if (type >= nr_swapfiles) goto bad_nofile; - p = & swap_info[type]; + p = swap_info[type]; if (!(p->flags & SWP_USED)) goto bad_device; offset = swp_offset(entry); @@ -581,8 +580,9 @@ static int swap_entry_free(struct swap_info_struct *p, p->lowest_bit = offset; if (offset > p->highest_bit) p->highest_bit = offset; - if (p->prio > swap_info[swap_list.next].prio) - swap_list.next = p - swap_info; + if (swap_list.next >= 0 && + p->prio > swap_info[swap_list.next]->prio) + swap_list.next = p->type; nr_swap_pages++; p->inuse_pages--; } @@ -741,14 +741,14 @@ int free_swap_and_cache(swp_entry_t entry) int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p) { struct block_device *bdev = NULL; - int i; + int type; if (device) bdev = bdget(device); spin_lock(&swap_lock); - for (i = 0; i < nr_swapfiles; i++) { - struct swap_info_struct *sis = swap_info + i; + for (type = 0; type < nr_swapfiles; type++) { + struct swap_info_struct *sis = swap_info[type]; if (!(sis->flags & SWP_WRITEOK)) continue; @@ -758,7 +758,7 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p) *bdev_p = 
bdgrab(sis->bdev); spin_unlock(&swap_lock); - return i; + return type; } if (bdev == sis->bdev) { struct swap_extent *se; @@ -771,7 +771,7 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p) spin_unlock(&swap_lock); bdput(bdev); - return i; + return type; } } } @@ -792,15 +792,17 @@ unsigned int count_swap_pages(int type, int free) { unsigned int n = 0; - if (type < nr_swapfiles) { - spin_lock(&swap_lock); - if (swap_info[type].flags & SWP_WRITEOK) { - n = swap_info[type].pages; + spin_lock(&swap_lock); + if ((unsigned int)type < nr_swapfiles) { + struct swap_info_struct *sis = swap_info[type]; + + if (sis->flags & SWP_WRITEOK) { + n = sis->pages; if (free) - n -= swap_info[type].inuse_pages; + n -= sis->inuse_pages; } - spin_unlock(&swap_lock); } + spin_unlock(&swap_lock); return n; } #endif @@ -1024,7 +1026,7 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si, */ static int try_to_unuse(unsigned int type) { - struct swap_info_struct * si = &swap_info[type]; + struct swap_info_struct *si = swap_info[type]; struct mm_struct *start_mm; unsigned short *swap_map; unsigned short swcount; @@ -1270,10 +1272,10 @@ retry: static void drain_mmlist(void) { struct list_head *p, *next; - unsigned int i; + unsigned int type; - for (i = 0; i < nr_swapfiles; i++) - if (swap_info[i].inuse_pages) + for (type = 0; type < nr_swapfiles; type++) + if (swap_info[type]->inuse_pages) return; spin_lock(&mmlist_lock); list_for_each_safe(p, next, &init_mm.mmlist) @@ -1293,7 +1295,7 @@ sector_t map_swap_page(swp_entry_t entry, struct block_device **bdev) struct swap_extent *se; pgoff_t offset; - sis = swap_info + swp_type(entry); + sis = swap_info[swp_type(entry)]; *bdev = sis->bdev; offset = swp_offset(entry); @@ -1321,17 +1323,15 @@ sector_t map_swap_page(swp_entry_t entry, struct block_device **bdev) * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev * corresponding to given index in swap_info (swap type). */ -sector_t swapdev_block(int swap_type, pgoff_t offset) +sector_t swapdev_block(int type, pgoff_t offset) { - struct swap_info_struct *sis; struct block_device *bdev; - if (swap_type >= nr_swapfiles) + if ((unsigned int)type >= nr_swapfiles) return 0; - - sis = swap_info + swap_type; - return (sis->flags & SWP_WRITEOK) ? - map_swap_page(swp_entry(swap_type, offset), &bdev) : 0; + if (!(swap_info[type]->flags & SWP_WRITEOK)) + return 0; + return map_swap_page(swp_entry(type, offset), &bdev); } #endif /* CONFIG_HIBERNATION */ @@ -1547,8 +1547,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) mapping = victim->f_mapping; prev = -1; spin_lock(&swap_lock); - for (type = swap_list.head; type >= 0; type = swap_info[type].next) { - p = swap_info + type; + for (type = swap_list.head; type >= 0; type = swap_info[type]->next) { + p = swap_info[type]; if (p->flags & SWP_WRITEOK) { if (p->swap_file->f_mapping == mapping) break; @@ -1567,18 +1567,17 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) spin_unlock(&swap_lock); goto out_dput; } - if (prev < 0) { + if (prev < 0) swap_list.head = p->next; - } else { - swap_info[prev].next = p->next; - } + else + swap_info[prev]->next = p->next; if (type == swap_list.next) { /* just pick something that's safe... 
*/ swap_list.next = swap_list.head; } if (p->prio < 0) { - for (i = p->next; i >= 0; i = swap_info[i].next) - swap_info[i].prio = p->prio--; + for (i = p->next; i >= 0; i = swap_info[i]->next) + swap_info[i]->prio = p->prio--; least_priority++; } nr_swap_pages -= p->pages; @@ -1596,16 +1595,16 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) if (p->prio < 0) p->prio = --least_priority; prev = -1; - for (i = swap_list.head; i >= 0; i = swap_info[i].next) { - if (p->prio >= swap_info[i].prio) + for (i = swap_list.head; i >= 0; i = swap_info[i]->next) { + if (p->prio >= swap_info[i]->prio) break; prev = i; } p->next = i; if (prev < 0) - swap_list.head = swap_list.next = p - swap_info; + swap_list.head = swap_list.next = type; else - swap_info[prev].next = p - swap_info; + swap_info[prev]->next = type; nr_swap_pages += p->pages; total_swap_pages += p->pages; p->flags |= SWP_WRITEOK; @@ -1665,8 +1664,8 @@ out: /* iterator */ static void *swap_start(struct seq_file *swap, loff_t *pos) { - struct swap_info_struct *ptr = swap_info; - int i; + struct swap_info_struct *si; + int type; loff_t l = *pos; mutex_lock(&swapon_mutex); @@ -1674,11 +1673,13 @@ static void *swap_start(struct seq_file *swap, loff_t *pos) if (!l) return SEQ_START_TOKEN; - for (i = 0; i < nr_swapfiles; i++, ptr++) { - if (!(ptr->flags & SWP_USED) || !ptr->swap_map) + for (type = 0; type < nr_swapfiles; type++) { + smp_rmb(); /* read nr_swapfiles before swap_info[type] */ + si = swap_info[type]; + if (!(si->flags & SWP_USED) || !si->swap_map) continue; if (!--l) - return ptr; + return si; } return NULL; @@ -1686,21 +1687,21 @@ static void *swap_start(struct seq_file *swap, loff_t *pos) static void *swap_next(struct seq_file *swap, void *v, loff_t *pos) { - struct swap_info_struct *ptr; - struct swap_info_struct *endptr = swap_info + nr_swapfiles; + struct swap_info_struct *si = v; + int type; if (v == SEQ_START_TOKEN) - ptr = swap_info; - else { - ptr = v; - ptr++; - } + type = 0; + else + type = si->type + 1; - for (; ptr < endptr; ptr++) { - if (!(ptr->flags & SWP_USED) || !ptr->swap_map) + for (; type < nr_swapfiles; type++) { + smp_rmb(); /* read nr_swapfiles before swap_info[type] */ + si = swap_info[type]; + if (!(si->flags & SWP_USED) || !si->swap_map) continue; ++*pos; - return ptr; + return si; } return NULL; @@ -1713,24 +1714,24 @@ static void swap_stop(struct seq_file *swap, void *v) static int swap_show(struct seq_file *swap, void *v) { - struct swap_info_struct *ptr = v; + struct swap_info_struct *si = v; struct file *file; int len; - if (ptr == SEQ_START_TOKEN) { + if (si == SEQ_START_TOKEN) { seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n"); return 0; } - file = ptr->swap_file; + file = si->swap_file; len = seq_path(swap, &file->f_path, " \t\n\\"); seq_printf(swap, "%*s%s\t%u\t%u\t%d\n", len < 40 ? 40 - len : 1, " ", S_ISBLK(file->f_path.dentry->d_inode->i_mode) ? 
"partition" : "file\t", - ptr->pages << (PAGE_SHIFT - 10), - ptr->inuse_pages << (PAGE_SHIFT - 10), - ptr->prio); + si->pages << (PAGE_SHIFT - 10), + si->inuse_pages << (PAGE_SHIFT - 10), + si->prio); return 0; } @@ -1798,23 +1799,45 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) if (!capable(CAP_SYS_ADMIN)) return -EPERM; + + p = kzalloc(sizeof(*p), GFP_KERNEL); + if (!p) + return -ENOMEM; + spin_lock(&swap_lock); - p = swap_info; - for (type = 0 ; type < nr_swapfiles ; type++,p++) - if (!(p->flags & SWP_USED)) + for (type = 0; type < nr_swapfiles; type++) { + if (!(swap_info[type]->flags & SWP_USED)) break; + } error = -EPERM; if (type >= MAX_SWAPFILES) { spin_unlock(&swap_lock); + kfree(p); goto out; } - if (type >= nr_swapfiles) - nr_swapfiles = type+1; - memset(p, 0, sizeof(*p)); INIT_LIST_HEAD(&p->extent_list); + if (type >= nr_swapfiles) { + p->type = type; + swap_info[type] = p; + /* + * Write swap_info[type] before nr_swapfiles, in case a + * racing procfs swap_start() or swap_next() is reading them. + * (We never shrink nr_swapfiles, we never free this entry.) + */ + smp_wmb(); + nr_swapfiles++; + } else { + kfree(p); + p = swap_info[type]; + /* + * Do not memset this entry: a racing procfs swap_next() + * would be relying on p->type to remain valid. + */ + } p->flags = SWP_USED; p->next = -1; spin_unlock(&swap_lock); + name = getname(specialfile); error = PTR_ERR(name); if (IS_ERR(name)) { @@ -1834,7 +1857,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) error = -EBUSY; for (i = 0; i < nr_swapfiles; i++) { - struct swap_info_struct *q = &swap_info[i]; + struct swap_info_struct *q = swap_info[i]; if (i == type || !q->swap_file) continue; @@ -1909,6 +1932,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) p->lowest_bit = 1; p->cluster_next = 1; + p->cluster_nr = 0; /* * Find out how many pages are allowed for a single swap @@ -2015,18 +2039,16 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) /* insert swap space into swap_list: */ prev = -1; - for (i = swap_list.head; i >= 0; i = swap_info[i].next) { - if (p->prio >= swap_info[i].prio) { + for (i = swap_list.head; i >= 0; i = swap_info[i]->next) { + if (p->prio >= swap_info[i]->prio) break; - } prev = i; } p->next = i; - if (prev < 0) { - swap_list.head = swap_list.next = p - swap_info; - } else { - swap_info[prev].next = p - swap_info; - } + if (prev < 0) + swap_list.head = swap_list.next = type; + else + swap_info[prev]->next = type; spin_unlock(&swap_lock); mutex_unlock(&swapon_mutex); error = 0; @@ -2063,15 +2085,15 @@ out: void si_swapinfo(struct sysinfo *val) { - unsigned int i; + unsigned int type; unsigned long nr_to_be_unused = 0; spin_lock(&swap_lock); - for (i = 0; i < nr_swapfiles; i++) { - if (!(swap_info[i].flags & SWP_USED) || - (swap_info[i].flags & SWP_WRITEOK)) - continue; - nr_to_be_unused += swap_info[i].inuse_pages; + for (type = 0; type < nr_swapfiles; type++) { + struct swap_info_struct *si = swap_info[type]; + + if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK)) + nr_to_be_unused += si->inuse_pages; } val->freeswap = nr_swap_pages + nr_to_be_unused; val->totalswap = total_swap_pages + nr_to_be_unused; @@ -2104,7 +2126,7 @@ static int __swap_duplicate(swp_entry_t entry, bool cache) type = swp_type(entry); if (type >= nr_swapfiles) goto bad_file; - p = type + swap_info; + p = swap_info[type]; offset = swp_offset(entry); spin_lock(&swap_lock); @@ -2186,7 +2208,7 @@ int 
valid_swaphandles(swp_entry_t entry, unsigned long *offset) if (!our_page_cluster) /* no readahead */ return 0; - si = &swap_info[swp_type(entry)]; + si = swap_info[swp_type(entry)]; target = swp_offset(entry); base = (target >> our_page_cluster) << our_page_cluster; end = base + (1 << our_page_cluster); -- cgit v1.2.3 From 9625a5f289f7c3c100b59c317e2bcc3c7e2e51fb Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 14 Dec 2009 17:58:42 -0800 Subject: swap_info: include first_swap_extent Make better use of the space by folding first swap_extent into its swap_info_struct, instead of just the list_head: swap partitions need only that one, and for others it's used as a circular list anyway. [jirislaby@gmail.com: fix crash on double swapon] Signed-off-by: Hugh Dickins Cc: KAMEZAWA Hiroyuki Cc: Rik van Riel Signed-off-by: Jiri Slaby Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/swap.h | 2 +- mm/swapfile.c | 70 +++++++++++++++++++++++++++------------------------- 2 files changed, 37 insertions(+), 35 deletions(-) (limited to 'include') diff --git a/include/linux/swap.h b/include/linux/swap.h index f1c248796fb8..109dfe794237 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -165,7 +165,7 @@ struct swap_info_struct { signed char next; /* next type on the swap list */ struct file *swap_file; struct block_device *bdev; - struct list_head extent_list; + struct swap_extent first_swap_extent; struct swap_extent *curr_swap_extent; unsigned short *swap_map; unsigned int lowest_bit; diff --git a/mm/swapfile.c b/mm/swapfile.c index dc88a7e4257e..16de84b56644 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -145,23 +145,28 @@ void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page) static int discard_swap(struct swap_info_struct *si) { struct swap_extent *se; + sector_t start_block; + sector_t nr_blocks; int err = 0; - list_for_each_entry(se, &si->extent_list, list) { - sector_t start_block = se->start_block << (PAGE_SHIFT - 9); - sector_t nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9); + /* Do not discard the swap header page! */ + se = &si->first_swap_extent; + start_block = (se->start_block + 1) << (PAGE_SHIFT - 9); + nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9); + if (nr_blocks) { + err = blkdev_issue_discard(si->bdev, start_block, + nr_blocks, GFP_KERNEL, DISCARD_FL_BARRIER); + if (err) + return err; + cond_resched(); + } - if (se->start_page == 0) { - /* Do not discard the swap header page! 
*/ - start_block += 1 << (PAGE_SHIFT - 9); - nr_blocks -= 1 << (PAGE_SHIFT - 9); - if (!nr_blocks) - continue; - } + list_for_each_entry(se, &si->first_swap_extent.list, list) { + start_block = se->start_block << (PAGE_SHIFT - 9); + nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9); err = blkdev_issue_discard(si->bdev, start_block, - nr_blocks, GFP_KERNEL, - DISCARD_FL_BARRIER); + nr_blocks, GFP_KERNEL, DISCARD_FL_BARRIER); if (err) break; @@ -200,14 +205,11 @@ static void discard_swap_cluster(struct swap_info_struct *si, start_block <<= PAGE_SHIFT - 9; nr_blocks <<= PAGE_SHIFT - 9; if (blkdev_issue_discard(si->bdev, start_block, - nr_blocks, GFP_NOIO, - DISCARD_FL_BARRIER)) + nr_blocks, GFP_NOIO, DISCARD_FL_BARRIER)) break; } lh = se->list.next; - if (lh == &si->extent_list) - lh = lh->next; se = list_entry(lh, struct swap_extent, list); } } @@ -761,10 +763,8 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p) return type; } if (bdev == sis->bdev) { - struct swap_extent *se; + struct swap_extent *se = &sis->first_swap_extent; - se = list_entry(sis->extent_list.next, - struct swap_extent, list); if (se->start_block == offset) { if (bdev_p) *bdev_p = bdgrab(sis->bdev); @@ -1310,8 +1310,6 @@ sector_t map_swap_page(swp_entry_t entry, struct block_device **bdev) return se->start_block + (offset - se->start_page); } lh = se->list.next; - if (lh == &sis->extent_list) - lh = lh->next; se = list_entry(lh, struct swap_extent, list); sis->curr_swap_extent = se; BUG_ON(se == start_se); /* It *must* be present */ @@ -1340,10 +1338,10 @@ sector_t swapdev_block(int type, pgoff_t offset) */ static void destroy_swap_extents(struct swap_info_struct *sis) { - while (!list_empty(&sis->extent_list)) { + while (!list_empty(&sis->first_swap_extent.list)) { struct swap_extent *se; - se = list_entry(sis->extent_list.next, + se = list_entry(sis->first_swap_extent.list.next, struct swap_extent, list); list_del(&se->list); kfree(se); @@ -1364,8 +1362,15 @@ add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, struct swap_extent *new_se; struct list_head *lh; - lh = sis->extent_list.prev; /* The highest page extent */ - if (lh != &sis->extent_list) { + if (start_page == 0) { + se = &sis->first_swap_extent; + sis->curr_swap_extent = se; + se->start_page = 0; + se->nr_pages = nr_pages; + se->start_block = start_block; + return 1; + } else { + lh = sis->first_swap_extent.list.prev; /* Highest extent */ se = list_entry(lh, struct swap_extent, list); BUG_ON(se->start_page + se->nr_pages != start_page); if (se->start_block + se->nr_pages == start_block) { @@ -1385,7 +1390,7 @@ add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, new_se->nr_pages = nr_pages; new_se->start_block = start_block; - list_add_tail(&new_se->list, &sis->extent_list); + list_add_tail(&new_se->list, &sis->first_swap_extent.list); return 1; } @@ -1437,7 +1442,7 @@ static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span) if (S_ISBLK(inode->i_mode)) { ret = add_swap_extent(sis, 0, sis->max, 0); *span = sis->pages; - goto done; + goto out; } blkbits = inode->i_blkbits; @@ -1508,15 +1513,12 @@ reprobe: sis->max = page_no; sis->pages = page_no - 1; sis->highest_bit = page_no - 1; -done: - sis->curr_swap_extent = list_entry(sis->extent_list.prev, - struct swap_extent, list); - goto out; +out: + return ret; bad_bmap: printk(KERN_ERR "swapon: swapfile has holes\n"); ret = -EINVAL; -out: - return ret; + goto out; } SYSCALL_DEFINE1(swapoff, const char __user *, 
specialfile) @@ -1815,7 +1817,6 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) kfree(p); goto out; } - INIT_LIST_HEAD(&p->extent_list); if (type >= nr_swapfiles) { p->type = type; swap_info[type] = p; @@ -1834,6 +1835,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) * would be relying on p->type to remain valid. */ } + INIT_LIST_HEAD(&p->first_swap_extent.list); p->flags = SWP_USED; p->next = -1; spin_unlock(&swap_lock); -- cgit v1.2.3 From 253d553ba75ab26b3e9e2f70cbf6fbf0813f7e86 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 14 Dec 2009 17:58:44 -0800 Subject: swap_info: SWAP_HAS_CACHE cleanups Though swap_count() is useful, I'm finding that swap_has_cache() and encode_swapmap() obscure what happens in the swap_map entry, just at those points where I need to understand it. Remove them, and pass more usable "usage" values to scan_swap_map(), swap_entry_free() and __swap_duplicate(), instead of the SWAP_MAP and SWAP_CACHE enum. Signed-off-by: Hugh Dickins Reviewed-by: KAMEZAWA Hiroyuki Cc: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/swap.h | 2 +- mm/swapfile.c | 155 +++++++++++++++++++++------------------------------ 2 files changed, 65 insertions(+), 92 deletions(-) (limited to 'include') diff --git a/include/linux/swap.h b/include/linux/swap.h index 109dfe794237..c9d8870892b8 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -154,7 +154,7 @@ enum { #define SWAP_MAP_MAX 0x7ffe #define SWAP_MAP_BAD 0x7fff #define SWAP_HAS_CACHE 0x8000 /* There is a swap cache of entry. */ -#define SWAP_COUNT_MASK (~SWAP_HAS_CACHE) + /* * The in-memory structure used to track swap areas. */ diff --git a/mm/swapfile.c b/mm/swapfile.c index fa5f10b9c28b..52497490a7ca 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -53,30 +53,9 @@ static struct swap_info_struct *swap_info[MAX_SWAPFILES]; static DEFINE_MUTEX(swapon_mutex); -/* For reference count accounting in swap_map */ -/* enum for swap_map[] handling. 
internal use only */ -enum { - SWAP_MAP = 0, /* ops for reference from swap users */ - SWAP_CACHE, /* ops for reference from swap cache */ -}; - static inline int swap_count(unsigned short ent) { - return ent & SWAP_COUNT_MASK; -} - -static inline bool swap_has_cache(unsigned short ent) -{ - return !!(ent & SWAP_HAS_CACHE); -} - -static inline unsigned short encode_swapmap(int count, bool has_cache) -{ - unsigned short ret = count; - - if (has_cache) - return SWAP_HAS_CACHE | ret; - return ret; + return ent & ~SWAP_HAS_CACHE; } /* returns 1 if swap entry is freed */ @@ -224,7 +203,7 @@ static int wait_for_discard(void *word) #define LATENCY_LIMIT 256 static inline unsigned long scan_swap_map(struct swap_info_struct *si, - int cache) + unsigned short usage) { unsigned long offset; unsigned long scan_base; @@ -355,10 +334,7 @@ checks: si->lowest_bit = si->max; si->highest_bit = 0; } - if (cache == SWAP_CACHE) /* at usual swap-out via vmscan.c */ - si->swap_map[offset] = encode_swapmap(0, true); - else /* at suspend */ - si->swap_map[offset] = encode_swapmap(1, false); + si->swap_map[offset] = usage; si->cluster_next = offset + 1; si->flags -= SWP_SCANNING; @@ -483,7 +459,7 @@ swp_entry_t get_swap_page(void) swap_list.next = next; /* This is called for allocating swap entry for cache */ - offset = scan_swap_map(si, SWAP_CACHE); + offset = scan_swap_map(si, SWAP_HAS_CACHE); if (offset) { spin_unlock(&swap_lock); return swp_entry(type, offset); @@ -508,7 +484,7 @@ swp_entry_t get_swap_page_of_type(int type) if (si && (si->flags & SWP_WRITEOK)) { nr_swap_pages--; /* This is called for allocating swap entry, not cache */ - offset = scan_swap_map(si, SWAP_MAP); + offset = scan_swap_map(si, 1); if (offset) { spin_unlock(&swap_lock); return swp_entry(type, offset); @@ -555,29 +531,31 @@ out: return NULL; } -static int swap_entry_free(struct swap_info_struct *p, - swp_entry_t ent, int cache) +static unsigned short swap_entry_free(struct swap_info_struct *p, + swp_entry_t entry, unsigned short usage) { - unsigned long offset = swp_offset(ent); - int count = swap_count(p->swap_map[offset]); - bool has_cache; + unsigned long offset = swp_offset(entry); + unsigned short count; + unsigned short has_cache; - has_cache = swap_has_cache(p->swap_map[offset]); + count = p->swap_map[offset]; + has_cache = count & SWAP_HAS_CACHE; + count &= ~SWAP_HAS_CACHE; - if (cache == SWAP_MAP) { /* dropping usage count of swap */ - if (count < SWAP_MAP_MAX) { - count--; - p->swap_map[offset] = encode_swapmap(count, has_cache); - } - } else { /* dropping swap cache flag */ + if (usage == SWAP_HAS_CACHE) { VM_BUG_ON(!has_cache); - p->swap_map[offset] = encode_swapmap(count, false); + has_cache = 0; + } else if (count < SWAP_MAP_MAX) + count--; + + if (!count) + mem_cgroup_uncharge_swap(entry); + + usage = count | has_cache; + p->swap_map[offset] = usage; - } - /* return code. 
*/ - count = p->swap_map[offset]; /* free if no reference */ - if (!count) { + if (!usage) { if (offset < p->lowest_bit) p->lowest_bit = offset; if (offset > p->highest_bit) @@ -588,9 +566,8 @@ static int swap_entry_free(struct swap_info_struct *p, nr_swap_pages++; p->inuse_pages--; } - if (!swap_count(count)) - mem_cgroup_uncharge_swap(ent); - return count; + + return usage; } /* @@ -603,7 +580,7 @@ void swap_free(swp_entry_t entry) p = swap_info_get(entry); if (p) { - swap_entry_free(p, entry, SWAP_MAP); + swap_entry_free(p, entry, 1); spin_unlock(&swap_lock); } } @@ -614,19 +591,13 @@ void swap_free(swp_entry_t entry) void swapcache_free(swp_entry_t entry, struct page *page) { struct swap_info_struct *p; - int ret; + unsigned short count; p = swap_info_get(entry); if (p) { - ret = swap_entry_free(p, entry, SWAP_CACHE); - if (page) { - bool swapout; - if (ret) - swapout = true; /* the end of swap out */ - else - swapout = false; /* no more swap users! */ - mem_cgroup_uncharge_swapcache(page, entry, swapout); - } + count = swap_entry_free(p, entry, SWAP_HAS_CACHE); + if (page) + mem_cgroup_uncharge_swapcache(page, entry, count != 0); spin_unlock(&swap_lock); } } @@ -705,7 +676,7 @@ int free_swap_and_cache(swp_entry_t entry) p = swap_info_get(entry); if (p) { - if (swap_entry_free(p, entry, SWAP_MAP) == SWAP_HAS_CACHE) { + if (swap_entry_free(p, entry, 1) == SWAP_HAS_CACHE) { page = find_get_page(&swapper_space, entry.val); if (page && !trylock_page(page)) { page_cache_release(page); @@ -1212,7 +1183,7 @@ static int try_to_unuse(unsigned int type) if (swap_count(*swap_map) == SWAP_MAP_MAX) { spin_lock(&swap_lock); - *swap_map = encode_swapmap(0, true); + *swap_map = SWAP_HAS_CACHE; spin_unlock(&swap_lock); reset_overflow = 1; } @@ -2111,16 +2082,16 @@ void si_swapinfo(struct sysinfo *val) * - swap-cache reference is requested but there is already one. -> EEXIST * - swap-cache reference is requested but the entry is not used. 
-> ENOENT */ -static int __swap_duplicate(swp_entry_t entry, bool cache) +static int __swap_duplicate(swp_entry_t entry, unsigned short usage) { struct swap_info_struct *p; unsigned long offset, type; - int result = -EINVAL; - int count; - bool has_cache; + unsigned short count; + unsigned short has_cache; + int err = -EINVAL; if (non_swap_entry(entry)) - return -EINVAL; + goto out; type = swp_type(entry); if (type >= nr_swapfiles) @@ -2129,54 +2100,56 @@ static int __swap_duplicate(swp_entry_t entry, bool cache) offset = swp_offset(entry); spin_lock(&swap_lock); - if (unlikely(offset >= p->max)) goto unlock_out; - count = swap_count(p->swap_map[offset]); - has_cache = swap_has_cache(p->swap_map[offset]); + count = p->swap_map[offset]; + has_cache = count & SWAP_HAS_CACHE; + count &= ~SWAP_HAS_CACHE; + err = 0; - if (cache == SWAP_CACHE) { /* called for swapcache/swapin-readahead */ + if (usage == SWAP_HAS_CACHE) { /* set SWAP_HAS_CACHE if there is no cache and entry is used */ - if (!has_cache && count) { - p->swap_map[offset] = encode_swapmap(count, true); - result = 0; - } else if (has_cache) /* someone added cache */ - result = -EEXIST; - else if (!count) /* no users */ - result = -ENOENT; + if (!has_cache && count) + has_cache = SWAP_HAS_CACHE; + else if (has_cache) /* someone else added cache */ + err = -EEXIST; + else /* no users remaining */ + err = -ENOENT; } else if (count || has_cache) { - if (count < SWAP_MAP_MAX - 1) { - p->swap_map[offset] = encode_swapmap(count + 1, - has_cache); - result = 0; - } else if (count <= SWAP_MAP_MAX) { + + if (count < SWAP_MAP_MAX - 1) + count++; + else if (count <= SWAP_MAP_MAX) { if (swap_overflow++ < 5) printk(KERN_WARNING "swap_dup: swap entry overflow\n"); - p->swap_map[offset] = encode_swapmap(SWAP_MAP_MAX, - has_cache); - result = 0; - } + count = SWAP_MAP_MAX; + } else + err = -EINVAL; } else - result = -ENOENT; /* unused swap entry */ + err = -ENOENT; /* unused swap entry */ + + p->swap_map[offset] = count | has_cache; + unlock_out: spin_unlock(&swap_lock); out: - return result; + return err; bad_file: printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val); goto out; } + /* * increase reference count of swap entry by 1. */ void swap_duplicate(swp_entry_t entry) { - __swap_duplicate(entry, SWAP_MAP); + __swap_duplicate(entry, 1); } /* @@ -2189,7 +2162,7 @@ void swap_duplicate(swp_entry_t entry) */ int swapcache_prepare(swp_entry_t entry) { - return __swap_duplicate(entry, SWAP_CACHE); + return __swap_duplicate(entry, SWAP_HAS_CACHE); } /* -- cgit v1.2.3 From 8d69aaee80c123b460918816cbfa2e83224c3646 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 14 Dec 2009 17:58:45 -0800 Subject: swap_info: swap_map of chars not shorts Halve the vmalloc'ed swap_map array from unsigned shorts to unsigned chars: it's still very unusual to reach a swap count of 126, and the next patch allows it to be extended indefinitely. 
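With the count squeezed into a byte alongside the cache flag, the layout is worth spelling out; the values come from the include/linux/swap.h hunk in this patch, while the decode helper below is purely illustrative and not part of the kernel:

#include <linux/types.h>

/*
 * One swap_map byte:
 *	bit  7     SWAP_HAS_CACHE (0x80) - entry also has a swap cache page
 *	bits 6..0  duplication count, 0x00..0x7e (SWAP_MAP_MAX);
 *	           0x7f (SWAP_MAP_BAD) marks a bad block, not a count
 */
static void decode_swap_map(unsigned char ent, unsigned char *count,
			    bool *has_cache)
{
	*has_cache = ent & 0x80;	/* SWAP_HAS_CACHE */
	*count = ent & 0x7f;		/* ~SWAP_HAS_CACHE, as swap_count() does */
}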
Signed-off-by: Hugh Dickins Reviewed-by: KAMEZAWA Hiroyuki Cc: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/swap.h | 8 ++++---- mm/swapfile.c | 40 +++++++++++++++++++++++----------------- 2 files changed, 27 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/include/linux/swap.h b/include/linux/swap.h index c9d8870892b8..f733deb10748 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -151,9 +151,9 @@ enum { #define SWAP_CLUSTER_MAX 32 -#define SWAP_MAP_MAX 0x7ffe -#define SWAP_MAP_BAD 0x7fff -#define SWAP_HAS_CACHE 0x8000 /* There is a swap cache of entry. */ +#define SWAP_MAP_MAX 0x7e +#define SWAP_MAP_BAD 0x7f +#define SWAP_HAS_CACHE 0x80 /* There is a swap cache of entry. */ /* * The in-memory structure used to track swap areas. @@ -167,7 +167,7 @@ struct swap_info_struct { struct block_device *bdev; struct swap_extent first_swap_extent; struct swap_extent *curr_swap_extent; - unsigned short *swap_map; + unsigned char *swap_map; unsigned int lowest_bit; unsigned int highest_bit; unsigned int lowest_alloc; /* while preparing discard cluster */ diff --git a/mm/swapfile.c b/mm/swapfile.c index 52497490a7ca..c0d7b9ed0c16 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -53,7 +53,7 @@ static struct swap_info_struct *swap_info[MAX_SWAPFILES]; static DEFINE_MUTEX(swapon_mutex); -static inline int swap_count(unsigned short ent) +static inline unsigned char swap_count(unsigned char ent) { return ent & ~SWAP_HAS_CACHE; } @@ -203,7 +203,7 @@ static int wait_for_discard(void *word) #define LATENCY_LIMIT 256 static inline unsigned long scan_swap_map(struct swap_info_struct *si, - unsigned short usage) + unsigned char usage) { unsigned long offset; unsigned long scan_base; @@ -531,12 +531,12 @@ out: return NULL; } -static unsigned short swap_entry_free(struct swap_info_struct *p, - swp_entry_t entry, unsigned short usage) +static unsigned char swap_entry_free(struct swap_info_struct *p, + swp_entry_t entry, unsigned char usage) { unsigned long offset = swp_offset(entry); - unsigned short count; - unsigned short has_cache; + unsigned char count; + unsigned char has_cache; count = p->swap_map[offset]; has_cache = count & SWAP_HAS_CACHE; @@ -591,7 +591,7 @@ void swap_free(swp_entry_t entry) void swapcache_free(swp_entry_t entry, struct page *page) { struct swap_info_struct *p; - unsigned short count; + unsigned char count; p = swap_info_get(entry); if (p) { @@ -975,7 +975,7 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si, { unsigned int max = si->max; unsigned int i = prev; - int count; + unsigned char count; /* * No need for swap_lock here: we're just looking @@ -1013,8 +1013,8 @@ static int try_to_unuse(unsigned int type) { struct swap_info_struct *si = swap_info[type]; struct mm_struct *start_mm; - unsigned short *swap_map; - unsigned short swcount; + unsigned char *swap_map; + unsigned char swcount; struct page *page; swp_entry_t entry; unsigned int i = 0; @@ -1174,6 +1174,12 @@ static int try_to_unuse(unsigned int type) * If that's wrong, then we should worry more about * exit_mmap() and do_munmap() cases described above: * we might be resetting SWAP_MAP_MAX too early here. + * + * Yes, that's wrong: though very unlikely, swap count 0x7ffe + * could surely occur if pid_max raised from PID_MAX_DEFAULT; + * and we are now lowering SWAP_MAP_MAX to 0x7e, making it + * much easier to reach. But the next patch will fix that. 
+ * * We know "Undead"s can happen, they're okay, so don't * report them; but do report if we reset SWAP_MAP_MAX. */ @@ -1492,7 +1498,7 @@ bad_bmap: SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) { struct swap_info_struct *p = NULL; - unsigned short *swap_map; + unsigned char *swap_map; struct file *swap_file, *victim; struct address_space *mapping; struct inode *inode; @@ -1762,7 +1768,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) sector_t span; unsigned long maxpages = 1; unsigned long swapfilepages; - unsigned short *swap_map = NULL; + unsigned char *swap_map = NULL; struct page *page = NULL; struct inode *inode = NULL; int did_down = 0; @@ -1938,13 +1944,13 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) goto bad_swap; /* OK, set up the swap map and apply the bad block list */ - swap_map = vmalloc(maxpages * sizeof(short)); + swap_map = vmalloc(maxpages); if (!swap_map) { error = -ENOMEM; goto bad_swap; } - memset(swap_map, 0, maxpages * sizeof(short)); + memset(swap_map, 0, maxpages); for (i = 0; i < swap_header->info.nr_badpages; i++) { int page_nr = swap_header->info.badpages[i]; if (page_nr <= 0 || page_nr >= swap_header->info.last_page) { @@ -2082,12 +2088,12 @@ void si_swapinfo(struct sysinfo *val) * - swap-cache reference is requested but there is already one. -> EEXIST * - swap-cache reference is requested but the entry is not used. -> ENOENT */ -static int __swap_duplicate(swp_entry_t entry, unsigned short usage) +static int __swap_duplicate(swp_entry_t entry, unsigned char usage) { struct swap_info_struct *p; unsigned long offset, type; - unsigned short count; - unsigned short has_cache; + unsigned char count; + unsigned char has_cache; int err = -EINVAL; if (non_swap_entry(entry)) -- cgit v1.2.3 From 570a335b8e22579e2a51a68136d2b1f907a20eec Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 14 Dec 2009 17:58:46 -0800 Subject: swap_info: swap count continuations Swap is duplicated (reference count incremented by one) whenever the same swap page is inserted into another mm (when forking finds a swap entry in place of a pte, or when reclaim unmaps a pte to insert the swap entry). swap_info_struct's vmalloc'ed swap_map is the array of these reference counts: but what happens when the unsigned short (or unsigned char since the preceding patch) is full? (and its high bit is kept for a cache flag) We then lose track of it, never freeing, leaving it in use until swapoff: at which point we _hope_ that a single pass will have found all instances, assume there are no more, and will lose user data if we're wrong. Swapping of KSM pages has not yet been enabled; but it is implemented, and makes it very easy for a user to overflow the maximum swap count: possible with ordinary process pages, but unlikely, even when pid_max has been raised from PID_MAX_DEFAULT. This patch implements swap count continuations: when the count overflows, a continuation page is allocated and linked to the original vmalloc'ed map page, and this used to hold the continuation counts for that entry and its neighbours. These continuation pages are seldom referenced: the common paths all work on the original swap_map, only referring to a continuation page when the low "digit" of a count is incremented or decremented through SWAP_MAP_MAX. 
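A condensed sketch of the resulting caller contract, pieced together from the swap_duplicate() and mm/memory.c hunks in this patch (the wrapper is hypothetical and lock handling is elided; the GFP_KERNEL retry is only legal after the page table lock has been dropped):

#include <linux/swap.h>
#include <linux/gfp.h>

/* Sketch: duplicate a swap reference, growing the count if necessary. */
static int example_dup_swap_ref(swp_entry_t entry)
{
	if (swap_duplicate(entry) == 0)
		return 0;		/* common case: count still fits */

	/*
	 * swap_duplicate() already retried with a GFP_ATOMIC continuation
	 * page; fall back to GFP_KERNEL once locks have been dropped.
	 */
	if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
		return -ENOMEM;

	return swap_duplicate(entry);	/* retry with room for the carry */
}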
Signed-off-by: Hugh Dickins Cc: KAMEZAWA Hiroyuki Cc: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/swap.h | 22 +++- mm/memory.c | 19 +++- mm/rmap.c | 6 +- mm/swapfile.c | 304 ++++++++++++++++++++++++++++++++++++++++++--------- 4 files changed, 287 insertions(+), 64 deletions(-) (limited to 'include') diff --git a/include/linux/swap.h b/include/linux/swap.h index f733deb10748..389e7bd92cca 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -145,15 +145,18 @@ enum { SWP_DISCARDABLE = (1 << 2), /* blkdev supports discard */ SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */ SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */ + SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */ /* add others here before... */ SWP_SCANNING = (1 << 8), /* refcount in scan_swap_map */ }; #define SWAP_CLUSTER_MAX 32 -#define SWAP_MAP_MAX 0x7e -#define SWAP_MAP_BAD 0x7f -#define SWAP_HAS_CACHE 0x80 /* There is a swap cache of entry. */ +#define SWAP_MAP_MAX 0x3e /* Max duplication count, in first swap_map */ +#define SWAP_MAP_BAD 0x3f /* Note pageblock is bad, in first swap_map */ +#define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */ +#define SWAP_CONT_MAX 0x7f /* Max count, in each swap_map continuation */ +#define COUNT_CONTINUED 0x80 /* See swap_map continuation for full count */ /* * The in-memory structure used to track swap areas. @@ -311,9 +314,10 @@ extern long total_swap_pages; extern void si_swapinfo(struct sysinfo *); extern swp_entry_t get_swap_page(void); extern swp_entry_t get_swap_page_of_type(int); -extern void swap_duplicate(swp_entry_t); -extern int swapcache_prepare(swp_entry_t); extern int valid_swaphandles(swp_entry_t, unsigned long *); +extern int add_swap_count_continuation(swp_entry_t, gfp_t); +extern int swap_duplicate(swp_entry_t); +extern int swapcache_prepare(swp_entry_t); extern void swap_free(swp_entry_t); extern void swapcache_free(swp_entry_t, struct page *page); extern int free_swap_and_cache(swp_entry_t); @@ -385,8 +389,14 @@ static inline void show_swap_cache_info(void) #define free_swap_and_cache(swp) is_migration_entry(swp) #define swapcache_prepare(swp) is_migration_entry(swp) -static inline void swap_duplicate(swp_entry_t swp) +static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask) { + return 0; +} + +static inline int swap_duplicate(swp_entry_t swp) +{ + return 0; } static inline void swap_free(swp_entry_t swp) diff --git a/mm/memory.c b/mm/memory.c index 6ab19dd4a199..543c446bf4ed 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -572,7 +572,7 @@ out: * covered by this vma. */ -static inline void +static inline unsigned long copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma, unsigned long addr, int *rss) @@ -586,7 +586,9 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, if (!pte_file(pte)) { swp_entry_t entry = pte_to_swp_entry(pte); - swap_duplicate(entry); + if (swap_duplicate(entry) < 0) + return entry.val; + /* make sure dst_mm is on swapoff's mmlist. 
*/ if (unlikely(list_empty(&dst_mm->mmlist))) { spin_lock(&mmlist_lock); @@ -635,6 +637,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, out_set_pte: set_pte_at(dst_mm, addr, dst_pte, pte); + return 0; } static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, @@ -646,6 +649,7 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, spinlock_t *src_ptl, *dst_ptl; int progress = 0; int rss[2]; + swp_entry_t entry = (swp_entry_t){0}; again: rss[1] = rss[0] = 0; @@ -674,7 +678,10 @@ again: progress++; continue; } - copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss); + entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, + vma, addr, rss); + if (entry.val) + break; progress += 8; } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end); @@ -684,6 +691,12 @@ again: add_mm_rss(dst_mm, rss[0], rss[1]); pte_unmap_unlock(orig_dst_pte, dst_ptl); cond_resched(); + + if (entry.val) { + if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) + return -ENOMEM; + progress = 0; + } if (addr != end) goto again; return 0; diff --git a/mm/rmap.c b/mm/rmap.c index dd43373a483f..710bb4b2adf1 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -822,7 +822,11 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, * Store the swap location in the pte. * See handle_pte_fault() ... */ - swap_duplicate(entry); + if (swap_duplicate(entry) < 0) { + set_pte_at(mm, address, pte, pteval); + ret = SWAP_FAIL; + goto out_unmap; + } if (list_empty(&mm->mmlist)) { spin_lock(&mmlist_lock); if (list_empty(&mm->mmlist)) diff --git a/mm/swapfile.c b/mm/swapfile.c index c0d7b9ed0c16..cc5e7ebf2d2c 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -35,11 +35,14 @@ #include #include +static bool swap_count_continued(struct swap_info_struct *, pgoff_t, + unsigned char); +static void free_swap_count_continuations(struct swap_info_struct *); + static DEFINE_SPINLOCK(swap_lock); static unsigned int nr_swapfiles; long nr_swap_pages; long total_swap_pages; -static int swap_overflow; static int least_priority; static const char Bad_file[] = "Bad swap file entry "; @@ -55,7 +58,7 @@ static DEFINE_MUTEX(swapon_mutex); static inline unsigned char swap_count(unsigned char ent) { - return ent & ~SWAP_HAS_CACHE; + return ent & ~SWAP_HAS_CACHE; /* may include SWAP_HAS_CONT flag */ } /* returns 1 if swap entry is freed */ @@ -545,8 +548,15 @@ static unsigned char swap_entry_free(struct swap_info_struct *p, if (usage == SWAP_HAS_CACHE) { VM_BUG_ON(!has_cache); has_cache = 0; - } else if (count < SWAP_MAP_MAX) - count--; + } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) { + if (count == COUNT_CONTINUED) { + if (swap_count_continued(p, offset, count)) + count = SWAP_MAP_MAX | COUNT_CONTINUED; + else + count = SWAP_MAP_MAX; + } else + count--; + } if (!count) mem_cgroup_uncharge_swap(entry); @@ -604,6 +614,8 @@ void swapcache_free(swp_entry_t entry, struct page *page) /* * How many references to page are currently swapped out? + * This does not give an exact answer when swap count is continued, + * but does include the high COUNT_CONTINUED flag to allow for that. */ static inline int page_swapcount(struct page *page) { @@ -1019,7 +1031,6 @@ static int try_to_unuse(unsigned int type) swp_entry_t entry; unsigned int i = 0; int retval = 0; - int reset_overflow = 0; int shmem; /* @@ -1034,8 +1045,7 @@ static int try_to_unuse(unsigned int type) * together, child after parent. 
If we race with dup_mmap(), we * prefer to resolve parent before child, lest we miss entries * duplicated after we scanned child: using last mm would invert - * that. Though it's only a serious concern when an overflowed - * swap count is reset from SWAP_MAP_MAX, preventing a rescan. + * that. */ start_mm = &init_mm; atomic_inc(&init_mm.mm_users); @@ -1164,36 +1174,6 @@ static int try_to_unuse(unsigned int type) break; } - /* - * How could swap count reach 0x7ffe ? - * There's no way to repeat a swap page within an mm - * (except in shmem, where it's the shared object which takes - * the reference count)? - * We believe SWAP_MAP_MAX cannot occur.(if occur, unsigned - * short is too small....) - * If that's wrong, then we should worry more about - * exit_mmap() and do_munmap() cases described above: - * we might be resetting SWAP_MAP_MAX too early here. - * - * Yes, that's wrong: though very unlikely, swap count 0x7ffe - * could surely occur if pid_max raised from PID_MAX_DEFAULT; - * and we are now lowering SWAP_MAP_MAX to 0x7e, making it - * much easier to reach. But the next patch will fix that. - * - * We know "Undead"s can happen, they're okay, so don't - * report them; but do report if we reset SWAP_MAP_MAX. - */ - /* We might release the lock_page() in unuse_mm(). */ - if (!PageSwapCache(page) || page_private(page) != entry.val) - goto retry; - - if (swap_count(*swap_map) == SWAP_MAP_MAX) { - spin_lock(&swap_lock); - *swap_map = SWAP_HAS_CACHE; - spin_unlock(&swap_lock); - reset_overflow = 1; - } - /* * If a reference remains (rare), we would like to leave * the page in the swap cache; but try_to_unmap could @@ -1235,7 +1215,6 @@ static int try_to_unuse(unsigned int type) * mark page dirty so shrink_page_list will preserve it. */ SetPageDirty(page); -retry: unlock_page(page); page_cache_release(page); @@ -1247,10 +1226,6 @@ retry: } mmput(start_mm); - if (reset_overflow) { - printk(KERN_WARNING "swapoff: cleared swap entry overflow\n"); - swap_overflow = 0; - } return retval; } @@ -1593,6 +1568,9 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) up_write(&swap_unplug_sem); destroy_swap_extents(p); + if (p->flags & SWP_CONTINUED) + free_swap_count_continuations(p); + mutex_lock(&swapon_mutex); spin_lock(&swap_lock); drain_mmlist(); @@ -2079,14 +2057,13 @@ void si_swapinfo(struct sysinfo *val) /* * Verify that a swap entry is valid and increment its swap map count. * - * Note: if swap_map[] reaches SWAP_MAP_MAX the entries are treated as - * "permanent", but will be reclaimed by the next swapoff. * Returns error code in following case. * - success -> 0 * - swp_entry is invalid -> EINVAL * - swp_entry is migration entry -> EINVAL * - swap-cache reference is requested but there is already one. -> EEXIST * - swap-cache reference is requested but the entry is not used. -> ENOENT + * - swap-mapped reference requested but needs continued swap count. 
-> ENOMEM */ static int __swap_duplicate(swp_entry_t entry, unsigned char usage) { @@ -2126,15 +2103,14 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage) } else if (count || has_cache) { - if (count < SWAP_MAP_MAX - 1) - count++; - else if (count <= SWAP_MAP_MAX) { - if (swap_overflow++ < 5) - printk(KERN_WARNING - "swap_dup: swap entry overflow\n"); - count = SWAP_MAP_MAX; - } else + if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX) + count += usage; + else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX) err = -EINVAL; + else if (swap_count_continued(p, offset, count)) + count = COUNT_CONTINUED; + else + err = -ENOMEM; } else err = -ENOENT; /* unused swap entry */ @@ -2153,9 +2129,13 @@ bad_file: /* * increase reference count of swap entry by 1. */ -void swap_duplicate(swp_entry_t entry) +int swap_duplicate(swp_entry_t entry) { - __swap_duplicate(entry, 1); + int err = 0; + + while (!err && __swap_duplicate(entry, 1) == -ENOMEM) + err = add_swap_count_continuation(entry, GFP_ATOMIC); + return err; } /* @@ -2222,3 +2202,219 @@ int valid_swaphandles(swp_entry_t entry, unsigned long *offset) *offset = ++toff; return nr_pages? ++nr_pages: 0; } + +/* + * add_swap_count_continuation - called when a swap count is duplicated + * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's + * page of the original vmalloc'ed swap_map, to hold the continuation count + * (for that entry and for its neighbouring PAGE_SIZE swap entries). Called + * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc. + * + * These continuation pages are seldom referenced: the common paths all work + * on the original swap_map, only referring to a continuation page when the + * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX. + * + * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding + * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL) + * can be called after dropping locks. + */ +int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask) +{ + struct swap_info_struct *si; + struct page *head; + struct page *page; + struct page *list_page; + pgoff_t offset; + unsigned char count; + + /* + * When debugging, it's easier to use __GFP_ZERO here; but it's better + * for latency not to zero a page while GFP_ATOMIC and holding locks. + */ + page = alloc_page(gfp_mask | __GFP_HIGHMEM); + + si = swap_info_get(entry); + if (!si) { + /* + * An acceptable race has occurred since the failing + * __swap_duplicate(): the swap entry has been freed, + * perhaps even the whole swap_map cleared for swapoff. + */ + goto outer; + } + + offset = swp_offset(entry); + count = si->swap_map[offset] & ~SWAP_HAS_CACHE; + + if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) { + /* + * The higher the swap count, the more likely it is that tasks + * will race to add swap count continuation: we need to avoid + * over-provisioning. + */ + goto out; + } + + if (!page) { + spin_unlock(&swap_lock); + return -ENOMEM; + } + + /* + * We are fortunate that although vmalloc_to_page uses pte_offset_map, + * no architecture is using highmem pages for kernel pagetables: so it + * will not corrupt the GFP_ATOMIC caller's atomic pagetable kmaps. + */ + head = vmalloc_to_page(si->swap_map + offset); + offset &= ~PAGE_MASK; + + /* + * Page allocation does not initialize the page's lru field, + * but it does always reset its private field. 
+ */ + if (!page_private(head)) { + BUG_ON(count & COUNT_CONTINUED); + INIT_LIST_HEAD(&head->lru); + set_page_private(head, SWP_CONTINUED); + si->flags |= SWP_CONTINUED; + } + + list_for_each_entry(list_page, &head->lru, lru) { + unsigned char *map; + + /* + * If the previous map said no continuation, but we've found + * a continuation page, free our allocation and use this one. + */ + if (!(count & COUNT_CONTINUED)) + goto out; + + map = kmap_atomic(list_page, KM_USER0) + offset; + count = *map; + kunmap_atomic(map, KM_USER0); + + /* + * If this continuation count now has some space in it, + * free our allocation and use this one. + */ + if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX) + goto out; + } + + list_add_tail(&page->lru, &head->lru); + page = NULL; /* now it's attached, don't free it */ +out: + spin_unlock(&swap_lock); +outer: + if (page) + __free_page(page); + return 0; +} + +/* + * swap_count_continued - when the original swap_map count is incremented + * from SWAP_MAP_MAX, check if there is already a continuation page to carry + * into, carry if so, or else fail until a new continuation page is allocated; + * when the original swap_map count is decremented from 0 with continuation, + * borrow from the continuation and report whether it still holds more. + * Called while __swap_duplicate() or swap_entry_free() holds swap_lock. + */ +static bool swap_count_continued(struct swap_info_struct *si, + pgoff_t offset, unsigned char count) +{ + struct page *head; + struct page *page; + unsigned char *map; + + head = vmalloc_to_page(si->swap_map + offset); + if (page_private(head) != SWP_CONTINUED) { + BUG_ON(count & COUNT_CONTINUED); + return false; /* need to add count continuation */ + } + + offset &= ~PAGE_MASK; + page = list_entry(head->lru.next, struct page, lru); + map = kmap_atomic(page, KM_USER0) + offset; + + if (count == SWAP_MAP_MAX) /* initial increment from swap_map */ + goto init_map; /* jump over SWAP_CONT_MAX checks */ + + if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */ + /* + * Think of how you add 1 to 999 + */ + while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) { + kunmap_atomic(map, KM_USER0); + page = list_entry(page->lru.next, struct page, lru); + BUG_ON(page == head); + map = kmap_atomic(page, KM_USER0) + offset; + } + if (*map == SWAP_CONT_MAX) { + kunmap_atomic(map, KM_USER0); + page = list_entry(page->lru.next, struct page, lru); + if (page == head) + return false; /* add count continuation */ + map = kmap_atomic(page, KM_USER0) + offset; +init_map: *map = 0; /* we didn't zero the page */ + } + *map += 1; + kunmap_atomic(map, KM_USER0); + page = list_entry(page->lru.prev, struct page, lru); + while (page != head) { + map = kmap_atomic(page, KM_USER0) + offset; + *map = COUNT_CONTINUED; + kunmap_atomic(map, KM_USER0); + page = list_entry(page->lru.prev, struct page, lru); + } + return true; /* incremented */ + + } else { /* decrementing */ + /* + * Think of how you subtract 1 from 1000 + */ + BUG_ON(count != COUNT_CONTINUED); + while (*map == COUNT_CONTINUED) { + kunmap_atomic(map, KM_USER0); + page = list_entry(page->lru.next, struct page, lru); + BUG_ON(page == head); + map = kmap_atomic(page, KM_USER0) + offset; + } + BUG_ON(*map == 0); + *map -= 1; + if (*map == 0) + count = 0; + kunmap_atomic(map, KM_USER0); + page = list_entry(page->lru.prev, struct page, lru); + while (page != head) { + map = kmap_atomic(page, KM_USER0) + offset; + *map = SWAP_CONT_MAX | count; + count = COUNT_CONTINUED; + kunmap_atomic(map, KM_USER0); + page = 
list_entry(page->lru.prev, struct page, lru); + } + return count == COUNT_CONTINUED; + } +} + +/* + * free_swap_count_continuations - swapoff free all the continuation pages + * appended to the swap_map, after swap_map is quiesced, before vfree'ing it. + */ +static void free_swap_count_continuations(struct swap_info_struct *si) +{ + pgoff_t offset; + + for (offset = 0; offset < si->max; offset += PAGE_SIZE) { + struct page *head; + head = vmalloc_to_page(si->swap_map + offset); + if (page_private(head)) { + struct list_head *this, *next; + list_for_each_safe(this, next, &head->lru) { + struct page *page; + page = list_entry(this, struct page, lru); + list_del(this); + __free_page(page); + } + } + } +} -- cgit v1.2.3 From aaa468653b4a0d11c603c48d716f765177a5a9e4 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 14 Dec 2009 17:58:47 -0800 Subject: swap_info: note SWAP_MAP_SHMEM While we're fiddling with the swap_map values, let's assign a particular value to shmem/tmpfs swap pages: their swap counts are never incremented, and it helps swapoff's try_to_unuse() a little if it can immediately distinguish those pages from process pages. Since we've no use for SWAP_MAP_BAD | COUNT_CONTINUED, we might as well use that 0xbf value for SWAP_MAP_SHMEM. Signed-off-by: Hugh Dickins Reviewed-by: KAMEZAWA Hiroyuki Cc: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/swap.h | 6 ++++++ mm/shmem.c | 11 +++++++++-- mm/swapfile.c | 47 +++++++++++++++++++++++++++-------------------- 3 files changed, 42 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/include/linux/swap.h b/include/linux/swap.h index 389e7bd92cca..ac43d87b89b0 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -157,6 +157,7 @@ enum { #define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */ #define SWAP_CONT_MAX 0x7f /* Max count, in each swap_map continuation */ #define COUNT_CONTINUED 0x80 /* See swap_map continuation for full count */ +#define SWAP_MAP_SHMEM 0xbf /* Owned by shmem/tmpfs, in first swap_map */ /* * The in-memory structure used to track swap areas. @@ -316,6 +317,7 @@ extern swp_entry_t get_swap_page(void); extern swp_entry_t get_swap_page_of_type(int); extern int valid_swaphandles(swp_entry_t, unsigned long *); extern int add_swap_count_continuation(swp_entry_t, gfp_t); +extern void swap_shmem_alloc(swp_entry_t); extern int swap_duplicate(swp_entry_t); extern int swapcache_prepare(swp_entry_t); extern void swap_free(swp_entry_t); @@ -394,6 +396,10 @@ static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask) return 0; } +static inline void swap_shmem_alloc(swp_entry_t swp) +{ +} + static inline int swap_duplicate(swp_entry_t swp) { return 0; diff --git a/mm/shmem.c b/mm/shmem.c index 356dd99566ec..4fb41c83daca 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1017,7 +1017,14 @@ int shmem_unuse(swp_entry_t entry, struct page *page) goto out; } mutex_unlock(&shmem_swaplist_mutex); -out: return found; /* 0 or 1 or -ENOMEM */ + /* + * Can some race bring us here? We've been holding page lock, + * so I think not; but would rather try again later than BUG() + */ + unlock_page(page); + page_cache_release(page); +out: + return (found < 0) ? 
found : 0; } /* @@ -1080,7 +1087,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) else inode = NULL; spin_unlock(&info->lock); - swap_duplicate(swap); + swap_shmem_alloc(swap); BUG_ON(page_mapped(page)); page_cache_release(page); /* pagecache ref */ swap_writepage(page, wbc); diff --git a/mm/swapfile.c b/mm/swapfile.c index cc5e7ebf2d2c..58bec6600167 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -548,6 +548,12 @@ static unsigned char swap_entry_free(struct swap_info_struct *p, if (usage == SWAP_HAS_CACHE) { VM_BUG_ON(!has_cache); has_cache = 0; + } else if (count == SWAP_MAP_SHMEM) { + /* + * Or we could insist on shmem.c using a special + * swap_shmem_free() and free_shmem_swap_and_cache()... + */ + count = 0; } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) { if (count == COUNT_CONTINUED) { if (swap_count_continued(p, offset, count)) @@ -1031,7 +1037,6 @@ static int try_to_unuse(unsigned int type) swp_entry_t entry; unsigned int i = 0; int retval = 0; - int shmem; /* * When searching mms for an entry, a good strategy is to @@ -1107,17 +1112,18 @@ static int try_to_unuse(unsigned int type) /* * Remove all references to entry. - * Whenever we reach init_mm, there's no address space - * to search, but use it as a reminder to search shmem. */ - shmem = 0; swcount = *swap_map; - if (swap_count(swcount)) { - if (start_mm == &init_mm) - shmem = shmem_unuse(entry, page); - else - retval = unuse_mm(start_mm, entry, page); + if (swap_count(swcount) == SWAP_MAP_SHMEM) { + retval = shmem_unuse(entry, page); + /* page has already been unlocked and released */ + if (retval < 0) + break; + continue; } + if (swap_count(swcount) && start_mm != &init_mm) + retval = unuse_mm(start_mm, entry, page); + if (swap_count(*swap_map)) { int set_start_mm = (*swap_map >= swcount); struct list_head *p = &start_mm->mmlist; @@ -1128,7 +1134,7 @@ static int try_to_unuse(unsigned int type) atomic_inc(&new_start_mm->mm_users); atomic_inc(&prev_mm->mm_users); spin_lock(&mmlist_lock); - while (swap_count(*swap_map) && !retval && !shmem && + while (swap_count(*swap_map) && !retval && (p = p->next) != &start_mm->mmlist) { mm = list_entry(p, struct mm_struct, mmlist); if (!atomic_inc_not_zero(&mm->mm_users)) @@ -1142,10 +1148,9 @@ static int try_to_unuse(unsigned int type) swcount = *swap_map; if (!swap_count(swcount)) /* any usage ? */ ; - else if (mm == &init_mm) { + else if (mm == &init_mm) set_start_mm = 1; - shmem = shmem_unuse(entry, page); - } else + else retval = unuse_mm(mm, entry, page); if (set_start_mm && *swap_map < swcount) { @@ -1161,13 +1166,6 @@ static int try_to_unuse(unsigned int type) mmput(start_mm); start_mm = new_start_mm; } - if (shmem) { - /* page has already been unlocked and released */ - if (shmem > 0) - continue; - retval = shmem; - break; - } if (retval) { unlock_page(page); page_cache_release(page); @@ -2126,6 +2124,15 @@ bad_file: goto out; } +/* + * Help swapoff by noting that swap entry belongs to shmem/tmpfs + * (in which case its reference count is never incremented). + */ +void swap_shmem_alloc(swp_entry_t entry) +{ + __swap_duplicate(entry, SWAP_MAP_SHMEM); +} + /* * increase reference count of swap entry by 1. 
*/ -- cgit v1.2.3 From 7509765a29cfb1a4c506c09b304aaf3b4111c653 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 14 Dec 2009 17:58:48 -0800 Subject: swap_info: reorder its fields Reorder (and comment) the fields of swap_info_struct, to make better use of its cachelines: it's good for swap_duplicate() in particular if unsigned int max and swap_map are near the start. Signed-off-by: Hugh Dickins Cc: KAMEZAWA Hiroyuki Cc: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/swap.h | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/include/linux/swap.h b/include/linux/swap.h index ac43d87b89b0..9f0ca325e30d 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -167,21 +167,21 @@ struct swap_info_struct { signed short prio; /* swap priority of this type */ signed char type; /* strange name for an index */ signed char next; /* next type on the swap list */ - struct file *swap_file; - struct block_device *bdev; - struct swap_extent first_swap_extent; - struct swap_extent *curr_swap_extent; - unsigned char *swap_map; - unsigned int lowest_bit; - unsigned int highest_bit; + unsigned int max; /* extent of the swap_map */ + unsigned char *swap_map; /* vmalloc'ed array of usage counts */ + unsigned int lowest_bit; /* index of first free in swap_map */ + unsigned int highest_bit; /* index of last free in swap_map */ + unsigned int pages; /* total of usable pages of swap */ + unsigned int inuse_pages; /* number of those currently in use */ + unsigned int cluster_next; /* likely index for next allocation */ + unsigned int cluster_nr; /* countdown to next cluster search */ unsigned int lowest_alloc; /* while preparing discard cluster */ unsigned int highest_alloc; /* while preparing discard cluster */ - unsigned int cluster_next; - unsigned int cluster_nr; - unsigned int pages; - unsigned int max; - unsigned int inuse_pages; - unsigned int old_block_size; + struct swap_extent *curr_swap_extent; + struct swap_extent first_swap_extent; + struct block_device *bdev; /* swap device or bdev of swap file */ + struct file *swap_file; /* seldom referenced */ + unsigned int old_block_size; /* seldom referenced */ }; struct swap_list_t { -- cgit v1.2.3 From d4906e1aa516cc965292b43b5a26122dd4344e7e Mon Sep 17 00:00:00 2001 From: Lee Schermerhorn Date: Mon, 14 Dec 2009 17:58:49 -0800 Subject: swap: rework map_swap_page() again Seems that page_io.c doesn't really need to know that page_private(page) is the swp_entry 'val'. Rework map_swap_page() to do what its name says and map a page to a page offset in the swap space. The only other caller of map_swap_page() is internal to mm/swapfile.c and it does want to map a swap entry to the 'sector'. So rename map_swap_page() to map_swap_entry(), make it 'static' and implement map_swap_page() as a wrapper around that.
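The shape of that split, reduced to a standalone sketch (placeholder types and bodies, not the kernel implementation):

    #include <stdio.h>

    typedef unsigned long sector_t;
    typedef struct { unsigned long val; } swp_entry_t;
    struct page { unsigned long private; };         /* stands in for page_private(page) */

    /* The old public entry point becomes an internal helper keyed on the entry. */
    static sector_t map_swap_entry(swp_entry_t entry)
    {
        return entry.val;                           /* placeholder for the extent lookup */
    }

    /* The public name survives as a thin wrapper taking the page, so callers
     * such as page_io.c no longer need to know about swp_entry_t at all. */
    static sector_t map_swap_page(struct page *page)
    {
        swp_entry_t entry = { .val = page->private };
        return map_swap_entry(entry);
    }

    int main(void)
    {
        struct page p = { .private = 42 };
        printf("%lu\n", map_swap_page(&p));
        return 0;
    }

The wrapper keeps the page_private() knowledge in one place, which is the point of the rework.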
Signed-off-by: Lee Schermerhorn Cc: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/swap.h | 2 +- mm/page_io.c | 4 +--- mm/swapfile.c | 20 ++++++++++++++++---- 3 files changed, 18 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/linux/swap.h b/include/linux/swap.h index 9f0ca325e30d..a2602a8207a6 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -325,7 +325,7 @@ extern void swapcache_free(swp_entry_t, struct page *page); extern int free_swap_and_cache(swp_entry_t); extern int swap_type_of(dev_t, sector_t, struct block_device **); extern unsigned int count_swap_pages(int, int); -extern sector_t map_swap_page(swp_entry_t, struct block_device **); +extern sector_t map_swap_page(struct page *, struct block_device **); extern sector_t swapdev_block(int, pgoff_t); extern int reuse_swap_page(struct page *); extern int try_to_free_swap(struct page *); diff --git a/mm/page_io.c b/mm/page_io.c index afeed89a0a5d..a19af956ee1b 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -26,9 +26,7 @@ static struct bio *get_swap_bio(gfp_t gfp_flags, bio = bio_alloc(gfp_flags, 1); if (bio) { - swp_entry_t entry; - entry.val = page_private(page); - bio->bi_sector = map_swap_page(entry, &bio->bi_bdev); + bio->bi_sector = map_swap_page(page, &bio->bi_bdev); bio->bi_sector <<= PAGE_SHIFT - 9; bio->bi_io_vec[0].bv_page = page; bio->bi_io_vec[0].bv_len = PAGE_SIZE; diff --git a/mm/swapfile.c b/mm/swapfile.c index 58bec6600167..d5eb2e85600b 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -38,6 +38,7 @@ static bool swap_count_continued(struct swap_info_struct *, pgoff_t, unsigned char); static void free_swap_count_continuations(struct swap_info_struct *); +static sector_t map_swap_entry(swp_entry_t, struct block_device**); static DEFINE_SPINLOCK(swap_lock); static unsigned int nr_swapfiles; @@ -782,7 +783,7 @@ sector_t swapdev_block(int type, pgoff_t offset) return 0; if (!(swap_info[type]->flags & SWP_WRITEOK)) return 0; - return map_swap_page(swp_entry(type, offset), &bdev); + return map_swap_entry(swp_entry(type, offset), &bdev); } /* @@ -1249,10 +1250,11 @@ static void drain_mmlist(void) /* * Use this swapdev's extent info to locate the (PAGE_SIZE) block which - * corresponds to page offset `offset'. Note that the type of this function - * is sector_t, but it returns page offset into the bdev, not sector offset. + * corresponds to page offset for the specified swap entry. + * Note that the type of this function is sector_t, but it returns page offset + * into the bdev, not sector offset. */ -sector_t map_swap_page(swp_entry_t entry, struct block_device **bdev) +static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev) { struct swap_info_struct *sis; struct swap_extent *start_se; @@ -1280,6 +1282,16 @@ sector_t map_swap_page(swp_entry_t entry, struct block_device **bdev) } } +/* + * Returns the page offset into bdev for the specified page's swap entry. + */ +sector_t map_swap_page(struct page *page, struct block_device **bdev) +{ + swp_entry_t entry; + entry.val = page_private(page); + return map_swap_entry(entry, bdev); +} + /* * Free all of a swapdev's extent information */ -- cgit v1.2.3 From f50de2d3811081957156b5d736778799379c29de Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Mon, 14 Dec 2009 17:58:53 -0800 Subject: vmscan: have kswapd sleep for a short interval and double check it should be asleep After kswapd balances all zones in a pgdat, it goes to sleep. 
In the event of no IO congestion, kswapd can go to sleep very shortly after the high watermark was reached. If there is a constant stream of allocations from parallel processes, it can mean that kswapd went to sleep too quickly and the high watermark is not being maintained for a sufficient length of time. This patch makes kswapd go to sleep as a two-stage process. It first tries to sleep for HZ/10. If it is woken up by another process or the high watermark is no longer met, it's considered a premature sleep and kswapd continues work. Otherwise it goes fully to sleep. This adds more counters to distinguish between fast and slow breaches of watermarks. A "fast" premature sleep is one where the low watermark was hit in a very short time after kswapd going to sleep. A "slow" premature sleep indicates that the high watermark was breached after a very short interval. Signed-off-by: Mel Gorman Cc: Frans Pop Cc: KOSAKI Motohiro Cc: Rik van Riel Cc: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/vmstat.h | 1 + mm/vmscan.c | 44 ++++++++++++++++++++++++++++++++++++++++++-- mm/vmstat.c | 2 ++ 3 files changed, 45 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index d85889710f9b..fd5be240c0b7 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -40,6 +40,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, PGSCAN_ZONE_RECLAIM_FAILED, #endif PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL, + KSWAPD_PREMATURE_FAST, KSWAPD_PREMATURE_SLOW, PAGEOUTRUN, ALLOCSTALL, PGROTATED, #ifdef CONFIG_HUGETLB_PAGE HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL, diff --git a/mm/vmscan.c b/mm/vmscan.c index 61d3a9a0d96f..e176bd3936da 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1904,6 +1904,24 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, } #endif +/* is kswapd sleeping prematurely? */ +static int sleeping_prematurely(int order, long remaining) +{ + struct zone *zone; + + /* If a direct reclaimer woke kswapd within HZ/10, it's premature */ + if (remaining) + return 1; + + /* If after HZ/10, a zone is below the high mark, it's premature */ + for_each_populated_zone(zone) + if (!zone_watermark_ok(zone, order, high_wmark_pages(zone), + 0, 0)) + return 1; + + return 0; +} + /* * For kswapd, balance_pgdat() will work across all this node's zones until * they are all at high_wmark_pages(zone). @@ -2185,8 +2203,30 @@ static int kswapd(void *p) */ order = new_order; } else { - if (!freezing(current) && !kthread_should_stop()) - schedule(); + if (!freezing(current) && !kthread_should_stop()) { + long remaining = 0; + + /* Try to sleep for a short interval */ + if (!sleeping_prematurely(order, remaining)) { + remaining = schedule_timeout(HZ/10); + finish_wait(&pgdat->kswapd_wait, &wait); + prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); + } + + /* + * After a short sleep, check if it was a + * premature sleep.
If not, then go fully + * to sleep until explicitly woken up + */ + if (!sleeping_prematurely(order, remaining)) + schedule(); + else { + if (remaining) + count_vm_event(KSWAPD_PREMATURE_FAST); + else + count_vm_event(KSWAPD_PREMATURE_SLOW); + } + } order = pgdat->kswapd_max_order; } diff --git a/mm/vmstat.c b/mm/vmstat.c index dad2327e4580..63ab71455c5b 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -683,6 +683,8 @@ static const char * const vmstat_text[] = { "slabs_scanned", "kswapd_steal", "kswapd_inodesteal", + "kswapd_slept_prematurely_fast", + "kswapd_slept_prematurely_slow", "pageoutrun", "allocstall", -- cgit v1.2.3 From bb3ab596832b920c703d1aea1ce76d69c0f71fb7 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Mon, 14 Dec 2009 17:58:55 -0800 Subject: vmscan: stop kswapd waiting on congestion when the min watermark is not being met If reclaim fails to make sufficient progress, the priority is raised. Once the priority is higher, kswapd starts waiting on congestion. However, if the zone is below the min watermark then kswapd needs to continue working without delay as there is a danger of an increased rate of GFP_ATOMIC allocation failure. This patch changes the conditions under which kswapd waits on congestion by only going to sleep if the min watermarks are being met. [mel@csn.ul.ie: add stats to track how relevant the logic is] [mel@csn.ul.ie: make kswapd only check its own zones and rename the relevant counters] Signed-off-by: KOSAKI Motohiro Signed-off-by: Mel Gorman Reviewed-by: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/vmstat.h | 3 ++- mm/vmscan.c | 38 +++++++++++++++++++++++++++++--------- mm/vmstat.c | 5 +++-- 3 files changed, 34 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index fd5be240c0b7..ee03bba9c5df 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -40,7 +40,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, PGSCAN_ZONE_RECLAIM_FAILED, #endif PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL, - KSWAPD_PREMATURE_FAST, KSWAPD_PREMATURE_SLOW, + KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY, + KSWAPD_SKIP_CONGESTION_WAIT, PAGEOUTRUN, ALLOCSTALL, PGROTATED, #ifdef CONFIG_HUGETLB_PAGE HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL, diff --git a/mm/vmscan.c b/mm/vmscan.c index e176bd3936da..cb69f717799f 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1905,19 +1905,25 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, #endif /* is kswapd sleeping prematurely? */ -static int sleeping_prematurely(int order, long remaining) +static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining) { - struct zone *zone; + int i; /* If a direct reclaimer woke kswapd within HZ/10, it's premature */ if (remaining) return 1; /* If after HZ/10, a zone is below the high mark, it's premature */ - for_each_populated_zone(zone) + for (i = 0; i < pgdat->nr_zones; i++) { + struct zone *zone = pgdat->node_zones + i; + + if (!populated_zone(zone)) + continue; + if (!zone_watermark_ok(zone, order, high_wmark_pages(zone), 0, 0)) return 1; + } return 0; } @@ -1979,6 +1985,7 @@ loop_again: for (priority = DEF_PRIORITY; priority >= 0; priority--) { int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ unsigned long lru_pages = 0; + int has_under_min_watermark_zone = 0; /* The swap token gets in the way of swapout... 
*/ if (!priority) @@ -2085,6 +2092,15 @@ loop_again: if (total_scanned > SWAP_CLUSTER_MAX * 2 && total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2) sc.may_writepage = 1; + + /* + * We are still under min water mark. it mean we have + * GFP_ATOMIC allocation failure risk. Hurry up! + */ + if (!zone_watermark_ok(zone, order, min_wmark_pages(zone), + end_zone, 0)) + has_under_min_watermark_zone = 1; + } if (all_zones_ok) break; /* kswapd: all done */ @@ -2092,8 +2108,12 @@ loop_again: * OK, kswapd is getting into trouble. Take a nap, then take * another pass across the zones. */ - if (total_scanned && priority < DEF_PRIORITY - 2) - congestion_wait(BLK_RW_ASYNC, HZ/10); + if (total_scanned && (priority < DEF_PRIORITY - 2)) { + if (has_under_min_watermark_zone) + count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT); + else + congestion_wait(BLK_RW_ASYNC, HZ/10); + } /* * We do this so kswapd doesn't build up large priorities for @@ -2207,7 +2227,7 @@ static int kswapd(void *p) long remaining = 0; /* Try to sleep for a short interval */ - if (!sleeping_prematurely(order, remaining)) { + if (!sleeping_prematurely(pgdat, order, remaining)) { remaining = schedule_timeout(HZ/10); finish_wait(&pgdat->kswapd_wait, &wait); prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); @@ -2218,13 +2238,13 @@ static int kswapd(void *p) * premature sleep. If not, then go fully * to sleep until explicitly woken up */ - if (!sleeping_prematurely(order, remaining)) + if (!sleeping_prematurely(pgdat, order, remaining)) schedule(); else { if (remaining) - count_vm_event(KSWAPD_PREMATURE_FAST); + count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY); else - count_vm_event(KSWAPD_PREMATURE_SLOW); + count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY); } } diff --git a/mm/vmstat.c b/mm/vmstat.c index 63ab71455c5b..6051fbab67ba 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -683,8 +683,9 @@ static const char * const vmstat_text[] = { "slabs_scanned", "kswapd_steal", "kswapd_inodesteal", - "kswapd_slept_prematurely_fast", - "kswapd_slept_prematurely_slow", + "kswapd_low_wmark_hit_quickly", + "kswapd_high_wmark_hit_quickly", + "kswapd_skip_congestion_wait", "pageoutrun", "allocstall", -- cgit v1.2.3 From 3ca7b3c5b64d35fe02c35b5d44c2c58b49499fee Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 14 Dec 2009 17:58:57 -0800 Subject: mm: define PAGE_MAPPING_FLAGS At present we define PageAnon(page) by the low PAGE_MAPPING_ANON bit set in page->mapping, with the higher bits a pointer to the anon_vma; and have defined PageKsm(page) as that with NULL anon_vma. But KSM swapping will need to store a pointer there: so in preparation for that, now define PAGE_MAPPING_FLAGS as the low two bits, including PAGE_MAPPING_KSM (always set along with PAGE_MAPPING_ANON, until some other use for the bit emerges). Declare page_rmapping(page) to return the pointer part of page->mapping, and page_anon_vma(page) to return the anon_vma pointer when that's what it is. Use these in a few appropriate places: notably, unuse_vma() has been testing page->mapping, but is better to be testing page_anon_vma() (cases may be added in which flag bits are set without any pointer). 
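The same pointer-tagging trick can be shown in isolation; the following is a small userspace sketch (illustrative names and a toy anon_vma, not the kernel code), using the two low bits of an aligned pointer just as page->mapping does:

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define MAPPING_ANON  1UL
    #define MAPPING_KSM   2UL
    #define MAPPING_FLAGS (MAPPING_ANON | MAPPING_KSM)

    struct anon_vma { int dummy; };

    /* Tag an anon_vma pointer with the ANON bit, in the spirit of __page_set_anon_rmap(). */
    static void *tag_anon(struct anon_vma *av)
    {
        /* malloc'ed pointers are at least 4-byte aligned, so bits 0-1 are free */
        assert(((uintptr_t)av & MAPPING_FLAGS) == 0);
        return (void *)((uintptr_t)av | MAPPING_ANON);
    }

    /* Strip the flag bits again, the equivalent of page_rmapping(). */
    static void *untag(void *mapping)
    {
        return (void *)((uintptr_t)mapping & ~MAPPING_FLAGS);
    }

    int main(void)
    {
        struct anon_vma *av = malloc(sizeof(*av));
        void *mapping = tag_anon(av);

        assert(((uintptr_t)mapping & MAPPING_FLAGS) == MAPPING_ANON);
        assert(untag(mapping) == av);
        free(av);
        return 0;
    }

Any structure with at least 4-byte alignment leaves those two low bits free, which is what lets a single mapping word carry both a pointer and the ANON/KSM flags.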
Signed-off-by: Hugh Dickins Cc: Izik Eidus Cc: Andrea Arcangeli Cc: Nick Piggin Cc: KOSAKI Motohiro Reviewed-by: Rik van Riel Cc: Lee Schermerhorn Cc: Andi Kleen Cc: KAMEZAWA Hiroyuki Cc: Wu Fengguang Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/ksm.h | 5 +++-- include/linux/mm.h | 17 ++++++++++++++++- include/linux/rmap.h | 8 ++++++++ mm/migrate.c | 11 ++++------- mm/rmap.c | 7 +++---- mm/swapfile.c | 2 +- 6 files changed, 35 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/include/linux/ksm.h b/include/linux/ksm.h index a485c14ecd5d..1401a313fa77 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -38,7 +38,8 @@ static inline void ksm_exit(struct mm_struct *mm) */ static inline int PageKsm(struct page *page) { - return ((unsigned long)page->mapping == PAGE_MAPPING_ANON); + return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == + (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); } /* @@ -47,7 +48,7 @@ static inline int PageKsm(struct page *page) static inline void page_add_ksm_rmap(struct page *page) { if (atomic_inc_and_test(&page->_mapcount)) { - page->mapping = (void *) PAGE_MAPPING_ANON; + page->mapping = (void *) (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); __inc_zone_page_state(page, NR_ANON_PAGES); } } diff --git a/include/linux/mm.h b/include/linux/mm.h index 24c395694f4d..1202cd3121e1 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -620,13 +620,22 @@ void page_address_init(void); /* * On an anonymous page mapped into a user virtual memory area, * page->mapping points to its anon_vma, not to a struct address_space; - * with the PAGE_MAPPING_ANON bit set to distinguish it. + * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h. + * + * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled, + * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit; + * and then page->mapping points, not to an anon_vma, but to a private + * structure which KSM associates with that merged page. See ksm.h. + * + * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used. * * Please note that, confusingly, "page_mapping" refers to the inode * address_space which maps the page from disk; whereas "page_mapped" * refers to user virtual address space into which the page is mapped. 
*/ #define PAGE_MAPPING_ANON 1 +#define PAGE_MAPPING_KSM 2 +#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM) extern struct address_space swapper_space; static inline struct address_space *page_mapping(struct page *page) @@ -644,6 +653,12 @@ static inline struct address_space *page_mapping(struct page *page) return mapping; } +/* Neutral page->mapping pointer to address_space or anon_vma or other */ +static inline void *page_rmapping(struct page *page) +{ + return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS); +} + static inline int PageAnon(struct page *page) { return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; diff --git a/include/linux/rmap.h b/include/linux/rmap.h index cb0ba7032609..1f65af44c6d2 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -39,6 +39,14 @@ struct anon_vma { #ifdef CONFIG_MMU +static inline struct anon_vma *page_anon_vma(struct page *page) +{ + if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != + PAGE_MAPPING_ANON) + return NULL; + return page_rmapping(page); +} + static inline void anon_vma_lock(struct vm_area_struct *vma) { struct anon_vma *anon_vma = vma->anon_vma; diff --git a/mm/migrate.c b/mm/migrate.c index 576c25eeb1ca..367272d04423 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -172,17 +172,14 @@ static void remove_anon_migration_ptes(struct page *old, struct page *new) { struct anon_vma *anon_vma; struct vm_area_struct *vma; - unsigned long mapping; - - mapping = (unsigned long)new->mapping; - - if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0) - return; /* * We hold the mmap_sem lock. So no need to call page_lock_anon_vma. */ - anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON); + anon_vma = page_anon_vma(new); + if (!anon_vma) + return; + spin_lock(&anon_vma->lock); list_for_each_entry(vma, &anon_vma->head, anon_vma_node) diff --git a/mm/rmap.c b/mm/rmap.c index 1a0ee6e634c2..f06cee48eca7 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -203,7 +203,7 @@ struct anon_vma *page_lock_anon_vma(struct page *page) rcu_read_lock(); anon_mapping = (unsigned long) page->mapping; - if (!(anon_mapping & PAGE_MAPPING_ANON)) + if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) goto out; if (!page_mapped(page)) goto out; @@ -248,8 +248,7 @@ vma_address(struct page *page, struct vm_area_struct *vma) unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) { if (PageAnon(page)) { - if ((void *)vma->anon_vma != - (void *)page->mapping - PAGE_MAPPING_ANON) + if (vma->anon_vma != page_anon_vma(page)) return -EFAULT; } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) { if (!vma->vm_file || @@ -513,7 +512,7 @@ int page_referenced(struct page *page, referenced++; *vm_flags = 0; - if (page_mapped(page) && page->mapping) { + if (page_mapped(page) && page_rmapping(page)) { if (PageAnon(page)) referenced += page_referenced_anon(page, mem_cont, vm_flags); diff --git a/mm/swapfile.c b/mm/swapfile.c index d5eb2e85600b..e74112e8e5f4 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -938,7 +938,7 @@ static int unuse_vma(struct vm_area_struct *vma, unsigned long addr, end, next; int ret; - if (page->mapping) { + if (page_anon_vma(page)) { addr = page_address_in_vma(page, vma); if (addr == -EFAULT) return 0; -- cgit v1.2.3 From af8e3354b4bbd1ee5a3a55d11a5e1fe37e77f0ba Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 14 Dec 2009 17:58:59 -0800 Subject: mm: CONFIG_MMU for PG_mlocked Remove three degrees of obfuscation, left over from when we had 
CONFIG_UNEVICTABLE_LRU. MLOCK_PAGES is CONFIG_HAVE_MLOCKED_PAGE_BIT is CONFIG_HAVE_MLOCK is CONFIG_MMU. rmap.o (and memory-failure.o) are only built when CONFIG_MMU, so don't need such conditions at all. Somehow, I feel no compulsion to remove the CONFIG_HAVE_MLOCK* lines from 169 defconfigs: leave those to evolve in due course. Signed-off-by: Hugh Dickins Cc: Izik Eidus Cc: Andrea Arcangeli Cc: Nick Piggin Reviewed-by: KOSAKI Motohiro Cc: Rik van Riel Cc: Lee Schermerhorn Cc: Andi Kleen Cc: KAMEZAWA Hiroyuki Cc: Wu Fengguang Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/page-flags.h | 8 +++----- mm/Kconfig | 8 -------- mm/internal.h | 26 ++++++++++++-------------- mm/memory-failure.c | 2 -- mm/page_alloc.c | 4 ---- mm/rmap.c | 15 ++++----------- 6 files changed, 19 insertions(+), 44 deletions(-) (limited to 'include') diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 6b202b173955..49e907bd067f 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -99,7 +99,7 @@ enum pageflags { PG_buddy, /* Page is free, on buddy lists */ PG_swapbacked, /* Page is backed by RAM/swap */ PG_unevictable, /* Page is "unevictable" */ -#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT +#ifdef CONFIG_MMU PG_mlocked, /* Page is vma mlocked */ #endif #ifdef CONFIG_ARCH_USES_PG_UNCACHED @@ -259,12 +259,10 @@ PAGEFLAG_FALSE(SwapCache) PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable) TESTCLEARFLAG(Unevictable, unevictable) -#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT -#define MLOCK_PAGES 1 +#ifdef CONFIG_MMU PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked) TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked) #else -#define MLOCK_PAGES 0 PAGEFLAG_FALSE(Mlocked) SETPAGEFLAG_NOOP(Mlocked) TESTCLEARFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked) #endif @@ -393,7 +391,7 @@ static inline void __ClearPageTail(struct page *page) #endif /* !PAGEFLAGS_EXTENDED */ -#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT +#ifdef CONFIG_MMU #define __PG_MLOCKED (1 << PG_mlocked) #else #define __PG_MLOCKED 0 diff --git a/mm/Kconfig b/mm/Kconfig index 44cf6f0a3a6d..77b4980d6143 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -200,14 +200,6 @@ config VIRT_TO_BUS def_bool y depends on !ARCH_NO_VIRT_TO_BUS -config HAVE_MLOCK - bool - default y if MMU=y - -config HAVE_MLOCKED_PAGE_BIT - bool - default y if HAVE_MLOCK=y - config MMU_NOTIFIER bool diff --git a/mm/internal.h b/mm/internal.h index 22ec8d2b0fb8..cb7d92d0a46d 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -63,17 +63,6 @@ static inline unsigned long page_order(struct page *page) return page_private(page); } -#ifdef CONFIG_HAVE_MLOCK -extern long mlock_vma_pages_range(struct vm_area_struct *vma, - unsigned long start, unsigned long end); -extern void munlock_vma_pages_range(struct vm_area_struct *vma, - unsigned long start, unsigned long end); -static inline void munlock_vma_pages_all(struct vm_area_struct *vma) -{ - munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end); -} -#endif - /* * unevictable_migrate_page() called only from migrate_page_copy() to * migrate unevictable flag to new page. 
@@ -86,7 +75,16 @@ static inline void unevictable_migrate_page(struct page *new, struct page *old) SetPageUnevictable(new); } -#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT +#ifdef CONFIG_MMU +extern long mlock_vma_pages_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end); +extern void munlock_vma_pages_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end); +static inline void munlock_vma_pages_all(struct vm_area_struct *vma) +{ + munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end); +} + /* * Called only in fault path via page_evictable() for a new page * to determine if it's being mapped into a LOCKED vma. @@ -144,7 +142,7 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page) } } -#else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */ +#else /* !CONFIG_MMU */ static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p) { return 0; @@ -153,7 +151,7 @@ static inline void clear_page_mlock(struct page *page) { } static inline void mlock_vma_page(struct page *page) { } static inline void mlock_migrate_page(struct page *new, struct page *old) { } -#endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */ +#endif /* !CONFIG_MMU */ /* * Return the mem_map entry representing the 'offset' subpage within diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 1ac49fef95ab..50d4f8d7024a 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -582,10 +582,8 @@ static struct page_state { { unevict|dirty, unevict|dirty, "unevictable LRU", me_pagecache_dirty}, { unevict, unevict, "unevictable LRU", me_pagecache_clean}, -#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT { mlock|dirty, mlock|dirty, "mlocked LRU", me_pagecache_dirty }, { mlock, mlock, "mlocked LRU", me_pagecache_clean }, -#endif { lru|dirty, lru|dirty, "LRU", me_pagecache_dirty }, { lru|dirty, lru, "clean LRU", me_pagecache_clean }, diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 2bc2ac63f41e..59d2e88fb47c 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -486,7 +486,6 @@ static inline void __free_one_page(struct page *page, zone->free_area[order].nr_free++; } -#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT /* * free_page_mlock() -- clean up attempts to free and mlocked() page. * Page should not be on lru, so no need to fix that up. @@ -497,9 +496,6 @@ static inline void free_page_mlock(struct page *page) __dec_zone_page_state(page, NR_MLOCK); __count_vm_event(UNEVICTABLE_MLOCKFREED); } -#else -static void free_page_mlock(struct page *page) { } -#endif static inline int free_pages_check(struct page *page) { diff --git a/mm/rmap.c b/mm/rmap.c index c3d6dc4223a4..eb3dfc8355ea 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -788,7 +788,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, ret = SWAP_MLOCK; goto out_unmap; } - if (MLOCK_PAGES && TTU_ACTION(flags) == TTU_MUNLOCK) + if (TTU_ACTION(flags) == TTU_MUNLOCK) goto out_unmap; } if (!(flags & TTU_IGNORE_ACCESS)) { @@ -861,7 +861,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, out_unmap: pte_unmap_unlock(pte, ptl); - if (MLOCK_PAGES && ret == SWAP_MLOCK) { + if (ret == SWAP_MLOCK) { ret = SWAP_AGAIN; if (down_read_trylock(&vma->vm_mm->mmap_sem)) { if (vma->vm_flags & VM_LOCKED) { @@ -938,11 +938,10 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount, return ret; /* - * MLOCK_PAGES => feature is configured. 
- * if we can acquire the mmap_sem for read, and vma is VM_LOCKED, + * If we can acquire the mmap_sem for read, and vma is VM_LOCKED, * keep the sem while scanning the cluster for mlocking pages. */ - if (MLOCK_PAGES && down_read_trylock(&vma->vm_mm->mmap_sem)) { + if (down_read_trylock(&vma->vm_mm->mmap_sem)) { locked_vma = (vma->vm_flags & VM_LOCKED); if (!locked_vma) up_read(&vma->vm_mm->mmap_sem); /* don't need it */ @@ -1075,9 +1074,6 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags) list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list) { - if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) && - (vma->vm_flags & VM_LOCKED)) - continue; cursor = (unsigned long) vma->vm_private_data; if (cursor > max_nl_cursor) max_nl_cursor = cursor; @@ -1110,9 +1106,6 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags) do { list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list) { - if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) && - (vma->vm_flags & VM_LOCKED)) - continue; cursor = (unsigned long) vma->vm_private_data; while ( cursor < max_nl_cursor && cursor < vma->vm_end - vma->vm_start) { -- cgit v1.2.3 From 08beca44dfb0ab008e365163df70dbd302ae1508 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 14 Dec 2009 17:59:21 -0800 Subject: ksm: stable_node point to page and back Add a pointer to the ksm page into struct stable_node, holding a reference to the page while the node exists. Put a pointer to the stable_node into the ksm page's ->mapping. Then we don't need get_ksm_page() while traversing the stable tree: the page to compare against is sure to be present and correct, even if it's no longer visible through any of its existing rmap_items. And we can handle the forked ksm page case more efficiently: no need to memcmp our way through the tree to find its match. Signed-off-by: Hugh Dickins Cc: Izik Eidus Cc: Andrea Arcangeli Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/ksm.h | 24 +++++++++---- mm/ksm.c | 99 ++++++++++++++++++----------------------------------- 2 files changed, 51 insertions(+), 72 deletions(-) (limited to 'include') diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 1401a313fa77..ef55ce14a2ce 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -12,6 +12,8 @@ #include #include +struct stable_node; + #ifdef CONFIG_KSM int ksm_madvise(struct vm_area_struct *vma, unsigned long start, unsigned long end, int advice, unsigned long *vm_flags); @@ -34,7 +36,8 @@ static inline void ksm_exit(struct mm_struct *mm) /* * A KSM page is one of those write-protected "shared pages" or "merged pages" * which KSM maps into multiple mms, wherever identical anonymous page content - * is found in VM_MERGEABLE vmas. It's a PageAnon page, with NULL anon_vma. + * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any + * anon_vma, but to that page's node of the stable tree. */ static inline int PageKsm(struct page *page) { @@ -42,15 +45,22 @@ static inline int PageKsm(struct page *page) (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); } -/* - * But we have to avoid the checking which page_add_anon_rmap() performs. - */ +static inline struct stable_node *page_stable_node(struct page *page) +{ + return PageKsm(page) ? 
page_rmapping(page) : NULL; +} + +static inline void set_page_stable_node(struct page *page, + struct stable_node *stable_node) +{ + page->mapping = (void *)stable_node + + (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); +} + static inline void page_add_ksm_rmap(struct page *page) { - if (atomic_inc_and_test(&page->_mapcount)) { - page->mapping = (void *) (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); + if (atomic_inc_and_test(&page->_mapcount)) __inc_zone_page_state(page, NR_ANON_PAGES); - } } #else /* !CONFIG_KSM */ diff --git a/mm/ksm.c b/mm/ksm.c index 9b7af2eb4280..748785683399 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -107,10 +107,12 @@ struct ksm_scan { /** * struct stable_node - node of the stable rbtree + * @page: pointer to struct page of the ksm page * @node: rb node of this ksm page in the stable tree * @hlist: hlist head of rmap_items using this ksm page */ struct stable_node { + struct page *page; struct rb_node node; struct hlist_head hlist; }; @@ -434,23 +436,6 @@ out: page = NULL; return page; } -/* - * get_ksm_page: checks if the page at the virtual address in rmap_item - * is still PageKsm, in which case we can trust the content of the page, - * and it returns the gotten page; but NULL if the page has been zapped. - */ -static struct page *get_ksm_page(struct rmap_item *rmap_item) -{ - struct page *page; - - page = get_mergeable_page(rmap_item); - if (page && !PageKsm(page)) { - put_page(page); - page = NULL; - } - return page; -} - /* * Removing rmap_item from stable or unstable tree. * This function will clean the information from the stable/unstable tree. @@ -465,6 +450,9 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item) if (stable_node->hlist.first) ksm_pages_sharing--; else { + set_page_stable_node(stable_node->page, NULL); + put_page(stable_node->page); + rb_erase(&stable_node->node, &root_stable_tree); free_stable_node(stable_node); ksm_pages_shared--; @@ -740,8 +728,7 @@ out: * try_to_merge_one_page - take two pages and merge them into one * @vma: the vma that holds the pte pointing to page * @page: the PageAnon page that we want to replace with kpage - * @kpage: the PageKsm page (or newly allocated page which page_add_ksm_rmap - * will make PageKsm) that we want to map instead of page + * @kpage: the PageKsm page that we want to map instead of page * * This function returns 0 if the pages were merged, -EFAULT otherwise. */ @@ -793,6 +780,9 @@ static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item, struct vm_area_struct *vma; int err = -EFAULT; + if (page == kpage) /* ksm page forked */ + return 0; + down_read(&mm->mmap_sem); if (ksm_test_exit(mm)) goto out; @@ -846,6 +836,9 @@ static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item, goto up; copy_user_highpage(kpage, page, rmap_item->address, vma); + + set_page_stable_node(kpage, NULL); /* mark it PageKsm */ + err = try_to_merge_one_page(vma, page, kpage); up: up_read(&mm->mmap_sem); @@ -876,41 +869,31 @@ up: * This function returns the stable tree node of identical content if found, * NULL otherwise. 
*/ -static struct stable_node *stable_tree_search(struct page *page, - struct page **tree_pagep) +static struct stable_node *stable_tree_search(struct page *page) { struct rb_node *node = root_stable_tree.rb_node; struct stable_node *stable_node; + stable_node = page_stable_node(page); + if (stable_node) { /* ksm page forked */ + get_page(page); + return stable_node; + } + while (node) { - struct hlist_node *hlist, *hnext; - struct rmap_item *tree_rmap_item; - struct page *tree_page; int ret; + cond_resched(); stable_node = rb_entry(node, struct stable_node, node); - hlist_for_each_entry_safe(tree_rmap_item, hlist, hnext, - &stable_node->hlist, hlist) { - BUG_ON(!in_stable_tree(tree_rmap_item)); - cond_resched(); - tree_page = get_ksm_page(tree_rmap_item); - if (tree_page) - break; - remove_rmap_item_from_tree(tree_rmap_item); - } - if (!hlist) - return NULL; - ret = memcmp_pages(page, tree_page); + ret = memcmp_pages(page, stable_node->page); - if (ret < 0) { - put_page(tree_page); + if (ret < 0) node = node->rb_left; - } else if (ret > 0) { - put_page(tree_page); + else if (ret > 0) node = node->rb_right; - } else { - *tree_pagep = tree_page; + else { + get_page(stable_node->page); return stable_node; } } @@ -932,26 +915,12 @@ static struct stable_node *stable_tree_insert(struct page *kpage) struct stable_node *stable_node; while (*new) { - struct hlist_node *hlist, *hnext; - struct rmap_item *tree_rmap_item; - struct page *tree_page; int ret; + cond_resched(); stable_node = rb_entry(*new, struct stable_node, node); - hlist_for_each_entry_safe(tree_rmap_item, hlist, hnext, - &stable_node->hlist, hlist) { - BUG_ON(!in_stable_tree(tree_rmap_item)); - cond_resched(); - tree_page = get_ksm_page(tree_rmap_item); - if (tree_page) - break; - remove_rmap_item_from_tree(tree_rmap_item); - } - if (!hlist) - return NULL; - ret = memcmp_pages(kpage, tree_page); - put_page(tree_page); + ret = memcmp_pages(kpage, stable_node->page); parent = *new; if (ret < 0) @@ -977,6 +946,10 @@ static struct stable_node *stable_tree_insert(struct page *kpage) INIT_HLIST_HEAD(&stable_node->hlist); + get_page(kpage); + stable_node->page = kpage; + set_page_stable_node(kpage, stable_node); + return stable_node; } @@ -1085,14 +1058,10 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) remove_rmap_item_from_tree(rmap_item); /* We first start with searching the page inside the stable tree */ - stable_node = stable_tree_search(page, &tree_page); + stable_node = stable_tree_search(page); if (stable_node) { - kpage = tree_page; - if (page == kpage) /* forked */ - err = 0; - else - err = try_to_merge_with_ksm_page(rmap_item, - page, kpage); + kpage = stable_node->page; + err = try_to_merge_with_ksm_page(rmap_item, page, kpage); if (!err) { /* * The page was successfully merged: -- cgit v1.2.3 From 5ad6468801d28c4d4ac9f48ec19297817c915f6a Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 14 Dec 2009 17:59:24 -0800 Subject: ksm: let shared pages be swappable Initial implementation for swapping out KSM's shared pages: add page_referenced_ksm() and try_to_unmap_ksm(), which rmap.c calls when faced with a PageKsm page. Most of what's needed can be got from the rmap_items listed from the stable_node of the ksm page, without discovering the actual vma: so in this patch just fake up a struct vma for page_referenced_one() or try_to_unmap_one(), then refine that in the next patch. 
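The temporary hack can be pictured with a standalone sketch (userspace toy code with illustrative names and layouts, not the kernel structures): each rmap_item supplies just enough to fake up a one-page vma for the rmap walker.

    #include <stdio.h>
    #include <string.h>

    struct fake_vma  { void *vm_mm; unsigned long vm_start, vm_end; };
    struct rmap_item { void *mm; unsigned long address; struct rmap_item *next; };

    #define FAKE_PAGE_SIZE 4096UL

    /* Stands in for page_referenced_one()/try_to_unmap_one(): it only ever
     * looks at vm_mm, vm_start and vm_end, which is why a faked-up vma works. */
    static int visit_one(struct fake_vma *vma, unsigned long address)
    {
        printf("visit mm=%p address=%#lx (vma %#lx-%#lx)\n",
               vma->vm_mm, address, vma->vm_start, vma->vm_end);
        return 0;
    }

    /* Walk the rmap_items hanging off a stable node, faking a one-page vma
     * around each address, in the spirit of this patch's temporary hack. */
    static void for_each_rmap_item(struct rmap_item *head)
    {
        struct fake_vma vma;
        struct rmap_item *item;

        for (item = head; item; item = item->next) {
            memset(&vma, 0, sizeof(vma));       /* like kmem_cache_zalloc() */
            vma.vm_mm = item->mm;
            vma.vm_start = item->address;
            vma.vm_end = item->address + FAKE_PAGE_SIZE;
            visit_one(&vma, item->address);
        }
    }

    int main(void)
    {
        int mm1, mm2;
        struct rmap_item b = { &mm2, 0x2000UL, NULL };
        struct rmap_item a = { &mm1, 0x1000UL, &b };

        for_each_rmap_item(&a);
        return 0;
    }

Only the fields the rmap walkers actually read need to be filled in, which is why zeroing the rest of the faked vma is enough at this stage.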
Add VM_NONLINEAR to ksm_madvise()'s list of exclusions: it has always been implicit there (being only set with VM_SHARED, already excluded), but let's make it explicit, to help justify the lack of nonlinear unmap. Rely on the page lock to protect against concurrent modifications to that page's node of the stable tree. The awkward part is not swapout but swapin: do_swap_page() and page_add_anon_rmap() now have to allow for new possibilities - perhaps a ksm page still in swapcache, perhaps a swapcache page associated with one location in one anon_vma now needed for another location or anon_vma. (And the vma might even be no longer VM_MERGEABLE when that happens.) ksm_might_need_to_copy() checks for that case, and supplies a duplicate page when necessary, simply leaving it to a subsequent pass of ksmd to rediscover the identity and merge them back into one ksm page. Disappointingly primitive: but the alternative would have to accumulate unswappable info about the swapped out ksm pages, limiting swappability. Remove page_add_ksm_rmap(): page_add_anon_rmap() now has to allow for the particular case it was handling, so just use it instead. Signed-off-by: Hugh Dickins Cc: Izik Eidus Cc: Andrea Arcangeli Cc: Chris Wright Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/ksm.h | 54 ++++++++++++++-- include/linux/rmap.h | 5 ++ mm/ksm.c | 172 ++++++++++++++++++++++++++++++++++++++++++++++----- mm/memory.c | 6 ++ mm/rmap.c | 65 +++++++++++-------- mm/swapfile.c | 11 +++- 6 files changed, 264 insertions(+), 49 deletions(-) (limited to 'include') diff --git a/include/linux/ksm.h b/include/linux/ksm.h index ef55ce14a2ce..157d83dbaef8 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -9,10 +9,12 @@ #include #include +#include +#include #include -#include struct stable_node; +struct mem_cgroup; #ifdef CONFIG_KSM int ksm_madvise(struct vm_area_struct *vma, unsigned long start, @@ -57,11 +59,36 @@ static inline void set_page_stable_node(struct page *page, (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); } -static inline void page_add_ksm_rmap(struct page *page) +/* + * When do_swap_page() first faults in from swap what used to be a KSM page, + * no problem, it will be assigned to this vma's anon_vma; but thereafter, + * it might be faulted into a different anon_vma (or perhaps to a different + * offset in the same anon_vma). do_swap_page() cannot do all the locking + * needed to reconstitute a cross-anon_vma KSM page: for now it has to make + * a copy, and leave remerging the pages to a later pass of ksmd. + * + * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE, + * but what if the vma was unmerged while the page was swapped out? 
+ */ +struct page *ksm_does_need_to_copy(struct page *page, + struct vm_area_struct *vma, unsigned long address); +static inline struct page *ksm_might_need_to_copy(struct page *page, + struct vm_area_struct *vma, unsigned long address) { - if (atomic_inc_and_test(&page->_mapcount)) - __inc_zone_page_state(page, NR_ANON_PAGES); + struct anon_vma *anon_vma = page_anon_vma(page); + + if (!anon_vma || + (anon_vma == vma->anon_vma && + page->index == linear_page_index(vma, address))) + return page; + + return ksm_does_need_to_copy(page, vma, address); } + +int page_referenced_ksm(struct page *page, + struct mem_cgroup *memcg, unsigned long *vm_flags); +int try_to_unmap_ksm(struct page *page, enum ttu_flags flags); + #else /* !CONFIG_KSM */ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start, @@ -84,7 +111,22 @@ static inline int PageKsm(struct page *page) return 0; } -/* No stub required for page_add_ksm_rmap(page) */ +static inline struct page *ksm_might_need_to_copy(struct page *page, + struct vm_area_struct *vma, unsigned long address) +{ + return page; +} + +static inline int page_referenced_ksm(struct page *page, + struct mem_cgroup *memcg, unsigned long *vm_flags) +{ + return 0; +} + +static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags) +{ + return 0; +} #endif /* !CONFIG_KSM */ -#endif +#endif /* __LINUX_KSM_H */ diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 1f65af44c6d2..0b4913a4a344 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -89,6 +89,9 @@ static inline void page_dup_rmap(struct page *page) */ int page_referenced(struct page *, int is_locked, struct mem_cgroup *cnt, unsigned long *vm_flags); +int page_referenced_one(struct page *, struct vm_area_struct *, + unsigned long address, unsigned int *mapcount, unsigned long *vm_flags); + enum ttu_flags { TTU_UNMAP = 0, /* unmap mode */ TTU_MIGRATION = 1, /* migration mode */ @@ -102,6 +105,8 @@ enum ttu_flags { #define TTU_ACTION(x) ((x) & TTU_ACTION_MASK) int try_to_unmap(struct page *, enum ttu_flags flags); +int try_to_unmap_one(struct page *, struct vm_area_struct *, + unsigned long address, enum ttu_flags flags); /* * Called from mm/filemap_xip.c to unmap empty zero page diff --git a/mm/ksm.c b/mm/ksm.c index af5f571185d5..2f58ceebfe8f 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -196,6 +196,13 @@ static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait); static DEFINE_MUTEX(ksm_thread_mutex); static DEFINE_SPINLOCK(ksm_mmlist_lock); +/* + * Temporary hack for page_referenced_ksm() and try_to_unmap_ksm(), + * later we rework things a little to get the right vma to them. 
+ */ +static DEFINE_SPINLOCK(ksm_fallback_vma_lock); +static struct vm_area_struct ksm_fallback_vma; + #define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\ sizeof(struct __struct), __alignof__(struct __struct),\ (__flags), NULL) @@ -445,14 +452,20 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item) { if (rmap_item->address & STABLE_FLAG) { struct stable_node *stable_node; + struct page *page; stable_node = rmap_item->head; + page = stable_node->page; + lock_page(page); + hlist_del(&rmap_item->hlist); - if (stable_node->hlist.first) + if (stable_node->hlist.first) { + unlock_page(page); ksm_pages_sharing--; - else { - set_page_stable_node(stable_node->page, NULL); - put_page(stable_node->page); + } else { + set_page_stable_node(page, NULL); + unlock_page(page); + put_page(page); rb_erase(&stable_node->node, &root_stable_tree); free_stable_node(stable_node); @@ -710,7 +723,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page, } get_page(kpage); - page_add_ksm_rmap(kpage); + page_add_anon_rmap(kpage, vma, addr); flush_cache_page(vma, addr, pte_pfn(*ptep)); ptep_clear_flush(vma, addr, ptep); @@ -763,8 +776,16 @@ static int try_to_merge_one_page(struct vm_area_struct *vma, pages_identical(page, kpage)) err = replace_page(vma, page, kpage, orig_pte); - if ((vma->vm_flags & VM_LOCKED) && !err) + if ((vma->vm_flags & VM_LOCKED) && !err) { munlock_vma_page(page); + if (!PageMlocked(kpage)) { + unlock_page(page); + lru_add_drain(); + lock_page(kpage); + mlock_vma_page(kpage); + page = kpage; /* for final unlock */ + } + } unlock_page(page); out: @@ -841,7 +862,11 @@ static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item, copy_user_highpage(kpage, page, rmap_item->address, vma); + SetPageDirty(kpage); + __SetPageUptodate(kpage); + SetPageSwapBacked(kpage); set_page_stable_node(kpage, NULL); /* mark it PageKsm */ + lru_cache_add_lru(kpage, LRU_ACTIVE_ANON); err = try_to_merge_one_page(vma, page, kpage); up: @@ -1071,7 +1096,9 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) * The page was successfully merged: * add its rmap_item to the stable tree. */ + lock_page(kpage); stable_tree_append(rmap_item, stable_node); + unlock_page(kpage); } put_page(kpage); return; @@ -1112,11 +1139,13 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) if (kpage) { remove_rmap_item_from_tree(tree_rmap_item); + lock_page(kpage); stable_node = stable_tree_insert(kpage); if (stable_node) { stable_tree_append(tree_rmap_item, stable_node); stable_tree_append(rmap_item, stable_node); } + unlock_page(kpage); put_page(kpage); /* @@ -1285,14 +1314,6 @@ static void ksm_do_scan(unsigned int scan_npages) return; if (!PageKsm(page) || !in_stable_tree(rmap_item)) cmp_and_merge_page(page, rmap_item); - else if (page_mapcount(page) == 1) { - /* - * Replace now-unshared ksm page by ordinary page. 
- */ - break_cow(rmap_item); - remove_rmap_item_from_tree(rmap_item); - rmap_item->oldchecksum = calc_checksum(page); - } put_page(page); } } @@ -1337,7 +1358,7 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start, if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE | VM_PFNMAP | VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE | - VM_MIXEDMAP | VM_SAO)) + VM_NONLINEAR | VM_MIXEDMAP | VM_SAO)) return 0; /* just ignore the advice */ if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { @@ -1435,6 +1456,127 @@ void __ksm_exit(struct mm_struct *mm) } } +struct page *ksm_does_need_to_copy(struct page *page, + struct vm_area_struct *vma, unsigned long address) +{ + struct page *new_page; + + unlock_page(page); /* any racers will COW it, not modify it */ + + new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); + if (new_page) { + copy_user_highpage(new_page, page, address, vma); + + SetPageDirty(new_page); + __SetPageUptodate(new_page); + SetPageSwapBacked(new_page); + __set_page_locked(new_page); + + if (page_evictable(new_page, vma)) + lru_cache_add_lru(new_page, LRU_ACTIVE_ANON); + else + add_page_to_unevictable_list(new_page); + } + + page_cache_release(page); + return new_page; +} + +int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg, + unsigned long *vm_flags) +{ + struct stable_node *stable_node; + struct rmap_item *rmap_item; + struct hlist_node *hlist; + unsigned int mapcount = page_mapcount(page); + int referenced = 0; + struct vm_area_struct *vma; + + VM_BUG_ON(!PageKsm(page)); + VM_BUG_ON(!PageLocked(page)); + + stable_node = page_stable_node(page); + if (!stable_node) + return 0; + + /* + * Temporary hack: really we need anon_vma in rmap_item, to + * provide the correct vma, and to find recently forked instances. + * Use zalloc to avoid weirdness if any other fields are involved. + */ + vma = kmem_cache_zalloc(vm_area_cachep, GFP_ATOMIC); + if (!vma) { + spin_lock(&ksm_fallback_vma_lock); + vma = &ksm_fallback_vma; + } + + hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { + if (memcg && !mm_match_cgroup(rmap_item->mm, memcg)) + continue; + + vma->vm_mm = rmap_item->mm; + vma->vm_start = rmap_item->address; + vma->vm_end = vma->vm_start + PAGE_SIZE; + + referenced += page_referenced_one(page, vma, + rmap_item->address, &mapcount, vm_flags); + if (!mapcount) + goto out; + } +out: + if (vma == &ksm_fallback_vma) + spin_unlock(&ksm_fallback_vma_lock); + else + kmem_cache_free(vm_area_cachep, vma); + return referenced; +} + +int try_to_unmap_ksm(struct page *page, enum ttu_flags flags) +{ + struct stable_node *stable_node; + struct hlist_node *hlist; + struct rmap_item *rmap_item; + int ret = SWAP_AGAIN; + struct vm_area_struct *vma; + + VM_BUG_ON(!PageKsm(page)); + VM_BUG_ON(!PageLocked(page)); + + stable_node = page_stable_node(page); + if (!stable_node) + return SWAP_FAIL; + + /* + * Temporary hack: really we need anon_vma in rmap_item, to + * provide the correct vma, and to find recently forked instances. + * Use zalloc to avoid weirdness if any other fields are involved. 
+ */ + if (TTU_ACTION(flags) != TTU_UNMAP) + return SWAP_FAIL; + + vma = kmem_cache_zalloc(vm_area_cachep, GFP_ATOMIC); + if (!vma) { + spin_lock(&ksm_fallback_vma_lock); + vma = &ksm_fallback_vma; + } + + hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { + vma->vm_mm = rmap_item->mm; + vma->vm_start = rmap_item->address; + vma->vm_end = vma->vm_start + PAGE_SIZE; + + ret = try_to_unmap_one(page, vma, rmap_item->address, flags); + if (ret != SWAP_AGAIN || !page_mapped(page)) + goto out; + } +out: + if (vma == &ksm_fallback_vma) + spin_unlock(&ksm_fallback_vma_lock); + else + kmem_cache_free(vm_area_cachep, vma); + return ret; +} + #ifdef CONFIG_SYSFS /* * This all compiles without CONFIG_SYSFS, but is a waste of space. diff --git a/mm/memory.c b/mm/memory.c index 1c9dc46da3db..a54b2c498444 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2561,6 +2561,12 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, lock_page(page); delayacct_clear_flag(DELAYACCT_PF_SWAPIN); + page = ksm_might_need_to_copy(page, vma, address); + if (!page) { + ret = VM_FAULT_OOM; + goto out; + } + if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) { ret = VM_FAULT_OOM; goto out_page; diff --git a/mm/rmap.c b/mm/rmap.c index ebee81688736..869aaa3206a2 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -49,6 +49,7 @@ #include #include #include +#include #include #include #include @@ -336,9 +337,9 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma) * Subfunctions of page_referenced: page_referenced_one called * repeatedly from either page_referenced_anon or page_referenced_file. */ -static int page_referenced_one(struct page *page, struct vm_area_struct *vma, - unsigned long address, unsigned int *mapcount, - unsigned long *vm_flags) +int page_referenced_one(struct page *page, struct vm_area_struct *vma, + unsigned long address, unsigned int *mapcount, + unsigned long *vm_flags) { struct mm_struct *mm = vma->vm_mm; pte_t *pte; @@ -507,28 +508,33 @@ int page_referenced(struct page *page, unsigned long *vm_flags) { int referenced = 0; + int we_locked = 0; if (TestClearPageReferenced(page)) referenced++; *vm_flags = 0; if (page_mapped(page) && page_rmapping(page)) { - if (PageAnon(page)) + if (!is_locked && (!PageAnon(page) || PageKsm(page))) { + we_locked = trylock_page(page); + if (!we_locked) { + referenced++; + goto out; + } + } + if (unlikely(PageKsm(page))) + referenced += page_referenced_ksm(page, mem_cont, + vm_flags); + else if (PageAnon(page)) referenced += page_referenced_anon(page, mem_cont, vm_flags); - else if (is_locked) + else if (page->mapping) referenced += page_referenced_file(page, mem_cont, vm_flags); - else if (!trylock_page(page)) - referenced++; - else { - if (page->mapping) - referenced += page_referenced_file(page, - mem_cont, vm_flags); + if (we_locked) unlock_page(page); - } } - +out: if (page_test_and_clear_young(page)) referenced++; @@ -620,14 +626,7 @@ static void __page_set_anon_rmap(struct page *page, BUG_ON(!anon_vma); anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; page->mapping = (struct address_space *) anon_vma; - page->index = linear_page_index(vma, address); - - /* - * nr_mapped state can be updated without turning off - * interrupts because it is not modified via interrupt. 
- */ - __inc_zone_page_state(page, NR_ANON_PAGES); } /** @@ -665,14 +664,21 @@ static void __page_check_anon_rmap(struct page *page, * @vma: the vm area in which the mapping is added * @address: the user virtual address mapped * - * The caller needs to hold the pte lock and the page must be locked. + * The caller needs to hold the pte lock, and the page must be locked in + * the anon_vma case: to serialize mapping,index checking after setting. */ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) { + int first = atomic_inc_and_test(&page->_mapcount); + if (first) + __inc_zone_page_state(page, NR_ANON_PAGES); + if (unlikely(PageKsm(page))) + return; + VM_BUG_ON(!PageLocked(page)); VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); - if (atomic_inc_and_test(&page->_mapcount)) + if (first) __page_set_anon_rmap(page, vma, address); else __page_check_anon_rmap(page, vma, address); @@ -694,6 +700,7 @@ void page_add_new_anon_rmap(struct page *page, VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); SetPageSwapBacked(page); atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */ + __inc_zone_page_state(page, NR_ANON_PAGES); __page_set_anon_rmap(page, vma, address); if (page_evictable(page, vma)) lru_cache_add_lru(page, LRU_ACTIVE_ANON); @@ -760,8 +767,8 @@ void page_remove_rmap(struct page *page) * Subfunctions of try_to_unmap: try_to_unmap_one called * repeatedly from either try_to_unmap_anon or try_to_unmap_file. */ -static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, - unsigned long address, enum ttu_flags flags) +int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, + unsigned long address, enum ttu_flags flags) { struct mm_struct *mm = vma->vm_mm; pte_t *pte; @@ -1156,7 +1163,9 @@ int try_to_unmap(struct page *page, enum ttu_flags flags) BUG_ON(!PageLocked(page)); - if (PageAnon(page)) + if (unlikely(PageKsm(page))) + ret = try_to_unmap_ksm(page, flags); + else if (PageAnon(page)) ret = try_to_unmap_anon(page, flags); else ret = try_to_unmap_file(page, flags); @@ -1177,15 +1186,17 @@ int try_to_unmap(struct page *page, enum ttu_flags flags) * * SWAP_AGAIN - no vma is holding page mlocked, or, * SWAP_AGAIN - page mapped in mlocked vma -- couldn't acquire mmap sem + * SWAP_FAIL - page cannot be located at present * SWAP_MLOCK - page is now mlocked. */ int try_to_munlock(struct page *page) { VM_BUG_ON(!PageLocked(page) || PageLRU(page)); - if (PageAnon(page)) + if (unlikely(PageKsm(page))) + return try_to_unmap_ksm(page, TTU_MUNLOCK); + else if (PageAnon(page)) return try_to_unmap_anon(page, TTU_MUNLOCK); else return try_to_unmap_file(page, TTU_MUNLOCK); } - diff --git a/mm/swapfile.c b/mm/swapfile.c index e74112e8e5f4..6c0585b16418 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -650,6 +651,8 @@ int reuse_swap_page(struct page *page) int count; VM_BUG_ON(!PageLocked(page)); + if (unlikely(PageKsm(page))) + return 0; count = page_mapcount(page); if (count <= 1 && PageSwapCache(page)) { count += page_swapcount(page); @@ -658,7 +661,7 @@ int reuse_swap_page(struct page *page) SetPageDirty(page); } } - return count == 1; + return count <= 1; } /* @@ -1185,6 +1188,12 @@ static int try_to_unuse(unsigned int type) * read from disk into another page. Splitting into two * pages would be incorrect if swap supported "shared * private" pages, but they are handled by tmpfs files. 
+ * + * Given how unuse_vma() targets one particular offset + * in an anon_vma, once the anon_vma has been determined, + * this splitting happens to be just what is needed to + * handle where KSM pages have been swapped out: re-reading + * is unnecessarily slow, but we can fix that later on. */ if (swap_count(*swap_map) && PageDirty(page) && PageSwapCache(page)) { -- cgit v1.2.3 From db114b83ab6064d9b1d6ec5650e096c89bd95e25 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 14 Dec 2009 17:59:25 -0800 Subject: ksm: hold anon_vma in rmap_item For full functionality, page_referenced_one() and try_to_unmap_one() need to know the vma: to pass vma down to arch-dependent flushes, or to observe VM_LOCKED or VM_EXEC. But KSM keeps no record of vma: nor can it, since vmas get split and merged without its knowledge. Instead, note page's anon_vma in its rmap_item when adding to stable tree: all the vmas which might map that page are listed by its anon_vma. page_referenced_ksm() and try_to_unmap_ksm() then traverse the anon_vma, first to find the probable vma, that which matches rmap_item's mm; but if that is not enough to locate all instances, traverse again to try the others. This catches those occasions when fork has duplicated a pte of a ksm page, but ksmd has not yet come around to assign it an rmap_item. But each rmap_item in the stable tree which refers to an anon_vma needs to take a reference to it. Andrea's anon_vma design cleverly avoided a reference count (an anon_vma was free when its list of vmas was empty), but KSM now needs to add that. Is a 32-bit count sufficient? I believe so - the anon_vma is only free when both count is 0 and list is empty. Signed-off-by: Hugh Dickins Cc: Izik Eidus Cc: Andrea Arcangeli Cc: Chris Wright Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/rmap.h | 24 ++++++++ mm/ksm.c | 157 +++++++++++++++++++++++++++++++-------------------- mm/rmap.c | 5 +- 3 files changed, 122 insertions(+), 64 deletions(-) (limited to 'include') diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 0b4913a4a344..980094a527ee 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -26,6 +26,9 @@ */ struct anon_vma { spinlock_t lock; /* Serialize access to vma list */ +#ifdef CONFIG_KSM + atomic_t ksm_refcount; +#endif /* * NOTE: the LSB of the head.next is set by * mm_take_all_locks() _after_ taking the above lock. 
So the @@ -38,6 +41,26 @@ struct anon_vma { }; #ifdef CONFIG_MMU +#ifdef CONFIG_KSM +static inline void ksm_refcount_init(struct anon_vma *anon_vma) +{ + atomic_set(&anon_vma->ksm_refcount, 0); +} + +static inline int ksm_refcount(struct anon_vma *anon_vma) +{ + return atomic_read(&anon_vma->ksm_refcount); +} +#else +static inline void ksm_refcount_init(struct anon_vma *anon_vma) +{ +} + +static inline int ksm_refcount(struct anon_vma *anon_vma) +{ + return 0; +} +#endif /* CONFIG_KSM */ static inline struct anon_vma *page_anon_vma(struct page *page) { @@ -70,6 +93,7 @@ void __anon_vma_merge(struct vm_area_struct *, struct vm_area_struct *); void anon_vma_unlink(struct vm_area_struct *); void anon_vma_link(struct vm_area_struct *); void __anon_vma_link(struct vm_area_struct *); +void anon_vma_free(struct anon_vma *); /* * rmap interfaces called when adding or removing pte of page diff --git a/mm/ksm.c b/mm/ksm.c index 2f58ceebfe8f..f7d121c42d01 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -121,7 +121,7 @@ struct stable_node { /** * struct rmap_item - reverse mapping item for virtual addresses * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list - * @filler: unused space we're making available in this patch + * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree * @mm: the memory structure this rmap_item is pointing into * @address: the virtual address this rmap_item tracks (+ flags in low bits) * @oldchecksum: previous checksum of the page at that virtual address @@ -131,7 +131,7 @@ struct stable_node { */ struct rmap_item { struct rmap_item *rmap_list; - unsigned long filler; + struct anon_vma *anon_vma; /* when stable */ struct mm_struct *mm; unsigned long address; /* + low bits used for flags below */ unsigned int oldchecksum; /* when unstable */ @@ -196,13 +196,6 @@ static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait); static DEFINE_MUTEX(ksm_thread_mutex); static DEFINE_SPINLOCK(ksm_mmlist_lock); -/* - * Temporary hack for page_referenced_ksm() and try_to_unmap_ksm(), - * later we rework things a little to get the right vma to them. 
- */ -static DEFINE_SPINLOCK(ksm_fallback_vma_lock); -static struct vm_area_struct ksm_fallback_vma; - #define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\ sizeof(struct __struct), __alignof__(struct __struct),\ (__flags), NULL) @@ -323,6 +316,25 @@ static inline int in_stable_tree(struct rmap_item *rmap_item) return rmap_item->address & STABLE_FLAG; } +static void hold_anon_vma(struct rmap_item *rmap_item, + struct anon_vma *anon_vma) +{ + rmap_item->anon_vma = anon_vma; + atomic_inc(&anon_vma->ksm_refcount); +} + +static void drop_anon_vma(struct rmap_item *rmap_item) +{ + struct anon_vma *anon_vma = rmap_item->anon_vma; + + if (atomic_dec_and_lock(&anon_vma->ksm_refcount, &anon_vma->lock)) { + int empty = list_empty(&anon_vma->head); + spin_unlock(&anon_vma->lock); + if (empty) + anon_vma_free(anon_vma); + } +} + /* * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's * page tables after it has passed through ksm_exit() - which, if necessary, @@ -472,6 +484,7 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item) ksm_pages_shared--; } + drop_anon_vma(rmap_item); rmap_item->address &= PAGE_MASK; } else if (rmap_item->address & UNSTABLE_FLAG) { @@ -752,6 +765,9 @@ static int try_to_merge_one_page(struct vm_area_struct *vma, pte_t orig_pte = __pte(0); int err = -EFAULT; + if (page == kpage) /* ksm page forked */ + return 0; + if (!(vma->vm_flags & VM_MERGEABLE)) goto out; if (!PageAnon(page)) @@ -805,9 +821,6 @@ static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item, struct vm_area_struct *vma; int err = -EFAULT; - if (page == kpage) /* ksm page forked */ - return 0; - down_read(&mm->mmap_sem); if (ksm_test_exit(mm)) goto out; @@ -816,6 +829,11 @@ static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item, goto out; err = try_to_merge_one_page(vma, page, kpage); + if (err) + goto out; + + /* Must get reference to anon_vma while still holding mmap_sem */ + hold_anon_vma(rmap_item, vma->anon_vma); out: up_read(&mm->mmap_sem); return err; @@ -869,6 +887,11 @@ static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item, lru_cache_add_lru(kpage, LRU_ACTIVE_ANON); err = try_to_merge_one_page(vma, page, kpage); + if (err) + goto up; + + /* Must get reference to anon_vma while still holding mmap_sem */ + hold_anon_vma(rmap_item, vma->anon_vma); up: up_read(&mm->mmap_sem); @@ -879,8 +902,10 @@ up: * If that fails, we have a ksm page with only one pte * pointing to it: so break it. */ - if (err) + if (err) { + drop_anon_vma(rmap_item); break_cow(rmap_item); + } } if (err) { put_page(kpage); @@ -1155,7 +1180,9 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) * in which case we need to break_cow on both. */ if (!stable_node) { + drop_anon_vma(tree_rmap_item); break_cow(tree_rmap_item); + drop_anon_vma(rmap_item); break_cow(rmap_item); } } @@ -1490,7 +1517,7 @@ int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg, struct hlist_node *hlist; unsigned int mapcount = page_mapcount(page); int referenced = 0; - struct vm_area_struct *vma; + int search_new_forks = 0; VM_BUG_ON(!PageKsm(page)); VM_BUG_ON(!PageLocked(page)); @@ -1498,36 +1525,40 @@ int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg, stable_node = page_stable_node(page); if (!stable_node) return 0; - - /* - * Temporary hack: really we need anon_vma in rmap_item, to - * provide the correct vma, and to find recently forked instances. 
- * Use zalloc to avoid weirdness if any other fields are involved. - */ - vma = kmem_cache_zalloc(vm_area_cachep, GFP_ATOMIC); - if (!vma) { - spin_lock(&ksm_fallback_vma_lock); - vma = &ksm_fallback_vma; - } - +again: hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { - if (memcg && !mm_match_cgroup(rmap_item->mm, memcg)) - continue; + struct anon_vma *anon_vma = rmap_item->anon_vma; + struct vm_area_struct *vma; + + spin_lock(&anon_vma->lock); + list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { + if (rmap_item->address < vma->vm_start || + rmap_item->address >= vma->vm_end) + continue; + /* + * Initially we examine only the vma which covers this + * rmap_item; but later, if there is still work to do, + * we examine covering vmas in other mms: in case they + * were forked from the original since ksmd passed. + */ + if ((rmap_item->mm == vma->vm_mm) == search_new_forks) + continue; - vma->vm_mm = rmap_item->mm; - vma->vm_start = rmap_item->address; - vma->vm_end = vma->vm_start + PAGE_SIZE; + if (memcg && !mm_match_cgroup(vma->vm_mm, memcg)) + continue; - referenced += page_referenced_one(page, vma, + referenced += page_referenced_one(page, vma, rmap_item->address, &mapcount, vm_flags); + if (!search_new_forks || !mapcount) + break; + } + spin_unlock(&anon_vma->lock); if (!mapcount) goto out; } + if (!search_new_forks++) + goto again; out: - if (vma == &ksm_fallback_vma) - spin_unlock(&ksm_fallback_vma_lock); - else - kmem_cache_free(vm_area_cachep, vma); return referenced; } @@ -1537,7 +1568,7 @@ int try_to_unmap_ksm(struct page *page, enum ttu_flags flags) struct hlist_node *hlist; struct rmap_item *rmap_item; int ret = SWAP_AGAIN; - struct vm_area_struct *vma; + int search_new_forks = 0; VM_BUG_ON(!PageKsm(page)); VM_BUG_ON(!PageLocked(page)); @@ -1545,35 +1576,37 @@ int try_to_unmap_ksm(struct page *page, enum ttu_flags flags) stable_node = page_stable_node(page); if (!stable_node) return SWAP_FAIL; - - /* - * Temporary hack: really we need anon_vma in rmap_item, to - * provide the correct vma, and to find recently forked instances. - * Use zalloc to avoid weirdness if any other fields are involved. - */ - if (TTU_ACTION(flags) != TTU_UNMAP) - return SWAP_FAIL; - - vma = kmem_cache_zalloc(vm_area_cachep, GFP_ATOMIC); - if (!vma) { - spin_lock(&ksm_fallback_vma_lock); - vma = &ksm_fallback_vma; - } - +again: hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { - vma->vm_mm = rmap_item->mm; - vma->vm_start = rmap_item->address; - vma->vm_end = vma->vm_start + PAGE_SIZE; + struct anon_vma *anon_vma = rmap_item->anon_vma; + struct vm_area_struct *vma; - ret = try_to_unmap_one(page, vma, rmap_item->address, flags); - if (ret != SWAP_AGAIN || !page_mapped(page)) - goto out; + spin_lock(&anon_vma->lock); + list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { + if (rmap_item->address < vma->vm_start || + rmap_item->address >= vma->vm_end) + continue; + /* + * Initially we examine only the vma which covers this + * rmap_item; but later, if there is still work to do, + * we examine covering vmas in other mms: in case they + * were forked from the original since ksmd passed. 
+ */ + if ((rmap_item->mm == vma->vm_mm) == search_new_forks) + continue; + + ret = try_to_unmap_one(page, vma, + rmap_item->address, flags); + if (ret != SWAP_AGAIN || !page_mapped(page)) { + spin_unlock(&anon_vma->lock); + goto out; + } + } + spin_unlock(&anon_vma->lock); } + if (!search_new_forks++) + goto again; out: - if (vma == &ksm_fallback_vma) - spin_unlock(&ksm_fallback_vma_lock); - else - kmem_cache_free(vm_area_cachep, vma); return ret; } diff --git a/mm/rmap.c b/mm/rmap.c index 869aaa3206a2..ebdf582ef185 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -68,7 +68,7 @@ static inline struct anon_vma *anon_vma_alloc(void) return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); } -static inline void anon_vma_free(struct anon_vma *anon_vma) +void anon_vma_free(struct anon_vma *anon_vma) { kmem_cache_free(anon_vma_cachep, anon_vma); } @@ -172,7 +172,7 @@ void anon_vma_unlink(struct vm_area_struct *vma) list_del(&vma->anon_vma_node); /* We must garbage collect the anon_vma if it's empty */ - empty = list_empty(&anon_vma->head); + empty = list_empty(&anon_vma->head) && !ksm_refcount(anon_vma); spin_unlock(&anon_vma->lock); if (empty) @@ -184,6 +184,7 @@ static void anon_vma_ctor(void *data) struct anon_vma *anon_vma = data; spin_lock_init(&anon_vma->lock); + ksm_refcount_init(anon_vma); INIT_LIST_HEAD(&anon_vma->head); } -- cgit v1.2.3 From e9995ef978a7d5296fe04a9a2c5ca6e66d8bb4e5 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 14 Dec 2009 17:59:31 -0800 Subject: ksm: rmap_walk to remove_migation_ptes A side-effect of making ksm pages swappable is that they have to be placed on the LRUs: which then exposes them to isolate_lru_page() and hence to page migration. Add rmap_walk() for remove_migration_ptes() to use: rmap_walk_anon() and rmap_walk_file() in rmap.c, but rmap_walk_ksm() in ksm.c. Perhaps some consolidation with existing code is possible, but don't attempt that yet (try_to_unmap needs to handle nonlinears, but migration pte removal does not). rmap_walk() is sadly less general than it appears: rmap_walk_anon(), like remove_anon_migration_ptes() which it replaces, avoids calling page_lock_anon_vma(), because that includes a page_mapped() test which fails when all migration ptes are in place. That was valid when NUMA page migration was introduced (holding mmap_sem provided the missing guarantee that anon_vma's slab had not already been destroyed), but I believe not valid in the memory hotremove case added since. For now do the same as before, and consider the best way to fix that unlikely race later on. When fixed, we can probably use rmap_walk() on hwpoisoned ksm pages too: for now, they remain among hwpoison's various exceptions (its PageKsm test comes before the page is locked, but its page_lock_anon_vma fails safely if an anon gets upgraded). 
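For illustration only (not part of the patch), a minimal sketch of how a caller is expected to drive the rmap_walk() interface added here. The helper names below are made up; only the callback signature follows the declaration added to include/linux/rmap.h:

static int sketch_rmap_one(struct page *page, struct vm_area_struct *vma,
                           unsigned long addr, void *arg)
{
        /* visit one pte mapping of @page at @addr in @vma */
        return SWAP_AGAIN;              /* SWAP_AGAIN keeps the walk going */
}

static void sketch_walk_all_mappings(struct page *page, void *arg)
{
        VM_BUG_ON(!PageLocked(page));   /* rmap_walk() requires the page lock */
        rmap_walk(page, sketch_rmap_one, arg);
}
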
Signed-off-by: Hugh Dickins Cc: Izik Eidus Cc: Andrea Arcangeli Cc: Chris Wright Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/ksm.h | 13 ++++++++ include/linux/rmap.h | 6 ++++ mm/ksm.c | 65 ++++++++++++++++++++++++++++++++++++++++ mm/migrate.c | 85 +++++++++++----------------------------------------- mm/rmap.c | 79 ++++++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 181 insertions(+), 67 deletions(-) (limited to 'include') diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 157d83dbaef8..bed5f16ba827 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -88,6 +88,9 @@ static inline struct page *ksm_might_need_to_copy(struct page *page, int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg, unsigned long *vm_flags); int try_to_unmap_ksm(struct page *page, enum ttu_flags flags); +int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *, + struct vm_area_struct *, unsigned long, void *), void *arg); +void ksm_migrate_page(struct page *newpage, struct page *oldpage); #else /* !CONFIG_KSM */ @@ -127,6 +130,16 @@ static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags) { return 0; } + +static inline int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page*, + struct vm_area_struct *, unsigned long, void *), void *arg) +{ + return 0; +} + +static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage) +{ +} #endif /* !CONFIG_KSM */ #endif /* __LINUX_KSM_H */ diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 980094a527ee..b019ae64e2ab 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -164,6 +164,12 @@ struct anon_vma *page_lock_anon_vma(struct page *page); void page_unlock_anon_vma(struct anon_vma *anon_vma); int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma); +/* + * Called by migrate.c to remove migration ptes, but might be used more later. + */ +int rmap_walk(struct page *page, int (*rmap_one)(struct page *, + struct vm_area_struct *, unsigned long, void *), void *arg); + #else /* !CONFIG_MMU */ #define anon_vma_init() do {} while (0) diff --git a/mm/ksm.c b/mm/ksm.c index 20f46a7b2799..dfdc292d3626 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -1656,6 +1656,71 @@ out: return ret; } +#ifdef CONFIG_MIGRATION +int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *, + struct vm_area_struct *, unsigned long, void *), void *arg) +{ + struct stable_node *stable_node; + struct hlist_node *hlist; + struct rmap_item *rmap_item; + int ret = SWAP_AGAIN; + int search_new_forks = 0; + + VM_BUG_ON(!PageKsm(page)); + VM_BUG_ON(!PageLocked(page)); + + stable_node = page_stable_node(page); + if (!stable_node) + return ret; +again: + hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { + struct anon_vma *anon_vma = rmap_item->anon_vma; + struct vm_area_struct *vma; + + spin_lock(&anon_vma->lock); + list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { + if (rmap_item->address < vma->vm_start || + rmap_item->address >= vma->vm_end) + continue; + /* + * Initially we examine only the vma which covers this + * rmap_item; but later, if there is still work to do, + * we examine covering vmas in other mms: in case they + * were forked from the original since ksmd passed. 
+ */ + if ((rmap_item->mm == vma->vm_mm) == search_new_forks) + continue; + + ret = rmap_one(page, vma, rmap_item->address, arg); + if (ret != SWAP_AGAIN) { + spin_unlock(&anon_vma->lock); + goto out; + } + } + spin_unlock(&anon_vma->lock); + } + if (!search_new_forks++) + goto again; +out: + return ret; +} + +void ksm_migrate_page(struct page *newpage, struct page *oldpage) +{ + struct stable_node *stable_node; + + VM_BUG_ON(!PageLocked(oldpage)); + VM_BUG_ON(!PageLocked(newpage)); + VM_BUG_ON(newpage->mapping != oldpage->mapping); + + stable_node = page_stable_node(newpage); + if (stable_node) { + VM_BUG_ON(stable_node->page != oldpage); + stable_node->page = newpage; + } +} +#endif /* CONFIG_MIGRATION */ + #ifdef CONFIG_SYSFS /* * This all compiles without CONFIG_SYSFS, but is a waste of space. diff --git a/mm/migrate.c b/mm/migrate.c index 367272d04423..0b714747c028 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -78,8 +79,8 @@ int putback_lru_pages(struct list_head *l) /* * Restore a potential migration pte to a working pte entry */ -static void remove_migration_pte(struct vm_area_struct *vma, - struct page *old, struct page *new) +static int remove_migration_pte(struct page *new, struct vm_area_struct *vma, + unsigned long addr, void *old) { struct mm_struct *mm = vma->vm_mm; swp_entry_t entry; @@ -88,40 +89,37 @@ static void remove_migration_pte(struct vm_area_struct *vma, pmd_t *pmd; pte_t *ptep, pte; spinlock_t *ptl; - unsigned long addr = page_address_in_vma(new, vma); - - if (addr == -EFAULT) - return; pgd = pgd_offset(mm, addr); if (!pgd_present(*pgd)) - return; + goto out; pud = pud_offset(pgd, addr); if (!pud_present(*pud)) - return; + goto out; pmd = pmd_offset(pud, addr); if (!pmd_present(*pmd)) - return; + goto out; ptep = pte_offset_map(pmd, addr); if (!is_swap_pte(*ptep)) { pte_unmap(ptep); - return; + goto out; } ptl = pte_lockptr(mm, pmd); spin_lock(ptl); pte = *ptep; if (!is_swap_pte(pte)) - goto out; + goto unlock; entry = pte_to_swp_entry(pte); - if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old) - goto out; + if (!is_migration_entry(entry) || + migration_entry_to_page(entry) != old) + goto unlock; get_page(new); pte = pte_mkold(mk_pte(new, vma->vm_page_prot)); @@ -137,55 +135,10 @@ static void remove_migration_pte(struct vm_area_struct *vma, /* No need to invalidate - it was non-present before */ update_mmu_cache(vma, addr, pte); - -out: +unlock: pte_unmap_unlock(ptep, ptl); -} - -/* - * Note that remove_file_migration_ptes will only work on regular mappings, - * Nonlinear mappings do not use migration entries. - */ -static void remove_file_migration_ptes(struct page *old, struct page *new) -{ - struct vm_area_struct *vma; - struct address_space *mapping = new->mapping; - struct prio_tree_iter iter; - pgoff_t pgoff = new->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); - - if (!mapping) - return; - - spin_lock(&mapping->i_mmap_lock); - - vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) - remove_migration_pte(vma, old, new); - - spin_unlock(&mapping->i_mmap_lock); -} - -/* - * Must hold mmap_sem lock on at least one of the vmas containing - * the page so that the anon_vma cannot vanish. - */ -static void remove_anon_migration_ptes(struct page *old, struct page *new) -{ - struct anon_vma *anon_vma; - struct vm_area_struct *vma; - - /* - * We hold the mmap_sem lock. So no need to call page_lock_anon_vma. 
- */ - anon_vma = page_anon_vma(new); - if (!anon_vma) - return; - - spin_lock(&anon_vma->lock); - - list_for_each_entry(vma, &anon_vma->head, anon_vma_node) - remove_migration_pte(vma, old, new); - - spin_unlock(&anon_vma->lock); +out: + return SWAP_AGAIN; } /* @@ -194,10 +147,7 @@ static void remove_anon_migration_ptes(struct page *old, struct page *new) */ static void remove_migration_ptes(struct page *old, struct page *new) { - if (PageAnon(new)) - remove_anon_migration_ptes(old, new); - else - remove_file_migration_ptes(old, new); + rmap_walk(new, remove_migration_pte, old); } /* @@ -358,6 +308,7 @@ static void migrate_page_copy(struct page *newpage, struct page *page) } mlock_migrate_page(newpage, page); + ksm_migrate_page(newpage, page); ClearPageSwapCache(page); ClearPagePrivate(page); @@ -577,9 +528,9 @@ static int move_to_new_page(struct page *newpage, struct page *page) else rc = fallback_migrate_page(mapping, newpage, page); - if (!rc) { + if (!rc) remove_migration_ptes(page, newpage); - } else + else newpage->mapping = NULL; unlock_page(newpage); diff --git a/mm/rmap.c b/mm/rmap.c index 2e38e9048327..c81bedd7d527 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1203,3 +1203,82 @@ int try_to_munlock(struct page *page) else return try_to_unmap_file(page, TTU_MUNLOCK); } + +#ifdef CONFIG_MIGRATION +/* + * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file(): + * Called by migrate.c to remove migration ptes, but might be used more later. + */ +static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *, + struct vm_area_struct *, unsigned long, void *), void *arg) +{ + struct anon_vma *anon_vma; + struct vm_area_struct *vma; + int ret = SWAP_AGAIN; + + /* + * Note: remove_migration_ptes() cannot use page_lock_anon_vma() + * because that depends on page_mapped(); but not all its usages + * are holding mmap_sem, which also gave the necessary guarantee + * (that this anon_vma's slab has not already been destroyed). + * This needs to be reviewed later: avoiding page_lock_anon_vma() + * is risky, and currently limits the usefulness of rmap_walk(). + */ + anon_vma = page_anon_vma(page); + if (!anon_vma) + return ret; + spin_lock(&anon_vma->lock); + list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { + unsigned long address = vma_address(page, vma); + if (address == -EFAULT) + continue; + ret = rmap_one(page, vma, address, arg); + if (ret != SWAP_AGAIN) + break; + } + spin_unlock(&anon_vma->lock); + return ret; +} + +static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *, + struct vm_area_struct *, unsigned long, void *), void *arg) +{ + struct address_space *mapping = page->mapping; + pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); + struct vm_area_struct *vma; + struct prio_tree_iter iter; + int ret = SWAP_AGAIN; + + if (!mapping) + return ret; + spin_lock(&mapping->i_mmap_lock); + vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { + unsigned long address = vma_address(page, vma); + if (address == -EFAULT) + continue; + ret = rmap_one(page, vma, address, arg); + if (ret != SWAP_AGAIN) + break; + } + /* + * No nonlinear handling: being always shared, nonlinear vmas + * never contain migration ptes. Decide what to do about this + * limitation to linear when we need rmap_walk() on nonlinear. 
+ */ + spin_unlock(&mapping->i_mmap_lock); + return ret; +} + +int rmap_walk(struct page *page, int (*rmap_one)(struct page *, + struct vm_area_struct *, unsigned long, void *), void *arg) +{ + VM_BUG_ON(!PageLocked(page)); + + if (unlikely(PageKsm(page))) + return rmap_walk_ksm(page, rmap_one, arg); + else if (PageAnon(page)) + return rmap_walk_anon(page, rmap_one, arg); + else + return rmap_walk_file(page, rmap_one, arg); +} +#endif /* CONFIG_MIGRATION */ -- cgit v1.2.3 From 62b61f611eb5e20f7e9f8619bfd03bdfe8af6348 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 14 Dec 2009 17:59:33 -0800 Subject: ksm: memory hotremove migration only The previous patch enables page migration of ksm pages, but that soon gets into trouble: not surprising, since we're using the ksm page lock to lock operations on its stable_node, but page migration switches the page whose lock is to be used for that. Another layer of locking would fix it, but do we need that yet? Do we actually need page migration of ksm pages? Yes, memory hotremove needs to offline sections of memory: and since we stopped allocating ksm pages with GFP_HIGHUSER, they will tend to be GFP_HIGHUSER_MOVABLE candidates for migration. But KSM is currently unconscious of NUMA issues, happily merging pages from different NUMA nodes: at present the rule must be, not to use MADV_MERGEABLE where you care about NUMA. So no, NUMA page migration of ksm pages does not make sense yet. So, to complete support for ksm swapping we need to make hotremove safe. ksm_memory_callback() take ksm_thread_mutex when MEM_GOING_OFFLINE and release it when MEM_OFFLINE or MEM_CANCEL_OFFLINE. But if mapped pages are freed before migration reaches them, stable_nodes may be left still pointing to struct pages which have been removed from the system: the stable_node needs to identify a page by pfn rather than page pointer, then it can safely prune them when MEM_OFFLINE. And make NUMA migration skip PageKsm pages where it skips PageReserved. But it's only when we reach unmap_and_move() that the page lock is taken and we can be sure that raised pagecount has prevented a PageAnon from being upgraded: so add offlining arg to migrate_pages(), to migrate ksm page when offlining (has sufficient locking) but reject it otherwise. 
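For illustration only (not part of the patch), a minimal sketch of the calling convention this introduces. Only memory hotremove, which has locked out ksmd via the notifier, passes offlining=1; all other callers pass 0, and unmap_and_move() then rejects PageKsm pages with -EBUSY. The function names below are made up; hotremove_migrate_alloc is the allocator already used by mm/memory_hotplug.c:

static int sketch_offline_migrate(struct list_head *pagelist)
{
        /* offline_pages() path: safe to move KSM pages */
        return migrate_pages(pagelist, hotremove_migrate_alloc, 0, 1);
}

static int sketch_normal_migrate(struct list_head *pagelist,
                                 new_page_t get_new_page, unsigned long private)
{
        /* mempolicy / move_pages() path: KSM pages are skipped or rejected */
        return migrate_pages(pagelist, get_new_page, private, 0);
}
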
Signed-off-by: Hugh Dickins Cc: Izik Eidus Cc: Andrea Arcangeli Cc: Chris Wright Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/migrate.h | 8 ++--- mm/ksm.c | 84 +++++++++++++++++++++++++++++++++++++++++-------- mm/memory_hotplug.c | 2 +- mm/mempolicy.c | 19 +++++------ mm/migrate.c | 27 ++++++++++++---- 5 files changed, 103 insertions(+), 37 deletions(-) (limited to 'include') diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 527602cdea1c..7f085c97c799 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -12,7 +12,8 @@ typedef struct page *new_page_t(struct page *, unsigned long private, int **); extern int putback_lru_pages(struct list_head *l); extern int migrate_page(struct address_space *, struct page *, struct page *); -extern int migrate_pages(struct list_head *l, new_page_t x, unsigned long); +extern int migrate_pages(struct list_head *l, new_page_t x, + unsigned long private, int offlining); extern int fail_migrate_page(struct address_space *, struct page *, struct page *); @@ -26,10 +27,7 @@ extern int migrate_vmas(struct mm_struct *mm, static inline int putback_lru_pages(struct list_head *l) { return 0; } static inline int migrate_pages(struct list_head *l, new_page_t x, - unsigned long private) { return -ENOSYS; } - -static inline int migrate_pages_to(struct list_head *pagelist, - struct vm_area_struct *vma, int dest) { return 0; } + unsigned long private, int offlining) { return -ENOSYS; } static inline int migrate_prep(void) { return -ENOSYS; } diff --git a/mm/ksm.c b/mm/ksm.c index dfdc292d3626..d4c228a9d278 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -108,14 +109,14 @@ struct ksm_scan { /** * struct stable_node - node of the stable rbtree - * @page: pointer to struct page of the ksm page * @node: rb node of this ksm page in the stable tree * @hlist: hlist head of rmap_items using this ksm page + * @kpfn: page frame number of this ksm page */ struct stable_node { - struct page *page; struct rb_node node; struct hlist_head hlist; + unsigned long kpfn; }; /** @@ -515,7 +516,7 @@ static struct page *get_ksm_page(struct stable_node *stable_node) struct page *page; void *expected_mapping; - page = stable_node->page; + page = pfn_to_page(stable_node->kpfn); expected_mapping = (void *)stable_node + (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); rcu_read_lock(); @@ -973,7 +974,7 @@ static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item, * This function returns the stable tree node of identical content if found, * NULL otherwise. 
*/ -static struct stable_node *stable_tree_search(struct page *page) +static struct page *stable_tree_search(struct page *page) { struct rb_node *node = root_stable_tree.rb_node; struct stable_node *stable_node; @@ -981,7 +982,7 @@ static struct stable_node *stable_tree_search(struct page *page) stable_node = page_stable_node(page); if (stable_node) { /* ksm page forked */ get_page(page); - return stable_node; + return page; } while (node) { @@ -1003,7 +1004,7 @@ static struct stable_node *stable_tree_search(struct page *page) put_page(tree_page); node = node->rb_right; } else - return stable_node; + return tree_page; } return NULL; @@ -1059,7 +1060,7 @@ static struct stable_node *stable_tree_insert(struct page *kpage) INIT_HLIST_HEAD(&stable_node->hlist); - stable_node->page = kpage; + stable_node->kpfn = page_to_pfn(kpage); set_page_stable_node(kpage, stable_node); return stable_node; @@ -1170,9 +1171,8 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) remove_rmap_item_from_tree(rmap_item); /* We first start with searching the page inside the stable tree */ - stable_node = stable_tree_search(page); - if (stable_node) { - kpage = stable_node->page; + kpage = stable_tree_search(page); + if (kpage) { err = try_to_merge_with_ksm_page(rmap_item, page, kpage); if (!err) { /* @@ -1180,7 +1180,7 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) * add its rmap_item to the stable tree. */ lock_page(kpage); - stable_tree_append(rmap_item, stable_node); + stable_tree_append(rmap_item, page_stable_node(kpage)); unlock_page(kpage); } put_page(kpage); @@ -1715,12 +1715,63 @@ void ksm_migrate_page(struct page *newpage, struct page *oldpage) stable_node = page_stable_node(newpage); if (stable_node) { - VM_BUG_ON(stable_node->page != oldpage); - stable_node->page = newpage; + VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage)); + stable_node->kpfn = page_to_pfn(newpage); } } #endif /* CONFIG_MIGRATION */ +#ifdef CONFIG_MEMORY_HOTREMOVE +static struct stable_node *ksm_check_stable_tree(unsigned long start_pfn, + unsigned long end_pfn) +{ + struct rb_node *node; + + for (node = rb_first(&root_stable_tree); node; node = rb_next(node)) { + struct stable_node *stable_node; + + stable_node = rb_entry(node, struct stable_node, node); + if (stable_node->kpfn >= start_pfn && + stable_node->kpfn < end_pfn) + return stable_node; + } + return NULL; +} + +static int ksm_memory_callback(struct notifier_block *self, + unsigned long action, void *arg) +{ + struct memory_notify *mn = arg; + struct stable_node *stable_node; + + switch (action) { + case MEM_GOING_OFFLINE: + /* + * Keep it very simple for now: just lock out ksmd and + * MADV_UNMERGEABLE while any memory is going offline. + */ + mutex_lock(&ksm_thread_mutex); + break; + + case MEM_OFFLINE: + /* + * Most of the work is done by page migration; but there might + * be a few stable_nodes left over, still pointing to struct + * pages which have been offlined: prune those from the tree. + */ + while ((stable_node = ksm_check_stable_tree(mn->start_pfn, + mn->start_pfn + mn->nr_pages)) != NULL) + remove_node_from_stable_tree(stable_node); + /* fallthrough */ + + case MEM_CANCEL_OFFLINE: + mutex_unlock(&ksm_thread_mutex); + break; + } + return NOTIFY_OK; +} +#endif /* CONFIG_MEMORY_HOTREMOVE */ + #ifdef CONFIG_SYSFS /* * This all compiles without CONFIG_SYSFS, but is a waste of space. 
@@ -1946,6 +1997,13 @@ static int __init ksm_init(void) #endif /* CONFIG_SYSFS */ +#ifdef CONFIG_MEMORY_HOTREMOVE + /* + * Choose a high priority since the callback takes ksm_thread_mutex: + * later callbacks could only be taking locks which nest within that. + */ + hotplug_memory_notifier(ksm_memory_callback, 100); +#endif return 0; out_free2: diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index bc5a08138f1e..67e941d7882c 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -698,7 +698,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) if (list_empty(&source)) goto out; /* this function returns # of failed pages */ - ret = migrate_pages(&source, hotremove_migrate_alloc, 0); + ret = migrate_pages(&source, hotremove_migrate_alloc, 0, 1); out: return ret; diff --git a/mm/mempolicy.c b/mm/mempolicy.c index f11fdad06204..290fb5bf0440 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -85,6 +85,7 @@ #include #include #include +#include #include #include #include @@ -413,17 +414,11 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd, if (!page) continue; /* - * The check for PageReserved here is important to avoid - * handling zero pages and other pages that may have been - * marked special by the system. - * - * If the PageReserved would not be checked here then f.e. - * the location of the zero page could have an influence - * on MPOL_MF_STRICT, zero pages would be counted for - * the per node stats, and there would be useless attempts - * to put zero pages on the migration list. + * vm_normal_page() filters out zero pages, but there might + * still be PageReserved pages to skip, perhaps in a VDSO. + * And we cannot move PageKsm pages sensibly or safely yet. */ - if (PageReserved(page)) + if (PageReserved(page) || PageKsm(page)) continue; nid = page_to_nid(page); if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT)) @@ -839,7 +834,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest, flags | MPOL_MF_DISCONTIG_OK, &pagelist); if (!list_empty(&pagelist)) - err = migrate_pages(&pagelist, new_node_page, dest); + err = migrate_pages(&pagelist, new_node_page, dest, 0); return err; } @@ -1056,7 +1051,7 @@ static long do_mbind(unsigned long start, unsigned long len, if (!list_empty(&pagelist)) nr_failed = migrate_pages(&pagelist, new_vma_page, - (unsigned long)vma); + (unsigned long)vma, 0); if (!err && nr_failed && (flags & MPOL_MF_STRICT)) err = -EIO; diff --git a/mm/migrate.c b/mm/migrate.c index 0b714747c028..2a0ea3ef509e 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -543,7 +543,7 @@ static int move_to_new_page(struct page *newpage, struct page *page) * to the newly allocated page in newpage. */ static int unmap_and_move(new_page_t get_new_page, unsigned long private, - struct page *page, int force) + struct page *page, int force, int offlining) { int rc = 0; int *result = NULL; @@ -569,6 +569,20 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private, lock_page(page); } + /* + * Only memory hotplug's offline_pages() caller has locked out KSM, + * and can safely migrate a KSM page. The other cases have skipped + * PageKsm along with PageReserved - but it is only now when we have + * the page lock that we can be certain it will not go KSM beneath us + * (KSM will not upgrade a page from PageAnon to PageKsm when it sees + * its pagecount raised, but only here do we take the page lock which + * serializes that). 
+ */ + if (PageKsm(page) && !offlining) { + rc = -EBUSY; + goto unlock; + } + /* charge against new page */ charge = mem_cgroup_prepare_migration(page, &mem); if (charge == -ENOMEM) { @@ -685,7 +699,7 @@ move_newpage: * Return: Number of pages not migrated or error code. */ int migrate_pages(struct list_head *from, - new_page_t get_new_page, unsigned long private) + new_page_t get_new_page, unsigned long private, int offlining) { int retry = 1; int nr_failed = 0; @@ -705,7 +719,7 @@ int migrate_pages(struct list_head *from, cond_resched(); rc = unmap_and_move(get_new_page, private, - page, pass > 2); + page, pass > 2, offlining); switch(rc) { case -ENOMEM: @@ -801,7 +815,8 @@ static int do_move_page_to_node_array(struct mm_struct *mm, if (!page) goto set_status; - if (PageReserved(page)) /* Check for zero page */ + /* Use PageReserved to check for zero page */ + if (PageReserved(page) || PageKsm(page)) goto put_and_set; pp->page = page; @@ -838,7 +853,7 @@ set_status: err = 0; if (!list_empty(&pagelist)) err = migrate_pages(&pagelist, new_page_node, - (unsigned long)pm); + (unsigned long)pm, 0); up_read(&mm->mmap_sem); return err; @@ -959,7 +974,7 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages, err = -ENOENT; /* Use PageReserved to check for zero page */ - if (!page || PageReserved(page)) + if (!page || PageReserved(page) || PageKsm(page)) goto set_status; err = page_to_nid(page); -- cgit v1.2.3 From b4e655a4aaa327810110457cef92681447dd13e4 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Mon, 14 Dec 2009 17:59:35 -0800 Subject: mm: memory_hotplug: make offline_pages() static It has no references outside memory_hotplug.c. Cc: "Rafael J. Wysocki" Cc: Andi Kleen Cc: Gerald Schaefer Cc: KOSAKI Motohiro Cc: Yasunori Goto Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memory_hotplug.h | 1 - mm/memory_hotplug.c | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index fed969281a41..35b07b773e6c 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -69,7 +69,6 @@ extern void online_page(struct page *page); /* VM interface that may be used by firmware interface */ extern int online_pages(unsigned long, unsigned long); extern void __offline_isolated_pages(unsigned long, unsigned long); -extern int offline_pages(unsigned long, unsigned long, unsigned long); /* reasonably generic interface to expand the physical pages in a zone */ extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn, diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 67e941d7882c..f827cf4cb4e5 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -751,7 +751,7 @@ check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn) return offlined; } -int offline_pages(unsigned long start_pfn, +static int offline_pages(unsigned long start_pfn, unsigned long end_pfn, unsigned long timeout) { unsigned long pfn, nr_pages, expire; -- cgit v1.2.3 From f096e59e844ba3c5d5a7b54b3deafd2aeeebf921 Mon Sep 17 00:00:00 2001 From: Huang Shijie Date: Mon, 14 Dec 2009 17:59:51 -0800 Subject: include/linux/mm.h: remove unneeded ifdef The check code for CONFIG_SWAP is redundant, because there is a non-CONFIG_SWAP version for PageSwapCache() which just returns 0. 
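For illustration only, a minimal sketch of why the #ifdef is redundant: with CONFIG_SWAP disabled, include/linux/page-flags.h already provides a stub roughly equivalent to the following, so the swapper_space branch in page_mapping() is dead code that the compiler drops by itself:

static inline int PageSwapCache(struct page *page)
{
        return 0;
}
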
Signed-off-by: Huang Shijie Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/mm.h b/include/linux/mm.h index 1202cd3121e1..52b264563cd9 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -643,12 +643,9 @@ static inline struct address_space *page_mapping(struct page *page) struct address_space *mapping = page->mapping; VM_BUG_ON(PageSlab(page)); -#ifdef CONFIG_SWAP if (unlikely(PageSwapCache(page))) mapping = &swapper_space; - else -#endif - if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON)) + else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON)) mapping = NULL; return mapping; } -- cgit v1.2.3 From 5dc37642cbce34619e4588a9f0bdad1d2f870956 Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Mon, 14 Dec 2009 18:00:01 -0800 Subject: mm hugetlb: add hugepage support to pagemap This patch enables extraction of the pfn of a hugepage from /proc/pid/pagemap in an architecture independent manner. Details ------- My test program (leak_pagemap) works as follows: - creat() and mmap() a file on hugetlbfs (file size is 200MB == 100 hugepages,) - read()/write() something on it, - call page-types with option -p, - munmap() and unlink() the file on hugetlbfs Without my patches ------------------ $ ./leak_pagemap flags page-count MB symbolic-flags long-symbolic-flags 0x0000000000000000 1 0 __________________________________ 0x0000000000000804 1 0 __R________M______________________ referenced,mmap 0x000000000000086c 81 0 __RU_lA____M______________________ referenced,uptodate,lru,active,mmap 0x0000000000005808 5 0 ___U_______Ma_b___________________ uptodate,mmap,anonymous,swapbacked 0x0000000000005868 12 0 ___U_lA____Ma_b___________________ uptodate,lru,active,mmap,anonymous,swapbacked 0x000000000000586c 1 0 __RU_lA____Ma_b___________________ referenced,uptodate,lru,active,mmap,anonymous,swapbacked total 101 0 The output of page-types don't show any hugepage. With my patches --------------- $ ./leak_pagemap flags page-count MB symbolic-flags long-symbolic-flags 0x0000000000000000 1 0 __________________________________ 0x0000000000030000 51100 199 ________________TG________________ compound_tail,huge 0x0000000000028018 100 0 ___UD__________H_G________________ uptodate,dirty,compound_head,huge 0x0000000000000804 1 0 __R________M______________________ referenced,mmap 0x000000000000080c 1 0 __RU_______M______________________ referenced,uptodate,mmap 0x000000000000086c 80 0 __RU_lA____M______________________ referenced,uptodate,lru,active,mmap 0x0000000000005808 4 0 ___U_______Ma_b___________________ uptodate,mmap,anonymous,swapbacked 0x0000000000005868 12 0 ___U_lA____Ma_b___________________ uptodate,lru,active,mmap,anonymous,swapbacked 0x000000000000586c 1 0 __RU_lA____Ma_b___________________ referenced,uptodate,lru,active,mmap,anonymous,swapbacked total 51300 200 The output of page-types shows 51200 pages contributing to hugepages, containing 100 head pages and 51100 tail pages as expected. 
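For illustration only (this is not the author's leak_pagemap test), a minimal userspace sketch of reading one /proc/self/pagemap entry. The entry layout assumed here (64-bit entries, PFN in the low 55 bits, bit 63 = present) follows Documentation/vm/pagemap.txt; note that recent kernels hide the PFN from unprivileged readers:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        long page_size = sysconf(_SC_PAGESIZE);
        unsigned long vaddr = (unsigned long)&page_size; /* any mapped address */
        uint64_t entry = 0;
        int fd = open("/proc/self/pagemap", O_RDONLY);

        if (fd < 0)
                return 1;
        if (pread(fd, &entry, sizeof(entry),
                  (off_t)(vaddr / page_size) * sizeof(entry)) == sizeof(entry)
            && (entry >> 63) & 1)
                printf("pfn 0x%llx\n",
                       (unsigned long long)(entry & ((1ULL << 55) - 1)));
        close(fd);
        return 0;
}
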
[akpm@linux-foundation.org: build fix] Signed-off-by: Naoya Horiguchi Cc: Andi Kleen Cc: Wu Fengguang Cc: Hugh Dickins Cc: Mel Gorman Cc: Lee Schermerhorn Cc: Andy Whitcroft Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 45 +++++++++++++++++++++++++++++++++++++++++++++ include/linux/mm.h | 3 +++ mm/pagewalk.c | 22 +++++++++++++++++++--- 3 files changed, 67 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 2a1bef9203c6..47c03f4336b8 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -650,6 +650,50 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, return err; } +static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset) +{ + u64 pme = 0; + if (pte_present(pte)) + pme = PM_PFRAME(pte_pfn(pte) + offset) + | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT; + return pme; +} + +static int pagemap_hugetlb_range(pte_t *pte, unsigned long addr, + unsigned long end, struct mm_walk *walk) +{ + struct vm_area_struct *vma; + struct pagemapread *pm = walk->private; + struct hstate *hs = NULL; + int err = 0; + + vma = find_vma(walk->mm, addr); + if (vma) + hs = hstate_vma(vma); + for (; addr != end; addr += PAGE_SIZE) { + u64 pfn = PM_NOT_PRESENT; + + if (vma && (addr >= vma->vm_end)) { + vma = find_vma(walk->mm, addr); + if (vma) + hs = hstate_vma(vma); + } + + if (vma && (vma->vm_start <= addr) && is_vm_hugetlb_page(vma)) { + /* calculate pfn of the "raw" page in the hugepage. */ + int offset = (addr & ~huge_page_mask(hs)) >> PAGE_SHIFT; + pfn = huge_pte_to_pagemap_entry(*pte, offset); + } + err = add_to_pagemap(addr, pfn, pm); + if (err) + return err; + } + + cond_resched(); + + return err; +} + /* * /proc/pid/pagemap - an array mapping virtual pages to pfns * @@ -742,6 +786,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf, pagemap_walk.pmd_entry = pagemap_pte_range; pagemap_walk.pte_hole = pagemap_pte_hole; + pagemap_walk.hugetlb_entry = pagemap_hugetlb_range; pagemap_walk.mm = mm; pagemap_walk.private = ± diff --git a/include/linux/mm.h b/include/linux/mm.h index 52b264563cd9..9d65ae4ba0e0 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -770,6 +770,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlb, * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry * @pte_entry: if set, called for each non-empty PTE (4th-level) entry * @pte_hole: if set, called for each hole at all levels + * @hugetlb_entry: if set, called for each hugetlb entry * * (see walk_page_range for more details) */ @@ -779,6 +780,8 @@ struct mm_walk { int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *); int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *); int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *); + int (*hugetlb_entry)(pte_t *, unsigned long, unsigned long, + struct mm_walk *); struct mm_struct *mm; void *private; }; diff --git a/mm/pagewalk.c b/mm/pagewalk.c index a286915e23ef..7b47a57b6646 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c @@ -120,15 +120,31 @@ int walk_page_range(unsigned long addr, unsigned long end, do { next = pgd_addr_end(addr, end); - /* skip hugetlb vma to avoid hugepage PMD being cleared - * in pmd_none_or_clear_bad(). */ + /* + * handle hugetlb vma individually because pagetable walk for + * the hugetlb page is dependent on the architecture and + * we can't handled it in the same manner as non-huge pages. 
+ */ vma = find_vma(walk->mm, addr); +#ifdef CONFIG_HUGETLB_PAGE if (vma && is_vm_hugetlb_page(vma)) { + pte_t *pte; + struct hstate *hs; + if (vma->vm_end < next) next = vma->vm_end; + hs = hstate_vma(vma); + pte = huge_pte_offset(walk->mm, + addr & huge_page_mask(hs)); + if (pte && !huge_pte_none(huge_ptep_get(pte)) + && walk->hugetlb_entry) + err = walk->hugetlb_entry(pte, addr, + next, walk); + if (err) + break; continue; } - +#endif if (pgd_none_or_clear_bad(pgd)) { if (walk->pte_hole) err = walk->pte_hole(addr, next, walk); -- cgit v1.2.3 From ea637639591def87a54cea811cbac796980cb30d Mon Sep 17 00:00:00 2001 From: Jie Zhang Date: Mon, 14 Dec 2009 18:00:02 -0800 Subject: nommu: fix malloc performance by adding uninitialized flag The NOMMU code currently clears all anonymous mmapped memory. While this is what we want in the default case, all memory allocation from userspace under NOMMU has to go through this interface, including malloc() which is allowed to return uninitialized memory. This can easily be a significant performance penalty. So for constrained embedded systems were security is irrelevant, allow people to avoid clearing memory unnecessarily. This also alters the ELF-FDPIC binfmt such that it obtains uninitialised memory for the brk and stack region. Signed-off-by: Jie Zhang Signed-off-by: Robin Getz Signed-off-by: Mike Frysinger Signed-off-by: David Howells Acked-by: Paul Mundt Acked-by: Greg Ungerer Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/nommu-mmap.txt | 26 ++++++++++++++++++++++++++ fs/binfmt_elf_fdpic.c | 3 ++- include/asm-generic/mman-common.h | 5 +++++ init/Kconfig | 22 ++++++++++++++++++++++ mm/nommu.c | 8 +++++--- 5 files changed, 60 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/Documentation/nommu-mmap.txt b/Documentation/nommu-mmap.txt index b565e8279d13..8e1ddec2c78a 100644 --- a/Documentation/nommu-mmap.txt +++ b/Documentation/nommu-mmap.txt @@ -119,6 +119,32 @@ FURTHER NOTES ON NO-MMU MMAP granule but will only discard the excess if appropriately configured as this has an effect on fragmentation. + (*) The memory allocated by a request for an anonymous mapping will normally + be cleared by the kernel before being returned in accordance with the + Linux man pages (ver 2.22 or later). + + In the MMU case this can be achieved with reasonable performance as + regions are backed by virtual pages, with the contents only being mapped + to cleared physical pages when a write happens on that specific page + (prior to which, the pages are effectively mapped to the global zero page + from which reads can take place). This spreads out the time it takes to + initialize the contents of a page - depending on the write-usage of the + mapping. + + In the no-MMU case, however, anonymous mappings are backed by physical + pages, and the entire map is cleared at allocation time. This can cause + significant delays during a userspace malloc() as the C library does an + anonymous mapping and the kernel then does a memset for the entire map. + + However, for memory that isn't required to be precleared - such as that + returned by malloc() - mmap() can take a MAP_UNINITIALIZED flag to + indicate to the kernel that it shouldn't bother clearing the memory before + returning it. Note that CONFIG_MMAP_ALLOW_UNINITIALIZED must be enabled + to permit this, otherwise the flag will be ignored. + + uClibc uses this to speed up malloc(), and the ELF-FDPIC binfmt uses this + to allocate the brk and stack region. 
+ (*) A list of all the private copy and anonymous mappings on the system is visible through /proc/maps in no-MMU mode. diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 38502c67987c..79d2b1aa389f 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -380,7 +380,8 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm, down_write(¤t->mm->mmap_sem); current->mm->start_brk = do_mmap(NULL, 0, stack_size, PROT_READ | PROT_WRITE | PROT_EXEC, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_GROWSDOWN, + MAP_PRIVATE | MAP_ANONYMOUS | + MAP_UNINITIALIZED | MAP_GROWSDOWN, 0); if (IS_ERR_VALUE(current->mm->start_brk)) { diff --git a/include/asm-generic/mman-common.h b/include/asm-generic/mman-common.h index 5ee13b2fd223..20111265afd8 100644 --- a/include/asm-generic/mman-common.h +++ b/include/asm-generic/mman-common.h @@ -19,6 +19,11 @@ #define MAP_TYPE 0x0f /* Mask for type of mapping */ #define MAP_FIXED 0x10 /* Interpret addr exactly */ #define MAP_ANONYMOUS 0x20 /* don't use a file */ +#ifdef CONFIG_MMAP_ALLOW_UNINITIALIZED +# define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be uninitialized */ +#else +# define MAP_UNINITIALIZED 0x0 /* Don't support this flag */ +#endif #define MS_ASYNC 1 /* sync memory asynchronously */ #define MS_INVALIDATE 2 /* invalidate the caches */ diff --git a/init/Kconfig b/init/Kconfig index 54c655ce9c04..a23da9f01803 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1079,6 +1079,28 @@ config SLOB endchoice +config MMAP_ALLOW_UNINITIALIZED + bool "Allow mmapped anonymous memory to be uninitialized" + depends on EMBEDDED && !MMU + default n + help + Normally, and according to the Linux spec, anonymous memory obtained + from mmap() has it's contents cleared before it is passed to + userspace. Enabling this config option allows you to request that + mmap() skip that if it is given an MAP_UNINITIALIZED flag, thus + providing a huge performance boost. If this option is not enabled, + then the flag will be ignored. + + This is taken advantage of by uClibc's malloc(), and also by + ELF-FDPIC binfmt's brk and stack allocator. + + Because of the obvious security issues, this option should only be + enabled on embedded devices where you control what is run in + userspace. Since that isn't generally a problem on no-MMU systems, + it is normally safe to say Y here. + + See Documentation/nommu-mmap.txt for more information. + config PROFILING bool "Profiling support (EXPERIMENTAL)" help diff --git a/mm/nommu.c b/mm/nommu.c index 9876fa0c3ad3..8687973462bb 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -1143,9 +1143,6 @@ static int do_mmap_private(struct vm_area_struct *vma, if (ret < rlen) memset(base + ret, 0, rlen - ret); - } else { - /* if it's an anonymous mapping, then just clear it */ - memset(base, 0, rlen); } return 0; @@ -1343,6 +1340,11 @@ unsigned long do_mmap_pgoff(struct file *file, goto error_just_free; add_nommu_region(region); + /* clear anonymous mappings that don't ask for uninitialized data */ + if (!vma->vm_file && !(flags & MAP_UNINITIALIZED)) + memset((void *)region->vm_start, 0, + region->vm_end - region->vm_start); + /* okay... 
we have a mapping; now we have to register it */ result = vma->vm_start; -- cgit v1.2.3 From 471452104b8520337ae2fb48c4e61cd4896e025d Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Mon, 14 Dec 2009 18:00:08 -0800 Subject: const: constify remaining dev_pm_ops Signed-off-by: Alexey Dobriyan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/plat-omap/debug-leds.c | 2 +- arch/arm/plat-omap/gpio.c | 2 +- arch/s390/appldata/appldata_base.c | 2 +- drivers/block/floppy.c | 2 +- drivers/char/hvc_iucv.c | 2 +- drivers/dma/at_hdmac.c | 2 +- drivers/dma/dw_dmac.c | 2 +- drivers/dma/txx9dmac.c | 2 +- drivers/hwmon/applesmc.c | 2 +- drivers/i2c/busses/i2c-pxa.c | 2 +- drivers/i2c/busses/i2c-s3c2410.c | 2 +- drivers/i2c/busses/i2c-sh_mobile.c | 2 +- drivers/input/keyboard/adp5588-keys.c | 2 +- drivers/input/keyboard/sh_keysc.c | 2 +- drivers/input/misc/bfin_rotary.c | 2 +- drivers/input/misc/pcspkr.c | 2 +- drivers/input/touchscreen/pcap_ts.c | 2 +- drivers/media/video/davinci/vpfe_capture.c | 2 +- drivers/media/video/davinci/vpif_capture.c | 2 +- drivers/media/video/sh_mobile_ceu_camera.c | 2 +- drivers/mmc/host/pxamci.c | 2 +- drivers/mmc/host/s3cmci.c | 2 +- drivers/mtd/nand/nomadik_nand.c | 2 +- drivers/net/3c59x.c | 2 +- drivers/net/dm9000.c | 2 +- drivers/net/r8169.c | 2 +- drivers/net/smsc911x.c | 2 +- drivers/net/vmxnet3/vmxnet3_drv.c | 2 +- drivers/pci/pcie/portdrv_pci.c | 2 +- drivers/pcmcia/pxa2xx_base.c | 2 +- drivers/pcmcia/yenta_socket.c | 2 +- drivers/platform/x86/acerhdf.c | 2 +- drivers/platform/x86/eeepc-laptop.c | 2 +- drivers/platform/x86/hp-wmi.c | 2 +- drivers/power/wm97xx_battery.c | 2 +- drivers/rtc/rtc-pxa.c | 2 +- drivers/rtc/rtc-sa1100.c | 2 +- drivers/rtc/rtc-sh.c | 2 +- drivers/rtc/rtc-wm831x.c | 2 +- drivers/s390/block/dcssblk.c | 2 +- drivers/s390/block/xpram.c | 2 +- drivers/s390/char/monreader.c | 2 +- drivers/s390/char/monwriter.c | 2 +- drivers/s390/char/sclp.c | 2 +- drivers/s390/char/sclp_cmd.c | 2 +- drivers/s390/char/vmlogrdr.c | 2 +- drivers/s390/cio/ccwgroup.c | 2 +- drivers/s390/cio/css.c | 2 +- drivers/s390/cio/device.c | 2 +- drivers/s390/net/netiucv.c | 2 +- drivers/s390/net/smsgiucv.c | 2 +- drivers/serial/pxa.c | 2 +- drivers/serial/sh-sci.c | 2 +- drivers/spi/pxa2xx_spi.c | 2 +- drivers/spi/spi_s3c24xx.c | 2 +- drivers/uio/uio_pdrv_genirq.c | 2 +- drivers/usb/core/hcd-pci.c | 2 +- drivers/usb/core/hcd.h | 2 +- drivers/usb/core/usb.c | 2 +- drivers/usb/host/ehci-au1xxx.c | 2 +- drivers/usb/host/ohci-au1xxx.c | 2 +- drivers/usb/host/ohci-pxa27x.c | 2 +- drivers/usb/host/r8a66597-hcd.c | 2 +- drivers/usb/musb/musb_core.c | 2 +- drivers/video/backlight/da903x_bl.c | 2 +- drivers/video/hitfb.c | 2 +- drivers/video/pxafb.c | 2 +- drivers/video/sh_mobile_lcdcfb.c | 2 +- drivers/watchdog/adx_wdt.c | 2 +- include/linux/pm.h | 2 +- net/iucv/af_iucv.c | 2 +- net/iucv/iucv.c | 2 +- sound/arm/pxa2xx-ac97.c | 2 +- sound/soc/s3c24xx/s3c24xx_simtec.c | 2 +- sound/soc/s3c24xx/s3c24xx_simtec.h | 2 +- sound/soc/soc-core.c | 2 +- 76 files changed, 76 insertions(+), 76 deletions(-) (limited to 'include') diff --git a/arch/arm/plat-omap/debug-leds.c b/arch/arm/plat-omap/debug-leds.c index 6c768b71ad64..53fcef7c5201 100644 --- a/arch/arm/plat-omap/debug-leds.c +++ b/arch/arm/plat-omap/debug-leds.c @@ -293,7 +293,7 @@ static int fpga_resume_noirq(struct device *dev) return 0; } -static struct dev_pm_ops fpga_dev_pm_ops = { +static const struct dev_pm_ops fpga_dev_pm_ops = { .suspend_noirq = fpga_suspend_noirq, .resume_noirq = fpga_resume_noirq, }; diff 
--git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c index 055160e0620e..04846811d0aa 100644 --- a/arch/arm/plat-omap/gpio.c +++ b/arch/arm/plat-omap/gpio.c @@ -1431,7 +1431,7 @@ static int omap_mpuio_resume_noirq(struct device *dev) return 0; } -static struct dev_pm_ops omap_mpuio_dev_pm_ops = { +static const struct dev_pm_ops omap_mpuio_dev_pm_ops = { .suspend_noirq = omap_mpuio_suspend_noirq, .resume_noirq = omap_mpuio_resume_noirq, }; diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index 495589950dc7..5c91995b74e4 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c @@ -551,7 +551,7 @@ static int appldata_thaw(struct device *dev) return appldata_restore(dev); } -static struct dev_pm_ops appldata_pm_ops = { +static const struct dev_pm_ops appldata_pm_ops = { .freeze = appldata_freeze, .thaw = appldata_thaw, .restore = appldata_restore, diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 5c01f747571b..d41d7f018549 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -4162,7 +4162,7 @@ static int floppy_resume(struct device *dev) return 0; } -static struct dev_pm_ops floppy_pm_ops = { +static const struct dev_pm_ops floppy_pm_ops = { .resume = floppy_resume, .restore = floppy_resume, }; diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c index b8a5d654d3d0..fe62bd0e17b7 100644 --- a/drivers/char/hvc_iucv.c +++ b/drivers/char/hvc_iucv.c @@ -931,7 +931,7 @@ static struct hv_ops hvc_iucv_ops = { }; /* Suspend / resume device operations */ -static struct dev_pm_ops hvc_iucv_pm_ops = { +static const struct dev_pm_ops hvc_iucv_pm_ops = { .freeze = hvc_iucv_pm_freeze, .thaw = hvc_iucv_pm_restore_thaw, .restore = hvc_iucv_pm_restore_thaw, diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index c52ac9efd0bf..f15112569c1d 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -1188,7 +1188,7 @@ static int at_dma_resume_noirq(struct device *dev) return 0; } -static struct dev_pm_ops at_dma_dev_pm_ops = { +static const struct dev_pm_ops at_dma_dev_pm_ops = { .suspend_noirq = at_dma_suspend_noirq, .resume_noirq = at_dma_resume_noirq, }; diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 2eea823516a7..285bed0fe17b 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -1427,7 +1427,7 @@ static int dw_resume_noirq(struct device *dev) return 0; } -static struct dev_pm_ops dw_dev_pm_ops = { +static const struct dev_pm_ops dw_dev_pm_ops = { .suspend_noirq = dw_suspend_noirq, .resume_noirq = dw_resume_noirq, }; diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index fb6bb64e8861..3ebc61067e54 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c @@ -1313,7 +1313,7 @@ static int txx9dmac_resume_noirq(struct device *dev) } -static struct dev_pm_ops txx9dmac_dev_pm_ops = { +static const struct dev_pm_ops txx9dmac_dev_pm_ops = { .suspend_noirq = txx9dmac_suspend_noirq, .resume_noirq = txx9dmac_resume_noirq, }; diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c index 7ea6a8f66056..c1605b528e8f 100644 --- a/drivers/hwmon/applesmc.c +++ b/drivers/hwmon/applesmc.c @@ -518,7 +518,7 @@ static int applesmc_pm_restore(struct device *dev) return applesmc_pm_resume(dev); } -static struct dev_pm_ops applesmc_pm_ops = { +static const struct dev_pm_ops applesmc_pm_ops = { .resume = applesmc_pm_resume, .restore = applesmc_pm_restore, }; diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index 
049555777f67..7647a20523a0 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -1155,7 +1155,7 @@ static int i2c_pxa_resume_noirq(struct device *dev) return 0; } -static struct dev_pm_ops i2c_pxa_dev_pm_ops = { +static const struct dev_pm_ops i2c_pxa_dev_pm_ops = { .suspend_noirq = i2c_pxa_suspend_noirq, .resume_noirq = i2c_pxa_resume_noirq, }; diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 96aafb91b69a..1d8c98613fa0 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -967,7 +967,7 @@ static int s3c24xx_i2c_resume(struct device *dev) return 0; } -static struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = { +static const struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = { .suspend_noirq = s3c24xx_i2c_suspend_noirq, .resume = s3c24xx_i2c_resume, }; diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index 86a9d4e81472..ccc46418ef7f 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c @@ -647,7 +647,7 @@ static int sh_mobile_i2c_runtime_nop(struct device *dev) return 0; } -static struct dev_pm_ops sh_mobile_i2c_dev_pm_ops = { +static const struct dev_pm_ops sh_mobile_i2c_dev_pm_ops = { .runtime_suspend = sh_mobile_i2c_runtime_nop, .runtime_resume = sh_mobile_i2c_runtime_nop, }; diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c index d48c808d5928..1edb596d927b 100644 --- a/drivers/input/keyboard/adp5588-keys.c +++ b/drivers/input/keyboard/adp5588-keys.c @@ -319,7 +319,7 @@ static int adp5588_resume(struct device *dev) return 0; } -static struct dev_pm_ops adp5588_dev_pm_ops = { +static const struct dev_pm_ops adp5588_dev_pm_ops = { .suspend = adp5588_suspend, .resume = adp5588_resume, }; diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c index 076111fc72d2..8e9380bfed40 100644 --- a/drivers/input/keyboard/sh_keysc.c +++ b/drivers/input/keyboard/sh_keysc.c @@ -295,7 +295,7 @@ static int sh_keysc_resume(struct device *dev) return 0; } -static struct dev_pm_ops sh_keysc_dev_pm_ops = { +static const struct dev_pm_ops sh_keysc_dev_pm_ops = { .suspend = sh_keysc_suspend, .resume = sh_keysc_resume, }; diff --git a/drivers/input/misc/bfin_rotary.c b/drivers/input/misc/bfin_rotary.c index 690f3fafa03b..61d10177fa83 100644 --- a/drivers/input/misc/bfin_rotary.c +++ b/drivers/input/misc/bfin_rotary.c @@ -247,7 +247,7 @@ static int bfin_rotary_resume(struct device *dev) return 0; } -static struct dev_pm_ops bfin_rotary_pm_ops = { +static const struct dev_pm_ops bfin_rotary_pm_ops = { .suspend = bfin_rotary_suspend, .resume = bfin_rotary_resume, }; diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c index 21cb755a54fb..ea4e1fd12651 100644 --- a/drivers/input/misc/pcspkr.c +++ b/drivers/input/misc/pcspkr.c @@ -127,7 +127,7 @@ static void pcspkr_shutdown(struct platform_device *dev) pcspkr_event(NULL, EV_SND, SND_BELL, 0); } -static struct dev_pm_ops pcspkr_pm_ops = { +static const struct dev_pm_ops pcspkr_pm_ops = { .suspend = pcspkr_suspend, }; diff --git a/drivers/input/touchscreen/pcap_ts.c b/drivers/input/touchscreen/pcap_ts.c index 67fcd33595de..b79097e3028a 100644 --- a/drivers/input/touchscreen/pcap_ts.c +++ b/drivers/input/touchscreen/pcap_ts.c @@ -233,7 +233,7 @@ static int pcap_ts_resume(struct device *dev) return 0; } -static struct dev_pm_ops pcap_ts_pm_ops = { +static const struct dev_pm_ops pcap_ts_pm_ops = { .suspend = pcap_ts_suspend, .resume = 
pcap_ts_resume, }; diff --git a/drivers/media/video/davinci/vpfe_capture.c b/drivers/media/video/davinci/vpfe_capture.c index 12a1b3d7132d..c3916a42668e 100644 --- a/drivers/media/video/davinci/vpfe_capture.c +++ b/drivers/media/video/davinci/vpfe_capture.c @@ -2127,7 +2127,7 @@ vpfe_resume(struct device *dev) return -1; } -static struct dev_pm_ops vpfe_dev_pm_ops = { +static const struct dev_pm_ops vpfe_dev_pm_ops = { .suspend = vpfe_suspend, .resume = vpfe_resume, }; diff --git a/drivers/media/video/davinci/vpif_capture.c b/drivers/media/video/davinci/vpif_capture.c index d947ee5e4eb4..78130721f578 100644 --- a/drivers/media/video/davinci/vpif_capture.c +++ b/drivers/media/video/davinci/vpif_capture.c @@ -2107,7 +2107,7 @@ vpif_resume(struct device *dev) return -1; } -static struct dev_pm_ops vpif_dev_pm_ops = { +static const struct dev_pm_ops vpif_dev_pm_ops = { .suspend = vpif_suspend, .resume = vpif_resume, }; diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c index a4f3472d4db8..961e4484d721 100644 --- a/drivers/media/video/sh_mobile_ceu_camera.c +++ b/drivers/media/video/sh_mobile_ceu_camera.c @@ -1825,7 +1825,7 @@ static int sh_mobile_ceu_runtime_nop(struct device *dev) return 0; } -static struct dev_pm_ops sh_mobile_ceu_dev_pm_ops = { +static const struct dev_pm_ops sh_mobile_ceu_dev_pm_ops = { .runtime_suspend = sh_mobile_ceu_runtime_nop, .runtime_resume = sh_mobile_ceu_runtime_nop, }; diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index bb47ff465c04..0d783f3e79ed 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -828,7 +828,7 @@ static int pxamci_resume(struct device *dev) return ret; } -static struct dev_pm_ops pxamci_pm_ops = { +static const struct dev_pm_ops pxamci_pm_ops = { .suspend = pxamci_suspend, .resume = pxamci_resume, }; diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index 941a4d35ef8d..ec15f1bbf8b9 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c @@ -1892,7 +1892,7 @@ static int s3cmci_resume(struct device *dev) return mmc_resume_host(mmc); } -static struct dev_pm_ops s3cmci_pm = { +static const struct dev_pm_ops s3cmci_pm = { .suspend = s3cmci_suspend, .resume = s3cmci_resume, }; diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c index 7c302d55910e..66123419f65d 100644 --- a/drivers/mtd/nand/nomadik_nand.c +++ b/drivers/mtd/nand/nomadik_nand.c @@ -216,7 +216,7 @@ static int nomadik_nand_resume(struct device *dev) return 0; } -static struct dev_pm_ops nomadik_nand_pm_ops = { +static const struct dev_pm_ops nomadik_nand_pm_ops = { .suspend = nomadik_nand_suspend, .resume = nomadik_nand_resume, }; diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index 78b7167a8ce3..39db0e96815d 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c @@ -837,7 +837,7 @@ static int vortex_resume(struct device *dev) return 0; } -static struct dev_pm_ops vortex_pm_ops = { +static const struct dev_pm_ops vortex_pm_ops = { .suspend = vortex_suspend, .resume = vortex_resume, .freeze = vortex_suspend, diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c index 0cbe3c0e7c06..b37730065688 100644 --- a/drivers/net/dm9000.c +++ b/drivers/net/dm9000.c @@ -1646,7 +1646,7 @@ dm9000_drv_resume(struct device *dev) return 0; } -static struct dev_pm_ops dm9000_drv_pm_ops = { +static const struct dev_pm_ops dm9000_drv_pm_ops = { .suspend = dm9000_drv_suspend, .resume = dm9000_drv_resume, }; diff --git a/drivers/net/r8169.c 
b/drivers/net/r8169.c index acfc5a3aa490..60f96c468a24 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -4859,7 +4859,7 @@ out: return 0; } -static struct dev_pm_ops rtl8169_pm_ops = { +static const struct dev_pm_ops rtl8169_pm_ops = { .suspend = rtl8169_suspend, .resume = rtl8169_resume, .freeze = rtl8169_suspend, diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index 20d6095cf411..494cd91ea39c 100644 --- a/drivers/net/smsc911x.c +++ b/drivers/net/smsc911x.c @@ -2154,7 +2154,7 @@ static int smsc911x_resume(struct device *dev) return (to == 0) ? -EIO : 0; } -static struct dev_pm_ops smsc911x_pm_ops = { +static const struct dev_pm_ops smsc911x_pm_ops = { .suspend = smsc911x_suspend, .resume = smsc911x_resume, }; diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 1ceb9d0f8b97..9cc438282d77 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -2689,7 +2689,7 @@ vmxnet3_resume(struct device *device) return 0; } -static struct dev_pm_ops vmxnet3_pm_ops = { +static const struct dev_pm_ops vmxnet3_pm_ops = { .suspend = vmxnet3_suspend, .resume = vmxnet3_resume, }; diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index ce52ea34fee5..a49452e2aed9 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -43,7 +43,7 @@ static int pcie_portdrv_restore_config(struct pci_dev *dev) } #ifdef CONFIG_PM -static struct dev_pm_ops pcie_portdrv_pm_ops = { +static const struct dev_pm_ops pcie_portdrv_pm_ops = { .suspend = pcie_port_device_suspend, .resume = pcie_port_device_resume, .freeze = pcie_port_device_suspend, diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c index da346eb7e77e..3aabf1e37988 100644 --- a/drivers/pcmcia/pxa2xx_base.c +++ b/drivers/pcmcia/pxa2xx_base.c @@ -336,7 +336,7 @@ static int pxa2xx_drv_pcmcia_resume(struct device *dev) return pcmcia_socket_dev_resume(dev); } -static struct dev_pm_ops pxa2xx_drv_pcmcia_pm_ops = { +static const struct dev_pm_ops pxa2xx_drv_pcmcia_pm_ops = { .suspend = pxa2xx_drv_pcmcia_suspend, .resume = pxa2xx_drv_pcmcia_resume, }; diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c index fe02cfd4b5e9..e4d12acdd525 100644 --- a/drivers/pcmcia/yenta_socket.c +++ b/drivers/pcmcia/yenta_socket.c @@ -1330,7 +1330,7 @@ static int yenta_dev_resume(struct device *dev) return 0; } -static struct dev_pm_ops yenta_pm_ops = { +static const struct dev_pm_ops yenta_pm_ops = { .suspend_noirq = yenta_dev_suspend_noirq, .resume_noirq = yenta_dev_resume_noirq, .resume = yenta_dev_resume, diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c index ab64522aaa64..be27aa47e810 100644 --- a/drivers/platform/x86/acerhdf.c +++ b/drivers/platform/x86/acerhdf.c @@ -460,7 +460,7 @@ static int acerhdf_remove(struct platform_device *device) return 0; } -static struct dev_pm_ops acerhdf_pm_ops = { +static const struct dev_pm_ops acerhdf_pm_ops = { .suspend = acerhdf_suspend, .freeze = acerhdf_suspend, }; diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c index 4226e5352738..e647a856b9bf 100644 --- a/drivers/platform/x86/eeepc-laptop.c +++ b/drivers/platform/x86/eeepc-laptop.c @@ -154,7 +154,7 @@ static struct eeepc_hotk *ehotk; static int eeepc_hotk_thaw(struct device *device); static int eeepc_hotk_restore(struct device *device); -static struct dev_pm_ops eeepc_pm_ops = { +static const struct dev_pm_ops eeepc_pm_ops = { .thaw = 
eeepc_hotk_thaw, .restore = eeepc_hotk_restore, }; diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index c2842171cec6..f00a71c58e69 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -94,7 +94,7 @@ static struct rfkill *wifi_rfkill; static struct rfkill *bluetooth_rfkill; static struct rfkill *wwan_rfkill; -static struct dev_pm_ops hp_wmi_pm_ops = { +static const struct dev_pm_ops hp_wmi_pm_ops = { .resume = hp_wmi_resume_handler, .restore = hp_wmi_resume_handler, }; diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c index f2bfd296dbae..fa39e759a275 100644 --- a/drivers/power/wm97xx_battery.c +++ b/drivers/power/wm97xx_battery.c @@ -157,7 +157,7 @@ static int wm97xx_bat_resume(struct device *dev) return 0; } -static struct dev_pm_ops wm97xx_bat_pm_ops = { +static const struct dev_pm_ops wm97xx_bat_pm_ops = { .suspend = wm97xx_bat_suspend, .resume = wm97xx_bat_resume, }; diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c index 747ca194fad4..e6351b743da6 100644 --- a/drivers/rtc/rtc-pxa.c +++ b/drivers/rtc/rtc-pxa.c @@ -456,7 +456,7 @@ static int pxa_rtc_resume(struct device *dev) return 0; } -static struct dev_pm_ops pxa_rtc_pm_ops = { +static const struct dev_pm_ops pxa_rtc_pm_ops = { .suspend = pxa_rtc_suspend, .resume = pxa_rtc_resume, }; diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c index 29f98a70586e..e4a44b641702 100644 --- a/drivers/rtc/rtc-sa1100.c +++ b/drivers/rtc/rtc-sa1100.c @@ -407,7 +407,7 @@ static int sa1100_rtc_resume(struct device *dev) return 0; } -static struct dev_pm_ops sa1100_rtc_pm_ops = { +static const struct dev_pm_ops sa1100_rtc_pm_ops = { .suspend = sa1100_rtc_suspend, .resume = sa1100_rtc_resume, }; diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c index e6ed5404bca0..e95cc6f8d61e 100644 --- a/drivers/rtc/rtc-sh.c +++ b/drivers/rtc/rtc-sh.c @@ -826,7 +826,7 @@ static int sh_rtc_resume(struct device *dev) return 0; } -static struct dev_pm_ops sh_rtc_dev_pm_ops = { +static const struct dev_pm_ops sh_rtc_dev_pm_ops = { .suspend = sh_rtc_suspend, .resume = sh_rtc_resume, }; diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c index 79795cdf6ed8..000c7e481e59 100644 --- a/drivers/rtc/rtc-wm831x.c +++ b/drivers/rtc/rtc-wm831x.c @@ -485,7 +485,7 @@ static int __devexit wm831x_rtc_remove(struct platform_device *pdev) return 0; } -static struct dev_pm_ops wm831x_rtc_pm_ops = { +static const struct dev_pm_ops wm831x_rtc_pm_ops = { .suspend = wm831x_rtc_suspend, .resume = wm831x_rtc_resume, diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index f76f4bd82b9f..9b43ae94beba 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -1005,7 +1005,7 @@ static int dcssblk_thaw(struct device *dev) return 0; } -static struct dev_pm_ops dcssblk_pm_ops = { +static const struct dev_pm_ops dcssblk_pm_ops = { .freeze = dcssblk_freeze, .thaw = dcssblk_thaw, .restore = dcssblk_restore, diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index 116d1b3eeb15..118de392af63 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c @@ -407,7 +407,7 @@ static int xpram_restore(struct device *dev) return 0; } -static struct dev_pm_ops xpram_pm_ops = { +static const struct dev_pm_ops xpram_pm_ops = { .restore = xpram_restore, }; diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c index 60473f86e1f9..33e96484d54f 100644 --- a/drivers/s390/char/monreader.c +++ 
b/drivers/s390/char/monreader.c @@ -529,7 +529,7 @@ static int monreader_restore(struct device *dev) return monreader_thaw(dev); } -static struct dev_pm_ops monreader_pm_ops = { +static const struct dev_pm_ops monreader_pm_ops = { .freeze = monreader_freeze, .thaw = monreader_thaw, .restore = monreader_restore, diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c index 6532ed8b4afa..668a0579b26b 100644 --- a/drivers/s390/char/monwriter.c +++ b/drivers/s390/char/monwriter.c @@ -323,7 +323,7 @@ static int monwriter_thaw(struct device *dev) return monwriter_restore(dev); } -static struct dev_pm_ops monwriter_pm_ops = { +static const struct dev_pm_ops monwriter_pm_ops = { .freeze = monwriter_freeze, .thaw = monwriter_thaw, .restore = monwriter_restore, diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index a983f5086788..ec88c59842e3 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c @@ -1019,7 +1019,7 @@ static int sclp_restore(struct device *dev) return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE); } -static struct dev_pm_ops sclp_pm_ops = { +static const struct dev_pm_ops sclp_pm_ops = { .freeze = sclp_freeze, .thaw = sclp_thaw, .restore = sclp_restore, diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index 28b5afc129c3..b3beab610da4 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c @@ -547,7 +547,7 @@ struct read_storage_sccb { u32 entries[0]; } __packed; -static struct dev_pm_ops sclp_mem_pm_ops = { +static const struct dev_pm_ops sclp_mem_pm_ops = { .freeze = sclp_mem_freeze, }; diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index 899aa795bf38..7dfa5412d5a8 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c @@ -675,7 +675,7 @@ static int vmlogrdr_pm_prepare(struct device *dev) } -static struct dev_pm_ops vmlogrdr_pm_ops = { +static const struct dev_pm_ops vmlogrdr_pm_ops = { .prepare = vmlogrdr_pm_prepare, }; diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index a5a62f1f7747..5f97ea2ee6b1 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -560,7 +560,7 @@ static int ccwgroup_pm_restore(struct device *dev) return gdrv->restore ? gdrv->restore(gdev) : 0; } -static struct dev_pm_ops ccwgroup_pm_ops = { +static const struct dev_pm_ops ccwgroup_pm_ops = { .prepare = ccwgroup_pm_prepare, .complete = ccwgroup_pm_complete, .freeze = ccwgroup_pm_freeze, diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 92ff88ac1107..7679aee6fa14 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -1148,7 +1148,7 @@ static int css_pm_restore(struct device *dev) return drv->restore ? 
drv->restore(sch) : 0; } -static struct dev_pm_ops css_pm_ops = { +static const struct dev_pm_ops css_pm_ops = { .prepare = css_pm_prepare, .complete = css_pm_complete, .freeze = css_pm_freeze, diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 9fecfb4223a8..73901c9e260f 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -1904,7 +1904,7 @@ out_unlock: return ret; } -static struct dev_pm_ops ccw_pm_ops = { +static const struct dev_pm_ops ccw_pm_ops = { .prepare = ccw_device_pm_prepare, .complete = ccw_device_pm_complete, .freeze = ccw_device_pm_freeze, diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 98c04cac43c1..65ebee0a3266 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -159,7 +159,7 @@ static void netiucv_pm_complete(struct device *); static int netiucv_pm_freeze(struct device *); static int netiucv_pm_restore_thaw(struct device *); -static struct dev_pm_ops netiucv_pm_ops = { +static const struct dev_pm_ops netiucv_pm_ops = { .prepare = netiucv_pm_prepare, .complete = netiucv_pm_complete, .freeze = netiucv_pm_freeze, diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c index 3012355f8304..67f2485d2372 100644 --- a/drivers/s390/net/smsgiucv.c +++ b/drivers/s390/net/smsgiucv.c @@ -168,7 +168,7 @@ static int smsg_pm_restore_thaw(struct device *dev) return 0; } -static struct dev_pm_ops smsg_pm_ops = { +static const struct dev_pm_ops smsg_pm_ops = { .freeze = smsg_pm_freeze, .thaw = smsg_pm_restore_thaw, .restore = smsg_pm_restore_thaw, diff --git a/drivers/serial/pxa.c b/drivers/serial/pxa.c index 4a821046baae..56ee082157aa 100644 --- a/drivers/serial/pxa.c +++ b/drivers/serial/pxa.c @@ -756,7 +756,7 @@ static int serial_pxa_resume(struct device *dev) return 0; } -static struct dev_pm_ops serial_pxa_pm_ops = { +static const struct dev_pm_ops serial_pxa_pm_ops = { .suspend = serial_pxa_suspend, .resume = serial_pxa_resume, }; diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c index ff38dbdb5c6e..7e3f4ff58cfd 100644 --- a/drivers/serial/sh-sci.c +++ b/drivers/serial/sh-sci.c @@ -1312,7 +1312,7 @@ static int sci_resume(struct device *dev) return 0; } -static struct dev_pm_ops sci_dev_pm_ops = { +static const struct dev_pm_ops sci_dev_pm_ops = { .suspend = sci_suspend, .resume = sci_resume, }; diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c index c8c2b693ffac..c2f707e5ce74 100644 --- a/drivers/spi/pxa2xx_spi.c +++ b/drivers/spi/pxa2xx_spi.c @@ -1709,7 +1709,7 @@ static int pxa2xx_spi_resume(struct device *dev) return 0; } -static struct dev_pm_ops pxa2xx_spi_pm_ops = { +static const struct dev_pm_ops pxa2xx_spi_pm_ops = { .suspend = pxa2xx_spi_suspend, .resume = pxa2xx_spi_resume, }; diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c index 33d94f76b9ef..276591569c8b 100644 --- a/drivers/spi/spi_s3c24xx.c +++ b/drivers/spi/spi_s3c24xx.c @@ -489,7 +489,7 @@ static int s3c24xx_spi_resume(struct device *dev) return 0; } -static struct dev_pm_ops s3c24xx_spi_pmops = { +static const struct dev_pm_ops s3c24xx_spi_pmops = { .suspend = s3c24xx_spi_suspend, .resume = s3c24xx_spi_resume, }; diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c index aa53db9f2e88..1ef3b8fc50b3 100644 --- a/drivers/uio/uio_pdrv_genirq.c +++ b/drivers/uio/uio_pdrv_genirq.c @@ -210,7 +210,7 @@ static int uio_pdrv_genirq_runtime_nop(struct device *dev) return 0; } -static struct dev_pm_ops uio_pdrv_genirq_dev_pm_ops = { +static const struct 
dev_pm_ops uio_pdrv_genirq_dev_pm_ops = { .runtime_suspend = uio_pdrv_genirq_runtime_nop, .runtime_resume = uio_pdrv_genirq_runtime_nop, }; diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c index 91f2885b6ee1..2dcf906df569 100644 --- a/drivers/usb/core/hcd-pci.c +++ b/drivers/usb/core/hcd-pci.c @@ -363,7 +363,7 @@ static int hcd_pci_restore(struct device *dev) return resume_common(dev, true); } -struct dev_pm_ops usb_hcd_pci_pm_ops = { +const struct dev_pm_ops usb_hcd_pci_pm_ops = { .suspend = hcd_pci_suspend, .suspend_noirq = hcd_pci_suspend_noirq, .resume_noirq = hcd_pci_resume_noirq, diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h index d8b43aee581e..bbe2b924aae8 100644 --- a/drivers/usb/core/hcd.h +++ b/drivers/usb/core/hcd.h @@ -330,7 +330,7 @@ extern void usb_hcd_pci_remove(struct pci_dev *dev); extern void usb_hcd_pci_shutdown(struct pci_dev *dev); #ifdef CONFIG_PM_SLEEP -extern struct dev_pm_ops usb_hcd_pci_pm_ops; +extern const struct dev_pm_ops usb_hcd_pci_pm_ops; #endif #endif /* CONFIG_PCI */ diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 4e2c6df8d3cc..043fa833eeca 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c @@ -320,7 +320,7 @@ static int usb_dev_restore(struct device *dev) return usb_resume(dev, PMSG_RESTORE); } -static struct dev_pm_ops usb_device_pm_ops = { +static const struct dev_pm_ops usb_device_pm_ops = { .prepare = usb_dev_prepare, .complete = usb_dev_complete, .suspend = usb_dev_suspend, diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c index ed77be76d6bb..dbfb482a94e3 100644 --- a/drivers/usb/host/ehci-au1xxx.c +++ b/drivers/usb/host/ehci-au1xxx.c @@ -297,7 +297,7 @@ static int ehci_hcd_au1xxx_drv_resume(struct device *dev) return 0; } -static struct dev_pm_ops au1xxx_ehci_pmops = { +static const struct dev_pm_ops au1xxx_ehci_pmops = { .suspend = ehci_hcd_au1xxx_drv_suspend, .resume = ehci_hcd_au1xxx_drv_resume, }; diff --git a/drivers/usb/host/ohci-au1xxx.c b/drivers/usb/host/ohci-au1xxx.c index e4380082ebb1..17a6043c1fa0 100644 --- a/drivers/usb/host/ohci-au1xxx.c +++ b/drivers/usb/host/ohci-au1xxx.c @@ -294,7 +294,7 @@ static int ohci_hcd_au1xxx_drv_resume(struct device *dev) return 0; } -static struct dev_pm_ops au1xxx_ohci_pmops = { +static const struct dev_pm_ops au1xxx_ohci_pmops = { .suspend = ohci_hcd_au1xxx_drv_suspend, .resume = ohci_hcd_au1xxx_drv_resume, }; diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c index f1c06202fdf2..a18debdd79b8 100644 --- a/drivers/usb/host/ohci-pxa27x.c +++ b/drivers/usb/host/ohci-pxa27x.c @@ -518,7 +518,7 @@ static int ohci_hcd_pxa27x_drv_resume(struct device *dev) return 0; } -static struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = { +static const struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = { .suspend = ohci_hcd_pxa27x_drv_suspend, .resume = ohci_hcd_pxa27x_drv_resume, }; diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c index 41dbc70ae752..b7a661c02bcd 100644 --- a/drivers/usb/host/r8a66597-hcd.c +++ b/drivers/usb/host/r8a66597-hcd.c @@ -2353,7 +2353,7 @@ static int r8a66597_resume(struct device *dev) return 0; } -static struct dev_pm_ops r8a66597_dev_pm_ops = { +static const struct dev_pm_ops r8a66597_dev_pm_ops = { .suspend = r8a66597_suspend, .resume = r8a66597_resume, .poweroff = r8a66597_suspend, diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 49f2346afad3..bfe08f4975a3 100644 --- a/drivers/usb/musb/musb_core.c +++ 
b/drivers/usb/musb/musb_core.c @@ -2214,7 +2214,7 @@ static int musb_resume_noirq(struct device *dev) return 0; } -static struct dev_pm_ops musb_dev_pm_ops = { +static const struct dev_pm_ops musb_dev_pm_ops = { .suspend = musb_suspend, .resume_noirq = musb_resume_noirq, }; diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c index 7fcb0eb54c60..f2d76dae1eb3 100644 --- a/drivers/video/backlight/da903x_bl.c +++ b/drivers/video/backlight/da903x_bl.c @@ -177,7 +177,7 @@ static int da903x_backlight_resume(struct device *dev) return 0; } -static struct dev_pm_ops da903x_backlight_pm_ops = { +static const struct dev_pm_ops da903x_backlight_pm_ops = { .suspend = da903x_backlight_suspend, .resume = da903x_backlight_resume, }; diff --git a/drivers/video/hitfb.c b/drivers/video/hitfb.c index e7116a6d82d3..73c83a8de2d3 100644 --- a/drivers/video/hitfb.c +++ b/drivers/video/hitfb.c @@ -456,7 +456,7 @@ static int hitfb_resume(struct device *dev) return 0; } -static struct dev_pm_ops hitfb_dev_pm_ops = { +static const struct dev_pm_ops hitfb_dev_pm_ops = { .suspend = hitfb_suspend, .resume = hitfb_resume, }; diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c index f58a3aae6ea6..b7e58059b592 100644 --- a/drivers/video/pxafb.c +++ b/drivers/video/pxafb.c @@ -1667,7 +1667,7 @@ static int pxafb_resume(struct device *dev) return 0; } -static struct dev_pm_ops pxafb_pm_ops = { +static const struct dev_pm_ops pxafb_pm_ops = { .suspend = pxafb_suspend, .resume = pxafb_resume, }; diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index b4b5de930cf5..8a65fb6648a6 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c @@ -890,7 +890,7 @@ static int sh_mobile_lcdc_runtime_resume(struct device *dev) return 0; } -static struct dev_pm_ops sh_mobile_lcdc_dev_pm_ops = { +static const struct dev_pm_ops sh_mobile_lcdc_dev_pm_ops = { .suspend = sh_mobile_lcdc_suspend, .resume = sh_mobile_lcdc_resume, .runtime_suspend = sh_mobile_lcdc_runtime_suspend, diff --git a/drivers/watchdog/adx_wdt.c b/drivers/watchdog/adx_wdt.c index 77afb0acc500..9c6594473d3b 100644 --- a/drivers/watchdog/adx_wdt.c +++ b/drivers/watchdog/adx_wdt.c @@ -314,7 +314,7 @@ static int adx_wdt_resume(struct device *dev) return 0; } -static struct dev_pm_ops adx_wdt_pm_ops = { +static const struct dev_pm_ops adx_wdt_pm_ops = { .suspend = adx_wdt_suspend, .resume = adx_wdt_resume, }; diff --git a/include/linux/pm.h b/include/linux/pm.h index 0d65934246af..198b8f9fe05e 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -219,7 +219,7 @@ struct dev_pm_ops { * to RAM and hibernation. 
*/ #define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \ -struct dev_pm_ops name = { \ +const struct dev_pm_ops name = { \ .suspend = suspend_fn, \ .resume = resume_fn, \ .freeze = suspend_fn, \ diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 1e428863574f..c18286a2167b 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -221,7 +221,7 @@ static int afiucv_pm_restore_thaw(struct device *dev) return 0; } -static struct dev_pm_ops afiucv_pm_ops = { +static const struct dev_pm_ops afiucv_pm_ops = { .prepare = afiucv_pm_prepare, .complete = afiucv_pm_complete, .freeze = afiucv_pm_freeze, diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index 3b1f5f5f8de7..fd8b28361a64 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c @@ -93,7 +93,7 @@ static int iucv_pm_freeze(struct device *); static int iucv_pm_thaw(struct device *); static int iucv_pm_restore(struct device *); -static struct dev_pm_ops iucv_pm_ops = { +static const struct dev_pm_ops iucv_pm_ops = { .prepare = iucv_pm_prepare, .complete = iucv_pm_complete, .freeze = iucv_pm_freeze, diff --git a/sound/arm/pxa2xx-ac97.c b/sound/arm/pxa2xx-ac97.c index b4b48afb6de6..5d9411839cd7 100644 --- a/sound/arm/pxa2xx-ac97.c +++ b/sound/arm/pxa2xx-ac97.c @@ -159,7 +159,7 @@ static int pxa2xx_ac97_resume(struct device *dev) return ret; } -static struct dev_pm_ops pxa2xx_ac97_pm_ops = { +static const struct dev_pm_ops pxa2xx_ac97_pm_ops = { .suspend = pxa2xx_ac97_suspend, .resume = pxa2xx_ac97_resume, }; diff --git a/sound/soc/s3c24xx/s3c24xx_simtec.c b/sound/soc/s3c24xx/s3c24xx_simtec.c index d441c3b64631..4984754f3298 100644 --- a/sound/soc/s3c24xx/s3c24xx_simtec.c +++ b/sound/soc/s3c24xx/s3c24xx_simtec.c @@ -312,7 +312,7 @@ int simtec_audio_resume(struct device *dev) return 0; } -struct dev_pm_ops simtec_audio_pmops = { +const struct dev_pm_ops simtec_audio_pmops = { .resume = simtec_audio_resume, }; EXPORT_SYMBOL_GPL(simtec_audio_pmops); diff --git a/sound/soc/s3c24xx/s3c24xx_simtec.h b/sound/soc/s3c24xx/s3c24xx_simtec.h index 2714203af161..e18faee30cce 100644 --- a/sound/soc/s3c24xx/s3c24xx_simtec.h +++ b/sound/soc/s3c24xx/s3c24xx_simtec.h @@ -15,7 +15,7 @@ extern int simtec_audio_core_probe(struct platform_device *pdev, extern int simtec_audio_remove(struct platform_device *pdev); #ifdef CONFIG_PM -extern struct dev_pm_ops simtec_audio_pmops; +extern const struct dev_pm_ops simtec_audio_pmops; #define simtec_audio_pm &simtec_audio_pmops #else #define simtec_audio_pm NULL diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index ef8f28284cb9..0a6440c6f54a 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -1236,7 +1236,7 @@ static int soc_poweroff(struct device *dev) return 0; } -static struct dev_pm_ops soc_pm_ops = { +static const struct dev_pm_ops soc_pm_ops = { .suspend = soc_suspend, .resume = soc_resume, .poweroff = soc_poweroff, -- cgit v1.2.3 From 42f247c83aeb52d2ee7a9fe23fb57e22317f18fd Mon Sep 17 00:00:00 2001 From: Cesar Eduardo Barros Date: Mon, 14 Dec 2009 18:00:13 -0800 Subject: WARN_ONCE(): use bool for boolean flag Commit 70867453092297be9afb2249e712a1f960ec0a09 ("printk_once(): use bool for boolean flag") changed printk_once() to use bool instead of int for its guard variable. Do the same change to WARN_ONCE() and WARN_ON_ONCE(), for the same reasons. 
This resulted in a reduction of 1462 bytes on a x86-64 defconfig: text data bss dec hex filename 8101271 1207116 992764 10301151 9d2edf vmlinux.before 8100553 1207148 991988 10299689 9d2929 vmlinux.after Signed-off-by: Cesar Eduardo Barros Cc: Roland Dreier Cc: Daniel Walker Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-generic/bug.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index 4b6755984d24..18c435d7c082 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h @@ -113,22 +113,22 @@ extern void warn_slowpath_null(const char *file, const int line); #endif #define WARN_ON_ONCE(condition) ({ \ - static int __warned; \ + static bool __warned; \ int __ret_warn_once = !!(condition); \ \ if (unlikely(__ret_warn_once)) \ if (WARN_ON(!__warned)) \ - __warned = 1; \ + __warned = true; \ unlikely(__ret_warn_once); \ }) #define WARN_ONCE(condition, format...) ({ \ - static int __warned; \ + static bool __warned; \ int __ret_warn_once = !!(condition); \ \ if (unlikely(__ret_warn_once)) \ if (WARN(!__warned, format)) \ - __warned = 1; \ + __warned = true; \ unlikely(__ret_warn_once); \ }) -- cgit v1.2.3 From 00b55864bb37200d7f05143c44f5e2edfc8c4578 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Mon, 14 Dec 2009 18:00:14 -0800 Subject: dynamic_debug.h/kernel.h: Remove KBUILD_MODNAME from dynamic_pr_debug If CONFIG_DYNAMIC_DEBUG is enabled and a source file has: #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include dynamic_debug.h will duplicate KBUILD_MODNAME in the output string. Remove the use of KBUILD_MODNAME from the output format string generated by dynamic_debug.h If CONFIG_DYNAMIC_DEBUG is not enabled, no compile-time check is done to printk/dev_printk arguments. Add it. Signed-off-by: Joe Perches Cc: Jason Baron Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/dynamic_debug.h | 13 ++++++------- include/linux/kernel.h | 5 ++--- 2 files changed, 8 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h index a0d9422a1569..f8c2e1767500 100644 --- a/include/linux/dynamic_debug.h +++ b/include/linux/dynamic_debug.h @@ -57,8 +57,7 @@ extern int ddebug_remove_module(char *mod_name); { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \ DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \ if (__dynamic_dbg_enabled(descriptor)) \ - printk(KERN_DEBUG KBUILD_MODNAME ":" pr_fmt(fmt), \ - ##__VA_ARGS__); \ + printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \ } while (0) @@ -69,9 +68,7 @@ extern int ddebug_remove_module(char *mod_name); { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \ DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \ if (__dynamic_dbg_enabled(descriptor)) \ - dev_printk(KERN_DEBUG, dev, \ - KBUILD_MODNAME ": " fmt, \ - ##__VA_ARGS__); \ + dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \ } while (0) #else @@ -81,8 +78,10 @@ static inline int ddebug_remove_module(char *mod) return 0; } -#define dynamic_pr_debug(fmt, ...) do { } while (0) -#define dynamic_dev_dbg(dev, format, ...) do { } while (0) +#define dynamic_pr_debug(fmt, ...) \ + do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0) +#define dynamic_dev_dbg(dev, format, ...) 
\ + do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0) #endif #endif diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 3fa4c590cf12..a93cdd031aee 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -397,9 +397,8 @@ static inline char *pack_hex_byte(char *buf, u8 byte) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #elif defined(CONFIG_DYNAMIC_DEBUG) /* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */ -#define pr_debug(fmt, ...) do { \ - dynamic_pr_debug(fmt, ##__VA_ARGS__); \ - } while (0) +#define pr_debug(fmt, ...) \ + dynamic_pr_debug(fmt, ##__VA_ARGS__) #else #define pr_debug(fmt, ...) \ ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; }) -- cgit v1.2.3 From 0b2749aa6ca40ff3fe12ebb3fdf010ebad2e9085 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Mon, 14 Dec 2009 18:00:19 -0800 Subject: kernel.h: remove initialization of bool in printk_once Don't initialize __print_once. Invert the test to reduce initialized data. defconfig before: $size vmlinux text data bss dec hex filename 6976022 679572 1359668 9015262 898fde vmlinux defconfig after: $size vmlinux text data bss dec hex filename 6976006 679508 1359700 9015214 898fae vmlinux Signed-off-by: Joe Perches Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kernel.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index a93cdd031aee..910db75b1a72 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -251,10 +251,10 @@ extern int printk_delay_msec; * Print a one-time message (analogous to WARN_ONCE() et al): */ #define printk_once(x...) ({ \ - static bool __print_once = true; \ \ - if (__print_once) { \ - __print_once = false; \ + static bool __print_once; \ \ + if (!__print_once) { \ + __print_once = true; \ printk(x); \ } \ }) -- cgit v1.2.3 From 29671f22a8b6522db3b126a3fdfb208759ce46e3 Mon Sep 17 00:00:00 2001 From: Amerigo Wang Date: Mon, 14 Dec 2009 18:00:21 -0800 Subject: rwsem: fix rwsem_is_locked() bugs rwsem_is_locked() tests ->activity without locks, so we should always keep ->activity consistent. However, the code in __rwsem_do_wake() breaks this rule: it updates ->activity after _all_ readers have woken up, which may give some reader a wrong ->activity value and thus cause rwsem_is_locked() to behave incorrectly. Quote from Andrew: " - we have one or more processes sleeping in down_read(), waiting for access. - we wake one or more processes up without altering ->activity - they start to run and they do rwsem_is_locked(). This incorrectly returns "false", because the waker process is still crunching away in __rwsem_do_wake(). - the waker now alters ->activity, but it was too late. " So we need to take a spinlock to protect this. And rwsem_is_locked() should not block, thus we use spin_trylock_irqsave().
[akpm@linux-foundation.org: simplify code] Reported-by: Brian Behlendorf Cc: Ben Woodard Cc: David Howells Signed-off-by: WANG Cong Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/rwsem-spinlock.h | 6 +----- lib/rwsem-spinlock.c | 13 +++++++++++++ 2 files changed, 14 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h index 6c3c0f6c261f..bdfcc2527970 100644 --- a/include/linux/rwsem-spinlock.h +++ b/include/linux/rwsem-spinlock.h @@ -68,11 +68,7 @@ extern int __down_write_trylock(struct rw_semaphore *sem); extern void __up_read(struct rw_semaphore *sem); extern void __up_write(struct rw_semaphore *sem); extern void __downgrade_write(struct rw_semaphore *sem); - -static inline int rwsem_is_locked(struct rw_semaphore *sem) -{ - return (sem->activity != 0); -} +extern int rwsem_is_locked(struct rw_semaphore *sem); #endif /* __KERNEL__ */ #endif /* _LINUX_RWSEM_SPINLOCK_H */ diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c index 39a74110f55b..ccf95bff7984 100644 --- a/lib/rwsem-spinlock.c +++ b/lib/rwsem-spinlock.c @@ -17,6 +17,19 @@ struct rwsem_waiter { #define RWSEM_WAITING_FOR_WRITE 0x00000002 }; +int rwsem_is_locked(struct rw_semaphore *sem) +{ + int ret = 1; + unsigned long flags; + + if (spin_trylock_irqsave(&sem->wait_lock, flags)) { + ret = (sem->activity != 0); + spin_unlock_irqrestore(&sem->wait_lock, flags); + } + return ret; +} +EXPORT_SYMBOL(rwsem_is_locked); + /* * initialise the semaphore */ -- cgit v1.2.3 From 948c1e2521979c332b21b623414cf258150f214e Mon Sep 17 00:00:00 2001 From: Amerigo Wang Date: Mon, 14 Dec 2009 18:00:22 -0800 Subject: kallsyms: remove deprecated print_fn_descriptor_symbol() According to feature-removal-schedule.txt, it is the time to remove print_fn_descriptor_symbol(). And a quick grep shows that it no longer has any callers. Signed-off-by: WANG Cong Cc: Bjorn Helgaas Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/feature-removal-schedule.txt | 9 --------- include/linux/kallsyms.h | 12 ------------ 2 files changed, 21 deletions(-) (limited to 'include') diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index eb2c138c277c..21ab9357326d 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt @@ -291,15 +291,6 @@ Who: Michael Buesch --------------------------- -What: print_fn_descriptor_symbol() -When: October 2009 -Why: The %pF vsprintf format provides the same functionality in a - simpler way. print_fn_descriptor_symbol() is deprecated but - still present to give out-of-tree modules time to change. -Who: Bjorn Helgaas - ---------------------------- - What: /sys/o2cb symlink When: January 2010 Why: /sys/fs/o2cb is the proper location for this information - /sys/o2cb diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index 792274269f2b..d8e9b3d1c23c 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -107,18 +107,6 @@ static inline void print_symbol(const char *fmt, unsigned long addr) __builtin_extract_return_addr((void *)addr)); } -/* - * Pretty-print a function pointer. This function is deprecated. - * Please use the "%pF" vsprintf format instead. 
- */ -static inline void __deprecated print_fn_descriptor_symbol(const char *fmt, void *addr) -{ -#if defined(CONFIG_IA64) || defined(CONFIG_PPC64) - addr = *(void **)addr; -#endif - print_symbol(fmt, (unsigned long)addr); -} - static inline void print_ip_sym(unsigned long ip) { printk("[<%p>] %pS\n", (void *) ip, (void *) ip); -- cgit v1.2.3 From 8a64f336bc1d4aa203b138d29d5a9c414a9fbb47 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Mon, 14 Dec 2009 18:00:25 -0800 Subject: kernel.h: add printk_ratelimited and pr__rl Add a printk_ratelimited statement expression macro that uses a per-call ratelimit_state so that multiple subsystems output messages are not suppressed by a global __ratelimit state. [akpm@linux-foundation.org: coding-style fixes] [akpm@linux-foundation.org: s/_rl/_ratelimited/g] Signed-off-by: Joe Perches Cc: Naohiro Ooiwa Cc: Ingo Molnar Cc: Hiroshi Shimamoto Cc: Peter Zijlstra Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kernel.h | 44 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) (limited to 'include') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 910db75b1a72..4d9c916d06d9 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -404,6 +404,50 @@ static inline char *pack_hex_byte(char *buf, u8 byte) ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; }) #endif +/* + * ratelimited messages with local ratelimit_state, + * no local ratelimit_state used in the !PRINTK case + */ +#ifdef CONFIG_PRINTK +#define printk_ratelimited(fmt, ...) ({ \ + static struct ratelimit_state _rs = { \ + .interval = DEFAULT_RATELIMIT_INTERVAL, \ + .burst = DEFAULT_RATELIMIT_BURST, \ + }; \ + \ + if (!__ratelimit(&_rs)) \ + printk(fmt, ##__VA_ARGS__); \ +}) +#else +/* No effect, but we still get type checking even in the !PRINTK case: */ +#define printk_ratelimited printk +#endif + +#define pr_emerg_ratelimited(fmt, ...) \ + printk_ratelimited(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) +#define pr_alert_ratelimited(fmt, ...) \ + printk_ratelimited(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) +#define pr_crit_ratelimited(fmt, ...) \ + printk_ratelimited(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) +#define pr_err_ratelimited(fmt, ...) \ + printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) +#define pr_warning_ratelimited(fmt, ...) \ + printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) +#define pr_notice_ratelimited(fmt, ...) \ + printk_ratelimited(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) +#define pr_info_ratelimited(fmt, ...) \ + printk_ratelimited(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) +/* no pr_cont_ratelimited, don't do that... */ +/* If you are writing a driver, please use dev_dbg instead */ +#if defined(DEBUG) +#define pr_debug_ratelimited(fmt, ...) \ + printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) +#else +#define pr_debug_ratelimited(fmt, ...) \ + ({ if (0) printk_ratelimited(KERN_DEBUG pr_fmt(fmt), \ + ##__VA_ARGS__); 0; }) +#endif + /* * General tracing related utility functions - trace_printk(), * tracing_on/tracing_off and tracing_start()/tracing_stop -- cgit v1.2.3 From e4c570c4cb7a95dbfafa3d016d2739bf3fdfe319 Mon Sep 17 00:00:00 2001 From: Hiroshi Shimamoto Date: Mon, 14 Dec 2009 18:00:26 -0800 Subject: task_struct: make journal_info conditional journal_info in task_struct is used in journaling file system only. So introduce CONFIG_FS_JOURNAL_INFO and make it conditional. 
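For context, the field being made conditional here is the per-task hook that journaling filesystems use to find their running transaction; a rough sketch of the usage pattern (illustrative names only, not taken from any particular filesystem, and assuming the filesystem selects FS_JOURNAL_INFO) is:

	#include <linux/sched.h>

	struct fs_handle;			/* a filesystem's transaction handle */

	static inline void fs_set_running_handle(struct fs_handle *h)
	{
		current->journal_info = h;	/* stash the handle on the task */
	}

	static inline struct fs_handle *fs_running_handle(void)
	{
		return current->journal_info;	/* NULL when no transaction is open */
	}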
Signed-off-by: Hiroshi Shimamoto Cc: Chris Mason Cc: "Theodore Ts'o" Cc: Steven Whitehouse Cc: KONISHI Ryusuke Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/Kconfig | 4 ++++ fs/btrfs/Kconfig | 1 + fs/ext4/Kconfig | 1 + fs/gfs2/Kconfig | 1 + fs/jbd/Kconfig | 1 + fs/jbd2/Kconfig | 1 + fs/nilfs2/Kconfig | 1 + fs/reiserfs/Kconfig | 1 + include/linux/init_task.h | 8 +++++++- include/linux/sched.h | 2 ++ 10 files changed, 20 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/fs/Kconfig b/fs/Kconfig index 64d44efad7a5..f8fccaaad628 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -6,6 +6,10 @@ menu "File systems" if BLOCK +config FS_JOURNAL_INFO + bool + default n + source "fs/ext2/Kconfig" source "fs/ext3/Kconfig" source "fs/ext4/Kconfig" diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig index 7bb3c020e570..402afe0a0bfb 100644 --- a/fs/btrfs/Kconfig +++ b/fs/btrfs/Kconfig @@ -4,6 +4,7 @@ config BTRFS_FS select LIBCRC32C select ZLIB_INFLATE select ZLIB_DEFLATE + select FS_JOURNAL_INFO help Btrfs is a new filesystem with extents, writable snapshotting, support for multiple devices and many more features. diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig index 9acf7e808139..e5f6774846e4 100644 --- a/fs/ext4/Kconfig +++ b/fs/ext4/Kconfig @@ -2,6 +2,7 @@ config EXT4_FS tristate "The Extended 4 (ext4) filesystem" select JBD2 select CRC16 + select FS_JOURNAL_INFO help This is the next generation of the ext3 filesystem. diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig index 4dcddf83326f..b192c661caa6 100644 --- a/fs/gfs2/Kconfig +++ b/fs/gfs2/Kconfig @@ -10,6 +10,7 @@ config GFS2_FS select SLOW_WORK select QUOTA select QUOTACTL + select FS_JOURNAL_INFO help A cluster filesystem. diff --git a/fs/jbd/Kconfig b/fs/jbd/Kconfig index 4e28beeed157..a8408983abd4 100644 --- a/fs/jbd/Kconfig +++ b/fs/jbd/Kconfig @@ -1,5 +1,6 @@ config JBD tristate + select FS_JOURNAL_INFO help This is a generic journalling layer for block devices. It is currently used by the ext3 file system, but it could also be diff --git a/fs/jbd2/Kconfig b/fs/jbd2/Kconfig index f32f346f4b0a..0f7d1ceafdfd 100644 --- a/fs/jbd2/Kconfig +++ b/fs/jbd2/Kconfig @@ -1,6 +1,7 @@ config JBD2 tristate select CRC32 + select FS_JOURNAL_INFO help This is a generic journaling layer for block devices that support both 32-bit and 64-bit block numbers. It is currently used by diff --git a/fs/nilfs2/Kconfig b/fs/nilfs2/Kconfig index 251da07b2a1d..1225af7b2166 100644 --- a/fs/nilfs2/Kconfig +++ b/fs/nilfs2/Kconfig @@ -2,6 +2,7 @@ config NILFS2_FS tristate "NILFS2 file system support (EXPERIMENTAL)" depends on EXPERIMENTAL select CRC32 + select FS_JOURNAL_INFO help NILFS2 is a log-structured file system (LFS) supporting continuous snapshotting. In addition to versioning capability of the entire diff --git a/fs/reiserfs/Kconfig b/fs/reiserfs/Kconfig index 513f431038f9..ac7cd75c86f8 100644 --- a/fs/reiserfs/Kconfig +++ b/fs/reiserfs/Kconfig @@ -1,6 +1,7 @@ config REISERFS_FS tristate "Reiserfs support" select CRC32 + select FS_JOURNAL_INFO help Stores not just filenames but the files themselves in a balanced tree. Uses journalling. 
diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 8d10aa7fd4c9..8ed0abf06f89 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -111,6 +111,12 @@ extern struct cred init_cred; # define INIT_PERF_EVENTS(tsk) #endif +#ifdef CONFIG_FS_JOURNAL_INFO +#define INIT_JOURNAL_INFO .journal_info = NULL, +#else +#define INIT_JOURNAL_INFO +#endif + /* * INIT_TASK is used to set up the first task table, touch at * your own risk!. Base=0, limit=0x1fffff (=2MB) @@ -162,7 +168,6 @@ extern struct cred init_cred; .signal = {{0}}}, \ .blocked = {{0}}, \ .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \ - .journal_info = NULL, \ .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ .fs_excl = ATOMIC_INIT(0), \ .pi_lock = __SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ @@ -173,6 +178,7 @@ extern struct cred init_cred; [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \ }, \ .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \ + INIT_JOURNAL_INFO \ INIT_IDS \ INIT_PERF_EVENTS(tsk) \ INIT_TRACE_IRQFLAGS \ diff --git a/include/linux/sched.h b/include/linux/sched.h index 294eb2f80144..7d388494f45d 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1446,8 +1446,10 @@ struct task_struct { gfp_t lockdep_reclaim_gfp; #endif +#ifdef CONFIG_FS_JOURNAL_INFO /* journalling filesystem info */ void *journal_info; +#endif /* stacked block device info */ struct bio *bio_list, **bio_tail; -- cgit v1.2.3 From 603c4ba96be998a8dd7a6f9b23681c49acdf4b64 Mon Sep 17 00:00:00 2001 From: Phil Carmody Date: Mon, 14 Dec 2009 18:00:29 -0800 Subject: err.h: add helper function to simplify pointer error checking There are quite a few instances in the kernel of checks of pointers both against NULL and against the errno range, handling both cases identically. This additional helper function would simplify such code. [akpm@linux-foundation.org: build fix] Signed-off-by: Phil Carmody Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/err.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include') diff --git a/include/linux/err.h b/include/linux/err.h index ec87f3142bf3..1b12642636c7 100644 --- a/include/linux/err.h +++ b/include/linux/err.h @@ -34,6 +34,11 @@ static inline long IS_ERR(const void *ptr) return IS_ERR_VALUE((unsigned long)ptr); } +static inline long IS_ERR_OR_NULL(const void *ptr) +{ + return !ptr || IS_ERR_VALUE((unsigned long)ptr); +} + /** * ERR_CAST - Explicitly cast an error-valued pointer to another pointer type * @ptr: The pointer to cast. -- cgit v1.2.3 From 5f0a96b044d8edaee20f4a32ef6c393599ca55f8 Mon Sep 17 00:00:00 2001 From: Andres Salomon Date: Mon, 14 Dec 2009 18:00:32 -0800 Subject: cs5535-gpio: add AMD CS5535/CS5536 GPIO driver support This creates a CS5535/CS5536 GPIO driver which uses a gpio_chip backend (allowing GPIO users to use the generic GPIO API if desired) while also allowing architecture-specific users directly (via the cs5535_gpio_* functions). Tested on an OLPC machine. Some Leemotes also use CS5536 (with a mips cpu), which is why this is in drivers/gpio rather than arch/x86. Currently, it conflicts with older geode GPIO support; once MFGPT support is reworked to also be more generic, the older geode code will be removed. 
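To illustrate the two access paths mentioned above, a consumer could drive a pin either through the generic GPIO API or through the exported helpers; a hedged sketch (the pin number and function name are made up for the example):

	#include <linux/gpio.h>
	#include <linux/cs5535.h>

	static int example_pin_setup(void)
	{
		int err;

		/* generic gpio_chip path: request pin 7 and drive it high */
		err = gpio_request(7, "example");
		if (err)
			return err;
		err = gpio_direction_output(7, 1);
		if (err) {
			gpio_free(7);
			return err;
		}

		/* chip-specific path: poke the same registers directly */
		cs5535_gpio_set(7, GPIO_OUTPUT_ENABLE);
		cs5535_gpio_clear(7, GPIO_OUTPUT_VAL);	/* drive it low again */
		return 0;
	}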
Signed-off-by: Andres Salomon Cc: Takashi Iwai Cc: Jordan Crouse Cc: David Brownell Reviewed-by: Alessandro Zummo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/include/asm/geode.h | 28 +---- drivers/gpio/Kconfig | 10 ++ drivers/gpio/Makefile | 1 + drivers/gpio/cs5535-gpio.c | 282 +++++++++++++++++++++++++++++++++++++++++++ include/linux/cs5535.h | 58 +++++++++ 5 files changed, 352 insertions(+), 27 deletions(-) create mode 100644 drivers/gpio/cs5535-gpio.c create mode 100644 include/linux/cs5535.h (limited to 'include') diff --git a/arch/x86/include/asm/geode.h b/arch/x86/include/asm/geode.h index ad3c2ed75481..5716214d37d9 100644 --- a/arch/x86/include/asm/geode.h +++ b/arch/x86/include/asm/geode.h @@ -12,6 +12,7 @@ #include #include +#include /* Generic southbridge functions */ @@ -115,33 +116,6 @@ extern int geode_get_dev_base(unsigned int dev); #define VSA_VR_MEM_SIZE 0x0200 #define AMD_VSA_SIG 0x4132 /* signature is ascii 'VSA2' */ #define GSW_VSA_SIG 0x534d /* General Software signature */ -/* GPIO */ - -#define GPIO_OUTPUT_VAL 0x00 -#define GPIO_OUTPUT_ENABLE 0x04 -#define GPIO_OUTPUT_OPEN_DRAIN 0x08 -#define GPIO_OUTPUT_INVERT 0x0C -#define GPIO_OUTPUT_AUX1 0x10 -#define GPIO_OUTPUT_AUX2 0x14 -#define GPIO_PULL_UP 0x18 -#define GPIO_PULL_DOWN 0x1C -#define GPIO_INPUT_ENABLE 0x20 -#define GPIO_INPUT_INVERT 0x24 -#define GPIO_INPUT_FILTER 0x28 -#define GPIO_INPUT_EVENT_COUNT 0x2C -#define GPIO_READ_BACK 0x30 -#define GPIO_INPUT_AUX1 0x34 -#define GPIO_EVENTS_ENABLE 0x38 -#define GPIO_LOCK_ENABLE 0x3C -#define GPIO_POSITIVE_EDGE_EN 0x40 -#define GPIO_NEGATIVE_EDGE_EN 0x44 -#define GPIO_POSITIVE_EDGE_STS 0x48 -#define GPIO_NEGATIVE_EDGE_STS 0x4C - -#define GPIO_MAP_X 0xE0 -#define GPIO_MAP_Y 0xE4 -#define GPIO_MAP_Z 0xE8 -#define GPIO_MAP_W 0xEC static inline u32 geode_gpio(unsigned int nr) { diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 2ad0128c63c6..df4b82d1348e 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -174,6 +174,16 @@ config GPIO_ADP5520 comment "PCI GPIO expanders:" +config GPIO_CS5535 + tristate "AMD CS5535/CS5536 GPIO support" + depends on PCI && !CS5535_GPIO && !MGEODE_LX + help + The AMD CS5535 and CS5536 southbridges support 28 GPIO pins that + can be used for quite a number of things. The CS5535/6 is found on + AMD Geode and Lemote Yeeloong devices. + + If unsure, say N. + config GPIO_BT8XX tristate "BT8XX GPIO abuser" depends on PCI && VIDEO_BT848=n diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index 00a532c9a1e2..270b6d7839f5 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_GPIO_PL061) += pl061.o obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o obj-$(CONFIG_GPIO_UCB1400) += ucb1400_gpio.o obj-$(CONFIG_GPIO_XILINX) += xilinx_gpio.o +obj-$(CONFIG_GPIO_CS5535) += cs5535-gpio.o obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o obj-$(CONFIG_GPIO_WM831X) += wm831x-gpio.o diff --git a/drivers/gpio/cs5535-gpio.c b/drivers/gpio/cs5535-gpio.c new file mode 100644 index 000000000000..56138893819a --- /dev/null +++ b/drivers/gpio/cs5535-gpio.c @@ -0,0 +1,282 @@ +/* + * AMD CS5535/CS5536 GPIO driver + * Copyright (C) 2006 Advanced Micro Devices, Inc. + * Copyright (C) 2007-2009 Andres Salomon + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "cs5535-gpio" +#define GPIO_BAR 1 + +static struct cs5535_gpio_chip { + struct gpio_chip chip; + resource_size_t base; + + struct pci_dev *pdev; + spinlock_t lock; +} cs5535_gpio_chip; + +/* + * The CS5535/CS5536 GPIOs support a number of extra features not defined + * by the gpio_chip API, so these are exported. For a full list of the + * registers, see include/linux/cs5535.h. + */ + +static void __cs5535_gpio_set(struct cs5535_gpio_chip *chip, unsigned offset, + unsigned int reg) +{ + if (offset < 16) + /* low bank register */ + outl(1 << offset, chip->base + reg); + else + /* high bank register */ + outl(1 << (offset - 16), chip->base + 0x80 + reg); +} + +void cs5535_gpio_set(unsigned offset, unsigned int reg) +{ + struct cs5535_gpio_chip *chip = &cs5535_gpio_chip; + unsigned long flags; + + spin_lock_irqsave(&chip->lock, flags); + __cs5535_gpio_set(chip, offset, reg); + spin_unlock_irqrestore(&chip->lock, flags); +} +EXPORT_SYMBOL_GPL(cs5535_gpio_set); + +static void __cs5535_gpio_clear(struct cs5535_gpio_chip *chip, unsigned offset, + unsigned int reg) +{ + if (offset < 16) + /* low bank register */ + outl(1 << (offset + 16), chip->base + reg); + else + /* high bank register */ + outl(1 << offset, chip->base + 0x80 + reg); +} + +void cs5535_gpio_clear(unsigned offset, unsigned int reg) +{ + struct cs5535_gpio_chip *chip = &cs5535_gpio_chip; + unsigned long flags; + + spin_lock_irqsave(&chip->lock, flags); + __cs5535_gpio_clear(chip, offset, reg); + spin_unlock_irqrestore(&chip->lock, flags); +} +EXPORT_SYMBOL_GPL(cs5535_gpio_clear); + +int cs5535_gpio_isset(unsigned offset, unsigned int reg) +{ + struct cs5535_gpio_chip *chip = &cs5535_gpio_chip; + unsigned long flags; + long val; + + spin_lock_irqsave(&chip->lock, flags); + if (offset < 16) + /* low bank register */ + val = inl(chip->base + reg); + else { + /* high bank register */ + val = inl(chip->base + 0x80 + reg); + offset -= 16; + } + spin_unlock_irqrestore(&chip->lock, flags); + + return (val & (1 << offset)) ? 1 : 0; +} +EXPORT_SYMBOL_GPL(cs5535_gpio_isset); + +/* + * Generic gpio_chip API support. 
+ */ + +static int chip_gpio_get(struct gpio_chip *chip, unsigned offset) +{ + return cs5535_gpio_isset(offset, GPIO_OUTPUT_VAL); +} + +static void chip_gpio_set(struct gpio_chip *chip, unsigned offset, int val) +{ + if (val) + cs5535_gpio_set(offset, GPIO_OUTPUT_VAL); + else + cs5535_gpio_clear(offset, GPIO_OUTPUT_VAL); +} + +static int chip_direction_input(struct gpio_chip *c, unsigned offset) +{ + struct cs5535_gpio_chip *chip = (struct cs5535_gpio_chip *) c; + unsigned long flags; + + spin_lock_irqsave(&chip->lock, flags); + __cs5535_gpio_set(chip, offset, GPIO_INPUT_ENABLE); + spin_unlock_irqrestore(&chip->lock, flags); + + return 0; +} + +static int chip_direction_output(struct gpio_chip *c, unsigned offset, int val) +{ + struct cs5535_gpio_chip *chip = (struct cs5535_gpio_chip *) c; + unsigned long flags; + + spin_lock_irqsave(&chip->lock, flags); + + __cs5535_gpio_set(chip, offset, GPIO_OUTPUT_ENABLE); + if (val) + __cs5535_gpio_set(chip, offset, GPIO_OUTPUT_VAL); + else + __cs5535_gpio_clear(chip, offset, GPIO_OUTPUT_VAL); + + spin_unlock_irqrestore(&chip->lock, flags); + + return 0; +} + +static struct cs5535_gpio_chip cs5535_gpio_chip = { + .chip = { + .owner = THIS_MODULE, + .label = DRV_NAME, + + .base = 0, + .ngpio = 28, + + .get = chip_gpio_get, + .set = chip_gpio_set, + + .direction_input = chip_direction_input, + .direction_output = chip_direction_output, + }, +}; + +static int __init cs5535_gpio_probe(struct pci_dev *pdev, + const struct pci_device_id *pci_id) +{ + int err; + + /* There are two ways to get the GPIO base address; one is by + * fetching it from MSR_LBAR_GPIO, the other is by reading the + * PCI BAR info. The latter method is easier (especially across + * different architectures), so we'll stick with that for now. If + * it turns out to be unreliable in the face of crappy BIOSes, we + * can always go back to using MSRs.. */ + + err = pci_enable_device_io(pdev); + if (err) { + dev_err(&pdev->dev, "can't enable device IO\n"); + goto done; + } + + err = pci_request_region(pdev, GPIO_BAR, DRV_NAME); + if (err) { + dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", GPIO_BAR); + goto done; + } + + /* set up the driver-specific struct */ + cs5535_gpio_chip.base = pci_resource_start(pdev, GPIO_BAR); + cs5535_gpio_chip.pdev = pdev; + spin_lock_init(&cs5535_gpio_chip.lock); + + dev_info(&pdev->dev, "allocated PCI BAR #%d: base 0x%llx\n", GPIO_BAR, + (unsigned long long) cs5535_gpio_chip.base); + + /* finally, register with the generic GPIO API */ + err = gpiochip_add(&cs5535_gpio_chip.chip); + if (err) { + dev_err(&pdev->dev, "failed to register gpio chip\n"); + goto release_region; + } + + printk(KERN_INFO DRV_NAME ": GPIO support successfully loaded.\n"); + return 0; + +release_region: + pci_release_region(pdev, GPIO_BAR); +done: + return err; +} + +static void __exit cs5535_gpio_remove(struct pci_dev *pdev) +{ + int err; + + err = gpiochip_remove(&cs5535_gpio_chip.chip); + if (err) { + /* uhh? */ + dev_err(&pdev->dev, "unable to remove gpio_chip?\n"); + } + pci_release_region(pdev, GPIO_BAR); +} + +static struct pci_device_id cs5535_gpio_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) }, + { 0, }, +}; +MODULE_DEVICE_TABLE(pci, cs5535_gpio_pci_tbl); + +/* + * We can't use the standard PCI driver registration stuff here, since + * that allows only one driver to bind to each PCI device (and we want + * multiple drivers to be able to bind to the device). 
Instead, manually + * scan for the PCI device, request a single region, and keep track of the + * devices that we're using. + */ + +static int __init cs5535_gpio_scan_pci(void) +{ + struct pci_dev *pdev; + int err = -ENODEV; + int i; + + for (i = 0; i < ARRAY_SIZE(cs5535_gpio_pci_tbl); i++) { + pdev = pci_get_device(cs5535_gpio_pci_tbl[i].vendor, + cs5535_gpio_pci_tbl[i].device, NULL); + if (pdev) { + err = cs5535_gpio_probe(pdev, &cs5535_gpio_pci_tbl[i]); + if (err) + pci_dev_put(pdev); + + /* we only support a single CS5535/6 southbridge */ + break; + } + } + + return err; +} + +static void __exit cs5535_gpio_free_pci(void) +{ + cs5535_gpio_remove(cs5535_gpio_chip.pdev); + pci_dev_put(cs5535_gpio_chip.pdev); +} + +static int __init cs5535_gpio_init(void) +{ + return cs5535_gpio_scan_pci(); +} + +static void __exit cs5535_gpio_exit(void) +{ + cs5535_gpio_free_pci(); +} + +module_init(cs5535_gpio_init); +module_exit(cs5535_gpio_exit); + +MODULE_AUTHOR("Andres Salomon "); +MODULE_DESCRIPTION("AMD CS5535/CS5536 GPIO driver"); +MODULE_LICENSE("GPL"); diff --git a/include/linux/cs5535.h b/include/linux/cs5535.h new file mode 100644 index 000000000000..cfea689dfa34 --- /dev/null +++ b/include/linux/cs5535.h @@ -0,0 +1,58 @@ +/* + * AMD CS5535/CS5536 definitions + * Copyright (C) 2006 Advanced Micro Devices, Inc. + * Copyright (C) 2009 Andres Salomon + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + */ + +#ifndef _CS5535_H +#define _CS5535_H + +/* MSRs */ +#define MSR_LBAR_SMB 0x5140000B +#define MSR_LBAR_GPIO 0x5140000C +#define MSR_LBAR_MFGPT 0x5140000D +#define MSR_LBAR_ACPI 0x5140000E +#define MSR_LBAR_PMS 0x5140000F + +/* resource sizes */ +#define LBAR_GPIO_SIZE 0xFF +#define LBAR_MFGPT_SIZE 0x40 +#define LBAR_ACPI_SIZE 0x40 +#define LBAR_PMS_SIZE 0x80 + +/* GPIOs */ +#define GPIO_OUTPUT_VAL 0x00 +#define GPIO_OUTPUT_ENABLE 0x04 +#define GPIO_OUTPUT_OPEN_DRAIN 0x08 +#define GPIO_OUTPUT_INVERT 0x0C +#define GPIO_OUTPUT_AUX1 0x10 +#define GPIO_OUTPUT_AUX2 0x14 +#define GPIO_PULL_UP 0x18 +#define GPIO_PULL_DOWN 0x1C +#define GPIO_INPUT_ENABLE 0x20 +#define GPIO_INPUT_INVERT 0x24 +#define GPIO_INPUT_FILTER 0x28 +#define GPIO_INPUT_EVENT_COUNT 0x2C +#define GPIO_READ_BACK 0x30 +#define GPIO_INPUT_AUX1 0x34 +#define GPIO_EVENTS_ENABLE 0x38 +#define GPIO_LOCK_ENABLE 0x3C +#define GPIO_POSITIVE_EDGE_EN 0x40 +#define GPIO_NEGATIVE_EDGE_EN 0x44 +#define GPIO_POSITIVE_EDGE_STS 0x48 +#define GPIO_NEGATIVE_EDGE_STS 0x4C + +#define GPIO_MAP_X 0xE0 +#define GPIO_MAP_Y 0xE4 +#define GPIO_MAP_Z 0xE8 +#define GPIO_MAP_W 0xEC + +void cs5535_gpio_set(unsigned offset, unsigned int reg); +void cs5535_gpio_clear(unsigned offset, unsigned int reg); +int cs5535_gpio_isset(unsigned offset, unsigned int reg); + +#endif -- cgit v1.2.3 From 82dca611bb516ec5fb7d04077733d6a4b70f52d1 Mon Sep 17 00:00:00 2001 From: Andres Salomon Date: Mon, 14 Dec 2009 18:00:37 -0800 Subject: cs5535: add a generic MFGPT driver This is based on the old code on arch/x86/kernel/mfgpt_32.c, except it's not x86 specific, it's modular, and it makes use of a PCI BAR rather than a random MSR. Currently module unloading is not supported; it's uncertain whether or not it can be made work with the hardware. 
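To give an idea of how a client driver is expected to use the MFGPT API introduced below, here is a hedged sketch (the comparator value 0xffff and the error handling are illustrative only; the function and register names are the ones exported by this patch):

static int example_grab_mfgpt(void)
{
	struct cs5535_mfgpt_timer *timer;
	int irq = 0;	/* 0 = keep the IRQ already routed, or fall back to the default */

	timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
	if (!timer)
		return -ENODEV;

	if (cs5535_mfgpt_setup_irq(timer, MFGPT_CMP2, &irq)) {
		cs5535_mfgpt_free_timer(timer);
		return -EIO;
	}

	/* program comparator 2, clear the counter and start it */
	cs5535_mfgpt_write(timer, MFGPT_REG_CMP2, 0xffff);
	cs5535_mfgpt_write(timer, MFGPT_REG_COUNTER, 0);
	cs5535_mfgpt_write(timer, MFGPT_REG_SETUP,
			MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
	return irq;
}
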
[akpm@linux-foundation.org: add X86 dependency] Signed-off-by: Andres Salomon Cc: Jordan Crouse Cc: Ingo Molnar Cc: Thomas Gleixner Cc: john stultz Cc: Chris Ball Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/include/asm/geode.h | 40 ----- drivers/misc/Kconfig | 24 +++ drivers/misc/Makefile | 1 + drivers/misc/cs5535-mfgpt.c | 370 +++++++++++++++++++++++++++++++++++++++++++ include/linux/cs5535.h | 67 ++++++++ 5 files changed, 462 insertions(+), 40 deletions(-) create mode 100644 drivers/misc/cs5535-mfgpt.c (limited to 'include') diff --git a/arch/x86/include/asm/geode.h b/arch/x86/include/asm/geode.h index 5716214d37d9..547e9642642a 100644 --- a/arch/x86/include/asm/geode.h +++ b/arch/x86/include/asm/geode.h @@ -47,16 +47,6 @@ extern int geode_get_dev_base(unsigned int dev); #define MSR_DIVIL_SOFT_RESET 0x51400017 -#define MSR_PIC_YSEL_LOW 0x51400020 -#define MSR_PIC_YSEL_HIGH 0x51400021 -#define MSR_PIC_ZSEL_LOW 0x51400022 -#define MSR_PIC_ZSEL_HIGH 0x51400023 -#define MSR_PIC_IRQM_LPC 0x51400025 - -#define MSR_MFGPT_IRQ 0x51400028 -#define MSR_MFGPT_NR 0x51400029 -#define MSR_MFGPT_SETUP 0x5140002B - #define MSR_LX_SPARE_MSR 0x80000011 /* DC-specific */ #define MSR_GX_GLD_MSR_CONFIG 0xC0002001 @@ -169,36 +159,6 @@ static inline int geode_has_vsa2(void) } #endif -/* MFGPTs */ - -#define MFGPT_MAX_TIMERS 8 -#define MFGPT_TIMER_ANY (-1) - -#define MFGPT_DOMAIN_WORKING 1 -#define MFGPT_DOMAIN_STANDBY 2 -#define MFGPT_DOMAIN_ANY (MFGPT_DOMAIN_WORKING | MFGPT_DOMAIN_STANDBY) - -#define MFGPT_CMP1 0 -#define MFGPT_CMP2 1 - -#define MFGPT_EVENT_IRQ 0 -#define MFGPT_EVENT_NMI 1 -#define MFGPT_EVENT_RESET 3 - -#define MFGPT_REG_CMP1 0 -#define MFGPT_REG_CMP2 2 -#define MFGPT_REG_COUNTER 4 -#define MFGPT_REG_SETUP 6 - -#define MFGPT_SETUP_CNTEN (1 << 15) -#define MFGPT_SETUP_CMP2 (1 << 14) -#define MFGPT_SETUP_CMP1 (1 << 13) -#define MFGPT_SETUP_SETUP (1 << 12) -#define MFGPT_SETUP_STOPEN (1 << 11) -#define MFGPT_SETUP_EXTEN (1 << 10) -#define MFGPT_SETUP_REVEN (1 << 5) -#define MFGPT_SETUP_CLKSEL (1 << 4) - static inline void geode_mfgpt_write(int timer, u16 reg, u16 value) { u32 base = geode_get_dev_base(GEODE_DEV_MFGPT); diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 724d1188a322..d6d6d846f0d6 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -187,6 +187,30 @@ config SGI_XP this feature will allow for direct communication between SSIs based on a network adapter and DMA messaging. +config CS5535_MFGPT + tristate "CS5535/CS5536 Geode Multi-Function General Purpose Timer (MFGPT) support" + depends on PCI && !MGEODE_LX + depends on X86 + default n + help + This driver provides access to MFGPT functionality for other + drivers that need timers. MFGPTs are available in the CS5535 and + CS5536 companion chips that are found in AMD Geode and several + other platforms. They have a better resolution and max interval + than the generic PIT, and are suitable for use as high-res timers. + You probably don't want to enable this manually; other drivers that + make use of it should enable it. + +config CS5535_MFGPT_DEFAULT_IRQ + int + default 7 + help + MFGPTs on the CS5535 require an interrupt. The selected IRQ + can be overridden as a module option as well as by driver that + use the cs5535_mfgpt_ API; however, different architectures might + want to use a different IRQ by default. This is here for + architectures to set as necessary. 
+ config HP_ILO tristate "Channel interface driver for HP iLO/iLO2 processor" depends on PCI diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index e76b77977442..049ff2482f30 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -18,6 +18,7 @@ obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o obj-$(CONFIG_KGDB_TESTS) += kgdbts.o obj-$(CONFIG_SGI_XP) += sgi-xp/ obj-$(CONFIG_SGI_GRU) += sgi-gru/ +obj-$(CONFIG_CS5535_MFGPT) += cs5535-mfgpt.o obj-$(CONFIG_HP_ILO) += hpilo.o obj-$(CONFIG_ISL29003) += isl29003.o obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o diff --git a/drivers/misc/cs5535-mfgpt.c b/drivers/misc/cs5535-mfgpt.c new file mode 100644 index 000000000000..8110460558ff --- /dev/null +++ b/drivers/misc/cs5535-mfgpt.c @@ -0,0 +1,370 @@ +/* + * Driver for the CS5535/CS5536 Multi-Function General Purpose Timers (MFGPT) + * + * Copyright (C) 2006, Advanced Micro Devices, Inc. + * Copyright (C) 2007 Andres Salomon + * Copyright (C) 2009 Andres Salomon + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + * + * The MFGPTs are documented in AMD Geode CS5536 Companion Device Data Book. + */ + +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "cs5535-mfgpt" +#define MFGPT_BAR 2 + +static int mfgpt_reset_timers; +module_param_named(mfgptfix, mfgpt_reset_timers, int, 0644); +MODULE_PARM_DESC(mfgptfix, "Reset the MFGPT timers during init; " + "required by some broken BIOSes (ie, TinyBIOS < 0.99)."); + +struct cs5535_mfgpt_timer { + struct cs5535_mfgpt_chip *chip; + int nr; +}; + +static struct cs5535_mfgpt_chip { + DECLARE_BITMAP(avail, MFGPT_MAX_TIMERS); + resource_size_t base; + + struct pci_dev *pdev; + spinlock_t lock; + int initialized; +} cs5535_mfgpt_chip; + +int cs5535_mfgpt_toggle_event(struct cs5535_mfgpt_timer *timer, int cmp, + int event, int enable) +{ + uint32_t msr, mask, value, dummy; + int shift = (cmp == MFGPT_CMP1) ? 0 : 8; + + if (!timer) { + WARN_ON(1); + return -EIO; + } + + /* + * The register maps for these are described in sections 6.17.1.x of + * the AMD Geode CS5536 Companion Device Data Book. + */ + switch (event) { + case MFGPT_EVENT_RESET: + /* + * XXX: According to the docs, we cannot reset timers above + * 6; that is, resets for 7 and 8 will be ignored. Is this + * a problem? -dilinger + */ + msr = MSR_MFGPT_NR; + mask = 1 << (timer->nr + 24); + break; + + case MFGPT_EVENT_NMI: + msr = MSR_MFGPT_NR; + mask = 1 << (timer->nr + shift); + break; + + case MFGPT_EVENT_IRQ: + msr = MSR_MFGPT_IRQ; + mask = 1 << (timer->nr + shift); + break; + + default: + return -EIO; + } + + rdmsr(msr, value, dummy); + + if (enable) + value |= mask; + else + value &= ~mask; + + wrmsr(msr, value, dummy); + return 0; +} +EXPORT_SYMBOL_GPL(cs5535_mfgpt_toggle_event); + +int cs5535_mfgpt_set_irq(struct cs5535_mfgpt_timer *timer, int cmp, int *irq, + int enable) +{ + uint32_t zsel, lpc, dummy; + int shift; + + if (!timer) { + WARN_ON(1); + return -EIO; + } + + /* + * Unfortunately, MFGPTs come in pairs sharing their IRQ lines. If VSA + * is using the same CMP of the timer's Siamese twin, the IRQ is set to + * 2, and we mustn't use nor change it. + * XXX: Likewise, 2 Linux drivers might clash if the 2nd overwrites the + * IRQ of the 1st. This can only happen if forcing an IRQ, calling this + * with *irq==0 is safe. Currently there _are_ no 2 drivers. 
+ */ + rdmsr(MSR_PIC_ZSEL_LOW, zsel, dummy); + shift = ((cmp == MFGPT_CMP1 ? 0 : 4) + timer->nr % 4) * 4; + if (((zsel >> shift) & 0xF) == 2) + return -EIO; + + /* Choose IRQ: if none supplied, keep IRQ already set or use default */ + if (!*irq) + *irq = (zsel >> shift) & 0xF; + if (!*irq) + *irq = CONFIG_CS5535_MFGPT_DEFAULT_IRQ; + + /* Can't use IRQ if it's 0 (=disabled), 2, or routed to LPC */ + if (*irq < 1 || *irq == 2 || *irq > 15) + return -EIO; + rdmsr(MSR_PIC_IRQM_LPC, lpc, dummy); + if (lpc & (1 << *irq)) + return -EIO; + + /* All chosen and checked - go for it */ + if (cs5535_mfgpt_toggle_event(timer, cmp, MFGPT_EVENT_IRQ, enable)) + return -EIO; + if (enable) { + zsel = (zsel & ~(0xF << shift)) | (*irq << shift); + wrmsr(MSR_PIC_ZSEL_LOW, zsel, dummy); + } + + return 0; +} +EXPORT_SYMBOL_GPL(cs5535_mfgpt_set_irq); + +struct cs5535_mfgpt_timer *cs5535_mfgpt_alloc_timer(int timer_nr, int domain) +{ + struct cs5535_mfgpt_chip *mfgpt = &cs5535_mfgpt_chip; + struct cs5535_mfgpt_timer *timer = NULL; + unsigned long flags; + int max; + + if (!mfgpt->initialized) + goto done; + + /* only allocate timers from the working domain if requested */ + if (domain == MFGPT_DOMAIN_WORKING) + max = 6; + else + max = MFGPT_MAX_TIMERS; + + if (timer_nr >= max) { + /* programmer error. silly programmers! */ + WARN_ON(1); + goto done; + } + + spin_lock_irqsave(&mfgpt->lock, flags); + if (timer_nr < 0) { + unsigned long t; + + /* try to find any available timer */ + t = find_first_bit(mfgpt->avail, max); + /* set timer_nr to -1 if no timers available */ + timer_nr = t < max ? (int) t : -1; + } else { + /* check if the requested timer's available */ + if (test_bit(timer_nr, mfgpt->avail)) + timer_nr = -1; + } + + if (timer_nr >= 0) + /* if timer_nr is not -1, it's an available timer */ + __clear_bit(timer_nr, mfgpt->avail); + spin_unlock_irqrestore(&mfgpt->lock, flags); + + if (timer_nr < 0) + goto done; + + timer = kmalloc(sizeof(*timer), GFP_KERNEL); + if (!timer) { + /* aw hell */ + spin_lock_irqsave(&mfgpt->lock, flags); + __set_bit(timer_nr, mfgpt->avail); + spin_unlock_irqrestore(&mfgpt->lock, flags); + goto done; + } + timer->chip = mfgpt; + timer->nr = timer_nr; + dev_info(&mfgpt->pdev->dev, "registered timer %d\n", timer_nr); + +done: + return timer; +} +EXPORT_SYMBOL_GPL(cs5535_mfgpt_alloc_timer); + +/* + * XXX: This frees the timer memory, but never resets the actual hardware + * timer. The old geode_mfgpt code did this; it would be good to figure + * out a way to actually release the hardware timer. See comments below. + */ +void cs5535_mfgpt_free_timer(struct cs5535_mfgpt_timer *timer) +{ + kfree(timer); +} +EXPORT_SYMBOL_GPL(cs5535_mfgpt_free_timer); + +uint16_t cs5535_mfgpt_read(struct cs5535_mfgpt_timer *timer, uint16_t reg) +{ + return inw(timer->chip->base + reg + (timer->nr * 8)); +} +EXPORT_SYMBOL_GPL(cs5535_mfgpt_read); + +void cs5535_mfgpt_write(struct cs5535_mfgpt_timer *timer, uint16_t reg, + uint16_t value) +{ + outw(value, timer->chip->base + reg + (timer->nr * 8)); +} +EXPORT_SYMBOL_GPL(cs5535_mfgpt_write); + +/* + * This is a sledgehammer that resets all MFGPT timers. This is required by + * some broken BIOSes which leave the system in an unstable state + * (TinyBIOS 0.98, for example; fixed in 0.99). It's uncertain as to + * whether or not this secret MSR can be used to release individual timers. + * Jordan tells me that he and Mitch once played w/ it, but it's unclear + * what the results of that were (and they experienced some instability). 
+ */ +static void __init reset_all_timers(void) +{ + uint32_t val, dummy; + + /* The following undocumented bit resets the MFGPT timers */ + val = 0xFF; dummy = 0; + wrmsr(MSR_MFGPT_SETUP, val, dummy); +} + +/* + * Check whether any MFGPTs are available for the kernel to use. In most + * cases, firmware that uses AMD's VSA code will claim all timers during + * bootup; we certainly don't want to take them if they're already in use. + * In other cases (such as with VSAless OpenFirmware), the system firmware + * leaves timers available for us to use. + */ +static int __init scan_timers(struct cs5535_mfgpt_chip *mfgpt) +{ + struct cs5535_mfgpt_timer timer = { .chip = mfgpt }; + unsigned long flags; + int timers = 0; + uint16_t val; + int i; + + /* bios workaround */ + if (mfgpt_reset_timers) + reset_all_timers(); + + /* just to be safe, protect this section w/ lock */ + spin_lock_irqsave(&mfgpt->lock, flags); + for (i = 0; i < MFGPT_MAX_TIMERS; i++) { + timer.nr = i; + val = cs5535_mfgpt_read(&timer, MFGPT_REG_SETUP); + if (!(val & MFGPT_SETUP_SETUP)) { + __set_bit(i, mfgpt->avail); + timers++; + } + } + spin_unlock_irqrestore(&mfgpt->lock, flags); + + return timers; +} + +static int __init cs5535_mfgpt_probe(struct pci_dev *pdev, + const struct pci_device_id *pci_id) +{ + int err, t; + + /* There are two ways to get the MFGPT base address; one is by + * fetching it from MSR_LBAR_MFGPT, the other is by reading the + * PCI BAR info. The latter method is easier (especially across + * different architectures), so we'll stick with that for now. If + * it turns out to be unreliable in the face of crappy BIOSes, we + * can always go back to using MSRs.. */ + + err = pci_enable_device_io(pdev); + if (err) { + dev_err(&pdev->dev, "can't enable device IO\n"); + goto done; + } + + err = pci_request_region(pdev, MFGPT_BAR, DRV_NAME); + if (err) { + dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", MFGPT_BAR); + goto done; + } + + /* set up the driver-specific struct */ + cs5535_mfgpt_chip.base = pci_resource_start(pdev, MFGPT_BAR); + cs5535_mfgpt_chip.pdev = pdev; + spin_lock_init(&cs5535_mfgpt_chip.lock); + + dev_info(&pdev->dev, "allocated PCI BAR #%d: base 0x%llx\n", MFGPT_BAR, + (unsigned long long) cs5535_mfgpt_chip.base); + + /* detect the available timers */ + t = scan_timers(&cs5535_mfgpt_chip); + dev_info(&pdev->dev, DRV_NAME ": %d MFGPT timers available\n", t); + cs5535_mfgpt_chip.initialized = 1; + return 0; + +done: + return err; +} + +static struct pci_device_id cs5535_mfgpt_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) }, + { 0, }, +}; +MODULE_DEVICE_TABLE(pci, cs5535_mfgpt_pci_tbl); + +/* + * Just like with the cs5535-gpio driver, we can't use the standard PCI driver + * registration stuff. It only allows only one driver to bind to each PCI + * device, and we want the GPIO and MFGPT drivers to be able to share a PCI + * device. Instead, we manually scan for the PCI device, request a single + * region, and keep track of the devices that we're using. 
+ */ + +static int __init cs5535_mfgpt_scan_pci(void) +{ + struct pci_dev *pdev; + int err = -ENODEV; + int i; + + for (i = 0; i < ARRAY_SIZE(cs5535_mfgpt_pci_tbl); i++) { + pdev = pci_get_device(cs5535_mfgpt_pci_tbl[i].vendor, + cs5535_mfgpt_pci_tbl[i].device, NULL); + if (pdev) { + err = cs5535_mfgpt_probe(pdev, + &cs5535_mfgpt_pci_tbl[i]); + if (err) + pci_dev_put(pdev); + + /* we only support a single CS5535/6 southbridge */ + break; + } + } + + return err; +} + +static int __init cs5535_mfgpt_init(void) +{ + return cs5535_mfgpt_scan_pci(); +} + +module_init(cs5535_mfgpt_init); + +MODULE_AUTHOR("Andres Salomon "); +MODULE_DESCRIPTION("CS5535/CS5536 MFGPT timer driver"); +MODULE_LICENSE("GPL"); diff --git a/include/linux/cs5535.h b/include/linux/cs5535.h index cfea689dfa34..30ecb04fa570 100644 --- a/include/linux/cs5535.h +++ b/include/linux/cs5535.h @@ -18,6 +18,16 @@ #define MSR_LBAR_ACPI 0x5140000E #define MSR_LBAR_PMS 0x5140000F +#define MSR_PIC_YSEL_LOW 0x51400020 +#define MSR_PIC_YSEL_HIGH 0x51400021 +#define MSR_PIC_ZSEL_LOW 0x51400022 +#define MSR_PIC_ZSEL_HIGH 0x51400023 +#define MSR_PIC_IRQM_LPC 0x51400025 + +#define MSR_MFGPT_IRQ 0x51400028 +#define MSR_MFGPT_NR 0x51400029 +#define MSR_MFGPT_SETUP 0x5140002B + /* resource sizes */ #define LBAR_GPIO_SIZE 0xFF #define LBAR_MFGPT_SIZE 0x40 @@ -55,4 +65,61 @@ void cs5535_gpio_set(unsigned offset, unsigned int reg); void cs5535_gpio_clear(unsigned offset, unsigned int reg); int cs5535_gpio_isset(unsigned offset, unsigned int reg); +/* MFGPTs */ + +#define MFGPT_MAX_TIMERS 8 +#define MFGPT_TIMER_ANY (-1) + +#define MFGPT_DOMAIN_WORKING 1 +#define MFGPT_DOMAIN_STANDBY 2 +#define MFGPT_DOMAIN_ANY (MFGPT_DOMAIN_WORKING | MFGPT_DOMAIN_STANDBY) + +#define MFGPT_CMP1 0 +#define MFGPT_CMP2 1 + +#define MFGPT_EVENT_IRQ 0 +#define MFGPT_EVENT_NMI 1 +#define MFGPT_EVENT_RESET 3 + +#define MFGPT_REG_CMP1 0 +#define MFGPT_REG_CMP2 2 +#define MFGPT_REG_COUNTER 4 +#define MFGPT_REG_SETUP 6 + +#define MFGPT_SETUP_CNTEN (1 << 15) +#define MFGPT_SETUP_CMP2 (1 << 14) +#define MFGPT_SETUP_CMP1 (1 << 13) +#define MFGPT_SETUP_SETUP (1 << 12) +#define MFGPT_SETUP_STOPEN (1 << 11) +#define MFGPT_SETUP_EXTEN (1 << 10) +#define MFGPT_SETUP_REVEN (1 << 5) +#define MFGPT_SETUP_CLKSEL (1 << 4) + +struct cs5535_mfgpt_timer; + +extern uint16_t cs5535_mfgpt_read(struct cs5535_mfgpt_timer *timer, + uint16_t reg); +extern void cs5535_mfgpt_write(struct cs5535_mfgpt_timer *timer, uint16_t reg, + uint16_t value); + +extern int cs5535_mfgpt_toggle_event(struct cs5535_mfgpt_timer *timer, int cmp, + int event, int enable); +extern int cs5535_mfgpt_set_irq(struct cs5535_mfgpt_timer *timer, int cmp, + int *irq, int enable); +extern struct cs5535_mfgpt_timer *cs5535_mfgpt_alloc_timer(int timer, + int domain); +extern void cs5535_mfgpt_free_timer(struct cs5535_mfgpt_timer *timer); + +static inline int cs5535_mfgpt_setup_irq(struct cs5535_mfgpt_timer *timer, + int cmp, int *irq) +{ + return cs5535_mfgpt_set_irq(timer, cmp, irq, 1); +} + +static inline int cs5535_mfgpt_release_irq(struct cs5535_mfgpt_timer *timer, + int cmp, int *irq) +{ + return cs5535_mfgpt_set_irq(timer, cmp, irq, 0); +} + #endif -- cgit v1.2.3 From 2e8c12436f540d3c40137ebf10268803dc972f6a Mon Sep 17 00:00:00 2001 From: Andres Salomon Date: Mon, 14 Dec 2009 18:00:39 -0800 Subject: cs5535: move the DIVIL MSR definition into linux/cs5535.h The only thing that uses this is the reboot_fixups code. 
Signed-off-by: Andres Salomon Cc: Jordan Crouse Cc: Ingo Molnar Cc: Thomas Gleixner Cc: john stultz Cc: Chris Ball Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/include/asm/geode.h | 2 -- arch/x86/kernel/reboot_fixups_32.c | 2 +- include/linux/cs5535.h | 2 ++ 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/arch/x86/include/asm/geode.h b/arch/x86/include/asm/geode.h index 547e9642642a..976b3f11c009 100644 --- a/arch/x86/include/asm/geode.h +++ b/arch/x86/include/asm/geode.h @@ -45,8 +45,6 @@ extern int geode_get_dev_base(unsigned int dev); #define MSR_LBAR_ACPI 0x5140000E #define MSR_LBAR_PMS 0x5140000F -#define MSR_DIVIL_SOFT_RESET 0x51400017 - #define MSR_LX_SPARE_MSR 0x80000011 /* DC-specific */ #define MSR_GX_GLD_MSR_CONFIG 0xC0002001 diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c index 201eab63b05f..fda313ebbb03 100644 --- a/arch/x86/kernel/reboot_fixups_32.c +++ b/arch/x86/kernel/reboot_fixups_32.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include static void cs5530a_warm_reset(struct pci_dev *dev) { diff --git a/include/linux/cs5535.h b/include/linux/cs5535.h index 30ecb04fa570..39e93e8ed95d 100644 --- a/include/linux/cs5535.h +++ b/include/linux/cs5535.h @@ -18,6 +18,8 @@ #define MSR_LBAR_ACPI 0x5140000E #define MSR_LBAR_PMS 0x5140000F +#define MSR_DIVIL_SOFT_RESET 0x51400017 + #define MSR_PIC_YSEL_LOW 0x51400020 #define MSR_PIC_YSEL_HIGH 0x51400021 #define MSR_PIC_ZSEL_LOW 0x51400022 -- cgit v1.2.3 From f060f27007b393bac6e50ee6fc26d8505acf6fe4 Mon Sep 17 00:00:00 2001 From: Andres Salomon Date: Mon, 14 Dec 2009 18:00:40 -0800 Subject: cs5535: move VSA2 checks into linux/cs5535.h Signed-off-by: Andres Salomon Cc: Jordan Crouse Cc: Ingo Molnar Cc: Thomas Gleixner Cc: john stultz Cc: Chris Ball Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/include/asm/geode.h | 19 ------------------- arch/x86/kernel/geode_32.c | 22 ---------------------- arch/x86/kernel/olpc.c | 4 ++-- drivers/video/geode/display_gx.c | 2 +- drivers/video/geode/lxfb_ops.c | 2 +- include/linux/cs5535.h | 32 ++++++++++++++++++++++++++++++++ 6 files changed, 36 insertions(+), 45 deletions(-) (limited to 'include') diff --git a/arch/x86/include/asm/geode.h b/arch/x86/include/asm/geode.h index 976b3f11c009..df1eaf87426a 100644 --- a/arch/x86/include/asm/geode.h +++ b/arch/x86/include/asm/geode.h @@ -95,16 +95,6 @@ extern int geode_get_dev_base(unsigned int dev); #define PM_AWKD 0x50 #define PM_SSC 0x54 -/* VSA2 magic values */ - -#define VSA_VRC_INDEX 0xAC1C -#define VSA_VRC_DATA 0xAC1E -#define VSA_VR_UNLOCK 0xFC53 /* unlock virtual register */ -#define VSA_VR_SIGNATURE 0x0003 -#define VSA_VR_MEM_SIZE 0x0200 -#define AMD_VSA_SIG 0x4132 /* signature is ascii 'VSA2' */ -#define GSW_VSA_SIG 0x534d /* General Software signature */ - static inline u32 geode_gpio(unsigned int nr) { BUG_ON(nr > 28); @@ -148,15 +138,6 @@ static inline int is_geode(void) return (is_geode_gx() || is_geode_lx()); } -#ifdef CONFIG_MGEODE_LX -extern int geode_has_vsa2(void); -#else -static inline int geode_has_vsa2(void) -{ - return 0; -} -#endif - static inline void geode_mfgpt_write(int timer, u16 reg, u16 value) { u32 base = geode_get_dev_base(GEODE_DEV_MFGPT); diff --git a/arch/x86/kernel/geode_32.c b/arch/x86/kernel/geode_32.c index 9b08e852fd1a..9dad6ca6cd70 100644 --- a/arch/x86/kernel/geode_32.c +++ b/arch/x86/kernel/geode_32.c @@ -161,28 +161,6 @@ void geode_gpio_setup_event(unsigned int gpio, int pair, 
int pme) } EXPORT_SYMBOL_GPL(geode_gpio_setup_event); -int geode_has_vsa2(void) -{ - static int has_vsa2 = -1; - - if (has_vsa2 == -1) { - u16 val; - - /* - * The VSA has virtual registers that we can query for a - * signature. - */ - outw(VSA_VR_UNLOCK, VSA_VRC_INDEX); - outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX); - - val = inw(VSA_VRC_DATA); - has_vsa2 = (val == AMD_VSA_SIG || val == GSW_VSA_SIG); - } - - return has_vsa2; -} -EXPORT_SYMBOL_GPL(geode_has_vsa2); - static int __init geode_southbridge_init(void) { if (!is_geode()) diff --git a/arch/x86/kernel/olpc.c b/arch/x86/kernel/olpc.c index 4006c522adc7..9d1d263f786f 100644 --- a/arch/x86/kernel/olpc.c +++ b/arch/x86/kernel/olpc.c @@ -212,7 +212,7 @@ static int __init olpc_init(void) unsigned char *romsig; /* The ioremap check is dangerous; limit what we run it on */ - if (!is_geode() || geode_has_vsa2()) + if (!is_geode() || cs5535_has_vsa2()) return 0; spin_lock_init(&ec_lock); @@ -244,7 +244,7 @@ static int __init olpc_init(void) (unsigned char *) &olpc_platform_info.ecver, 1); /* check to see if the VSA exists */ - if (geode_has_vsa2()) + if (cs5535_has_vsa2()) olpc_platform_info.flags |= OLPC_F_VSA; printk(KERN_INFO "OLPC board revision %s%X (EC=%x)\n", diff --git a/drivers/video/geode/display_gx.c b/drivers/video/geode/display_gx.c index e759895bf3d3..3ddf055e302e 100644 --- a/drivers/video/geode/display_gx.c +++ b/drivers/video/geode/display_gx.c @@ -25,7 +25,7 @@ unsigned int gx_frame_buffer_size(void) { unsigned int val; - if (!geode_has_vsa2()) { + if (!cs5535_has_vsa2()) { uint32_t hi, lo; /* The number of pages is (PMAX - PMIN)+1 */ diff --git a/drivers/video/geode/lxfb_ops.c b/drivers/video/geode/lxfb_ops.c index b1cd49c99356..fe1ee7cbccda 100644 --- a/drivers/video/geode/lxfb_ops.c +++ b/drivers/video/geode/lxfb_ops.c @@ -307,7 +307,7 @@ unsigned int lx_framebuffer_size(void) { unsigned int val; - if (!geode_has_vsa2()) { + if (!cs5535_has_vsa2()) { uint32_t hi, lo; /* The number of pages is (PMAX - PMIN)+1 */ diff --git a/include/linux/cs5535.h b/include/linux/cs5535.h index 39e93e8ed95d..eb34108a608b 100644 --- a/include/linux/cs5535.h +++ b/include/linux/cs5535.h @@ -36,6 +36,38 @@ #define LBAR_ACPI_SIZE 0x40 #define LBAR_PMS_SIZE 0x80 +/* VSA2 magic values */ +#define VSA_VRC_INDEX 0xAC1C +#define VSA_VRC_DATA 0xAC1E +#define VSA_VR_UNLOCK 0xFC53 /* unlock virtual register */ +#define VSA_VR_SIGNATURE 0x0003 +#define VSA_VR_MEM_SIZE 0x0200 +#define AMD_VSA_SIG 0x4132 /* signature is ascii 'VSA2' */ +#define GSW_VSA_SIG 0x534d /* General Software signature */ + +#include + +static inline int cs5535_has_vsa2(void) +{ + static int has_vsa2 = -1; + + if (has_vsa2 == -1) { + uint16_t val; + + /* + * The VSA has virtual registers that we can query for a + * signature. + */ + outw(VSA_VR_UNLOCK, VSA_VRC_INDEX); + outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX); + + val = inw(VSA_VRC_DATA); + has_vsa2 = (val == AMD_VSA_SIG || val == GSW_VSA_SIG); + } + + return has_vsa2; +} + /* GPIOs */ #define GPIO_OUTPUT_VAL 0x00 #define GPIO_OUTPUT_ENABLE 0x04 -- cgit v1.2.3 From f3a57a60d3e107d17aebb9e52b61c503e5bc14f9 Mon Sep 17 00:00:00 2001 From: Andres Salomon Date: Mon, 14 Dec 2009 18:00:40 -0800 Subject: cs5535: define lxfb/gxfb MSRs in linux/cs5535.h ..and include them in the lxfb/gxfb drivers rather than asm/geode.h (where possible). 
Signed-off-by: Andres Salomon Cc: Jordan Crouse Cc: Ingo Molnar Cc: Thomas Gleixner Cc: john stultz Cc: Chris Ball Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/include/asm/geode.h | 13 ------------- drivers/video/geode/display_gx.c | 2 +- drivers/video/geode/gxfb.h | 2 +- drivers/video/geode/gxfb_core.c | 2 +- drivers/video/geode/lxfb.h | 2 +- drivers/video/geode/lxfb_ops.c | 2 +- drivers/video/geode/suspend_gx.c | 2 +- drivers/video/geode/video_gx.c | 2 +- include/linux/cs5535.h | 13 +++++++++++++ 9 files changed, 20 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/arch/x86/include/asm/geode.h b/arch/x86/include/asm/geode.h index df1eaf87426a..ae104da6ad5a 100644 --- a/arch/x86/include/asm/geode.h +++ b/arch/x86/include/asm/geode.h @@ -31,25 +31,12 @@ extern int geode_get_dev_base(unsigned int dev); /* MSRS */ -#define MSR_GLIU_P2D_RO0 0x10000029 - -#define MSR_LX_GLD_MSR_CONFIG 0x48002001 -#define MSR_LX_MSR_PADSEL 0x48002011 /* NOT 0x48000011; the data - * sheet has the wrong value */ -#define MSR_GLCP_SYS_RSTPLL 0x4C000014 -#define MSR_GLCP_DOTPLL 0x4C000015 - #define MSR_LBAR_SMB 0x5140000B #define MSR_LBAR_GPIO 0x5140000C #define MSR_LBAR_MFGPT 0x5140000D #define MSR_LBAR_ACPI 0x5140000E #define MSR_LBAR_PMS 0x5140000F -#define MSR_LX_SPARE_MSR 0x80000011 /* DC-specific */ - -#define MSR_GX_GLD_MSR_CONFIG 0xC0002001 -#define MSR_GX_MSR_PADSEL 0xC0002011 - /* Resource Sizes */ #define LBAR_GPIO_SIZE 0xFF diff --git a/drivers/video/geode/display_gx.c b/drivers/video/geode/display_gx.c index 3ddf055e302e..f0af911a096d 100644 --- a/drivers/video/geode/display_gx.c +++ b/drivers/video/geode/display_gx.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include "gxfb.h" diff --git a/drivers/video/geode/gxfb.h b/drivers/video/geode/gxfb.h index 16a96f8fd8c5..d19e9378b0c0 100644 --- a/drivers/video/geode/gxfb.h +++ b/drivers/video/geode/gxfb.h @@ -340,7 +340,7 @@ static inline void write_fp(struct gxfb_par *par, int reg, uint32_t val) } -/* MSRs are defined in asm/geode.h; their bitfields are here */ +/* MSRs are defined in linux/cs5535.h; their bitfields are here */ #define MSR_GLCP_SYS_RSTPLL_DOTPOSTDIV3 (1 << 3) #define MSR_GLCP_SYS_RSTPLL_DOTPREMULT2 (1 << 2) diff --git a/drivers/video/geode/gxfb_core.c b/drivers/video/geode/gxfb_core.c index 2552cac39e1c..b3e639d1e12c 100644 --- a/drivers/video/geode/gxfb_core.c +++ b/drivers/video/geode/gxfb_core.c @@ -32,7 +32,7 @@ #include #include #include -#include +#include #include "gxfb.h" diff --git a/drivers/video/geode/lxfb.h b/drivers/video/geode/lxfb.h index 6a51448fd3f7..fc68a8b0a144 100644 --- a/drivers/video/geode/lxfb.h +++ b/drivers/video/geode/lxfb.h @@ -409,7 +409,7 @@ static inline void write_fp(struct lxfb_par *par, int reg, uint32_t val) } -/* MSRs are defined in asm/geode.h; their bitfields are here */ +/* MSRs are defined in linux/cs5535.h; their bitfields are here */ #define MSR_GLCP_DOTPLL_LOCK (1 << 25) /* r/o */ #define MSR_GLCP_DOTPLL_HALFPIX (1 << 24) diff --git a/drivers/video/geode/lxfb_ops.c b/drivers/video/geode/lxfb_ops.c index fe1ee7cbccda..0e5d8c7c3eba 100644 --- a/drivers/video/geode/lxfb_ops.c +++ b/drivers/video/geode/lxfb_ops.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include "lxfb.h" diff --git a/drivers/video/geode/suspend_gx.c b/drivers/video/geode/suspend_gx.c index 9aff32ef8bb6..1bb043d70c64 100644 --- a/drivers/video/geode/suspend_gx.c +++ b/drivers/video/geode/suspend_gx.c @@ -10,7 +10,7 @@ #include #include #include 
-#include +#include #include #include "gxfb.h" diff --git a/drivers/video/geode/video_gx.c b/drivers/video/geode/video_gx.c index b8d52a8360db..6082f653c68a 100644 --- a/drivers/video/geode/video_gx.c +++ b/drivers/video/geode/video_gx.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include "gxfb.h" diff --git a/include/linux/cs5535.h b/include/linux/cs5535.h index eb34108a608b..d5a1d4810b80 100644 --- a/include/linux/cs5535.h +++ b/include/linux/cs5535.h @@ -12,6 +12,14 @@ #define _CS5535_H /* MSRs */ +#define MSR_GLIU_P2D_RO0 0x10000029 + +#define MSR_LX_GLD_MSR_CONFIG 0x48002001 +#define MSR_LX_MSR_PADSEL 0x48002011 /* NOT 0x48000011; the data + * sheet has the wrong value */ +#define MSR_GLCP_SYS_RSTPLL 0x4C000014 +#define MSR_GLCP_DOTPLL 0x4C000015 + #define MSR_LBAR_SMB 0x5140000B #define MSR_LBAR_GPIO 0x5140000C #define MSR_LBAR_MFGPT 0x5140000D @@ -30,6 +38,11 @@ #define MSR_MFGPT_NR 0x51400029 #define MSR_MFGPT_SETUP 0x5140002B +#define MSR_LX_SPARE_MSR 0x80000011 /* DC-specific */ + +#define MSR_GX_GLD_MSR_CONFIG 0xC0002001 +#define MSR_GX_MSR_PADSEL 0xC0002011 + /* resource sizes */ #define LBAR_GPIO_SIZE 0xFF #define LBAR_MFGPT_SIZE 0x40 -- cgit v1.2.3 From 5ada918b82399eef3afd6a71e3637697d6bd719f Mon Sep 17 00:00:00 2001 From: Bernhard Walle Date: Mon, 14 Dec 2009 18:00:43 -0800 Subject: vt: introduce and use vt_kmsg_redirect() function The kernel offers with TIOCL_GETKMSGREDIRECT ioctl() the possibility to redirect the kernel messages to a specific console. However, since it's not possible to switch to the kernel message console after a panic(), it would be nice if the kernel would print the panic message on the current console. This patch series adds a new interface to access the global kmsg_redirect variable by a function to be able to use it in code where CONFIG_VT_CONSOLE is not set (kernel/panic.c). This patch: Instead of using and exporting a global value kmsg_redirect, introduce a function vt_kmsg_redirect() that both can set and return the console where messages are printed. Change all users of kmsg_redirect (the VT code itself and kernel/power.c) to the new interface. The main advantage is that vt_kmsg_redirect() can also be used when CONFIG_VT_CONSOLE is not set. Signed-off-by: Bernhard Walle Cc: Alan Cox Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/char/vt.c | 43 +++++++++++++++++++++++++++++++++++++------ include/linux/tty.h | 2 -- include/linux/vt.h | 15 +++++++++++++++ kernel/power/console.c | 7 +++---- 4 files changed, 55 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/drivers/char/vt.c b/drivers/char/vt.c index 1e3d728dbf7e..e43fbc66aef0 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c @@ -184,12 +184,10 @@ static DECLARE_WORK(console_work, console_callback); * fg_console is the current virtual console, * last_console is the last used one, * want_console is the console we want to switch to, - * kmsg_redirect is the console for kernel messages, */ int fg_console; int last_console; int want_console = -1; -int kmsg_redirect; /* * For each existing display, we have a pointer to console currently visible @@ -2434,6 +2432,37 @@ struct tty_driver *console_driver; #ifdef CONFIG_VT_CONSOLE +/** + * vt_kmsg_redirect() - Sets/gets the kernel message console + * @new: The new virtual terminal number or -1 if the console should stay + * unchanged + * + * By default, the kernel messages are always printed on the current virtual + * console. 
However, the user may modify that default with the + * TIOCL_SETKMSGREDIRECT ioctl call. + * + * This function sets the kernel message console to be @new. It returns the old + * virtual console number. The virtual terminal number 0 (both as parameter and + * return value) means no redirection (i.e. always printed on the currently + * active console). + * + * The parameter -1 means that only the current console is returned, but the + * value is not modified. You may use the macro vt_get_kmsg_redirect() in that + * case to make the code more understandable. + * + * When the kernel is compiled without CONFIG_VT_CONSOLE, this function ignores + * the parameter and always returns 0. + */ +int vt_kmsg_redirect(int new) +{ + static int kmsg_con; + + if (new != -1) + return xchg(&kmsg_con, new); + else + return kmsg_con; +} + /* * Console on virtual terminal * @@ -2448,6 +2477,7 @@ static void vt_console_print(struct console *co, const char *b, unsigned count) const ushort *start; ushort cnt = 0; ushort myx; + int kmsg_console; /* console busy or not yet initialized */ if (!printable) @@ -2455,8 +2485,9 @@ static void vt_console_print(struct console *co, const char *b, unsigned count) if (!spin_trylock(&printing_lock)) return; - if (kmsg_redirect && vc_cons_allocated(kmsg_redirect - 1)) - vc = vc_cons[kmsg_redirect - 1].d; + kmsg_console = vt_get_kmsg_redirect(); + if (kmsg_console && vc_cons_allocated(kmsg_console - 1)) + vc = vc_cons[kmsg_console - 1].d; /* read `x' only after setting currcons properly (otherwise the `x' macro will read the x of the foreground console). */ @@ -2613,7 +2644,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg) ret = set_vesa_blanking(p); break; case TIOCL_GETKMSGREDIRECT: - data = kmsg_redirect; + data = vt_get_kmsg_redirect(); ret = __put_user(data, p); break; case TIOCL_SETKMSGREDIRECT: @@ -2623,7 +2654,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg) if (get_user(data, p+1)) ret = -EFAULT; else - kmsg_redirect = data; + vt_kmsg_redirect(data); } break; case TIOCL_GETFGCONSOLE: diff --git a/include/linux/tty.h b/include/linux/tty.h index 405a9035fe40..ef3a2947b102 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -350,8 +350,6 @@ extern void tty_write_flush(struct tty_struct *); extern struct ktermios tty_std_termios; -extern int kmsg_redirect; - extern void console_init(void); extern int vcs_init(void); diff --git a/include/linux/vt.h b/include/linux/vt.h index 7ffa11f06232..3fb9944e50a6 100644 --- a/include/linux/vt.h +++ b/include/linux/vt.h @@ -84,4 +84,19 @@ struct vt_setactivate { #define VT_SETACTIVATE 0x560F /* Activate and set the mode of a console */ +#ifdef CONFIG_VT_CONSOLE + +extern int vt_kmsg_redirect(int new); + +#else + +static inline int vt_kmsg_redirect(int new) +{ + return 0; +} + +#endif + +#define vt_get_kmsg_redirect() vt_kmsg_redirect(-1) + #endif /* _LINUX_VT_H */ diff --git a/kernel/power/console.c b/kernel/power/console.c index 5187136fe1de..218e5af90156 100644 --- a/kernel/power/console.c +++ b/kernel/power/console.c @@ -6,7 +6,7 @@ #include #include -#include +#include #include #include "power.h" @@ -21,8 +21,7 @@ int pm_prepare_console(void) if (orig_fgconsole < 0) return 1; - orig_kmsg = kmsg_redirect; - kmsg_redirect = SUSPEND_CONSOLE; + orig_kmsg = vt_kmsg_redirect(SUSPEND_CONSOLE); return 0; } @@ -30,7 +29,7 @@ void pm_restore_console(void) { if (orig_fgconsole >= 0) { vt_move_to_console(orig_fgconsole, 0); - kmsg_redirect = orig_kmsg; + vt_kmsg_redirect(orig_kmsg); } } #endif -- cgit v1.2.3 
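The point of the interface above is that one function carries both the setter and the getter; a caller that temporarily redirects kernel messages saves the old value and later puts it back, much like the suspend code now does. A minimal sketch (the VT number 3 is arbitrary):

static int example_kmsg_redirect(void)
{
	int old = vt_kmsg_redirect(3);	/* kernel messages now go to VT 3 */

	/* ... work whose output should land on VT 3 ... */

	vt_kmsg_redirect(old);		/* restore the previous target */
	return vt_get_kmsg_redirect();	/* read-only query; 0 = follow the active console */
}
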
From 7707e61c70999a1f9f1fd9ac92e293c198585152 Mon Sep 17 00:00:00 2001 From: André Goddard Rosa Date: Mon, 14 Dec 2009 18:01:02 -0800 Subject: ctype: constify read-only _ctype string MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit While at it, use tabs to indent the comments. Signed-off-by: André Goddard Rosa Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/ctype.h | 2 +- lib/ctype.c | 50 +++++++++++++++++++++++++------------------------- 2 files changed, 26 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/include/linux/ctype.h b/include/linux/ctype.h index afa36392297a..6dec944a370b 100644 --- a/include/linux/ctype.h +++ b/include/linux/ctype.h @@ -15,7 +15,7 @@ #define _X 0x40 /* hex digit */ #define _SP 0x80 /* hard space (0x20) */ -extern unsigned char _ctype[]; +extern const unsigned char _ctype[]; #define __ismask(x) (_ctype[(int)(unsigned char)(x)]) diff --git a/lib/ctype.c b/lib/ctype.c index d02ace14a322..26baa620e95b 100644 --- a/lib/ctype.c +++ b/lib/ctype.c @@ -7,30 +7,30 @@ #include #include -unsigned char _ctype[] = { -_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */ -_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */ -_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */ -_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */ -_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */ -_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */ -_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */ -_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */ -_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */ -_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */ -_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */ -_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */ -_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */ -_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */ -_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */ -_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */ -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */ -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */ -_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */ -_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */ -_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */ -_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */ -_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */ -_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */ +const unsigned char _ctype[] = { +_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */ +_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */ +_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */ +_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */ +_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */ +_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */ +_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */ +_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */ +_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */ +_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */ +_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */ +_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */ +_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */ +_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */ +_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */ +_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */ +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */ +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */ +_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */ +_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */ +_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */ +_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */ +_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */ +_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */ EXPORT_SYMBOL(_ctype); -- cgit v1.2.3 From 
f653398c86a1c104f0992bd788dd4bb065449be4 Mon Sep 17 00:00:00 2001 From: André Goddard Rosa Date: Mon, 14 Dec 2009 18:01:04 -0800 Subject: string: factorize skip_spaces and export it to be generally available MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On the following sentence: while (*s && isspace(*s)) s++; If *s == 0, isspace() evaluates to ((_ctype[*s] & 0x20) != 0), which evaluates to ((0x08 & 0x20) != 0) which equals to 0 as well. If *s == 1, we depend on isspace() result anyway. In other words, "a char equals zero is never a space", so remove this check. Also, *s != 0 is most common case (non-null string). Fixed const return as noticed by Jan Engelhardt and James Bottomley. Fixed unnecessary extra cast on strstrip() as noticed by Jan Engelhardt. Signed-off-by: André Goddard Rosa Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/ctype.h | 1 + include/linux/string.h | 1 + lib/string.c | 19 +++++++++++++++---- 3 files changed, 17 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/ctype.h b/include/linux/ctype.h index 6dec944a370b..a3d6ee0044f9 100644 --- a/include/linux/ctype.h +++ b/include/linux/ctype.h @@ -27,6 +27,7 @@ extern const unsigned char _ctype[]; #define islower(c) ((__ismask(c)&(_L)) != 0) #define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0) #define ispunct(c) ((__ismask(c)&(_P)) != 0) +/* Note: isspace() must return false for %NUL-terminator */ #define isspace(c) ((__ismask(c)&(_S)) != 0) #define isupper(c) ((__ismask(c)&(_U)) != 0) #define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0) diff --git a/include/linux/string.h b/include/linux/string.h index b8508868d5ad..168dad11ae03 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -62,6 +62,7 @@ extern char * strnchr(const char *, size_t, int); #ifndef __HAVE_ARCH_STRRCHR extern char * strrchr(const char *,int); #endif +extern char * __must_check skip_spaces(const char *); extern char * __must_check strstrip(char *); #ifndef __HAVE_ARCH_STRSTR extern char * strstr(const char *,const char *); diff --git a/lib/string.c b/lib/string.c index e96421ab9a9a..3a912a4e9a63 100644 --- a/lib/string.c +++ b/lib/string.c @@ -337,6 +337,20 @@ char *strnchr(const char *s, size_t count, int c) EXPORT_SYMBOL(strnchr); #endif +/** + * skip_spaces - Removes leading whitespace from @s. + * @s: The string to be stripped. + * + * Returns a pointer to the first non-whitespace character in @s. + */ +char *skip_spaces(const char *str) +{ + while (isspace(*str)) + ++str; + return (char *)str; +} +EXPORT_SYMBOL(skip_spaces); + /** * strstrip - Removes leading and trailing whitespace from @s. * @s: The string to be stripped. 
@@ -360,10 +374,7 @@ char *strstrip(char *s) end--; *(end + 1) = '\0'; - while (*s && isspace(*s)) - s++; - - return s; + return skip_spaces(s); } EXPORT_SYMBOL(strstrip); -- cgit v1.2.3 From 925ede0bf4ecef96fc2d939b16619530111aa16e Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Mon, 14 Dec 2009 18:01:14 -0800 Subject: efi.h: use %pUl to print UUIDs Shrinks vmlinux without: $ size vmlinux text data bss dec hex filename 6975863 679652 1359668 9015183 898f8f vmlinux with: $ size vmlinux text data bss dec hex filename 6975639 679652 1359668 9014959 898eaf vmlinux Signed-off-by: Joe Perches Cc: Jeff Garzik Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/efi.h | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/efi.h b/include/linux/efi.h index ce4581fbc08b..fb737bc19a8c 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -280,11 +280,7 @@ efi_guidcmp (efi_guid_t left, efi_guid_t right) static inline char * efi_guid_unparse(efi_guid_t *guid, char *out) { - sprintf(out, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x", - guid->b[3], guid->b[2], guid->b[1], guid->b[0], - guid->b[5], guid->b[4], guid->b[7], guid->b[6], - guid->b[8], guid->b[9], guid->b[10], guid->b[11], - guid->b[12], guid->b[13], guid->b[14], guid->b[15]); + sprintf(out, "%pUl", guid->b); return out; } -- cgit v1.2.3 From ca54cb8c9eb38095dc420b73c6380ce1dbeb10fa Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Mon, 14 Dec 2009 18:01:15 -0800 Subject: Subject: Re: [PATCH] strstrip incorrectly marked __must_check Recently, We marked strstrip() as must_check. because it was frequently misused and it should be checked. However, we found one exception. scsi/ipr.c intentionally ignore return value of strstrip. Because it wishes to keep the whitespace at the beginning. Thus we need to keep with and without checked whitespace trim function. This patch adds a new strim() and changes ipr.c to use it. 
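A small hedged illustration of the difference between the two helpers (the buffer contents are invented; skip_spaces() comes from the previous patch, strim() from this one):

static void example_trim(void)
{
	char buf[] = "  hello world \n";

	pr_info("%s\n", skip_spaces(buf));	/* leading blanks skipped, buf itself untouched */
	pr_info("%s\n", strim(buf));		/* trailing blanks also cut off by writing a '\0' */
}
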
[akpm@linux-foundation.org: coding-style fixes] Suggested-by: Alan Cox Signed-off-by: KOSAKI Motohiro Cc: James Bottomley Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/scsi/ipr.c | 4 ++-- include/linux/string.h | 9 ++++++++- lib/string.c | 6 +++--- 3 files changed, 13 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 206c2fa8c1ba..8643f5089361 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -1333,7 +1333,7 @@ static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg, error = &hostrcb->hcam.u.error.u.type_17_error; error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; - strstrip(error->failure_reason); + strim(error->failure_reason); ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, be32_to_cpu(hostrcb->hcam.u.error.prc)); @@ -1359,7 +1359,7 @@ static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg, error = &hostrcb->hcam.u.error.u.type_07_error; error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; - strstrip(error->failure_reason); + strim(error->failure_reason); ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, be32_to_cpu(hostrcb->hcam.u.error.prc)); diff --git a/include/linux/string.h b/include/linux/string.h index 168dad11ae03..651839a2a755 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -63,7 +63,14 @@ extern char * strnchr(const char *, size_t, int); extern char * strrchr(const char *,int); #endif extern char * __must_check skip_spaces(const char *); -extern char * __must_check strstrip(char *); + +extern char *strim(char *); + +static inline __must_check char *strstrip(char *str) +{ + return strim(str); +} + #ifndef __HAVE_ARCH_STRSTR extern char * strstr(const char *,const char *); #endif diff --git a/lib/string.c b/lib/string.c index 765566a6a088..afce96af3afd 100644 --- a/lib/string.c +++ b/lib/string.c @@ -352,14 +352,14 @@ char *skip_spaces(const char *str) EXPORT_SYMBOL(skip_spaces); /** - * strstrip - Removes leading and trailing whitespace from @s. + * strim - Removes leading and trailing whitespace from @s. * @s: The string to be stripped. * * Note that the first trailing whitespace is replaced with a %NUL-terminator * in the given string @s. Returns a pointer to the first non-whitespace * character in @s. */ -char *strstrip(char *s) +char *strim(char *s) { size_t size; char *end; @@ -376,7 +376,7 @@ char *strstrip(char *s) return s; } -EXPORT_SYMBOL(strstrip); +EXPORT_SYMBOL(strim); #ifndef __HAVE_ARCH_STRLEN /** -- cgit v1.2.3 From 2635d1ba711560d521f6218c585a3e0401f566e1 Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Mon, 14 Dec 2009 18:01:30 -0800 Subject: atmel-mci: change use of dma slave interface Allow the use of another DMA controller driver in atmel-mci sd/mmc driver. This adds a generic dma_slave pointer to the mci platform structure where we can store DMA controller information. In atmel-mci we use information provided by this structure to initialize the driver (with new helper functions that are architecture dependant). This also adds at32/avr32 chip modifications to cope with this new access method. 
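In other words, the platform data now carries an opaque struct mci_dma_data pointer whose contents are architecture specific (a dw_dma_slave on AVR32, an at_dma_slave on AT91, per the new headers below); the MCI driver only touches it through the slave_data_ptr()/find_slave_dev()/setup_dma_addr() accessors. A hypothetical board file might provide something like this sketch:

static struct mci_dma_data example_mci_dma;	/* .sdata.dma_dev etc. filled in by the platform */

static struct mci_platform_data example_mci_data = {
	.dma_slave	= &example_mci_dma,
	.slot[0]	= {
		.bus_width	= 4,
	},
};
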
Signed-off-by: Nicolas Ferre Cc: Haavard Skinnemoen Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/mach-at91/include/mach/atmel-mci.h | 24 +++++++++++ arch/avr32/mach-at32ap/at32ap700x.c | 18 +++++--- arch/avr32/mach-at32ap/include/mach/atmel-mci.h | 24 +++++++++++ drivers/mmc/host/atmel-mci.c | 56 ++++++++++++++++--------- include/linux/atmel-mci.h | 4 +- 5 files changed, 98 insertions(+), 28 deletions(-) create mode 100644 arch/arm/mach-at91/include/mach/atmel-mci.h create mode 100644 arch/avr32/mach-at32ap/include/mach/atmel-mci.h (limited to 'include') diff --git a/arch/arm/mach-at91/include/mach/atmel-mci.h b/arch/arm/mach-at91/include/mach/atmel-mci.h new file mode 100644 index 000000000000..998cb0c07135 --- /dev/null +++ b/arch/arm/mach-at91/include/mach/atmel-mci.h @@ -0,0 +1,24 @@ +#ifndef __MACH_ATMEL_MCI_H +#define __MACH_ATMEL_MCI_H + +#include + +/** + * struct mci_dma_data - DMA data for MCI interface + */ +struct mci_dma_data { + struct at_dma_slave sdata; +}; + +/* accessor macros */ +#define slave_data_ptr(s) (&(s)->sdata) +#define find_slave_dev(s) ((s)->sdata.dma_dev) + +#define setup_dma_addr(s, t, r) do { \ + if (s) { \ + (s)->sdata.tx_reg = (t); \ + (s)->sdata.rx_reg = (r); \ + } \ +} while (0) + +#endif /* __MACH_ATMEL_MCI_H */ diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c index eb9d4dc2e86d..b40ff39e0ac8 100644 --- a/arch/avr32/mach-at32ap/at32ap700x.c +++ b/arch/avr32/mach-at32ap/at32ap700x.c @@ -15,6 +15,8 @@ #include #include #include + +#include #include #include @@ -1320,7 +1322,7 @@ struct platform_device *__init at32_add_device_mci(unsigned int id, struct mci_platform_data *data) { struct platform_device *pdev; - struct dw_dma_slave *dws = &data->dma_slave; + struct mci_dma_slave *slave; u32 pioa_mask; u32 piob_mask; @@ -1339,13 +1341,17 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data) ARRAY_SIZE(atmel_mci0_resource))) goto fail; - dws->dma_dev = &dw_dmac0_device.dev; - dws->reg_width = DW_DMA_SLAVE_WIDTH_32BIT; - dws->cfg_hi = (DWC_CFGH_SRC_PER(0) + slave = kzalloc(sizeof(struct mci_dma_slave), GFP_KERNEL); + + slave->sdata.dma_dev = &dw_dmac0_device.dev; + slave->sdata.reg_width = DW_DMA_SLAVE_WIDTH_32BIT; + slave->sdata.cfg_hi = (DWC_CFGH_SRC_PER(0) | DWC_CFGH_DST_PER(1)); - dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL + slave->sdata.cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL); + data->dma_slave = slave; + if (platform_device_add_data(pdev, data, sizeof(struct mci_platform_data))) goto fail; @@ -1411,6 +1417,8 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data) return pdev; fail: + data->dma_slave = NULL; + kfree(slave); platform_device_put(pdev); return NULL; } diff --git a/arch/avr32/mach-at32ap/include/mach/atmel-mci.h b/arch/avr32/mach-at32ap/include/mach/atmel-mci.h new file mode 100644 index 000000000000..a9b38967f703 --- /dev/null +++ b/arch/avr32/mach-at32ap/include/mach/atmel-mci.h @@ -0,0 +1,24 @@ +#ifndef __MACH_ATMEL_MCI_H +#define __MACH_ATMEL_MCI_H + +#include + +/** + * struct mci_dma_data - DMA data for MCI interface + */ +struct mci_dma_data { + struct dw_dma_slave sdata; +}; + +/* accessor macros */ +#define slave_data_ptr(s) (&(s)->sdata) +#define find_slave_dev(s) ((s)->sdata.dma_dev) + +#define setup_dma_addr(s, t, r) do { \ + if (s) { \ + (s)->sdata.tx_reg = (t); \ + (s)->sdata.rx_reg = (r); \ + } \ +} while (0) + +#endif /* __MACH_ATMEL_MCI_H */ diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c 
index fc25586b7ee1..ba8b219d44c1 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -25,6 +25,8 @@ #include #include + +#include #include #include @@ -1584,14 +1586,43 @@ static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot, #ifdef CONFIG_MMC_ATMELMCI_DMA static bool filter(struct dma_chan *chan, void *slave) { - struct dw_dma_slave *dws = slave; + struct mci_dma_data *sl = slave; - if (dws->dma_dev == chan->device->dev) { - chan->private = dws; + if (sl && find_slave_dev(sl) == chan->device->dev) { + chan->private = slave_data_ptr(sl); return true; - } else + } else { return false; + } } + +static void atmci_configure_dma(struct atmel_mci *host) +{ + struct mci_platform_data *pdata; + + if (host == NULL) + return; + + pdata = host->pdev->dev.platform_data; + + if (pdata && find_slave_dev(pdata->dma_slave)) { + dma_cap_mask_t mask; + + setup_dma_addr(pdata->dma_slave, + host->mapbase + MCI_TDR, + host->mapbase + MCI_RDR); + + /* Try to grab a DMA channel */ + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + host->dma.chan = + dma_request_channel(mask, filter, pdata->dma_slave); + } + if (!host->dma.chan) + dev_notice(&host->pdev->dev, "DMA not available, using PIO\n"); +} +#else +static void atmci_configure_dma(struct atmel_mci *host) {} #endif static int __init atmci_probe(struct platform_device *pdev) @@ -1645,22 +1676,7 @@ static int __init atmci_probe(struct platform_device *pdev) if (ret) goto err_request_irq; -#ifdef CONFIG_MMC_ATMELMCI_DMA - if (pdata->dma_slave.dma_dev) { - struct dw_dma_slave *dws = &pdata->dma_slave; - dma_cap_mask_t mask; - - dws->tx_reg = regs->start + MCI_TDR; - dws->rx_reg = regs->start + MCI_RDR; - - /* Try to grab a DMA channel */ - dma_cap_zero(mask); - dma_cap_set(DMA_SLAVE, mask); - host->dma.chan = dma_request_channel(mask, filter, dws); - } - if (!host->dma.chan) - dev_notice(&pdev->dev, "DMA not available, using PIO\n"); -#endif /* CONFIG_MMC_ATMELMCI_DMA */ + atmci_configure_dma(host); platform_set_drvdata(pdev, host); diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h index 57b1846a3c87..3e09b345f4d6 100644 --- a/include/linux/atmel-mci.h +++ b/include/linux/atmel-mci.h @@ -3,8 +3,6 @@ #define ATMEL_MCI_MAX_NR_SLOTS 2 -#include - /** * struct mci_slot_pdata - board-specific per-slot configuration * @bus_width: Number of data lines wired up the slot @@ -34,7 +32,7 @@ struct mci_slot_pdata { * @slot: Per-slot configuration data. */ struct mci_platform_data { - struct dw_dma_slave dma_slave; + struct mci_dma_data *dma_slave; struct mci_slot_pdata slot[ATMEL_MCI_MAX_NR_SLOTS]; }; -- cgit v1.2.3 From e40d6eaa79bc9d9d347c3c51fe0c9204e9025b79 Mon Sep 17 00:00:00 2001 From: Samu Onkalo Date: Mon, 14 Dec 2009 18:01:35 -0800 Subject: lis3lv02d: axis remap and resource setup/release MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add the possibility to remap axes via platform data. 
Function pointers for resource setup and release purposes Signed-off-by: Samu Onkalo Acked-by: Éric Piel Cc: Pavel Machek Cc: Jean Delvare Cc: "Trisal, Kalhan" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/lis3lv02d.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'include') diff --git a/include/linux/lis3lv02d.h b/include/linux/lis3lv02d.h index 3cc2f2c53e4c..89701355c74f 100644 --- a/include/linux/lis3lv02d.h +++ b/include/linux/lis3lv02d.h @@ -43,6 +43,18 @@ struct lis3lv02d_platform_data { #define LIS3_WAKEUP_Z_HI (1 << 5) unsigned char wakeup_flags; unsigned char wakeup_thresh; +#define LIS3_NO_MAP 0 +#define LIS3_DEV_X 1 +#define LIS3_DEV_Y 2 +#define LIS3_DEV_Z 3 +#define LIS3_INV_DEV_X -1 +#define LIS3_INV_DEV_Y -2 +#define LIS3_INV_DEV_Z -3 + s8 axis_x; + s8 axis_y; + s8 axis_z; + int (*setup_resources)(void); + int (*release_resources)(void); }; #endif /* __LIS3LV02D_H_ */ -- cgit v1.2.3 From 2db4a76d5f3554e9e5632c8f91828313318579c8 Mon Sep 17 00:00:00 2001 From: Samu Onkalo Date: Mon, 14 Dec 2009 18:01:43 -0800 Subject: lis3: selftest support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Implement selftest feature as specified by chip manufacturer. Control: read selftest sysfs entry Response: "OK x y z" or "FAIL x y z" where x, y, and z are difference between selftest mode and normal mode. Test is passed when values are within acceptance limit values. Acceptance limits are provided via platform data. See chip spesifications for acceptance limits. If limits are not properly set, OK / FAIL decision is meaningless. However, userspace application can still make decision based on the numeric x, y, z values. Selftest is meant for HW diagnostic purposes. It is not meant to be called during normal use of the chip. It may cause false interrupt events. Selftest mode delays polling of the normal results but it doesn't cause wrong values. Chip must be in static state during selftest. Any acceration during the test causes most probably failure. 
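To make the two lis3lv02d changes concrete, a board file would typically express both the axis remapping and the selftest acceptance window in its platform data. The sketch below is illustrative only: the limit values are placeholders (real limits come from the chip's data sheet, as the changelog notes), and the fields used are the ones introduced by the axis-remap patch above and by the lis3lv02d.h hunk of this selftest patch.

#include <linux/lis3lv02d.h>

/* Hypothetical board data: axes remapped, selftest window from data sheet */
static struct lis3lv02d_platform_data example_lis3_pdata = {
	.axis_x		= LIS3_DEV_Y,		/* chip mounted rotated on the board */
	.axis_y		= LIS3_INV_DEV_X,
	.axis_z		= LIS3_DEV_Z,
	.st_min_limits	= { 120, 120, 140 },	/* placeholder values */
	.st_max_limits	= { 550, 550, 750 },	/* placeholder values */
};

With limits in place, reading the new selftest attribute yields a line such as "OK 245 312 467" (numbers made up here), where the three integers are the per-axis differences between selftest and normal mode described above.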
Signed-off-by: Samu Onkalo Acked-by: Éric Piel Cc: Pavel Machek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/hwmon/lis3lv02d.c | 67 ++++++++++++++++++++++++++++++++++++++++++++++- drivers/hwmon/lis3lv02d.h | 14 ++++++++-- include/linux/lis3lv02d.h | 3 +++ 3 files changed, 81 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c index 39b9ac8e18ed..55ec883e2026 100644 --- a/drivers/hwmon/lis3lv02d.c +++ b/drivers/hwmon/lis3lv02d.c @@ -106,9 +106,11 @@ static void lis3lv02d_get_xyz(struct lis3lv02d *lis3, int *x, int *y, int *z) { int position[3]; + mutex_lock(&lis3->mutex); position[0] = lis3->read_data(lis3, OUTX); position[1] = lis3->read_data(lis3, OUTY); position[2] = lis3->read_data(lis3, OUTZ); + mutex_unlock(&lis3->mutex); *x = lis3lv02d_get_axis(lis3->ac.x, position); *y = lis3lv02d_get_axis(lis3->ac.y, position); @@ -133,6 +135,55 @@ static int lis3lv02d_get_odr(void) return val; } +static int lis3lv02d_selftest(struct lis3lv02d *lis3, s16 results[3]) +{ + u8 reg; + s16 x, y, z; + u8 selftest; + int ret; + + mutex_lock(&lis3->mutex); + if (lis3_dev.whoami == WAI_12B) + selftest = CTRL1_ST; + else + selftest = CTRL1_STP; + + lis3->read(lis3, CTRL_REG1, ®); + lis3->write(lis3, CTRL_REG1, (reg | selftest)); + msleep(lis3->pwron_delay / lis3lv02d_get_odr()); + + /* Read directly to avoid axis remap */ + x = lis3->read_data(lis3, OUTX); + y = lis3->read_data(lis3, OUTY); + z = lis3->read_data(lis3, OUTZ); + + /* back to normal settings */ + lis3->write(lis3, CTRL_REG1, reg); + msleep(lis3->pwron_delay / lis3lv02d_get_odr()); + + results[0] = x - lis3->read_data(lis3, OUTX); + results[1] = y - lis3->read_data(lis3, OUTY); + results[2] = z - lis3->read_data(lis3, OUTZ); + + ret = 0; + if (lis3->pdata) { + int i; + for (i = 0; i < 3; i++) { + /* Check against selftest acceptance limits */ + if ((results[i] < lis3->pdata->st_min_limits[i]) || + (results[i] > lis3->pdata->st_max_limits[i])) { + ret = -EIO; + goto fail; + } + } + } + + /* test passed */ +fail: + mutex_unlock(&lis3->mutex); + return ret; +} + void lis3lv02d_poweroff(struct lis3lv02d *lis3) { /* disable X,Y,Z axis and power down */ @@ -365,6 +416,17 @@ void lis3lv02d_joystick_disable(void) EXPORT_SYMBOL_GPL(lis3lv02d_joystick_disable); /* Sysfs stuff */ +static ssize_t lis3lv02d_selftest_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int result; + s16 values[3]; + + result = lis3lv02d_selftest(&lis3_dev, values); + return sprintf(buf, "%s %d %d %d\n", result == 0 ? 
"OK" : "FAIL", + values[0], values[1], values[2]); +} + static ssize_t lis3lv02d_position_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -394,12 +456,14 @@ static ssize_t lis3lv02d_rate_show(struct device *dev, return sprintf(buf, "%d\n", lis3lv02d_get_odr()); } +static DEVICE_ATTR(selftest, S_IRUSR, lis3lv02d_selftest_show, NULL); static DEVICE_ATTR(position, S_IRUGO, lis3lv02d_position_show, NULL); static DEVICE_ATTR(calibrate, S_IRUGO|S_IWUSR, lis3lv02d_calibrate_show, lis3lv02d_calibrate_store); static DEVICE_ATTR(rate, S_IRUGO, lis3lv02d_rate_show, NULL); static struct attribute *lis3lv02d_attributes[] = { + &dev_attr_selftest.attr, &dev_attr_position.attr, &dev_attr_calibrate.attr, &dev_attr_rate.attr, @@ -455,6 +519,8 @@ int lis3lv02d_init_device(struct lis3lv02d *dev) return -EINVAL; } + mutex_init(&dev->mutex); + lis3lv02d_add_fs(dev); lis3lv02d_poweron(dev); @@ -507,4 +573,3 @@ EXPORT_SYMBOL_GPL(lis3lv02d_init_device); MODULE_DESCRIPTION("ST LIS3LV02Dx three-axis digital accelerometer driver"); MODULE_AUTHOR("Yan Burman, Eric Piel, Pavel Machek"); MODULE_LICENSE("GPL"); - diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h index c57f21f45676..166794cb91a3 100644 --- a/drivers/hwmon/lis3lv02d.h +++ b/drivers/hwmon/lis3lv02d.h @@ -98,7 +98,7 @@ enum lis3_who_am_i { WAI_6B = 0x52, /* 6 bits: LIS331DLF - not supported */ }; -enum lis3lv02d_ctrl1 { +enum lis3lv02d_ctrl1_12b { CTRL1_Xen = 0x01, CTRL1_Yen = 0x02, CTRL1_Zen = 0x04, @@ -107,8 +107,17 @@ enum lis3lv02d_ctrl1 { CTRL1_DF1 = 0x20, CTRL1_PD0 = 0x40, CTRL1_PD1 = 0x80, - CTRL1_DR = 0x80, /* Data rate on 8 bits */ }; + +/* Delta to ctrl1_12b version */ +enum lis3lv02d_ctrl1_8b { + CTRL1_STM = 0x08, + CTRL1_STP = 0x10, + CTRL1_FS = 0x20, + CTRL1_PD = 0x40, + CTRL1_DR = 0x80, +}; + enum lis3lv02d_ctrl2 { CTRL2_DAS = 0x01, CTRL2_SIM = 0x02, @@ -218,6 +227,7 @@ struct lis3lv02d { unsigned long misc_opened; /* bit0: whether the device is open */ struct lis3lv02d_platform_data *pdata; /* for passing board config */ + struct mutex mutex; /* Serialize poll and selftest */ }; int lis3lv02d_init_device(struct lis3lv02d *lis3); diff --git a/include/linux/lis3lv02d.h b/include/linux/lis3lv02d.h index 89701355c74f..f1ca0dcc1628 100644 --- a/include/linux/lis3lv02d.h +++ b/include/linux/lis3lv02d.h @@ -55,6 +55,9 @@ struct lis3lv02d_platform_data { s8 axis_z; int (*setup_resources)(void); int (*release_resources)(void); + /* Limits for selftest are specified in chip data sheet */ + s16 st_min_limits[3]; /* min pass limit x, y, z */ + s16 st_max_limits[3]; /* max pass limit x, y, z */ }; #endif /* __LIS3LV02D_H_ */ -- cgit v1.2.3 From 48f186124220794fce85ed1439fc32f16f69d3e2 Mon Sep 17 00:00:00 2001 From: Alexandros Batsakis Date: Mon, 14 Dec 2009 21:27:53 -0800 Subject: rpc: add rpc_queue_empty function Signed-off-by: Alexandros Batsakis Signed-off-by: Trond Myklebust --- include/linux/sunrpc/sched.h | 1 + net/sunrpc/sched.c | 14 ++++++++++++++ 2 files changed, 15 insertions(+) (limited to 'include') diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 1906782ec86b..9157405f9320 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -229,6 +229,7 @@ void rpc_wake_up_queued_task(struct rpc_wait_queue *, void rpc_wake_up(struct rpc_wait_queue *); struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *); void rpc_wake_up_status(struct rpc_wait_queue *, int); +int rpc_queue_empty(struct rpc_wait_queue *); void rpc_delay(struct rpc_task *, unsigned long); 
void * rpc_malloc(struct rpc_task *, size_t); void rpc_free(void *); diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index cef74ba0666c..89ea8e69ec78 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -384,6 +384,20 @@ static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct r __rpc_do_wake_up_task(queue, task); } +/* + * Tests whether rpc queue is empty + */ +int rpc_queue_empty(struct rpc_wait_queue *queue) +{ + int res; + + spin_lock_bh(&queue->lock); + res = queue->qlen; + spin_unlock_bh(&queue->lock); + return (res == 0); +} +EXPORT_SYMBOL_GPL(rpc_queue_empty); + /* * Wake up a task on a specific queue */ -- cgit v1.2.3 From cf3b01b54880debb01ea7d471123da5887a7c2cb Mon Sep 17 00:00:00 2001 From: Alexandros Batsakis Date: Mon, 14 Dec 2009 21:27:55 -0800 Subject: rpc: add a new priority in RPC task Signed-off-by: Alexandros Batsakis Signed-off-by: Trond Myklebust --- include/linux/sunrpc/sched.h | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 9157405f9320..7bc7fd5291ce 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -173,7 +173,8 @@ struct rpc_task_setup { #define RPC_PRIORITY_LOW (-1) #define RPC_PRIORITY_NORMAL (0) #define RPC_PRIORITY_HIGH (1) -#define RPC_NR_PRIORITY (1 + RPC_PRIORITY_HIGH - RPC_PRIORITY_LOW) +#define RPC_PRIORITY_PRIVILEGED (2) +#define RPC_NR_PRIORITY (1 + RPC_PRIORITY_PRIVILEGED - RPC_PRIORITY_LOW) struct rpc_timer { struct timer_list timer; @@ -255,6 +256,16 @@ static inline int rpc_wait_for_completion_task(struct rpc_task *task) return __rpc_wait_for_completion_task(task, NULL); } +static inline void rpc_task_set_priority(struct rpc_task *task, unsigned char prio) +{ + task->tk_priority = prio - RPC_PRIORITY_LOW; +} + +static inline int rpc_task_has_priority(struct rpc_task *task, unsigned char prio) +{ + return (task->tk_priority + RPC_PRIORITY_LOW == prio); +} + #ifdef RPC_DEBUG static inline const char * rpc_qname(struct rpc_wait_queue *q) { -- cgit v1.2.3 From eb4c86c6a5adec423c9e615d4937fdddd06a16c5 Mon Sep 17 00:00:00 2001 From: Steve Dickson Date: Wed, 9 Sep 2009 14:58:22 -0400 Subject: nfsd: introduce export flag for v4 pseudoroot NFSv4 differs from v2 and v3 in that it presents a single unified filesystem tree, whereas v2 and v3 exported multiple filesystem (whose roots could be found using a separate mount protocol). Our original NFSv4 server implementation asked the administrator to designate a single filesystem as the NFSv4 root, then to mount filesystems they wished to export underneath. (Often using bind mounts of already-existing filesystems.) This was conceptually simple, and allowed easy implementation, but created a serious obstacle to upgrading between v2/v3: since the paths to v4 filesystems were different, administrators would have to adjust all the paths in client-side mount commands when switching to v4. Various workarounds are possible. For example, the administrator could export "/" and designate it as the v4 root. However, the security risks of that approach are obvious, and in any case we shouldn't be requiring the administrator to take extra steps to fix this problem; instead, the server should present consistent paths across different versions by default. These patches take a modified version of that approach: we provide a new export option which exports only a subset of a filesystem. 
With this flag, it becomes safe for mountd to export "/" by default, with no need for additional configuration. We begin just by defining the new flag. Signed-off-by: Steve Dickson Signed-off-by: J. Bruce Fields --- fs/nfsd/export.c | 1 + include/linux/nfsd/export.h | 10 ++++++++++ 2 files changed, 11 insertions(+) (limited to 'include') diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c index cb3dae2fcd86..c64d55f319bd 100644 --- a/fs/nfsd/export.c +++ b/fs/nfsd/export.c @@ -1425,6 +1425,7 @@ static struct flags { { NFSEXP_CROSSMOUNT, {"crossmnt", ""}}, { NFSEXP_NOSUBTREECHECK, {"no_subtree_check", ""}}, { NFSEXP_NOAUTHNLM, {"insecure_locks", ""}}, + { NFSEXP_V4ROOT, {"v4root", ""}}, #ifdef MSNFS { NFSEXP_MSNFS, {"msnfs", ""}}, #endif diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h index 4cafbe1255f0..41f0d4e25374 100644 --- a/include/linux/nfsd/export.h +++ b/include/linux/nfsd/export.h @@ -39,6 +39,16 @@ #define NFSEXP_FSID 0x2000 #define NFSEXP_CROSSMOUNT 0x4000 #define NFSEXP_NOACL 0x8000 /* reserved for possible ACL related use */ +/* + * The NFSEXP_V4ROOT flag causes the kernel to give access only to NFSv4 + * clients, and only to the single directory that is the root of the + * export; further lookup and readdir operations are treated as if every + * subdirectory was a mountpoint, and ignored if they are not themselves + * exported. This is used by nfsd and mountd to construct the NFSv4 + * pseudofilesystem, which provides access only to paths leading to each + * exported filesystem. + */ +#define NFSEXP_V4ROOT 0x10000 /* All flags that we claim to support. (Note we don't support NOACL.) */ #define NFSEXP_ALLFLAGS 0x7E3F -- cgit v1.2.3 From f13c12c634e124d5d31f912b969d542a016d6105 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 15 Dec 2009 19:43:11 +0100 Subject: perf_events: Fix perf_event_attr layout The miss-alignment of bp_addr created a 32bit hole, causing different structure packings on 32 and 64 bit machines. Fix that by moving __reserve_2 into that hole. Further, remove the useless struct and redundant __bp_reserve muck. 
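The packing difference is easy to reproduce outside the kernel. The stand-alone sketch below (not part of the patch) mimics the problematic shape: an odd number of leading 32-bit words followed by a 64-bit bp_addr. Built with -m32 on x86 the 64-bit member lands at offset 12, built for x86_64 it lands at offset 16 because of a 4-byte hole, which is the same kind of ABI skew the changelog describes and which moving __reserved_2 into the hole removes.

/* Diagnostic sketch only; compile once with -m32 and once with -m64 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct demo {
	uint32_t words[3];	/* stands in for the leading __u32 fields */
	uint64_t bp_addr;	/* offset 12 on i386, 16 on x86_64 */
};

int main(void)
{
	printf("offsetof(bp_addr)=%zu sizeof=%zu\n",
	       offsetof(struct demo, bp_addr), sizeof(struct demo));
	return 0;
}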
Signed-off-by: Peter Zijlstra Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo LKML-Reference: <1260902591.8023.781.camel@laptop> Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 12 +++--------- kernel/perf_event.c | 2 +- 2 files changed, 4 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 64a53f74c9a9..5fcbf7d2712a 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -211,17 +211,11 @@ struct perf_event_attr { __u32 wakeup_watermark; /* bytes before wakeup */ }; - struct { /* Hardware breakpoint info */ - __u64 bp_addr; - __u32 bp_type; - __u32 bp_len; - __u64 __bp_reserved_1; - __u64 __bp_reserved_2; - }; - __u32 __reserved_2; - __u64 __reserved_3; + __u64 bp_addr; + __u32 bp_type; + __u32 bp_len; }; /* diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 8823b0885183..0dd8e5d02c66 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -4564,7 +4564,7 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr, if (attr->type >= PERF_TYPE_MAX) return -EINVAL; - if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3) + if (attr->__reserved_1 || attr->__reserved_2) return -EINVAL; if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) -- cgit v1.2.3 From f2511774863487e61b56a97da07ebf8dd61d7836 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Sun, 13 Dec 2009 20:29:01 +0100 Subject: PM: Add initcall_debug style timing for suspend/resume In order to diagnose overall suspend/resume times, we need basic instrumentation to break down the total time into per device timing, similar to initcall_debug. This patch adds the basic timing instrumentation, needed for a scritps/bootgraph.pl equivalent or humans. The bootgraph.pl program is still a work in progress, but is far enough along to know that this patch is sufficient. Signed-off-by: Arjan van de Ven Signed-off-by: Rafael J. 
Wysocki --- drivers/base/power/main.c | 31 +++++++++++++++++++++++++++++++ include/linux/init.h | 2 ++ 2 files changed, 33 insertions(+) (limited to 'include') diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 8aa2443182d5..30f0ceebd36c 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -25,6 +25,7 @@ #include #include #include +#include #include "../base.h" #include "power.h" @@ -172,6 +173,13 @@ static int pm_op(struct device *dev, pm_message_t state) { int error = 0; + ktime_t calltime, delta, rettime; + + if (initcall_debug) { + pr_info("calling %s+ @ %i\n", + dev_name(dev), task_pid_nr(current)); + calltime = ktime_get(); + } switch (state.event) { #ifdef CONFIG_SUSPEND @@ -219,6 +227,14 @@ static int pm_op(struct device *dev, default: error = -EINVAL; } + + if (initcall_debug) { + rettime = ktime_get(); + delta = ktime_sub(rettime, calltime); + pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev), + error, (unsigned long long)ktime_to_ns(delta) >> 10); + } + return error; } @@ -236,6 +252,13 @@ static int pm_noirq_op(struct device *dev, pm_message_t state) { int error = 0; + ktime_t calltime, delta, rettime; + + if (initcall_debug) { + pr_info("calling %s_i+ @ %i\n", + dev_name(dev), task_pid_nr(current)); + calltime = ktime_get(); + } switch (state.event) { #ifdef CONFIG_SUSPEND @@ -283,6 +306,14 @@ static int pm_noirq_op(struct device *dev, default: error = -EINVAL; } + + if (initcall_debug) { + rettime = ktime_get(); + delta = ktime_sub(rettime, calltime); + printk("initcall %s_i+ returned %d after %Ld usecs\n", dev_name(dev), + error, (unsigned long long)ktime_to_ns(delta) >> 10); + } + return error; } diff --git a/include/linux/init.h b/include/linux/init.h index ff8bde520d03..ab1d31f9352b 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -149,6 +149,8 @@ void prepare_namespace(void); extern void (*late_time_init)(void); +extern int initcall_debug; + #endif #ifndef MODULE -- cgit v1.2.3 From 3d8986c7585457c45fd349b2c542c7c1ecd20843 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Tue, 15 Dec 2009 14:09:03 -0500 Subject: nfsd: enable V4ROOT exports With the v4root option now enforced everywhere it should be, it is safe to advertise support for it to mountd. Signed-off-by: J. Bruce Fields --- include/linux/nfsd/export.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h index 41f0d4e25374..8ae78a61eea4 100644 --- a/include/linux/nfsd/export.h +++ b/include/linux/nfsd/export.h @@ -50,7 +50,7 @@ */ #define NFSEXP_V4ROOT 0x10000 /* All flags that we claim to support. (Note we don't support NOACL.) */ -#define NFSEXP_ALLFLAGS 0x7E3F +#define NFSEXP_ALLFLAGS 0x17E3F /* The flags that may vary depending on security flavor: */ #define NFSEXP_SECINFO_FLAGS (NFSEXP_READONLY | NFSEXP_ROOTSQUASH \ -- cgit v1.2.3 From c7af6b0895229bd080b86afc91302b66f6df0378 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Fri, 4 Dec 2009 18:29:33 -0500 Subject: nfsd: remove unused field rq_reffh This field is never referenced anywhere else. I don't know what it was intended for. Signed-off-by: J. 
Bruce Fields --- include/linux/sunrpc/svc.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'include') diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index d1567d627557..5a3085b9b394 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -273,10 +273,6 @@ struct svc_rqst { struct auth_domain * rq_client; /* RPC peer info */ struct auth_domain * rq_gssclient; /* "gss/"-style peer info */ struct svc_cacherep * rq_cacherep; /* cache info */ - struct knfsd_fh * rq_reffh; /* Referrence filehandle, used to - * determine what device number - * to report (real or virtual) - */ int rq_splice_ok; /* turned off in gss privacy * to prevent encrypting page * cache pages */ -- cgit v1.2.3 From 1557aca7904ed6fadd22cdc3364754070bb3d3c3 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Fri, 4 Dec 2009 19:36:06 -0500 Subject: nfsd: move most of nfsfh.h to fs/nfsd Most of this can be trivially moved to a private header as well. Signed-off-by: J. Bruce Fields --- fs/nfsd/export.c | 1 + fs/nfsd/nfsfh.h | 208 +++++++++++++++++++++++++++++++++++++++++++++ fs/nfsd/state.h | 1 + fs/nfsd/vfs.h | 2 + fs/nfsd/xdr.h | 1 + include/linux/nfsd/nfsfh.h | 199 ------------------------------------------- 6 files changed, 213 insertions(+), 199 deletions(-) create mode 100644 fs/nfsd/nfsfh.h (limited to 'include') diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c index 7d5ba1b0ffcf..b26a3644fbb9 100644 --- a/fs/nfsd/export.c +++ b/fs/nfsd/export.c @@ -22,6 +22,7 @@ #include #include "nfsd.h" +#include "nfsfh.h" #define NFSDDBG_FACILITY NFSDDBG_EXPORT diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h new file mode 100644 index 000000000000..cdfb8c6a4206 --- /dev/null +++ b/fs/nfsd/nfsfh.h @@ -0,0 +1,208 @@ +/* Copyright (C) 1995, 1996, 1997 Olaf Kirch */ + +#ifndef _LINUX_NFSD_FH_INT_H +#define _LINUX_NFSD_FH_INT_H + +#include + +enum nfsd_fsid { + FSID_DEV = 0, + FSID_NUM, + FSID_MAJOR_MINOR, + FSID_ENCODE_DEV, + FSID_UUID4_INUM, + FSID_UUID8, + FSID_UUID16, + FSID_UUID16_INUM, +}; + +enum fsid_source { + FSIDSOURCE_DEV, + FSIDSOURCE_FSID, + FSIDSOURCE_UUID, +}; +extern enum fsid_source fsid_source(struct svc_fh *fhp); + + +/* This might look a little large to "inline" but in all calls except + * one, 'vers' is constant so moste of the function disappears. 
+ */ +static inline void mk_fsid(int vers, u32 *fsidv, dev_t dev, ino_t ino, + u32 fsid, unsigned char *uuid) +{ + u32 *up; + switch(vers) { + case FSID_DEV: + fsidv[0] = htonl((MAJOR(dev)<<16) | + MINOR(dev)); + fsidv[1] = ino_t_to_u32(ino); + break; + case FSID_NUM: + fsidv[0] = fsid; + break; + case FSID_MAJOR_MINOR: + fsidv[0] = htonl(MAJOR(dev)); + fsidv[1] = htonl(MINOR(dev)); + fsidv[2] = ino_t_to_u32(ino); + break; + + case FSID_ENCODE_DEV: + fsidv[0] = new_encode_dev(dev); + fsidv[1] = ino_t_to_u32(ino); + break; + + case FSID_UUID4_INUM: + /* 4 byte fsid and inode number */ + up = (u32*)uuid; + fsidv[0] = ino_t_to_u32(ino); + fsidv[1] = up[0] ^ up[1] ^ up[2] ^ up[3]; + break; + + case FSID_UUID8: + /* 8 byte fsid */ + up = (u32*)uuid; + fsidv[0] = up[0] ^ up[2]; + fsidv[1] = up[1] ^ up[3]; + break; + + case FSID_UUID16: + /* 16 byte fsid - NFSv3+ only */ + memcpy(fsidv, uuid, 16); + break; + + case FSID_UUID16_INUM: + /* 8 byte inode and 16 byte fsid */ + *(u64*)fsidv = (u64)ino; + memcpy(fsidv+2, uuid, 16); + break; + default: BUG(); + } +} + +static inline int key_len(int type) +{ + switch(type) { + case FSID_DEV: return 8; + case FSID_NUM: return 4; + case FSID_MAJOR_MINOR: return 12; + case FSID_ENCODE_DEV: return 8; + case FSID_UUID4_INUM: return 8; + case FSID_UUID8: return 8; + case FSID_UUID16: return 16; + case FSID_UUID16_INUM: return 24; + default: return 0; + } +} + +/* + * Shorthand for dprintk()'s + */ +extern char * SVCFH_fmt(struct svc_fh *fhp); + +/* + * Function prototypes + */ +__be32 fh_verify(struct svc_rqst *, struct svc_fh *, int, int); +__be32 fh_compose(struct svc_fh *, struct svc_export *, struct dentry *, struct svc_fh *); +__be32 fh_update(struct svc_fh *); +void fh_put(struct svc_fh *); + +static __inline__ struct svc_fh * +fh_copy(struct svc_fh *dst, struct svc_fh *src) +{ + WARN_ON(src->fh_dentry || src->fh_locked); + + *dst = *src; + return dst; +} + +static inline void +fh_copy_shallow(struct knfsd_fh *dst, struct knfsd_fh *src) +{ + dst->fh_size = src->fh_size; + memcpy(&dst->fh_base, &src->fh_base, src->fh_size); +} + +static __inline__ struct svc_fh * +fh_init(struct svc_fh *fhp, int maxsize) +{ + memset(fhp, 0, sizeof(*fhp)); + fhp->fh_maxsize = maxsize; + return fhp; +} + +#ifdef CONFIG_NFSD_V3 +/* + * Fill in the pre_op attr for the wcc data + */ +static inline void +fill_pre_wcc(struct svc_fh *fhp) +{ + struct inode *inode; + + inode = fhp->fh_dentry->d_inode; + if (!fhp->fh_pre_saved) { + fhp->fh_pre_mtime = inode->i_mtime; + fhp->fh_pre_ctime = inode->i_ctime; + fhp->fh_pre_size = inode->i_size; + fhp->fh_pre_change = inode->i_version; + fhp->fh_pre_saved = 1; + } +} + +extern void fill_post_wcc(struct svc_fh *); +#else +#define fill_pre_wcc(ignored) +#define fill_post_wcc(notused) +#endif /* CONFIG_NFSD_V3 */ + + +/* + * Lock a file handle/inode + * NOTE: both fh_lock and fh_unlock are done "by hand" in + * vfs.c:nfsd_rename as it needs to grab 2 i_mutex's at once + * so, any changes here should be reflected there. 
+ */ + +static inline void +fh_lock_nested(struct svc_fh *fhp, unsigned int subclass) +{ + struct dentry *dentry = fhp->fh_dentry; + struct inode *inode; + + BUG_ON(!dentry); + + if (fhp->fh_locked) { + printk(KERN_WARNING "fh_lock: %s/%s already locked!\n", + dentry->d_parent->d_name.name, dentry->d_name.name); + return; + } + + inode = dentry->d_inode; + mutex_lock_nested(&inode->i_mutex, subclass); + fill_pre_wcc(fhp); + fhp->fh_locked = 1; +} + +static inline void +fh_lock(struct svc_fh *fhp) +{ + fh_lock_nested(fhp, I_MUTEX_NORMAL); +} + +/* + * Unlock a file handle/inode + */ +static inline void +fh_unlock(struct svc_fh *fhp) +{ + BUG_ON(!fhp->fh_dentry); + + if (fhp->fh_locked) { + fill_post_wcc(fhp); + mutex_unlock(&fhp->fh_dentry->d_inode->i_mutex); + fhp->fh_locked = 0; + } +} + +#endif /* _LINUX_NFSD_FH_INT_H */ diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index 2af75686e0d3..775b8d281d6a 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h @@ -38,6 +38,7 @@ #define _NFSD4_STATE_H #include +#include "nfsfh.h" typedef struct { u32 cl_boot; diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h index f4fa6d351bbd..4b1de0a9ea75 100644 --- a/fs/nfsd/vfs.h +++ b/fs/nfsd/vfs.h @@ -5,6 +5,8 @@ #ifndef LINUX_NFSD_VFS_H #define LINUX_NFSD_VFS_H +#include "nfsfh.h" + /* * Flags for nfsd_permission */ diff --git a/fs/nfsd/xdr.h b/fs/nfsd/xdr.h index 235ee5c3be54..87fe6f64b8f7 100644 --- a/fs/nfsd/xdr.h +++ b/fs/nfsd/xdr.h @@ -9,6 +9,7 @@ #include #include "nfsd.h" +#include "nfsfh.h" struct nfsd_fhandle { struct svc_fh fh; diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h index 49523edbc510..65e333afaee4 100644 --- a/include/linux/nfsd/nfsfh.h +++ b/include/linux/nfsd/nfsfh.h @@ -162,205 +162,6 @@ typedef struct svc_fh { } svc_fh; -enum nfsd_fsid { - FSID_DEV = 0, - FSID_NUM, - FSID_MAJOR_MINOR, - FSID_ENCODE_DEV, - FSID_UUID4_INUM, - FSID_UUID8, - FSID_UUID16, - FSID_UUID16_INUM, -}; - -enum fsid_source { - FSIDSOURCE_DEV, - FSIDSOURCE_FSID, - FSIDSOURCE_UUID, -}; -extern enum fsid_source fsid_source(struct svc_fh *fhp); - - -/* This might look a little large to "inline" but in all calls except - * one, 'vers' is constant so moste of the function disappears. 
- */ -static inline void mk_fsid(int vers, u32 *fsidv, dev_t dev, ino_t ino, - u32 fsid, unsigned char *uuid) -{ - u32 *up; - switch(vers) { - case FSID_DEV: - fsidv[0] = htonl((MAJOR(dev)<<16) | - MINOR(dev)); - fsidv[1] = ino_t_to_u32(ino); - break; - case FSID_NUM: - fsidv[0] = fsid; - break; - case FSID_MAJOR_MINOR: - fsidv[0] = htonl(MAJOR(dev)); - fsidv[1] = htonl(MINOR(dev)); - fsidv[2] = ino_t_to_u32(ino); - break; - - case FSID_ENCODE_DEV: - fsidv[0] = new_encode_dev(dev); - fsidv[1] = ino_t_to_u32(ino); - break; - - case FSID_UUID4_INUM: - /* 4 byte fsid and inode number */ - up = (u32*)uuid; - fsidv[0] = ino_t_to_u32(ino); - fsidv[1] = up[0] ^ up[1] ^ up[2] ^ up[3]; - break; - - case FSID_UUID8: - /* 8 byte fsid */ - up = (u32*)uuid; - fsidv[0] = up[0] ^ up[2]; - fsidv[1] = up[1] ^ up[3]; - break; - - case FSID_UUID16: - /* 16 byte fsid - NFSv3+ only */ - memcpy(fsidv, uuid, 16); - break; - - case FSID_UUID16_INUM: - /* 8 byte inode and 16 byte fsid */ - *(u64*)fsidv = (u64)ino; - memcpy(fsidv+2, uuid, 16); - break; - default: BUG(); - } -} - -static inline int key_len(int type) -{ - switch(type) { - case FSID_DEV: return 8; - case FSID_NUM: return 4; - case FSID_MAJOR_MINOR: return 12; - case FSID_ENCODE_DEV: return 8; - case FSID_UUID4_INUM: return 8; - case FSID_UUID8: return 8; - case FSID_UUID16: return 16; - case FSID_UUID16_INUM: return 24; - default: return 0; - } -} - -/* - * Shorthand for dprintk()'s - */ -extern char * SVCFH_fmt(struct svc_fh *fhp); - -/* - * Function prototypes - */ -__be32 fh_verify(struct svc_rqst *, struct svc_fh *, int, int); -__be32 fh_compose(struct svc_fh *, struct svc_export *, struct dentry *, struct svc_fh *); -__be32 fh_update(struct svc_fh *); -void fh_put(struct svc_fh *); - -static __inline__ struct svc_fh * -fh_copy(struct svc_fh *dst, struct svc_fh *src) -{ - WARN_ON(src->fh_dentry || src->fh_locked); - - *dst = *src; - return dst; -} - -static inline void -fh_copy_shallow(struct knfsd_fh *dst, struct knfsd_fh *src) -{ - dst->fh_size = src->fh_size; - memcpy(&dst->fh_base, &src->fh_base, src->fh_size); -} - -static __inline__ struct svc_fh * -fh_init(struct svc_fh *fhp, int maxsize) -{ - memset(fhp, 0, sizeof(*fhp)); - fhp->fh_maxsize = maxsize; - return fhp; -} - -#ifdef CONFIG_NFSD_V3 -/* - * Fill in the pre_op attr for the wcc data - */ -static inline void -fill_pre_wcc(struct svc_fh *fhp) -{ - struct inode *inode; - - inode = fhp->fh_dentry->d_inode; - if (!fhp->fh_pre_saved) { - fhp->fh_pre_mtime = inode->i_mtime; - fhp->fh_pre_ctime = inode->i_ctime; - fhp->fh_pre_size = inode->i_size; - fhp->fh_pre_change = inode->i_version; - fhp->fh_pre_saved = 1; - } -} - -extern void fill_post_wcc(struct svc_fh *); -#else -#define fill_pre_wcc(ignored) -#define fill_post_wcc(notused) -#endif /* CONFIG_NFSD_V3 */ - - -/* - * Lock a file handle/inode - * NOTE: both fh_lock and fh_unlock are done "by hand" in - * vfs.c:nfsd_rename as it needs to grab 2 i_mutex's at once - * so, any changes here should be reflected there. 
- */ - -static inline void -fh_lock_nested(struct svc_fh *fhp, unsigned int subclass) -{ - struct dentry *dentry = fhp->fh_dentry; - struct inode *inode; - - BUG_ON(!dentry); - - if (fhp->fh_locked) { - printk(KERN_WARNING "fh_lock: %s/%s already locked!\n", - dentry->d_parent->d_name.name, dentry->d_name.name); - return; - } - - inode = dentry->d_inode; - mutex_lock_nested(&inode->i_mutex, subclass); - fill_pre_wcc(fhp); - fhp->fh_locked = 1; -} - -static inline void -fh_lock(struct svc_fh *fhp) -{ - fh_lock_nested(fhp, I_MUTEX_NORMAL); -} - -/* - * Unlock a file handle/inode - */ -static inline void -fh_unlock(struct svc_fh *fhp) -{ - BUG_ON(!fhp->fh_dentry); - - if (fhp->fh_locked) { - fill_post_wcc(fhp); - mutex_unlock(&fhp->fh_dentry->d_inode->i_mutex); - fhp->fh_locked = 0; - } -} #endif /* __KERNEL__ */ -- cgit v1.2.3 From c1e7c3ae59b065bf7ff24a05cb609b2f9e314db6 Mon Sep 17 00:00:00 2001 From: Phillip Lougher Date: Mon, 14 Dec 2009 21:45:19 +0000 Subject: bzip2/lzma/gzip: pre-boot malloc doesn't return NULL on failure The trivial malloc implementation used in the pre-boot environment by the decompressors returns a bad pointer on failure (falling through after calling error). This is doubly wrong - the callers expect malloc to return NULL on failure, second the error function is intended to be used by the decompressors to propagate errors to *their* callers. The decompressors have no access to any state set by the error function. Signed-off-by: Phillip Lougher LKML-Reference: <4b26b1ef.hIInb2AYPMtImAJO%phillip@lougher.demon.co.uk> Signed-off-by: H. Peter Anvin --- include/linux/decompress/mm.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h index 12ff8c3f1d05..5032b9a31ae7 100644 --- a/include/linux/decompress/mm.h +++ b/include/linux/decompress/mm.h @@ -25,7 +25,7 @@ static void *malloc(int size) void *p; if (size < 0) - error("Malloc error"); + return NULL; if (!malloc_ptr) malloc_ptr = free_mem_ptr; @@ -35,7 +35,7 @@ static void *malloc(int size) malloc_ptr += size; if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr) - error("Out of memory"); + return NULL; malloc_count++; return p; -- cgit v1.2.3 From 3a58176e4fa47d8232e04131b023f3f2ecd7084b Mon Sep 17 00:00:00 2001 From: Bob Moore Date: Fri, 11 Dec 2009 15:23:22 +0800 Subject: ACPICA: Remove messages if predefined repair(s) are successful Repair mechanism was considered too wordy. Now, messages are only unconditionally emitted if the return object cannot be repaired. Existing messages for successful repairs were converted to ACPI_DEBUG_PRINT messages for now. ACPICA BZ 827. 
http://www.acpica.org/bugzilla/show_bug.cgi?id=827 Signed-off-by: Bob Moore Signed-off-by: Lin Ming Signed-off-by: Len Brown --- drivers/acpi/acpica/nspredef.c | 10 +++++----- drivers/acpi/acpica/nsrepair.c | 32 ++++++++++++++++++-------------- drivers/acpi/acpica/nsrepair2.c | 21 +++++++++++++-------- include/acpi/acoutput.h | 8 +++++--- 4 files changed, 41 insertions(+), 30 deletions(-) (limited to 'include') diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c index c6297f57fea8..e352467413f5 100644 --- a/drivers/acpi/acpica/nspredef.c +++ b/drivers/acpi/acpica/nspredef.c @@ -462,11 +462,11 @@ acpi_ns_check_package(struct acpi_predefined_data *data, if (count < expected_count) { goto package_too_small; } else if (count > expected_count) { - ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, - data->node_flags, - "Return Package is larger than needed - " - "found %u, expected %u", count, - expected_count)); + ACPI_DEBUG_PRINT((ACPI_DB_REPAIR, + "%s: Return Package is larger than needed - " + "found %u, expected %u\n", + data->pathname, count, + expected_count)); } /* Validate all elements of the returned package */ diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c index 062a016d4554..907f60c9c9e5 100644 --- a/drivers/acpi/acpica/nsrepair.c +++ b/drivers/acpi/acpica/nsrepair.c @@ -118,6 +118,8 @@ acpi_ns_repair_object(struct acpi_predefined_data *data, union acpi_operand_object *new_object; acpi_status status; + ACPI_FUNCTION_NAME(ns_repair_object); + /* * At this point, we know that the type of the returned object was not * one of the expected types for this predefined name. Attempt to @@ -171,19 +173,18 @@ acpi_ns_repair_object(struct acpi_predefined_data *data, return_object->common.reference_count--; } - ACPI_INFO_PREDEFINED((AE_INFO, data->pathname, data->node_flags, - "Converted %s to expected %s at index %u", - acpi_ut_get_object_type_name - (return_object), - acpi_ut_get_object_type_name(new_object), - package_index)); + ACPI_DEBUG_PRINT((ACPI_DB_REPAIR, + "%s: Converted %s to expected %s at index %u\n", + data->pathname, + acpi_ut_get_object_type_name(return_object), + acpi_ut_get_object_type_name(new_object), + package_index)); } else { - ACPI_INFO_PREDEFINED((AE_INFO, data->pathname, data->node_flags, - "Converted %s to expected %s", - acpi_ut_get_object_type_name - (return_object), - acpi_ut_get_object_type_name - (new_object))); + ACPI_DEBUG_PRINT((ACPI_DB_REPAIR, + "%s: Converted %s to expected %s\n", + data->pathname, + acpi_ut_get_object_type_name(return_object), + acpi_ut_get_object_type_name(new_object))); } /* Delete old object, install the new return object */ @@ -528,6 +529,8 @@ acpi_ns_repair_package_list(struct acpi_predefined_data *data, { union acpi_operand_object *pkg_obj_desc; + ACPI_FUNCTION_NAME(ns_repair_package_list); + /* * Create the new outer package and populate it. The new package will * have a single element, the lone subpackage. 
@@ -544,8 +547,9 @@ acpi_ns_repair_package_list(struct acpi_predefined_data *data, *obj_desc_ptr = pkg_obj_desc; data->flags |= ACPI_OBJECT_REPAIRED; - ACPI_INFO_PREDEFINED((AE_INFO, data->pathname, data->node_flags, - "Repaired Incorrectly formed Package")); + ACPI_DEBUG_PRINT((ACPI_DB_REPAIR, + "%s: Repaired incorrectly formed Package\n", + data->pathname)); return (AE_OK); } diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c index 846df7047a81..199333880c8d 100644 --- a/drivers/acpi/acpica/nsrepair2.c +++ b/drivers/acpi/acpica/nsrepair2.c @@ -255,6 +255,8 @@ acpi_ns_repair_FDE(struct acpi_predefined_data *data, u32 *dword_buffer; u32 i; + ACPI_FUNCTION_NAME(ns_repair_FDE); + switch (return_object->common.type) { case ACPI_TYPE_BUFFER: @@ -296,8 +298,9 @@ acpi_ns_repair_FDE(struct acpi_predefined_data *data, byte_buffer++; } - ACPI_INFO_PREDEFINED((AE_INFO, data->pathname, data->node_flags, - "Expanded Byte Buffer to expected DWord Buffer")); + ACPI_DEBUG_PRINT((ACPI_DB_REPAIR, + "%s Expanded Byte Buffer to expected DWord Buffer\n", + data->pathname)); break; default: @@ -445,6 +448,8 @@ acpi_ns_check_sorted_list(struct acpi_predefined_data *data, u32 previous_value; acpi_status status; + ACPI_FUNCTION_NAME(ns_check_sorted_list); + /* The top-level object must be a package */ if (return_object->common.type != ACPI_TYPE_PACKAGE) { @@ -460,8 +465,9 @@ acpi_ns_check_sorted_list(struct acpi_predefined_data *data, */ status = acpi_ns_remove_null_elements(return_object); if (status == AE_NULL_ENTRY) { - ACPI_INFO_PREDEFINED((AE_INFO, data->pathname, data->node_flags, - "NULL elements removed from package")); + ACPI_DEBUG_PRINT((ACPI_DB_REPAIR, + "%s: NULL elements removed from package\n", + data->pathname)); /* Exit if package is now zero length */ @@ -522,10 +528,9 @@ acpi_ns_check_sorted_list(struct acpi_predefined_data *data, data->flags |= ACPI_OBJECT_REPAIRED; - ACPI_INFO_PREDEFINED((AE_INFO, data->pathname, - data->node_flags, - "Repaired unsorted list - now sorted by %s", - sort_key_name)); + ACPI_DEBUG_PRINT((ACPI_DB_REPAIR, + "%s: Repaired unsorted list - now sorted by %s\n", + data->pathname, sort_key_name)); return (AE_OK); } diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h index 5c823d5ab783..d814da4b5365 100644 --- a/include/acpi/acoutput.h +++ b/include/acpi/acoutput.h @@ -85,7 +85,8 @@ #define ACPI_LV_INIT 0x00000001 #define ACPI_LV_DEBUG_OBJECT 0x00000002 #define ACPI_LV_INFO 0x00000004 -#define ACPI_LV_ALL_EXCEPTIONS 0x00000007 +#define ACPI_LV_REPAIR 0x00000008 +#define ACPI_LV_ALL_EXCEPTIONS 0x0000000F /* Trace verbosity level 1 [Standard Trace Level] */ @@ -143,6 +144,7 @@ #define ACPI_DB_INIT ACPI_DEBUG_LEVEL (ACPI_LV_INIT) #define ACPI_DB_DEBUG_OBJECT ACPI_DEBUG_LEVEL (ACPI_LV_DEBUG_OBJECT) #define ACPI_DB_INFO ACPI_DEBUG_LEVEL (ACPI_LV_INFO) +#define ACPI_DB_REPAIR ACPI_DEBUG_LEVEL (ACPI_LV_REPAIR) #define ACPI_DB_ALL_EXCEPTIONS ACPI_DEBUG_LEVEL (ACPI_LV_ALL_EXCEPTIONS) /* Trace level -- also used in the global "DebugLevel" */ @@ -174,8 +176,8 @@ /* Defaults for debug_level, debug and normal */ -#define ACPI_DEBUG_DEFAULT (ACPI_LV_INFO) -#define ACPI_NORMAL_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT) +#define ACPI_DEBUG_DEFAULT (ACPI_LV_INFO | ACPI_LV_REPAIR) +#define ACPI_NORMAL_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_REPAIR) #define ACPI_DEBUG_ALL (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL) #if defined (ACPI_DEBUG_OUTPUT) || !defined (ACPI_NO_ERROR_MESSAGES) -- cgit v1.2.3 From 
88e5071525ad6814be3a8a2792ce9e81a0cca22a Mon Sep 17 00:00:00 2001 From: Bob Moore Date: Tue, 15 Dec 2009 12:45:15 +0800 Subject: ACPICA: Update version to 20091214 Version 20091214. Signed-off-by: Bob Moore Signed-off-by: Lin Ming Signed-off-by: Len Brown --- include/acpi/acpixf.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 5e1ad3cd1bbd..86e9735a96bd 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -47,7 +47,7 @@ /* Current ACPICA subsystem version in YYYYMMDD format */ -#define ACPI_CA_VERSION 0x20091112 +#define ACPI_CA_VERSION 0x20091214 #include "actypes.h" #include "actbl.h" -- cgit v1.2.3 From 9065ce4500085b9ca66b19d3c4d21a73cb410173 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Tue, 17 Nov 2009 17:05:19 -0700 Subject: PNP: add interface to retrieve ACPI device from a PNPACPI device Add pnp_acpi_device(pnp_dev), which takes a PNP device and returns the associated ACPI device (or NULL, if the device is not a PNPACPI device). This allows us to write a PNP driver that can manage both traditional PNPBIOS and ACPI devices, treating ACPI-only functionality as an optional extension. Signed-off-by: Bjorn Helgaas Signed-off-by: Len Brown --- drivers/pnp/pnpacpi/core.c | 3 ++- include/linux/pnp.h | 13 +++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c index b2348fc2378e..5314bf630bc4 100644 --- a/drivers/pnp/pnpacpi/core.c +++ b/drivers/pnp/pnpacpi/core.c @@ -144,7 +144,7 @@ static int pnpacpi_resume(struct pnp_dev *dev) } #endif -static struct pnp_protocol pnpacpi_protocol = { +struct pnp_protocol pnpacpi_protocol = { .name = "Plug and Play ACPI", .get = pnpacpi_get_resources, .set = pnpacpi_set_resources, @@ -154,6 +154,7 @@ static struct pnp_protocol pnpacpi_protocol = { .resume = pnpacpi_resume, #endif }; +EXPORT_SYMBOL(pnpacpi_protocol); static int __init pnpacpi_add_device(struct acpi_device *device) { diff --git a/include/linux/pnp.h b/include/linux/pnp.h index fddfafaed024..7c4193eb0072 100644 --- a/include/linux/pnp.h +++ b/include/linux/pnp.h @@ -334,6 +334,19 @@ extern struct pnp_protocol pnpbios_protocol; #define pnp_device_is_pnpbios(dev) 0 #endif +#ifdef CONFIG_PNPACPI +extern struct pnp_protocol pnpacpi_protocol; + +static inline struct acpi_device *pnp_acpi_device(struct pnp_dev *dev) +{ + if (dev->protocol == &pnpacpi_protocol) + return dev->data; + return NULL; +} +#else +#define pnp_acpi_device(dev) 0 +#endif + /* status */ #define PNP_READY 0x0000 #define PNP_ATTACHED 0x0001 -- cgit v1.2.3 From d30a3fe89635324397c9cf5802f18f11a49ace17 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Sat, 5 Dec 2009 01:19:47 -0300 Subject: V4L/DVB (13543): ir-common: Associate a table with a given protocol type While here, convert the protocol types into an enum and define 0 as unknown. Signed-off-by: Mauro Carvalho Chehab --- include/media/ir-common.h | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/media/ir-common.h b/include/media/ir-common.h index e41a99ee353e..28d1b9c65f2e 100644 --- a/include/media/ir-common.h +++ b/include/media/ir-common.h @@ -32,9 +32,13 @@ extern int media_ir_debug; /* media_ir_debug level (0,1,2) */ #define IR_dprintk(level, fmt, arg...) 
if (media_ir_debug >= level) \ printk(KERN_DEBUG "%s: " fmt , __func__, ## arg) -#define IR_TYPE_RC5 1 -#define IR_TYPE_PD 2 /* Pulse distance encoded IR */ -#define IR_TYPE_OTHER 99 +enum ir_type { + IR_TYPE_UNKNOWN = 0, + IR_TYPE_RC5 = 1, + IR_TYPE_PD = 2, /* Pulse distance encoded IR */ + IR_TYPE_NEC = 3, + IR_TYPE_OTHER = 99, +}; struct ir_scancode { u16 scancode; @@ -44,6 +48,7 @@ struct ir_scancode { struct ir_scancode_table { struct ir_scancode *scan; int size; + enum ir_type ir_type; spinlock_t lock; }; -- cgit v1.2.3 From cda4303f555316930a219cd7c03a1925526145f0 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Sat, 5 Dec 2009 09:34:21 -0300 Subject: V4L/DVB (13546): ir-keymaps: Add table for Terratec Cinergy XS FM This IR uses NEC protocol, with address=0x14. This keymap is similar to the existing Terratec Cinergy XS, except that: - it contains the full address/command code; - the Music button were mapped as KEY_RADIO; - some keycodes from the previous entry were wrong, IMHO. Signed-off-by: Mauro Carvalho Chehab --- drivers/media/common/ir-keymaps.c | 73 +++++++++++++++++++++++++++++++++++++++ include/media/ir-common.h | 1 + 2 files changed, 74 insertions(+) (limited to 'include') diff --git a/drivers/media/common/ir-keymaps.c b/drivers/media/common/ir-keymaps.c index abb5c73fbd12..9bbe6b1e9871 100644 --- a/drivers/media/common/ir-keymaps.c +++ b/drivers/media/common/ir-keymaps.c @@ -3320,3 +3320,76 @@ struct ir_scancode_table ir_codes_rc5_hauppauge_new_table = { .ir_type = IR_TYPE_RC5, }; EXPORT_SYMBOL_GPL(ir_codes_rc5_hauppauge_new_table); + +/* Terratec Cinergy Hybrid T USB XS FM + Mauro Carvalho Chehab + */ +static struct ir_scancode ir_codes_nec_terratec_cinergy_xs[] = { + { 0x1441, KEY_HOME}, + { 0x1401, KEY_POWER2}, + + { 0x1442, KEY_MENU}, /* DVD menu */ + { 0x1443, KEY_SUBTITLE}, + { 0x1444, KEY_TEXT}, /* Teletext */ + { 0x1445, KEY_DELETE}, + + { 0x1402, KEY_1}, + { 0x1403, KEY_2}, + { 0x1404, KEY_3}, + { 0x1405, KEY_4}, + { 0x1406, KEY_5}, + { 0x1407, KEY_6}, + { 0x1408, KEY_7}, + { 0x1409, KEY_8}, + { 0x140a, KEY_9}, + { 0x140c, KEY_0}, + + { 0x140b, KEY_TUNER}, /* AV */ + { 0x140d, KEY_MODE}, /* A.B */ + + { 0x1446, KEY_TV}, + { 0x1447, KEY_DVD}, + { 0x1449, KEY_VIDEO}, + { 0x144a, KEY_RADIO}, /* Music */ + { 0x144b, KEY_CAMERA}, /* PIC */ + + { 0x1410, KEY_UP}, + { 0x1411, KEY_LEFT}, + { 0x1412, KEY_OK}, + { 0x1413, KEY_RIGHT}, + { 0x1414, KEY_DOWN}, + + { 0x140f, KEY_EPG}, + { 0x1416, KEY_INFO}, + { 0x144d, KEY_BACKSPACE}, + + { 0x141c, KEY_VOLUMEUP}, + { 0x141e, KEY_VOLUMEDOWN}, + + { 0x144c, KEY_PLAY}, + { 0x141d, KEY_MUTE}, + + { 0x141b, KEY_CHANNELUP}, + { 0x141f, KEY_CHANNELDOWN}, + + { 0x1417, KEY_RED}, + { 0x1418, KEY_GREEN}, + { 0x1419, KEY_YELLOW}, + { 0x141a, KEY_BLUE}, + + { 0x1458, KEY_RECORD}, + { 0x1448, KEY_STOP}, + { 0x1440, KEY_PAUSE}, + + { 0x1454, KEY_LAST}, + { 0x144e, KEY_REWIND}, + { 0x144f, KEY_FASTFORWARD}, + { 0x145c, KEY_NEXT}, +}; +struct ir_scancode_table ir_codes_nec_terratec_cinergy_xs_table = { + .scan = ir_codes_nec_terratec_cinergy_xs, + .size = ARRAY_SIZE(ir_codes_nec_terratec_cinergy_xs), + .ir_type = IR_TYPE_NEC, +}; +EXPORT_SYMBOL_GPL(ir_codes_nec_terratec_cinergy_xs_table); + diff --git a/include/media/ir-common.h b/include/media/ir-common.h index 28d1b9c65f2e..45bf5cf59458 100644 --- a/include/media/ir-common.h +++ b/include/media/ir-common.h @@ -200,4 +200,5 @@ extern struct ir_scancode_table ir_codes_evga_indtube_table; extern struct ir_scancode_table ir_codes_terratec_cinergy_xs_table; extern struct 
ir_scancode_table ir_codes_videomate_s350_table; extern struct ir_scancode_table ir_codes_gadmei_rm008z_table; +extern struct ir_scancode_table ir_codes_nec_terratec_cinergy_xs_table; #endif -- cgit v1.2.3 From eac8ea536aded07004bde917f05a2329902c64b0 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 27 Nov 2009 13:56:50 -0300 Subject: V4L/DVB (13549): v4l: Add video_device_node_name function Many drivers access the device number (video_device::v4l2_devnode::num) in order to print the video device node name. Add and use a helper function to retrieve the video_device node name. Signed-off-by: Laurent Pinchart Signed-off-by: Mauro Carvalho Chehab --- Documentation/video4linux/v4l2-framework.txt | 16 ++++++++++++++-- drivers/media/video/v4l2-dev.c | 4 ++-- include/media/v4l2-dev.h | 5 +++++ 3 files changed, 21 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/Documentation/video4linux/v4l2-framework.txt b/Documentation/video4linux/v4l2-framework.txt index b806edaf3e75..74d677c8b036 100644 --- a/Documentation/video4linux/v4l2-framework.txt +++ b/Documentation/video4linux/v4l2-framework.txt @@ -561,6 +561,8 @@ video_device helper functions There are a few useful helper functions: +- file/video_device private data + You can set/get driver private data in the video_device struct using: void *video_get_drvdata(struct video_device *vdev); @@ -575,8 +577,7 @@ struct video_device *video_devdata(struct file *file); returns the video_device belonging to the file struct. -The final helper function combines video_get_drvdata with -video_devdata: +The video_drvdata function combines video_get_drvdata with video_devdata: void *video_drvdata(struct file *file); @@ -584,6 +585,17 @@ You can go from a video_device struct to the v4l2_device struct using: struct v4l2_device *v4l2_dev = vdev->v4l2_dev; +- Device node name + +The video_device node kernel name can be retrieved using + +const char *video_device_node_name(struct video_device *vdev); + +The name is used as a hint by userspace tools such as udev. The function +should be used where possible instead of accessing the video_device::num and +video_device::minor fields. + + video buffer helper functions ----------------------------- diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c index 500cbe9891ac..aa3e0f9aa3bf 100644 --- a/drivers/media/video/v4l2-dev.c +++ b/drivers/media/video/v4l2-dev.c @@ -551,8 +551,8 @@ static int __video_register_device(struct video_device *vdev, int type, int nr, vdev->dev.release = v4l2_device_release; if (nr != -1 && nr != vdev->num && warn_if_nr_in_use) - printk(KERN_WARNING "%s: requested %s%d, got %s%d\n", - __func__, name_base, nr, name_base, vdev->num); + printk(KERN_WARNING "%s: requested %s%d, got %s\n", __func__, + name_base, nr, video_device_node_name(vdev)); /* Part 5: Activate this minor. The char device can now be used. 
*/ mutex_lock(&videodev_lock); diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h index 73c9867d744c..9135f57351f6 100644 --- a/include/media/v4l2-dev.h +++ b/include/media/v4l2-dev.h @@ -141,6 +141,11 @@ static inline void *video_drvdata(struct file *file) return video_get_drvdata(video_devdata(file)); } +static inline const char *video_device_node_name(struct video_device *vdev) +{ + return dev_name(&vdev->dev); +} + static inline int video_is_unregistered(struct video_device *vdev) { return test_bit(V4L2_FL_UNREGISTERED, &vdev->flags); -- cgit v1.2.3 From 957b4aa9f786cf04585a690a2e4c3dc867ce80e9 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 27 Nov 2009 13:57:22 -0300 Subject: V4L/DVB (13552): v4l: Replace video_is_unregistered with video_is_registered Replace the video_is_unregistered function by a video_is_registered function. The V4L2_FL_UNREGISTERED flag is replaced by a V4L2_FL_REGISTERED flag. This change makes the video_is_registered function return coherent results when called on an initialize but not yet registered video_device instance. The function can now be used instead of checking video_device::minor. Signed-off-by: Laurent Pinchart Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/hdpvr/hdpvr-video.c | 2 +- drivers/media/video/v4l2-dev.c | 18 +++++++++--------- include/media/v4l2-dev.h | 18 ++++++++---------- 3 files changed, 18 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/drivers/media/video/hdpvr/hdpvr-video.c b/drivers/media/video/hdpvr/hdpvr-video.c index b5439cabb381..fdd782039e9d 100644 --- a/drivers/media/video/hdpvr/hdpvr-video.c +++ b/drivers/media/video/hdpvr/hdpvr-video.c @@ -523,7 +523,7 @@ static unsigned int hdpvr_poll(struct file *filp, poll_table *wait) mutex_lock(&dev->io_mutex); - if (video_is_unregistered(dev->video_dev)) { + if (!video_is_registered(dev->video_dev)) { mutex_unlock(&dev->io_mutex); return -EIO; } diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c index aa3e0f9aa3bf..709069916068 100644 --- a/drivers/media/video/v4l2-dev.c +++ b/drivers/media/video/v4l2-dev.c @@ -189,7 +189,7 @@ static ssize_t v4l2_read(struct file *filp, char __user *buf, if (!vdev->fops->read) return -EINVAL; - if (video_is_unregistered(vdev)) + if (!video_is_registered(vdev)) return -EIO; return vdev->fops->read(filp, buf, sz, off); } @@ -201,7 +201,7 @@ static ssize_t v4l2_write(struct file *filp, const char __user *buf, if (!vdev->fops->write) return -EINVAL; - if (video_is_unregistered(vdev)) + if (!video_is_registered(vdev)) return -EIO; return vdev->fops->write(filp, buf, sz, off); } @@ -210,7 +210,7 @@ static unsigned int v4l2_poll(struct file *filp, struct poll_table_struct *poll) { struct video_device *vdev = video_devdata(filp); - if (!vdev->fops->poll || video_is_unregistered(vdev)) + if (!vdev->fops->poll || !video_is_registered(vdev)) return DEFAULT_POLLMASK; return vdev->fops->poll(filp, poll); } @@ -250,7 +250,7 @@ static unsigned long v4l2_get_unmapped_area(struct file *filp, if (!vdev->fops->get_unmapped_area) return -ENOSYS; - if (video_is_unregistered(vdev)) + if (!video_is_registered(vdev)) return -ENODEV; return vdev->fops->get_unmapped_area(filp, addr, len, pgoff, flags); } @@ -260,8 +260,7 @@ static int v4l2_mmap(struct file *filp, struct vm_area_struct *vm) { struct video_device *vdev = video_devdata(filp); - if (!vdev->fops->mmap || - video_is_unregistered(vdev)) + if (!vdev->fops->mmap || !video_is_registered(vdev)) return -ENODEV; return 
vdev->fops->mmap(filp, vm); } @@ -277,7 +276,7 @@ static int v4l2_open(struct inode *inode, struct file *filp) vdev = video_devdata(filp); /* return ENODEV if the video device has been removed already or if it is not registered anymore. */ - if (vdev == NULL || video_is_unregistered(vdev)) { + if (vdev == NULL || !video_is_registered(vdev)) { mutex_unlock(&videodev_lock); return -ENODEV; } @@ -555,6 +554,7 @@ static int __video_register_device(struct video_device *vdev, int type, int nr, name_base, nr, video_device_node_name(vdev)); /* Part 5: Activate this minor. The char device can now be used. */ + set_bit(V4L2_FL_REGISTERED, &vdev->flags); mutex_lock(&videodev_lock); video_device[vdev->minor] = vdev; mutex_unlock(&videodev_lock); @@ -593,11 +593,11 @@ EXPORT_SYMBOL(video_register_device_no_warn); void video_unregister_device(struct video_device *vdev) { /* Check if vdev was ever registered at all */ - if (!vdev || vdev->minor < 0) + if (!vdev || !video_is_registered(vdev)) return; mutex_lock(&videodev_lock); - set_bit(V4L2_FL_UNREGISTERED, &vdev->flags); + clear_bit(V4L2_FL_REGISTERED, &vdev->flags); mutex_unlock(&videodev_lock); device_unregister(&vdev->dev); } diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h index 9135f57351f6..2dee93892ea2 100644 --- a/include/media/v4l2-dev.h +++ b/include/media/v4l2-dev.h @@ -28,10 +28,10 @@ struct v4l2_ioctl_callbacks; struct video_device; struct v4l2_device; -/* Flag to mark the video_device struct as unregistered. - Drivers can set this flag if they want to block all future - device access. It is set by video_unregister_device. */ -#define V4L2_FL_UNREGISTERED (0) +/* Flag to mark the video_device struct as registered. + Drivers can clear this flag if they want to block all future + device access. It is cleared by video_unregister_device. */ +#define V4L2_FL_REGISTERED (0) struct v4l2_file_operations { struct module *owner; @@ -96,9 +96,7 @@ struct video_device /* Register video devices. Note that if video_register_device fails, the release() callback of the video_device structure is *not* called, so the caller is responsible for freeing any data. Usually that means that - you call video_device_release() on failure. - - Also note that vdev->minor is set to -1 if the registration failed. */ + you call video_device_release() on failure. */ int __must_check video_register_device(struct video_device *vdev, int type, int nr); /* Same as video_register_device, but no warning is issued if the desired @@ -106,7 +104,7 @@ int __must_check video_register_device(struct video_device *vdev, int type, int int __must_check video_register_device_no_warn(struct video_device *vdev, int type, int nr); /* Unregister video devices. Will do nothing if vdev == NULL or - vdev->minor < 0. */ + video_is_registered() returns false. 
*/ void video_unregister_device(struct video_device *vdev); /* helper functions to alloc/release struct video_device, the @@ -146,9 +144,9 @@ static inline const char *video_device_node_name(struct video_device *vdev) return dev_name(&vdev->dev); } -static inline int video_is_unregistered(struct video_device *vdev) +static inline int video_is_registered(struct video_device *vdev) { - return test_bit(V4L2_FL_UNREGISTERED, &vdev->flags); + return test_bit(V4L2_FL_REGISTERED, &vdev->flags); } #endif /* _V4L2_DEV_H */ -- cgit v1.2.3 From 327ae59757f2e56fd3fc2b11acbd0a7c4070f4e8 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 27 Nov 2009 13:57:55 -0300 Subject: V4L/DVB (13557): v4l: Remove unneeded video_device::minor usage in drivers The video_device::minor field is used where it shouldn't, either to - test for error conditions that can't happen anymore with the current v4l-dvb core, - store the value in a driver private field that isn't used anymore, - check the video device type where video_device::vfl_type should be used, or - create the name of a kernel thread that should get a stable name. Remove or fix those use cases. Signed-off-by: Laurent Pinchart Signed-off-by: Mauro Carvalho Chehab --- drivers/media/common/saa7146_fops.c | 19 ------------------- drivers/media/radio/radio-tea5764.c | 4 ---- drivers/media/video/bt8xx/bttv-driver.c | 15 ++++----------- drivers/media/video/gspca/sn9c20x.c | 5 +++-- drivers/media/video/omap24xxcam.c | 3 +-- include/media/saa7146_vv.h | 4 ---- 6 files changed, 8 insertions(+), 42 deletions(-) (limited to 'include') diff --git a/drivers/media/common/saa7146_fops.c b/drivers/media/common/saa7146_fops.c index 3c11cff00475..7364b9642d00 100644 --- a/drivers/media/common/saa7146_fops.c +++ b/drivers/media/common/saa7146_fops.c @@ -1,7 +1,5 @@ #include -#define BOARD_CAN_DO_VBI(dev) (dev->revision != 0 && dev->vv_data->vbi_minor != -1) - /****************************************************************************/ /* resource management functions, shamelessly stolen from saa7134 driver */ @@ -455,9 +453,6 @@ int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv) configuration data) */ dev->ext_vv_data = ext_vv; - vv->video_minor = -1; - vv->vbi_minor = -1; - vv->d_clipping.cpu_addr = pci_alloc_consistent(dev->pci, SAA7146_CLIPPING_MEM, &vv->d_clipping.dma_handle); if( NULL == vv->d_clipping.cpu_addr ) { ERR(("out of memory. 
aborting.\n")); @@ -496,7 +491,6 @@ EXPORT_SYMBOL_GPL(saa7146_vv_release); int saa7146_register_device(struct video_device **vid, struct saa7146_dev* dev, char *name, int type) { - struct saa7146_vv *vv = dev->vv_data; struct video_device *vfd; int err; int i; @@ -524,11 +518,6 @@ int saa7146_register_device(struct video_device **vid, struct saa7146_dev* dev, return err; } - if (VFL_TYPE_GRABBER == type) - vv->video_minor = vfd->minor; - else - vv->vbi_minor = vfd->minor; - INFO(("%s: registered device %s [v4l2]\n", dev->name, video_device_node_name(vfd))); @@ -539,16 +528,8 @@ EXPORT_SYMBOL_GPL(saa7146_register_device); int saa7146_unregister_device(struct video_device **vid, struct saa7146_dev* dev) { - struct saa7146_vv *vv = dev->vv_data; - DEB_EE(("dev:%p\n",dev)); - if ((*vid)->vfl_type == VFL_TYPE_GRABBER) { - vv->video_minor = -1; - } else { - vv->vbi_minor = -1; - } - video_unregister_device(*vid); *vid = NULL; diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c index 730ffd9cdd3e..1d357b79b2fd 100644 --- a/drivers/media/radio/radio-tea5764.c +++ b/drivers/media/radio/radio-tea5764.c @@ -460,12 +460,8 @@ static int vidioc_s_audio(struct file *file, void *priv, static int tea5764_open(struct file *file) { /* Currently we support only one device */ - int minor = video_devdata(file)->minor; struct tea5764_device *radio = video_drvdata(file); - if (radio->videodev->minor != minor) - return -ENODEV; - mutex_lock(&radio->mutex); /* Only exclusive access */ if (radio->users) { diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c index 410260a1c364..3182a406bdd1 100644 --- a/drivers/media/video/bt8xx/bttv-driver.c +++ b/drivers/media/video/bt8xx/bttv-driver.c @@ -3206,7 +3206,6 @@ err: static int bttv_open(struct file *file) { - int minor = video_devdata(file)->minor; struct video_device *vdev = video_devdata(file); struct bttv *btv = video_drvdata(file); struct bttv_fh *fh; @@ -3214,17 +3213,17 @@ static int bttv_open(struct file *file) dprintk(KERN_DEBUG "bttv: open dev=%s\n", video_device_node_name(vdev)); - lock_kernel(); - if (btv->video_dev->minor == minor) { + if (vdev->vfl_type == VFL_TYPE_GRABBER) { type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - } else if (btv->vbi_dev->minor == minor) { + } else if (vdev->vfl_type == VFL_TYPE_VBI) { type = V4L2_BUF_TYPE_VBI_CAPTURE; } else { WARN_ON(1); - unlock_kernel(); return -ENODEV; } + lock_kernel(); + dprintk(KERN_DEBUG "bttv%d: open called (type=%s)\n", btv->c.nr,v4l2_type_names[type]); @@ -3408,7 +3407,6 @@ static struct video_device bttv_video_template = { static int radio_open(struct file *file) { - int minor = video_devdata(file)->minor; struct video_device *vdev = video_devdata(file); struct bttv *btv = video_drvdata(file); struct bttv_fh *fh; @@ -3416,11 +3414,6 @@ static int radio_open(struct file *file) dprintk("bttv: open dev=%s\n", video_device_node_name(vdev)); lock_kernel(); - WARN_ON(btv->radio_dev && btv->radio_dev->minor != minor); - if (!btv->radio_dev || btv->radio_dev->minor != minor) { - unlock_kernel(); - return -ENODEV; - } dprintk("bttv%d: open called (radio)\n",btv->c.nr); diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c index b1944a7cbb0f..a3d8f4675c04 100644 --- a/drivers/media/video/gspca/sn9c20x.c +++ b/drivers/media/video/gspca/sn9c20x.c @@ -1476,8 +1476,9 @@ static int sn9c20x_input_init(struct gspca_dev *gspca_dev) if (input_register_device(sd->input_dev)) return -EINVAL; - sd->input_task = 
kthread_run(input_kthread, gspca_dev, "sn9c20x/%d", - gspca_dev->vdev.minor); + sd->input_task = kthread_run(input_kthread, gspca_dev, "sn9c20x/%s-%s", + gspca_dev->dev->bus->bus_name, + gspca_dev->dev->devpath); if (IS_ERR(sd->input_task)) return -EINVAL; diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c index 71e08be7960c..7400eacb4d64 100644 --- a/drivers/media/video/omap24xxcam.c +++ b/drivers/media/video/omap24xxcam.c @@ -1450,12 +1450,11 @@ static int omap24xxcam_mmap(struct file *file, struct vm_area_struct *vma) static int omap24xxcam_open(struct file *file) { - int minor = video_devdata(file)->minor; struct omap24xxcam_device *cam = omap24xxcam.priv; struct omap24xxcam_fh *fh; struct v4l2_format format; - if (!cam || !cam->vfd || (cam->vfd->minor != minor)) + if (!cam || !cam->vfd) return -ENODEV; fh = kzalloc(sizeof(*fh), GFP_KERNEL); diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h index eed5fccc83f3..4aeff96ff7d8 100644 --- a/include/media/saa7146_vv.h +++ b/include/media/saa7146_vv.h @@ -108,8 +108,6 @@ struct saa7146_fh { struct saa7146_vv { - int vbi_minor; - /* vbi capture */ struct saa7146_dmaqueue vbi_q; /* vbi workaround interrupt queue */ @@ -117,8 +115,6 @@ struct saa7146_vv int vbi_fieldcount; struct saa7146_fh *vbi_streaming; - int video_minor; - int video_status; struct saa7146_fh *video_fh; -- cgit v1.2.3 From b6456c0cfe9d94e6d2bf684e6e6c031fc0b10031 Mon Sep 17 00:00:00 2001 From: Muralidharan Karicheri Date: Thu, 19 Nov 2009 12:00:31 -0300 Subject: V4L/DVB (13571): v4l: Adding Digital Video Timings APIs This adds the above APIs to the v4l2 core. This is based on version v1.2 of the RFC titled "V4L - Support for video timings at the input/output interface" Following new ioctls are added:- - VIDIOC_ENUM_DV_PRESETS - VIDIOC_S_DV_PRESET - VIDIOC_G_DV_PRESET - VIDIOC_QUERY_DV_PRESET - VIDIOC_S_DV_TIMINGS - VIDIOC_G_DV_TIMINGS Please refer to the RFC for the details. This code was tested using vpfe capture driver on TI's DM365. Following is the test configuration used :- Blu-Ray HD DVD source -> TVP7002 -> DM365 (VPFE) ->DDR A draft version of the TVP7002 driver (currently being reviewed in the mailing list) was used that supports V4L2_DV_1080I60 & V4L2_DV_720P60 presets. A loopback video capture application was used for testing these APIs. This calls following IOCTLS :- - verify the new v4l2_input capabilities flag added - Enumerate available presets using VIDIOC_ENUM_DV_PRESETS - Set one of the supported preset using VIDIOC_S_DV_PRESET - Get current preset using VIDIOC_G_DV_PRESET - Detect current preset using VIDIOC_QUERY_DV_PRESET - Using stub functions in tvp7002, verify VIDIOC_S_DV_TIMINGS and VIDIOC_G_DV_TIMINGS ioctls are received at the sub device. 
- Tested on 64bit platform by Hans Verkuil Signed-off-by: Muralidharan Karicheri Signed-off-by: Hans Verkuil Reviewed-by: Randy Dunlap Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/v4l2-compat-ioctl32.c | 6 ++ drivers/media/video/v4l2-ioctl.c | 147 ++++++++++++++++++++++++++++++ include/linux/videodev2.h | 116 ++++++++++++++++++++++- include/media/v4l2-ioctl.h | 15 +++ include/media/v4l2-subdev.h | 21 +++++ 5 files changed, 303 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/media/video/v4l2-compat-ioctl32.c b/drivers/media/video/v4l2-compat-ioctl32.c index 997975d5e024..c4150bd26337 100644 --- a/drivers/media/video/v4l2-compat-ioctl32.c +++ b/drivers/media/video/v4l2-compat-ioctl32.c @@ -1077,6 +1077,12 @@ long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg) case VIDIOC_DBG_G_REGISTER: case VIDIOC_DBG_G_CHIP_IDENT: case VIDIOC_S_HW_FREQ_SEEK: + case VIDIOC_ENUM_DV_PRESETS: + case VIDIOC_S_DV_PRESET: + case VIDIOC_G_DV_PRESET: + case VIDIOC_QUERY_DV_PRESET: + case VIDIOC_S_DV_TIMINGS: + case VIDIOC_G_DV_TIMINGS: ret = do_video_ioctl(file, cmd, arg); break; diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c index 30cc3347ae52..4b11257c3184 100644 --- a/drivers/media/video/v4l2-ioctl.c +++ b/drivers/media/video/v4l2-ioctl.c @@ -284,6 +284,12 @@ static const char *v4l2_ioctls[] = { [_IOC_NR(VIDIOC_DBG_G_CHIP_IDENT)] = "VIDIOC_DBG_G_CHIP_IDENT", [_IOC_NR(VIDIOC_S_HW_FREQ_SEEK)] = "VIDIOC_S_HW_FREQ_SEEK", #endif + [_IOC_NR(VIDIOC_ENUM_DV_PRESETS)] = "VIDIOC_ENUM_DV_PRESETS", + [_IOC_NR(VIDIOC_S_DV_PRESET)] = "VIDIOC_S_DV_PRESET", + [_IOC_NR(VIDIOC_G_DV_PRESET)] = "VIDIOC_G_DV_PRESET", + [_IOC_NR(VIDIOC_QUERY_DV_PRESET)] = "VIDIOC_QUERY_DV_PRESET", + [_IOC_NR(VIDIOC_S_DV_TIMINGS)] = "VIDIOC_S_DV_TIMINGS", + [_IOC_NR(VIDIOC_G_DV_TIMINGS)] = "VIDIOC_G_DV_TIMINGS", }; #define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls) @@ -1135,6 +1141,19 @@ static long __video_do_ioctl(struct file *file, { struct v4l2_input *p = arg; + /* + * We set the flags for CAP_PRESETS, CAP_CUSTOM_TIMINGS & + * CAP_STD here based on ioctl handler provided by the + * driver. If the driver doesn't support these + * for a specific input, it must override these flags. + */ + if (ops->vidioc_s_std) + p->capabilities |= V4L2_IN_CAP_STD; + if (ops->vidioc_s_dv_preset) + p->capabilities |= V4L2_IN_CAP_PRESETS; + if (ops->vidioc_s_dv_timings) + p->capabilities |= V4L2_IN_CAP_CUSTOM_TIMINGS; + if (!ops->vidioc_enum_input) break; @@ -1179,6 +1198,19 @@ static long __video_do_ioctl(struct file *file, if (!ops->vidioc_enum_output) break; + /* + * We set the flags for CAP_PRESETS, CAP_CUSTOM_TIMINGS & + * CAP_STD here based on ioctl handler provided by the + * driver. If the driver doesn't support these + * for a specific output, it must override these flags. 
+ */ + if (ops->vidioc_s_std) + p->capabilities |= V4L2_OUT_CAP_STD; + if (ops->vidioc_s_dv_preset) + p->capabilities |= V4L2_OUT_CAP_PRESETS; + if (ops->vidioc_s_dv_timings) + p->capabilities |= V4L2_OUT_CAP_CUSTOM_TIMINGS; + ret = ops->vidioc_enum_output(file, fh, p); if (!ret) dbgarg(cmd, "index=%d, name=%s, type=%d, " @@ -1794,6 +1826,121 @@ static long __video_do_ioctl(struct file *file, } break; } + case VIDIOC_ENUM_DV_PRESETS: + { + struct v4l2_dv_enum_preset *p = arg; + + if (!ops->vidioc_enum_dv_presets) + break; + + ret = ops->vidioc_enum_dv_presets(file, fh, p); + if (!ret) + dbgarg(cmd, + "index=%d, preset=%d, name=%s, width=%d," + " height=%d ", + p->index, p->preset, p->name, p->width, + p->height); + break; + } + case VIDIOC_S_DV_PRESET: + { + struct v4l2_dv_preset *p = arg; + + if (!ops->vidioc_s_dv_preset) + break; + + dbgarg(cmd, "preset=%d\n", p->preset); + ret = ops->vidioc_s_dv_preset(file, fh, p); + break; + } + case VIDIOC_G_DV_PRESET: + { + struct v4l2_dv_preset *p = arg; + + if (!ops->vidioc_g_dv_preset) + break; + + ret = ops->vidioc_g_dv_preset(file, fh, p); + if (!ret) + dbgarg(cmd, "preset=%d\n", p->preset); + break; + } + case VIDIOC_QUERY_DV_PRESET: + { + struct v4l2_dv_preset *p = arg; + + if (!ops->vidioc_query_dv_preset) + break; + + ret = ops->vidioc_query_dv_preset(file, fh, p); + if (!ret) + dbgarg(cmd, "preset=%d\n", p->preset); + break; + } + case VIDIOC_S_DV_TIMINGS: + { + struct v4l2_dv_timings *p = arg; + + if (!ops->vidioc_s_dv_timings) + break; + + switch (p->type) { + case V4L2_DV_BT_656_1120: + dbgarg2("bt-656/1120:interlaced=%d, pixelclock=%lld," + " width=%d, height=%d, polarities=%x," + " hfrontporch=%d, hsync=%d, hbackporch=%d," + " vfrontporch=%d, vsync=%d, vbackporch=%d," + " il_vfrontporch=%d, il_vsync=%d," + " il_vbackporch=%d\n", + p->bt.interlaced, p->bt.pixelclock, + p->bt.width, p->bt.height, p->bt.polarities, + p->bt.hfrontporch, p->bt.hsync, + p->bt.hbackporch, p->bt.vfrontporch, + p->bt.vsync, p->bt.vbackporch, + p->bt.il_vfrontporch, p->bt.il_vsync, + p->bt.il_vbackporch); + ret = ops->vidioc_s_dv_timings(file, fh, p); + break; + default: + dbgarg2("Unknown type %d!\n", p->type); + break; + } + break; + } + case VIDIOC_G_DV_TIMINGS: + { + struct v4l2_dv_timings *p = arg; + + if (!ops->vidioc_g_dv_timings) + break; + + ret = ops->vidioc_g_dv_timings(file, fh, p); + if (!ret) { + switch (p->type) { + case V4L2_DV_BT_656_1120: + dbgarg2("bt-656/1120:interlaced=%d," + " pixelclock=%lld," + " width=%d, height=%d, polarities=%x," + " hfrontporch=%d, hsync=%d," + " hbackporch=%d, vfrontporch=%d," + " vsync=%d, vbackporch=%d," + " il_vfrontporch=%d, il_vsync=%d," + " il_vbackporch=%d\n", + p->bt.interlaced, p->bt.pixelclock, + p->bt.width, p->bt.height, + p->bt.polarities, p->bt.hfrontporch, + p->bt.hsync, p->bt.hbackporch, + p->bt.vfrontporch, p->bt.vsync, + p->bt.vbackporch, p->bt.il_vfrontporch, + p->bt.il_vsync, p->bt.il_vbackporch); + break; + default: + dbgarg2("Unknown type %d!\n", p->type); + break; + } + } + break; + } default: { diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index 32b92298fd79..59047ebf452e 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h @@ -731,6 +731,99 @@ struct v4l2_standard { __u32 reserved[4]; }; +/* + * V I D E O T I M I N G S D V P R E S E T + */ +struct v4l2_dv_preset { + __u32 preset; + __u32 reserved[4]; +}; + +/* + * D V P R E S E T S E N U M E R A T I O N + */ +struct v4l2_dv_enum_preset { + __u32 index; + __u32 preset; + __u8 name[32]; /* Name of the 
preset timing */ + __u32 width; + __u32 height; + __u32 reserved[4]; +}; + +/* + * D V P R E S E T V A L U E S + */ +#define V4L2_DV_INVALID 0 +#define V4L2_DV_480P59_94 1 /* BT.1362 */ +#define V4L2_DV_576P50 2 /* BT.1362 */ +#define V4L2_DV_720P24 3 /* SMPTE 296M */ +#define V4L2_DV_720P25 4 /* SMPTE 296M */ +#define V4L2_DV_720P30 5 /* SMPTE 296M */ +#define V4L2_DV_720P50 6 /* SMPTE 296M */ +#define V4L2_DV_720P59_94 7 /* SMPTE 274M */ +#define V4L2_DV_720P60 8 /* SMPTE 274M/296M */ +#define V4L2_DV_1080I29_97 9 /* BT.1120/ SMPTE 274M */ +#define V4L2_DV_1080I30 10 /* BT.1120/ SMPTE 274M */ +#define V4L2_DV_1080I25 11 /* BT.1120 */ +#define V4L2_DV_1080I50 12 /* SMPTE 296M */ +#define V4L2_DV_1080I60 13 /* SMPTE 296M */ +#define V4L2_DV_1080P24 14 /* SMPTE 296M */ +#define V4L2_DV_1080P25 15 /* SMPTE 296M */ +#define V4L2_DV_1080P30 16 /* SMPTE 296M */ +#define V4L2_DV_1080P50 17 /* BT.1120 */ +#define V4L2_DV_1080P60 18 /* BT.1120 */ + +/* + * D V B T T I M I N G S + */ + +/* BT.656/BT.1120 timing data */ +struct v4l2_bt_timings { + __u32 width; /* width in pixels */ + __u32 height; /* height in lines */ + __u32 interlaced; /* Interlaced or progressive */ + __u32 polarities; /* Positive or negative polarity */ + __u64 pixelclock; /* Pixel clock in HZ. Ex. 74.25MHz->74250000 */ + __u32 hfrontporch; /* Horizpontal front porch in pixels */ + __u32 hsync; /* Horizontal Sync length in pixels */ + __u32 hbackporch; /* Horizontal back porch in pixels */ + __u32 vfrontporch; /* Vertical front porch in pixels */ + __u32 vsync; /* Vertical Sync length in lines */ + __u32 vbackporch; /* Vertical back porch in lines */ + __u32 il_vfrontporch; /* Vertical front porch for bottom field of + * interlaced field formats + */ + __u32 il_vsync; /* Vertical sync length for bottom field of + * interlaced field formats + */ + __u32 il_vbackporch; /* Vertical back porch for bottom field of + * interlaced field formats + */ + __u32 reserved[16]; +} __attribute__ ((packed)); + +/* Interlaced or progressive format */ +#define V4L2_DV_PROGRESSIVE 0 +#define V4L2_DV_INTERLACED 1 + +/* Polarities. 
If bit is not set, it is assumed to be negative polarity */ +#define V4L2_DV_VSYNC_POS_POL 0x00000001 +#define V4L2_DV_HSYNC_POS_POL 0x00000002 + + +/* DV timings */ +struct v4l2_dv_timings { + __u32 type; + union { + struct v4l2_bt_timings bt; + __u32 reserved[32]; + }; +} __attribute__ ((packed)); + +/* Values for the type field */ +#define V4L2_DV_BT_656_1120 0 /* BT.656/1120 timing type */ + /* * V I D E O I N P U T S */ @@ -742,7 +835,8 @@ struct v4l2_input { __u32 tuner; /* Associated tuner */ v4l2_std_id std; __u32 status; - __u32 reserved[4]; + __u32 capabilities; + __u32 reserved[3]; }; /* Values for the 'type' field */ @@ -773,6 +867,11 @@ struct v4l2_input { #define V4L2_IN_ST_NO_ACCESS 0x02000000 /* Conditional access denied */ #define V4L2_IN_ST_VTR 0x04000000 /* VTR time constant */ +/* capabilities flags */ +#define V4L2_IN_CAP_PRESETS 0x00000001 /* Supports S_DV_PRESET */ +#define V4L2_IN_CAP_CUSTOM_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */ +#define V4L2_IN_CAP_STD 0x00000004 /* Supports S_STD */ + /* * V I D E O O U T P U T S */ @@ -783,13 +882,19 @@ struct v4l2_output { __u32 audioset; /* Associated audios (bitfield) */ __u32 modulator; /* Associated modulator */ v4l2_std_id std; - __u32 reserved[4]; + __u32 capabilities; + __u32 reserved[3]; }; /* Values for the 'type' field */ #define V4L2_OUTPUT_TYPE_MODULATOR 1 #define V4L2_OUTPUT_TYPE_ANALOG 2 #define V4L2_OUTPUT_TYPE_ANALOGVGAOVERLAY 3 +/* capabilities flags */ +#define V4L2_OUT_CAP_PRESETS 0x00000001 /* Supports S_DV_PRESET */ +#define V4L2_OUT_CAP_CUSTOM_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */ +#define V4L2_OUT_CAP_STD 0x00000004 /* Supports S_STD */ + /* * C O N T R O L S */ @@ -1624,6 +1729,13 @@ struct v4l2_dbg_chip_ident { #endif #define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek) +#define VIDIOC_ENUM_DV_PRESETS _IOWR('V', 83, struct v4l2_dv_enum_preset) +#define VIDIOC_S_DV_PRESET _IOWR('V', 84, struct v4l2_dv_preset) +#define VIDIOC_G_DV_PRESET _IOWR('V', 85, struct v4l2_dv_preset) +#define VIDIOC_QUERY_DV_PRESET _IOR('V', 86, struct v4l2_dv_preset) +#define VIDIOC_S_DV_TIMINGS _IOWR('V', 87, struct v4l2_dv_timings) +#define VIDIOC_G_DV_TIMINGS _IOWR('V', 88, struct v4l2_dv_timings) + /* Reminder: when adding new ioctls please add support for them to drivers/media/video/v4l2-compat-ioctl32.c as well! 
*/ diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h index 7a4529defa88..e8ba0f2efbae 100644 --- a/include/media/v4l2-ioctl.h +++ b/include/media/v4l2-ioctl.h @@ -239,6 +239,21 @@ struct v4l2_ioctl_ops { int (*vidioc_enum_frameintervals) (struct file *file, void *fh, struct v4l2_frmivalenum *fival); + /* DV Timings IOCTLs */ + int (*vidioc_enum_dv_presets) (struct file *file, void *fh, + struct v4l2_dv_enum_preset *preset); + + int (*vidioc_s_dv_preset) (struct file *file, void *fh, + struct v4l2_dv_preset *preset); + int (*vidioc_g_dv_preset) (struct file *file, void *fh, + struct v4l2_dv_preset *preset); + int (*vidioc_query_dv_preset) (struct file *file, void *fh, + struct v4l2_dv_preset *qpreset); + int (*vidioc_s_dv_timings) (struct file *file, void *fh, + struct v4l2_dv_timings *timings); + int (*vidioc_g_dv_timings) (struct file *file, void *fh, + struct v4l2_dv_timings *timings); + /* For other private ioctls */ long (*vidioc_default) (struct file *file, void *fh, int cmd, void *arg); diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h index 00bf17608453..7a3408f9d0f2 100644 --- a/include/media/v4l2-subdev.h +++ b/include/media/v4l2-subdev.h @@ -217,6 +217,19 @@ struct v4l2_subdev_audio_ops { s_routing: see s_routing in audio_ops, except this version is for video devices. + + s_dv_preset: set dv (Digital Video) preset in the sub device. Similar to + s_std() + + query_dv_preset: query dv preset in the sub device. This is similar to + querystd() + + s_dv_timings(): Set custom dv timings in the sub device. This is used + when sub device is capable of setting detailed timing information + in the hardware to generate/detect the video signal. + + g_dv_timings(): Get custom dv timings in the sub device. + */ struct v4l2_subdev_video_ops { int (*s_routing)(struct v4l2_subdev *sd, u32 input, u32 output, u32 config); @@ -240,6 +253,14 @@ struct v4l2_subdev_video_ops { int (*s_parm)(struct v4l2_subdev *sd, struct v4l2_streamparm *param); int (*enum_framesizes)(struct v4l2_subdev *sd, struct v4l2_frmsizeenum *fsize); int (*enum_frameintervals)(struct v4l2_subdev *sd, struct v4l2_frmivalenum *fival); + int (*s_dv_preset)(struct v4l2_subdev *sd, + struct v4l2_dv_preset *preset); + int (*query_dv_preset)(struct v4l2_subdev *sd, + struct v4l2_dv_preset *preset); + int (*s_dv_timings)(struct v4l2_subdev *sd, + struct v4l2_dv_timings *timings); + int (*g_dv_timings)(struct v4l2_subdev *sd, + struct v4l2_dv_timings *timings); }; /* -- cgit v1.2.3 From 446e4a64d2f6efddc63a47169ba3c8037b620307 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Fri, 11 Dec 2009 08:34:07 -0300 Subject: V4L/DVB (13613): IR: create ir-core module Split the ir-common into two separate modules: - ir-core: it is the IR-independent functions; - ir-common: has the common part used by V4L drivers. 
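For illustration only — the keymap, table and function below (example_scan, example_rc_tab, example_connect_keymap, the RC5 type and the two keycodes) are invented for this sketch and are not part of the patch — a bridge driver would hand its scancode/keycode map to the relocated ir-core helpers roughly as follows:

#include <linux/kernel.h>
#include <linux/input.h>
#include <media/ir-core.h>

/* two-entry keymap made up purely for the example */
static struct ir_scancode example_scan[] = {
	{ 0x01, KEY_POWER },
	{ 0x02, KEY_MUTE },
};

static struct ir_scancode_table example_rc_tab = {
	.scan    = example_scan,
	.size    = ARRAY_SIZE(example_scan),
	.ir_type = IR_TYPE_RC5,
};

static int example_connect_keymap(struct input_dev *input_dev)
{
	/* ir-core hooks its getkeycode()/setkeycode() handlers and keeps
	 * the table reachable through the input device's drvdata */
	return ir_set_keycode_table(input_dev, &example_rc_tab);
}

The ir_input_init() helper that stays in ir-common keeps using these routines internally, so V4L bridge drivers are not touched by the split.
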
Signed-off-by: Mauro Carvalho Chehab --- drivers/media/IR/Kconfig | 7 ++++- drivers/media/IR/Makefile | 4 ++- drivers/media/IR/ir-functions.c | 3 --- drivers/media/IR/ir-keytable.c | 21 +++++++++++++-- include/media/ir-common.h | 39 +--------------------------- include/media/ir-core.h | 57 +++++++++++++++++++++++++++++++++++++++++ 6 files changed, 86 insertions(+), 45 deletions(-) create mode 100644 include/media/ir-core.h (limited to 'include') diff --git a/drivers/media/IR/Kconfig b/drivers/media/IR/Kconfig index 5b4ac969a586..4dde7d180a32 100644 --- a/drivers/media/IR/Kconfig +++ b/drivers/media/IR/Kconfig @@ -1,4 +1,9 @@ -config VIDEO_IR +config IR_CORE tristate depends on INPUT default INPUT + +config VIDEO_IR + tristate + depends on IR_CORE + default IR_CORE diff --git a/drivers/media/IR/Makefile b/drivers/media/IR/Makefile index 2781f430c6e1..df5ddb4bbbf7 100644 --- a/drivers/media/IR/Makefile +++ b/drivers/media/IR/Makefile @@ -1,3 +1,5 @@ -ir-common-objs := ir-functions.o ir-keymaps.o ir-keytable.o +ir-common-objs := ir-functions.o ir-keymaps.o +ir-core-objs := ir-keytable.o +obj-$(CONFIG_IR_CORE) += ir-core.o obj-$(CONFIG_VIDEO_IR) += ir-common.o diff --git a/drivers/media/IR/ir-functions.c b/drivers/media/IR/ir-functions.c index e616f624ceaa..2db22948a310 100644 --- a/drivers/media/IR/ir-functions.c +++ b/drivers/media/IR/ir-functions.c @@ -34,9 +34,6 @@ static int repeat = 1; module_param(repeat, int, 0444); MODULE_PARM_DESC(repeat,"auto-repeat for IR keys (default: on)"); -int media_ir_debug; /* media_ir_debug level (0,1,2) */ -module_param_named(debug, media_ir_debug, int, 0644); - /* -------------------------------------------------------------------------- */ static void ir_input_key_event(struct input_dev *dev, struct ir_input_state *ir) diff --git a/drivers/media/IR/ir-keytable.c b/drivers/media/IR/ir-keytable.c index 99ed2deceef3..20d642dbab49 100644 --- a/drivers/media/IR/ir-keytable.c +++ b/drivers/media/IR/ir-keytable.c @@ -1,6 +1,15 @@ /* ir-register.c - handle IR scancode->keycode tables * * Copyright (C) 2009 by Mauro Carvalho Chehab + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
*/ #include @@ -10,7 +19,6 @@ #define IR_TAB_MIN_SIZE 32 #define IR_TAB_MAX_SIZE 1024 - /** * ir_seek_table() - returns the element order on the table * @rc_tab: the ir_scancode_table with the keymap to be used @@ -73,6 +81,7 @@ int ir_roundup_tablesize(int n_elems) return n_elems; } +EXPORT_SYMBOL_GPL(ir_roundup_tablesize); /** * ir_copy_table() - copies a keytable, discarding the unused entries @@ -101,6 +110,7 @@ int ir_copy_table(struct ir_scancode_table *destin, return 0; } +EXPORT_SYMBOL_GPL(ir_copy_table); /** * ir_getkeycode() - get a keycode at the evdev scancode ->keycode table @@ -137,7 +147,6 @@ static int ir_getkeycode(struct input_dev *dev, return 0; } - /** * ir_is_resize_needed() - Check if the table needs rezise * @table: keycode table that may need to resize @@ -379,6 +388,7 @@ u32 ir_g_keycode_from_table(struct input_dev *dev, u32 scancode) /* Reports userspace that an unknown keycode were got */ return KEY_RESERVED; } +EXPORT_SYMBOL_GPL(ir_g_keycode_from_table); /** * ir_set_keycode_table() - sets the IR keycode table and add the handlers @@ -415,6 +425,7 @@ int ir_set_keycode_table(struct input_dev *input_dev, return 0; } +EXPORT_SYMBOL_GPL(ir_set_keycode_table); void ir_input_free(struct input_dev *dev) { @@ -431,3 +442,9 @@ void ir_input_free(struct input_dev *dev) } EXPORT_SYMBOL_GPL(ir_input_free); +int ir_core_debug; /* ir_debug level (0,1,2) */ +EXPORT_SYMBOL_GPL(ir_core_debug); +module_param_named(debug, ir_core_debug, int, 0644); + +MODULE_AUTHOR("Mauro Carvalho Chehab "); +MODULE_LICENSE("GPL"); diff --git a/include/media/ir-common.h b/include/media/ir-common.h index 45bf5cf59458..18d300414fa2 100644 --- a/include/media/ir-common.h +++ b/include/media/ir-common.h @@ -26,31 +26,7 @@ #include #include #include -#include - -extern int media_ir_debug; /* media_ir_debug level (0,1,2) */ -#define IR_dprintk(level, fmt, arg...) if (media_ir_debug >= level) \ - printk(KERN_DEBUG "%s: " fmt , __func__, ## arg) - -enum ir_type { - IR_TYPE_UNKNOWN = 0, - IR_TYPE_RC5 = 1, - IR_TYPE_PD = 2, /* Pulse distance encoded IR */ - IR_TYPE_NEC = 3, - IR_TYPE_OTHER = 99, -}; - -struct ir_scancode { - u16 scancode; - u32 keycode; -}; - -struct ir_scancode_table { - struct ir_scancode *scan; - int size; - enum ir_type ir_type; - spinlock_t lock; -}; +#include #define RC5_START(x) (((x)>>12)&3) #define RC5_TOGGLE(x) (((x)>>11)&1) @@ -123,19 +99,6 @@ u32 ir_rc5_decode(unsigned int code); void ir_rc5_timer_end(unsigned long data); void ir_rc5_timer_keyup(unsigned long data); -/* Routines from ir-keytable.c */ - -u32 ir_g_keycode_from_table(struct input_dev *input_dev, - u32 scancode); - -int ir_set_keycode_table(struct input_dev *input_dev, - struct ir_scancode_table *rc_tab); - -int ir_roundup_tablesize(int n_elems); -int ir_copy_table(struct ir_scancode_table *destin, - const struct ir_scancode_table *origin); -void ir_input_free(struct input_dev *input_dev); - /* scancode->keycode map tables from ir-keymaps.c */ extern struct ir_scancode_table ir_codes_empty_table; diff --git a/include/media/ir-core.h b/include/media/ir-core.h new file mode 100644 index 000000000000..825d04a4e77b --- /dev/null +++ b/include/media/ir-core.h @@ -0,0 +1,57 @@ +/* + * Remote Controller core header + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _IR_CORE +#define _IR_CORE + +#include +#include + +extern int ir_core_debug; +#define IR_dprintk(level, fmt, arg...) if (ir_core_debug >= level) \ + printk(KERN_DEBUG "%s: " fmt , __func__, ## arg) + +enum ir_type { + IR_TYPE_UNKNOWN = 0, + IR_TYPE_RC5 = 1, + IR_TYPE_PD = 2, /* Pulse distance encoded IR */ + IR_TYPE_NEC = 3, + IR_TYPE_OTHER = 99, +}; + +struct ir_scancode { + u16 scancode; + u32 keycode; +}; + +struct ir_scancode_table { + struct ir_scancode *scan; + int size; + enum ir_type ir_type; + spinlock_t lock; +}; + +/* Routines from ir-keytable.c */ + +u32 ir_g_keycode_from_table(struct input_dev *input_dev, + u32 scancode); + +int ir_set_keycode_table(struct input_dev *input_dev, + struct ir_scancode_table *rc_tab); + +int ir_roundup_tablesize(int n_elems); +int ir_copy_table(struct ir_scancode_table *destin, + const struct ir_scancode_table *origin); +void ir_input_free(struct input_dev *input_dev); + +#endif -- cgit v1.2.3 From 75543cce0c1f46be495b981d8d3eda0882721d07 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Fri, 11 Dec 2009 09:44:23 -0300 Subject: V4L/DVB (13615): ir-core: create ir_input_register Move non-V4L specific stuff from ir-functions ir_input_init() into a new function to register ir devices: ir_input_register(). Signed-off-by: Mauro Carvalho Chehab --- drivers/media/IR/ir-functions.c | 17 ++------------ drivers/media/IR/ir-keytable.c | 51 +++++++++++++++++++++++++++++++---------- include/media/ir-common.h | 2 -- include/media/ir-core.h | 9 ++++++-- 4 files changed, 48 insertions(+), 31 deletions(-) (limited to 'include') diff --git a/drivers/media/IR/ir-functions.c b/drivers/media/IR/ir-functions.c index 2db22948a310..7401a7989719 100644 --- a/drivers/media/IR/ir-functions.c +++ b/drivers/media/IR/ir-functions.c @@ -56,24 +56,11 @@ int ir_input_init(struct input_dev *dev, struct ir_input_state *ir, { ir->ir_type = ir_type; - ir->keytable.size = ir_roundup_tablesize(ir_codes->size); - ir->keytable.scan = kzalloc(ir->keytable.size * - sizeof(struct ir_scancode), GFP_KERNEL); - if (!ir->keytable.scan) - return -ENOMEM; - - IR_dprintk(1, "Allocated space for %d keycode entries (%zd bytes)\n", - ir->keytable.size, - ir->keytable.size * sizeof(ir->keytable.scan)); - - ir_copy_table(&ir->keytable, ir_codes); - ir_set_keycode_table(dev, &ir->keytable); - - clear_bit(0, dev->keybit); - set_bit(EV_KEY, dev->evbit); if (repeat) set_bit(EV_REP, dev->evbit); + ir_input_register(dev, ir_codes); + return 0; } EXPORT_SYMBOL_GPL(ir_input_init); diff --git a/drivers/media/IR/ir-keytable.c b/drivers/media/IR/ir-keytable.c index 20d642dbab49..ddb8a0f8af4d 100644 --- a/drivers/media/IR/ir-keytable.c +++ b/drivers/media/IR/ir-keytable.c @@ -125,7 +125,8 @@ static int ir_getkeycode(struct input_dev *dev, int scancode, int *keycode) { int elem; - struct ir_scancode_table *rc_tab = input_get_drvdata(dev); + struct ir_input_dev *ir_dev = input_get_drvdata(dev); + struct ir_scancode_table *rc_tab = &ir_dev->rc_tab; elem = ir_seek_table(rc_tab, scancode); if (elem >= 0) { @@ -296,7 +297,8 @@ static int ir_setkeycode(struct input_dev *dev, int scancode, int keycode) { int rc = 0; - struct ir_scancode_table *rc_tab = input_get_drvdata(dev); + struct ir_input_dev *ir_dev = input_get_drvdata(dev); + struct 
ir_scancode_table *rc_tab = &ir_dev->rc_tab; struct ir_scancode *keymap = rc_tab->scan; unsigned long flags; @@ -370,7 +372,8 @@ static int ir_setkeycode(struct input_dev *dev, */ u32 ir_g_keycode_from_table(struct input_dev *dev, u32 scancode) { - struct ir_scancode_table *rc_tab = input_get_drvdata(dev); + struct ir_input_dev *ir_dev = input_get_drvdata(dev); + struct ir_scancode_table *rc_tab = &ir_dev->rc_tab; struct ir_scancode *keymap = rc_tab->scan; int elem; @@ -391,7 +394,7 @@ u32 ir_g_keycode_from_table(struct input_dev *dev, u32 scancode) EXPORT_SYMBOL_GPL(ir_g_keycode_from_table); /** - * ir_set_keycode_table() - sets the IR keycode table and add the handlers + * ir_input_register() - sets the IR keycode table and add the handlers * for keymap table get/set * @input_dev: the struct input_dev descriptor of the device * @rc_tab: the struct ir_scancode_table table of scancode/keymap @@ -400,17 +403,34 @@ EXPORT_SYMBOL_GPL(ir_g_keycode_from_table); * an IR. * It should be called before registering the IR device. */ -int ir_set_keycode_table(struct input_dev *input_dev, - struct ir_scancode_table *rc_tab) +int ir_input_register(struct input_dev *input_dev, + struct ir_scancode_table *rc_tab) { - struct ir_scancode *keymap = rc_tab->scan; + struct ir_input_dev *ir_dev; + struct ir_scancode *keymap = rc_tab->scan; int i; - spin_lock_init(&rc_tab->lock); - if (rc_tab->scan == NULL || !rc_tab->size) return -EINVAL; + ir_dev = kzalloc(sizeof(*ir_dev), GFP_KERNEL); + if (!ir_dev) + return -ENOMEM; + + spin_lock_init(&rc_tab->lock); + + ir_dev->rc_tab.size = ir_roundup_tablesize(rc_tab->size); + ir_dev->rc_tab.scan = kzalloc(ir_dev->rc_tab.size * + sizeof(struct ir_scancode), GFP_KERNEL); + if (!ir_dev->rc_tab.scan) + return -ENOMEM; + + IR_dprintk(1, "Allocated space for %d keycode entries (%zd bytes)\n", + ir_dev->rc_tab.size, + ir_dev->rc_tab.size * sizeof(ir_dev->rc_tab.scan)); + + ir_copy_table(&ir_dev->rc_tab, rc_tab); + /* set the bits for the keys */ IR_dprintk(1, "key map size: %d\n", rc_tab->size); for (i = 0; i < rc_tab->size; i++) { @@ -418,18 +438,22 @@ int ir_set_keycode_table(struct input_dev *input_dev, i, keymap[i].keycode); set_bit(keymap[i].keycode, input_dev->keybit); } + clear_bit(0, input_dev->keybit); + + set_bit(EV_KEY, input_dev->evbit); input_dev->getkeycode = ir_getkeycode; input_dev->setkeycode = ir_setkeycode; - input_set_drvdata(input_dev, rc_tab); + input_set_drvdata(input_dev, ir_dev); return 0; } -EXPORT_SYMBOL_GPL(ir_set_keycode_table); +EXPORT_SYMBOL_GPL(ir_input_register); void ir_input_free(struct input_dev *dev) { - struct ir_scancode_table *rc_tab = input_get_drvdata(dev); + struct ir_input_dev *ir_dev = input_get_drvdata(dev); + struct ir_scancode_table *rc_tab = &ir_dev->rc_tab; if (!rc_tab) return; @@ -439,6 +463,9 @@ void ir_input_free(struct input_dev *dev) rc_tab->size = 0; kfree(rc_tab->scan); rc_tab->scan = NULL; + + kfree(ir_dev); + input_set_drvdata(dev, NULL); } EXPORT_SYMBOL_GPL(ir_input_free); diff --git a/include/media/ir-common.h b/include/media/ir-common.h index 18d300414fa2..ac8ced6bf3e2 100644 --- a/include/media/ir-common.h +++ b/include/media/ir-common.h @@ -37,8 +37,6 @@ struct ir_input_state { /* configuration */ int ir_type; - struct ir_scancode_table keytable; - /* key info */ u32 ir_key; /* ir scancode */ u32 keycode; /* linux key code */ diff --git a/include/media/ir-core.h b/include/media/ir-core.h index 825d04a4e77b..ea76c199b67d 100644 --- a/include/media/ir-core.h +++ b/include/media/ir-core.h @@ -41,6 +41,11 @@ struct 
ir_scancode_table { spinlock_t lock; }; +struct ir_input_dev { + struct input_dev *dev; + struct ir_scancode_table rc_tab; +}; + /* Routines from ir-keytable.c */ u32 ir_g_keycode_from_table(struct input_dev *input_dev, @@ -50,8 +55,8 @@ int ir_set_keycode_table(struct input_dev *input_dev, struct ir_scancode_table *rc_tab); int ir_roundup_tablesize(int n_elems); -int ir_copy_table(struct ir_scancode_table *destin, - const struct ir_scancode_table *origin); +int ir_input_register(struct input_dev *dev, + struct ir_scancode_table *ir_codes); void ir_input_free(struct input_dev *input_dev); #endif -- cgit v1.2.3 From 38ef6aa884e3fd389f7d444b8dd36c16832e36b4 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Fri, 11 Dec 2009 09:47:42 -0300 Subject: V4L/DVB (13616): IR: rename ir_input_free as ir_input_unregister Now, ir_input_free does more than just freeing the keytab. Better to rename it as ir_input_unregister. Signed-off-by: Mauro Carvalho Chehab --- drivers/media/IR/ir-keytable.c | 4 ++-- drivers/media/dvb/dm1105/dm1105.c | 4 ++-- drivers/media/dvb/ttpci/budget-ci.c | 4 ++-- drivers/media/video/bt8xx/bttv-input.c | 4 ++-- drivers/media/video/cx231xx/cx231xx-input.c | 4 ++-- drivers/media/video/cx23885/cx23885-input.c | 4 ++-- drivers/media/video/cx88/cx88-input.c | 4 ++-- drivers/media/video/em28xx/em28xx-input.c | 4 ++-- drivers/media/video/ir-kbd-i2c.c | 4 ++-- drivers/media/video/saa7134/saa7134-input.c | 4 ++-- include/media/ir-core.h | 2 +- 11 files changed, 21 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/drivers/media/IR/ir-keytable.c b/drivers/media/IR/ir-keytable.c index ddb8a0f8af4d..e9c9bd34af65 100644 --- a/drivers/media/IR/ir-keytable.c +++ b/drivers/media/IR/ir-keytable.c @@ -450,7 +450,7 @@ int ir_input_register(struct input_dev *input_dev, } EXPORT_SYMBOL_GPL(ir_input_register); -void ir_input_free(struct input_dev *dev) +void ir_input_unregister(struct input_dev *dev) { struct ir_input_dev *ir_dev = input_get_drvdata(dev); struct ir_scancode_table *rc_tab = &ir_dev->rc_tab; @@ -467,7 +467,7 @@ void ir_input_free(struct input_dev *dev) kfree(ir_dev); input_set_drvdata(dev, NULL); } -EXPORT_SYMBOL_GPL(ir_input_free); +EXPORT_SYMBOL_GPL(ir_input_unregister); int ir_core_debug; /* ir_debug level (0,1,2) */ EXPORT_SYMBOL_GPL(ir_core_debug); diff --git a/drivers/media/dvb/dm1105/dm1105.c b/drivers/media/dvb/dm1105/dm1105.c index 53e3f2a7d31a..34e4e569cc7d 100644 --- a/drivers/media/dvb/dm1105/dm1105.c +++ b/drivers/media/dvb/dm1105/dm1105.c @@ -613,7 +613,7 @@ int __devinit dm1105_ir_init(struct dm1105dvb *dm1105) err = input_register_device(input_dev); if (err) { - ir_input_free(input_dev); + ir_input_unregister(input_dev); input_free_device(input_dev); return err; } @@ -623,7 +623,7 @@ int __devinit dm1105_ir_init(struct dm1105dvb *dm1105) void __devexit dm1105_ir_exit(struct dm1105dvb *dm1105) { - ir_input_free(dm1105->ir.input_dev); + ir_input_unregister(dm1105->ir.input_dev); input_unregister_device(dm1105->ir.input_dev); } diff --git a/drivers/media/dvb/ttpci/budget-ci.c b/drivers/media/dvb/ttpci/budget-ci.c index 7d193ebc0aea..9c9070a403dd 100644 --- a/drivers/media/dvb/ttpci/budget-ci.c +++ b/drivers/media/dvb/ttpci/budget-ci.c @@ -286,7 +286,7 @@ static int msp430_ir_init(struct budget_ci *budget_ci) return 0; out2: - ir_input_free(input_dev); + ir_input_unregister(input_dev); input_free_device(input_dev); out1: return error; @@ -304,7 +304,7 @@ static void msp430_ir_deinit(struct budget_ci *budget_ci) del_timer_sync(&dev->timer); 
ir_input_nokey(dev, &budget_ci->ir.state); - ir_input_free(dev); + ir_input_unregister(dev); input_unregister_device(dev); } diff --git a/drivers/media/video/bt8xx/bttv-input.c b/drivers/media/video/bt8xx/bttv-input.c index 84a957e52c4b..0e1a7aabefba 100644 --- a/drivers/media/video/bt8xx/bttv-input.c +++ b/drivers/media/video/bt8xx/bttv-input.c @@ -403,7 +403,7 @@ int bttv_input_init(struct bttv *btv) bttv_ir_stop(btv); btv->remote = NULL; err_out_free: - ir_input_free(input_dev); + ir_input_unregister(input_dev); input_free_device(input_dev); kfree(ir); return err; @@ -415,7 +415,7 @@ void bttv_input_fini(struct bttv *btv) return; bttv_ir_stop(btv); - ir_input_free(btv->remote->dev); + ir_input_unregister(btv->remote->dev); input_unregister_device(btv->remote->dev); kfree(btv->remote); btv->remote = NULL; diff --git a/drivers/media/video/cx231xx/cx231xx-input.c b/drivers/media/video/cx231xx/cx231xx-input.c index cd135f01b9c1..5eba1ec1205b 100644 --- a/drivers/media/video/cx231xx/cx231xx-input.c +++ b/drivers/media/video/cx231xx/cx231xx-input.c @@ -226,7 +226,7 @@ err_out_stop: cx231xx_ir_stop(ir); dev->ir = NULL; err_out_free: - ir_input_free(input_dev); + ir_input_unregister(input_dev); input_free_device(input_dev); kfree(ir); return err; @@ -241,7 +241,7 @@ int cx231xx_ir_fini(struct cx231xx *dev) return 0; cx231xx_ir_stop(ir); - ir_input_free(ir->input); + ir_input_unregister(ir->input); input_unregister_device(ir->input); kfree(ir); diff --git a/drivers/media/video/cx23885/cx23885-input.c b/drivers/media/video/cx23885/cx23885-input.c index 469e083dd5f8..2a5e4d959e8e 100644 --- a/drivers/media/video/cx23885/cx23885-input.c +++ b/drivers/media/video/cx23885/cx23885-input.c @@ -407,7 +407,7 @@ err_out_stop: cx23885_input_ir_stop(dev); dev->ir_input = NULL; err_out_free: - ir_input_free(input_dev); + ir_input_unregister(input_dev); input_free_device(input_dev); kfree(ir); return ret; @@ -420,7 +420,7 @@ void cx23885_input_fini(struct cx23885_dev *dev) if (dev->ir_input == NULL) return; - ir_input_free(dev->ir_input->dev); + ir_input_unregister(dev->ir_input->dev); input_unregister_device(dev->ir_input->dev); kfree(dev->ir_input); dev->ir_input = NULL; diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c index 92b8cdf9fb81..d4dc2c2a3cbc 100644 --- a/drivers/media/video/cx88/cx88-input.c +++ b/drivers/media/video/cx88/cx88-input.c @@ -393,7 +393,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci) cx88_ir_stop(core, ir); core->ir = NULL; err_out_free: - ir_input_free(input_dev); + ir_input_unregister(input_dev); input_free_device(input_dev); kfree(ir); return err; @@ -408,7 +408,7 @@ int cx88_ir_fini(struct cx88_core *core) return 0; cx88_ir_stop(core, ir); - ir_input_free(ir->input); + ir_input_unregister(ir->input); input_unregister_device(ir->input); kfree(ir); diff --git a/drivers/media/video/em28xx/em28xx-input.c b/drivers/media/video/em28xx/em28xx-input.c index 5ddeb421ed58..9dc4e142726b 100644 --- a/drivers/media/video/em28xx/em28xx-input.c +++ b/drivers/media/video/em28xx/em28xx-input.c @@ -422,7 +422,7 @@ int em28xx_ir_init(struct em28xx *dev) em28xx_ir_stop(ir); dev->ir = NULL; err_out_free: - ir_input_free(input_dev); + ir_input_unregister(input_dev); input_free_device(input_dev); kfree(ir); return err; @@ -437,7 +437,7 @@ int em28xx_ir_fini(struct em28xx *dev) return 0; em28xx_ir_stop(ir); - ir_input_free(ir->input); + ir_input_unregister(ir->input); input_unregister_device(ir->input); kfree(ir); diff --git 
a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c index 2856e780772e..9d8e78dc614e 100644 --- a/drivers/media/video/ir-kbd-i2c.c +++ b/drivers/media/video/ir-kbd-i2c.c @@ -460,7 +460,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id) return 0; err_out_free: - ir_input_free(input_dev); + ir_input_unregister(input_dev); input_free_device(input_dev); kfree(ir); return err; @@ -474,7 +474,7 @@ static int ir_remove(struct i2c_client *client) cancel_delayed_work_sync(&ir->work); /* unregister device */ - ir_input_free(ir->input); + ir_input_unregister(ir->input); input_unregister_device(ir->input); /* free memory */ diff --git a/drivers/media/video/saa7134/saa7134-input.c b/drivers/media/video/saa7134/saa7134-input.c index 411a8410620e..5093e7a0d58c 100644 --- a/drivers/media/video/saa7134/saa7134-input.c +++ b/drivers/media/video/saa7134/saa7134-input.c @@ -742,7 +742,7 @@ int saa7134_input_init1(struct saa7134_dev *dev) saa7134_ir_stop(dev); dev->remote = NULL; err_out_free: - ir_input_free(input_dev); + ir_input_unregister(input_dev); input_free_device(input_dev); kfree(ir); return err; @@ -754,7 +754,7 @@ void saa7134_input_fini(struct saa7134_dev *dev) return; saa7134_ir_stop(dev); - ir_input_free(dev->remote->dev); + ir_input_unregister(dev->remote->dev); input_unregister_device(dev->remote->dev); kfree(dev->remote); dev->remote = NULL; diff --git a/include/media/ir-core.h b/include/media/ir-core.h index ea76c199b67d..299d201e1339 100644 --- a/include/media/ir-core.h +++ b/include/media/ir-core.h @@ -57,6 +57,6 @@ int ir_set_keycode_table(struct input_dev *input_dev, int ir_roundup_tablesize(int n_elems); int ir_input_register(struct input_dev *dev, struct ir_scancode_table *ir_codes); -void ir_input_free(struct input_dev *input_dev); +void ir_input_unregister(struct input_dev *input_dev); #endif -- cgit v1.2.3 From 579e7d60ba0035228aadad69eb2ffeb138c51311 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Fri, 11 Dec 2009 11:20:59 -0300 Subject: V4L/DVB (13617): ir: move input_register_device() to happen inside ir_input_register() We'll need to register a sysfs class for the IR devices. As such, the better is to have the input_register_device()/input_unregister_device() inside the ir register/unregister functions. Also, solves a naming problem with V4L ir_input_init() function, that were, in fact, registering a device. While here, do a few cleanups at budget-ci IR logic. 
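As a rough sketch of the calling convention that results from this change (the example_ir structure, the function names and the IR_TYPE_RC5 choice are invented here, not taken from any of the converted drivers):

#include <linux/input.h>
#include <media/ir-common.h>

struct example_ir {
	struct ir_input_state state;
	struct input_dev *input_dev;
};

static int example_ir_attach(struct example_ir *ir,
			     struct ir_scancode_table *ir_codes)
{
	int err;

	ir->input_dev = input_allocate_device();
	if (!ir->input_dev)
		return -ENOMEM;

	/* now only sets up the state and the EV_REP bit */
	ir_input_init(ir->input_dev, &ir->state, IR_TYPE_RC5);

	/* copies the keytable and calls input_register_device() itself */
	err = ir_input_register(ir->input_dev, ir_codes);
	if (err)
		return err;

	return 0;
}

static void example_ir_detach(struct example_ir *ir)
{
	/* also calls input_unregister_device(), no separate call needed */
	ir_input_unregister(ir->input_dev);
}

The converted drivers accordingly drop their explicit input_register_device()/input_unregister_device() and input_free_device() calls, as the diffs below show.
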
Signed-off-by: Mauro Carvalho Chehab --- drivers/media/IR/ir-functions.c | 4 +-- drivers/media/IR/ir-keytable.c | 20 ++++++++---- drivers/media/dvb/dm1105/dm1105.c | 12 ++----- drivers/media/dvb/ttpci/budget-ci.c | 50 +++++++++-------------------- drivers/media/video/bt8xx/bttv-input.c | 7 ++-- drivers/media/video/cx231xx/cx231xx-input.c | 8 ++--- drivers/media/video/cx23885/cx23885-input.c | 7 ++-- drivers/media/video/cx88/cx88-input.c | 7 ++-- drivers/media/video/em28xx/em28xx-input.c | 8 ++--- drivers/media/video/ir-kbd-i2c.c | 7 ++-- drivers/media/video/saa7134/saa7134-input.c | 7 ++-- include/media/ir-common.h | 2 +- 12 files changed, 48 insertions(+), 91 deletions(-) (limited to 'include') diff --git a/drivers/media/IR/ir-functions.c b/drivers/media/IR/ir-functions.c index 7401a7989719..776a136616d6 100644 --- a/drivers/media/IR/ir-functions.c +++ b/drivers/media/IR/ir-functions.c @@ -52,15 +52,13 @@ static void ir_input_key_event(struct input_dev *dev, struct ir_input_state *ir) /* -------------------------------------------------------------------------- */ int ir_input_init(struct input_dev *dev, struct ir_input_state *ir, - int ir_type, struct ir_scancode_table *ir_codes) + int ir_type) { ir->ir_type = ir_type; if (repeat) set_bit(EV_REP, dev->evbit); - ir_input_register(dev, ir_codes); - return 0; } EXPORT_SYMBOL_GPL(ir_input_init); diff --git a/drivers/media/IR/ir-keytable.c b/drivers/media/IR/ir-keytable.c index e9c9bd34af65..bff7a5356037 100644 --- a/drivers/media/IR/ir-keytable.c +++ b/drivers/media/IR/ir-keytable.c @@ -12,8 +12,8 @@ * GNU General Public License for more details. */ -#include +#include #include #define IR_TAB_MIN_SIZE 32 @@ -408,7 +408,7 @@ int ir_input_register(struct input_dev *input_dev, { struct ir_input_dev *ir_dev; struct ir_scancode *keymap = rc_tab->scan; - int i; + int i, rc; if (rc_tab->scan == NULL || !rc_tab->size) return -EINVAL; @@ -446,26 +446,34 @@ int ir_input_register(struct input_dev *input_dev, input_dev->setkeycode = ir_setkeycode; input_set_drvdata(input_dev, ir_dev); - return 0; + rc = input_register_device(input_dev); + if (rc < 0) { + kfree(rc_tab->scan); + kfree(ir_dev); + input_set_drvdata(input_dev, NULL); + } + + return rc; } EXPORT_SYMBOL_GPL(ir_input_register); void ir_input_unregister(struct input_dev *dev) { struct ir_input_dev *ir_dev = input_get_drvdata(dev); - struct ir_scancode_table *rc_tab = &ir_dev->rc_tab; + struct ir_scancode_table *rc_tab; - if (!rc_tab) + if (!ir_dev) return; IR_dprintk(1, "Freed keycode table\n"); + rc_tab = &ir_dev->rc_tab; rc_tab->size = 0; kfree(rc_tab->scan); rc_tab->scan = NULL; kfree(ir_dev); - input_set_drvdata(dev, NULL); + input_unregister_device(dev); } EXPORT_SYMBOL_GPL(ir_input_unregister); diff --git a/drivers/media/dvb/dm1105/dm1105.c b/drivers/media/dvb/dm1105/dm1105.c index 34e4e569cc7d..f0f483ac8b89 100644 --- a/drivers/media/dvb/dm1105/dm1105.c +++ b/drivers/media/dvb/dm1105/dm1105.c @@ -589,7 +589,7 @@ int __devinit dm1105_ir_init(struct dm1105dvb *dm1105) snprintf(dm1105->ir.input_phys, sizeof(dm1105->ir.input_phys), "pci-%s/ir0", pci_name(dm1105->pdev)); - err = ir_input_init(input_dev, &dm1105->ir.ir, ir_type, ir_codes); + err = ir_input_init(input_dev, &dm1105->ir.ir, ir_type); if (err < 0) { input_free_device(input_dev); return err; @@ -611,20 +611,14 @@ int __devinit dm1105_ir_init(struct dm1105dvb *dm1105) INIT_WORK(&dm1105->ir.work, dm1105_emit_key); - err = input_register_device(input_dev); - if (err) { - ir_input_unregister(input_dev); - input_free_device(input_dev); - 
return err; - } + err = ir_input_register(input_dev, ir_codes); - return 0; + return err; } void __devexit dm1105_ir_exit(struct dm1105dvb *dm1105) { ir_input_unregister(dm1105->ir.input_dev); - input_unregister_device(dm1105->ir.input_dev); } static int __devinit dm1105dvb_hw_init(struct dm1105dvb *dm1105dvb) diff --git a/drivers/media/dvb/ttpci/budget-ci.c b/drivers/media/dvb/ttpci/budget-ci.c index 9c9070a403dd..9782e0593733 100644 --- a/drivers/media/dvb/ttpci/budget-ci.c +++ b/drivers/media/dvb/ttpci/budget-ci.c @@ -190,12 +190,13 @@ static int msp430_ir_init(struct budget_ci *budget_ci) struct saa7146_dev *saa = budget_ci->budget.dev; struct input_dev *input_dev = budget_ci->ir.dev; int error; + struct ir_scancode_table *ir_codes; + budget_ci->ir.dev = input_dev = input_allocate_device(); if (!input_dev) { printk(KERN_ERR "budget_ci: IR interface initialisation failed\n"); - error = -ENOMEM; - goto out1; + return -ENOMEM; } snprintf(budget_ci->ir.name, sizeof(budget_ci->ir.name), @@ -217,6 +218,11 @@ static int msp430_ir_init(struct budget_ci *budget_ci) } input_dev->dev.parent = &saa->pci->dev; + if (rc5_device < 0) + budget_ci->ir.rc5_device = IR_DEVICE_ANY; + else + budget_ci->ir.rc5_device = rc5_device; + /* Select keymap and address */ switch (budget_ci->budget.dev->pci->subsystem_device) { case 0x100c: @@ -224,53 +230,34 @@ static int msp430_ir_init(struct budget_ci *budget_ci) case 0x1011: case 0x1012: /* The hauppauge keymap is a superset of these remotes */ - error = ir_input_init(input_dev, &budget_ci->ir.state, - IR_TYPE_RC5, &ir_codes_hauppauge_new_table); - if (error < 0) - goto out2; + ir_codes = &ir_codes_hauppauge_new_table; if (rc5_device < 0) budget_ci->ir.rc5_device = 0x1f; - else - budget_ci->ir.rc5_device = rc5_device; break; case 0x1010: case 0x1017: case 0x101a: /* for the Technotrend 1500 bundled remote */ - error = ir_input_init(input_dev, &budget_ci->ir.state, - IR_TYPE_RC5, &ir_codes_tt_1500_table); - if (error < 0) - goto out2; - - if (rc5_device < 0) - budget_ci->ir.rc5_device = IR_DEVICE_ANY; - else - budget_ci->ir.rc5_device = rc5_device; + ir_codes = &ir_codes_tt_1500_table; break; default: /* unknown remote */ - error = ir_input_init(input_dev, &budget_ci->ir.state, - IR_TYPE_RC5, &ir_codes_budget_ci_old_table); - if (error < 0) - goto out2; - - if (rc5_device < 0) - budget_ci->ir.rc5_device = IR_DEVICE_ANY; - else - budget_ci->ir.rc5_device = rc5_device; + ir_codes = &ir_codes_budget_ci_old_table; break; } + ir_input_init(input_dev, &budget_ci->ir.state, IR_TYPE_RC5); + /* initialise the key-up timeout handler */ init_timer(&budget_ci->ir.timer_keyup); budget_ci->ir.timer_keyup.function = msp430_ir_keyup; budget_ci->ir.timer_keyup.data = (unsigned long) &budget_ci->ir; budget_ci->ir.last_raw = 0xffff; /* An impossible value */ - error = input_register_device(input_dev); + error = ir_input_register(input_dev, ir_codes); if (error) { printk(KERN_ERR "budget_ci: could not init driver for IR device (code %d)\n", error); - goto out2; + return error; } /* note: these must be after input_register_device */ @@ -284,12 +271,6 @@ static int msp430_ir_init(struct budget_ci *budget_ci) saa7146_setgpio(saa, 3, SAA7146_GPIO_IRQHI); return 0; - -out2: - ir_input_unregister(input_dev); - input_free_device(input_dev); -out1: - return error; } static void msp430_ir_deinit(struct budget_ci *budget_ci) @@ -305,7 +286,6 @@ static void msp430_ir_deinit(struct budget_ci *budget_ci) ir_input_nokey(dev, &budget_ci->ir.state); ir_input_unregister(dev); - 
input_unregister_device(dev); } static int ciintf_read_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address) diff --git a/drivers/media/video/bt8xx/bttv-input.c b/drivers/media/video/bt8xx/bttv-input.c index 0e1a7aabefba..277a092e1214 100644 --- a/drivers/media/video/bt8xx/bttv-input.c +++ b/drivers/media/video/bt8xx/bttv-input.c @@ -368,7 +368,7 @@ int bttv_input_init(struct bttv *btv) snprintf(ir->phys, sizeof(ir->phys), "pci-%s/ir0", pci_name(btv->c.pci)); - err = ir_input_init(input_dev, &ir->ir, ir_type, ir_codes); + err = ir_input_init(input_dev, &ir->ir, ir_type); if (err < 0) goto err_out_free; @@ -389,7 +389,7 @@ int bttv_input_init(struct bttv *btv) bttv_ir_start(btv, ir); /* all done */ - err = input_register_device(btv->remote->dev); + err = ir_input_register(btv->remote->dev, ir_codes); if (err) goto err_out_stop; @@ -403,8 +403,6 @@ int bttv_input_init(struct bttv *btv) bttv_ir_stop(btv); btv->remote = NULL; err_out_free: - ir_input_unregister(input_dev); - input_free_device(input_dev); kfree(ir); return err; } @@ -416,7 +414,6 @@ void bttv_input_fini(struct bttv *btv) bttv_ir_stop(btv); ir_input_unregister(btv->remote->dev); - input_unregister_device(btv->remote->dev); kfree(btv->remote); btv->remote = NULL; } diff --git a/drivers/media/video/cx231xx/cx231xx-input.c b/drivers/media/video/cx231xx/cx231xx-input.c index 5eba1ec1205b..15826f98b688 100644 --- a/drivers/media/video/cx231xx/cx231xx-input.c +++ b/drivers/media/video/cx231xx/cx231xx-input.c @@ -197,8 +197,7 @@ int cx231xx_ir_init(struct cx231xx *dev) usb_make_path(dev->udev, ir->phys, sizeof(ir->phys)); strlcat(ir->phys, "/input0", sizeof(ir->phys)); - err = ir_input_init(input_dev, &ir->ir, IR_TYPE_OTHER, - dev->board.ir_codes); + err = ir_input_init(input_dev, &ir->ir, IR_TYPE_OTHER); if (err < 0) goto err_out_free; @@ -217,7 +216,7 @@ int cx231xx_ir_init(struct cx231xx *dev) cx231xx_ir_start(ir); /* all done */ - err = input_register_device(ir->input); + err = ir_input_register(ir->input, dev->board.ir_codes); if (err) goto err_out_stop; @@ -226,8 +225,6 @@ err_out_stop: cx231xx_ir_stop(ir); dev->ir = NULL; err_out_free: - ir_input_unregister(input_dev); - input_free_device(input_dev); kfree(ir); return err; } @@ -242,7 +239,6 @@ int cx231xx_ir_fini(struct cx231xx *dev) cx231xx_ir_stop(ir); ir_input_unregister(ir->input); - input_unregister_device(ir->input); kfree(ir); /* done */ diff --git a/drivers/media/video/cx23885/cx23885-input.c b/drivers/media/video/cx23885/cx23885-input.c index 2a5e4d959e8e..768eec92ccf9 100644 --- a/drivers/media/video/cx23885/cx23885-input.c +++ b/drivers/media/video/cx23885/cx23885-input.c @@ -377,7 +377,7 @@ int cx23885_input_init(struct cx23885_dev *dev) cx23885_boards[dev->board].name); snprintf(ir->phys, sizeof(ir->phys), "pci-%s/ir0", pci_name(dev->pci)); - ret = ir_input_init(input_dev, &ir->ir, ir_type, ir_codes); + ret = ir_input_init(input_dev, &ir->ir, ir_type); if (ret < 0) goto err_out_free; @@ -397,7 +397,7 @@ int cx23885_input_init(struct cx23885_dev *dev) dev->ir_input = ir; cx23885_input_ir_start(dev); - ret = input_register_device(ir->dev); + ret = ir_input_register(ir->dev, ir_codes); if (ret) goto err_out_stop; @@ -407,8 +407,6 @@ err_out_stop: cx23885_input_ir_stop(dev); dev->ir_input = NULL; err_out_free: - ir_input_unregister(input_dev); - input_free_device(input_dev); kfree(ir); return ret; } @@ -421,7 +419,6 @@ void cx23885_input_fini(struct cx23885_dev *dev) if (dev->ir_input == NULL) return; ir_input_unregister(dev->ir_input->dev); - 
input_unregister_device(dev->ir_input->dev); kfree(dev->ir_input); dev->ir_input = NULL; } diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c index d4dc2c2a3cbc..f9fda18b410c 100644 --- a/drivers/media/video/cx88/cx88-input.c +++ b/drivers/media/video/cx88/cx88-input.c @@ -360,7 +360,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci) snprintf(ir->name, sizeof(ir->name), "cx88 IR (%s)", core->board.name); snprintf(ir->phys, sizeof(ir->phys), "pci-%s/ir0", pci_name(pci)); - err = ir_input_init(input_dev, &ir->ir, ir_type, ir_codes); + err = ir_input_init(input_dev, &ir->ir, ir_type); if (err < 0) goto err_out_free; @@ -383,7 +383,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci) cx88_ir_start(core, ir); /* all done */ - err = input_register_device(ir->input); + err = ir_input_register(ir->input, ir_codes); if (err) goto err_out_stop; @@ -393,8 +393,6 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci) cx88_ir_stop(core, ir); core->ir = NULL; err_out_free: - ir_input_unregister(input_dev); - input_free_device(input_dev); kfree(ir); return err; } @@ -409,7 +407,6 @@ int cx88_ir_fini(struct cx88_core *core) cx88_ir_stop(core, ir); ir_input_unregister(ir->input); - input_unregister_device(ir->input); kfree(ir); /* done */ diff --git a/drivers/media/video/em28xx/em28xx-input.c b/drivers/media/video/em28xx/em28xx-input.c index 9dc4e142726b..af0d935c29be 100644 --- a/drivers/media/video/em28xx/em28xx-input.c +++ b/drivers/media/video/em28xx/em28xx-input.c @@ -393,8 +393,7 @@ int em28xx_ir_init(struct em28xx *dev) usb_make_path(dev->udev, ir->phys, sizeof(ir->phys)); strlcat(ir->phys, "/input0", sizeof(ir->phys)); - err = ir_input_init(input_dev, &ir->ir, IR_TYPE_OTHER, - dev->board.ir_codes); + err = ir_input_init(input_dev, &ir->ir, IR_TYPE_OTHER); if (err < 0) goto err_out_free; @@ -413,7 +412,7 @@ int em28xx_ir_init(struct em28xx *dev) em28xx_ir_start(ir); /* all done */ - err = input_register_device(ir->input); + err = ir_input_register(ir->input, dev->board.ir_codes); if (err) goto err_out_stop; @@ -422,8 +421,6 @@ int em28xx_ir_init(struct em28xx *dev) em28xx_ir_stop(ir); dev->ir = NULL; err_out_free: - ir_input_unregister(input_dev); - input_free_device(input_dev); kfree(ir); return err; } @@ -438,7 +435,6 @@ int em28xx_ir_fini(struct em28xx *dev) em28xx_ir_stop(ir); ir_input_unregister(ir->input); - input_unregister_device(ir->input); kfree(ir); /* done */ diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c index 9d8e78dc614e..b86e35386cee 100644 --- a/drivers/media/video/ir-kbd-i2c.c +++ b/drivers/media/video/ir-kbd-i2c.c @@ -438,7 +438,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id) dev_name(&client->dev)); /* init + register input device */ - err = ir_input_init(input_dev, &ir->ir, ir_type, ir->ir_codes); + err = ir_input_init(input_dev, &ir->ir, ir_type); if (err < 0) goto err_out_free; @@ -446,7 +446,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id) input_dev->name = ir->name; input_dev->phys = ir->phys; - err = input_register_device(ir->input); + err = ir_input_register(ir->input, ir->ir_codes); if (err) goto err_out_free; @@ -460,8 +460,6 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id) return 0; err_out_free: - ir_input_unregister(input_dev); - input_free_device(input_dev); kfree(ir); return err; } @@ -475,7 +473,6 @@ static int ir_remove(struct i2c_client *client) /* 
unregister device */ ir_input_unregister(ir->input); - input_unregister_device(ir->input); /* free memory */ kfree(ir); diff --git a/drivers/media/video/saa7134/saa7134-input.c b/drivers/media/video/saa7134/saa7134-input.c index 5093e7a0d58c..f8e985989ca0 100644 --- a/drivers/media/video/saa7134/saa7134-input.c +++ b/drivers/media/video/saa7134/saa7134-input.c @@ -708,7 +708,7 @@ int saa7134_input_init1(struct saa7134_dev *dev) snprintf(ir->phys, sizeof(ir->phys), "pci-%s/ir0", pci_name(dev->pci)); - err = ir_input_init(input_dev, &ir->ir, ir_type, ir_codes); + err = ir_input_init(input_dev, &ir->ir, ir_type); if (err < 0) goto err_out_free; @@ -728,7 +728,7 @@ int saa7134_input_init1(struct saa7134_dev *dev) dev->remote = ir; saa7134_ir_start(dev, ir); - err = input_register_device(ir->dev); + err = ir_input_register(ir->dev, ir_codes); if (err) goto err_out_stop; @@ -742,8 +742,6 @@ int saa7134_input_init1(struct saa7134_dev *dev) saa7134_ir_stop(dev); dev->remote = NULL; err_out_free: - ir_input_unregister(input_dev); - input_free_device(input_dev); kfree(ir); return err; } @@ -755,7 +753,6 @@ void saa7134_input_fini(struct saa7134_dev *dev) saa7134_ir_stop(dev); ir_input_unregister(dev->remote->dev); - input_unregister_device(dev->remote->dev); kfree(dev->remote); dev->remote = NULL; } diff --git a/include/media/ir-common.h b/include/media/ir-common.h index ac8ced6bf3e2..2c6af24b905e 100644 --- a/include/media/ir-common.h +++ b/include/media/ir-common.h @@ -84,7 +84,7 @@ struct card_ir { /* Routines from ir-functions.c */ int ir_input_init(struct input_dev *dev, struct ir_input_state *ir, - int ir_type, struct ir_scancode_table *ir_codes); + int ir_type); void ir_input_nokey(struct input_dev *dev, struct ir_input_state *ir); void ir_input_keydown(struct input_dev *dev, struct ir_input_state *ir, u32 ir_key); -- cgit v1.2.3 From 2e535ed5a16b8cc23301f3d26cfd49f3091aadcc Mon Sep 17 00:00:00 2001 From: Muralidharan Karicheri Date: Thu, 10 Dec 2009 04:39:47 -0300 Subject: V4L/DVB (13618): v4l2: Adding helper function to get dv preset description This patch adds a helper function to get description of a digital video preset added by the video timing API. This will be useful for drivers implementing the above API. Signed-off-by: Muralidharan Karicheri Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/v4l2-common.c | 47 +++++++++++++++++++++++++++++++++++++++ include/media/v4l2-common.h | 2 +- 2 files changed, 48 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c index e8e5affbabce..36b5cb86fb57 100644 --- a/drivers/media/video/v4l2-common.c +++ b/drivers/media/video/v4l2-common.c @@ -1024,3 +1024,50 @@ void v4l_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax, } } EXPORT_SYMBOL_GPL(v4l_bound_align_image); + +/** + * v4l_fill_dv_preset_info - fill description of a digital video preset + * @preset - preset value + * @info - pointer to struct v4l2_dv_enum_preset + * + * drivers can use this helper function to fill description of dv preset + * in info. 
+ */ +int v4l_fill_dv_preset_info(u32 preset, struct v4l2_dv_enum_preset *info) +{ + static const struct v4l2_dv_preset_info { + u16 width; + u16 height; + const char *name; + } dv_presets[] = { + { 0, 0, "Invalid" }, /* V4L2_DV_INVALID */ + { 720, 480, "480p@59.94" }, /* V4L2_DV_480P59_94 */ + { 720, 576, "576p@50" }, /* V4L2_DV_576P50 */ + { 1280, 720, "720p@24" }, /* V4L2_DV_720P24 */ + { 1280, 720, "720p@25" }, /* V4L2_DV_720P25 */ + { 1280, 720, "720p@30" }, /* V4L2_DV_720P30 */ + { 1280, 720, "720p@50" }, /* V4L2_DV_720P50 */ + { 1280, 720, "720p@59.94" }, /* V4L2_DV_720P59_94 */ + { 1280, 720, "720p@60" }, /* V4L2_DV_720P60 */ + { 1920, 1080, "1080i@29.97" }, /* V4L2_DV_1080I29_97 */ + { 1920, 1080, "1080i@30" }, /* V4L2_DV_1080I30 */ + { 1920, 1080, "1080i@25" }, /* V4L2_DV_1080I25 */ + { 1920, 1080, "1080i@50" }, /* V4L2_DV_1080I50 */ + { 1920, 1080, "1080i@60" }, /* V4L2_DV_1080I60 */ + { 1920, 1080, "1080p@24" }, /* V4L2_DV_1080P24 */ + { 1920, 1080, "1080p@25" }, /* V4L2_DV_1080P25 */ + { 1920, 1080, "1080p@30" }, /* V4L2_DV_1080P30 */ + { 1920, 1080, "1080p@50" }, /* V4L2_DV_1080P50 */ + { 1920, 1080, "1080p@60" }, /* V4L2_DV_1080P60 */ + }; + + if (info == NULL || preset >= ARRAY_SIZE(dv_presets)) + return -EINVAL; + + info->preset = preset; + info->width = dv_presets[preset].width; + info->height = dv_presets[preset].height; + strlcpy(info->name, dv_presets[preset].name, sizeof(info->name)); + return 0; +} +EXPORT_SYMBOL_GPL(v4l_fill_dv_preset_info); diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h index 1c25b10da34b..1c7b259f341c 100644 --- a/include/media/v4l2-common.h +++ b/include/media/v4l2-common.h @@ -212,5 +212,5 @@ void v4l_bound_align_image(unsigned int *w, unsigned int wmin, unsigned int *h, unsigned int hmin, unsigned int hmax, unsigned int halign, unsigned int salign); - +int v4l_fill_dv_preset_info(u32 preset, struct v4l2_dv_enum_preset *info); #endif /* V4L2_COMMON_H_ */ -- cgit v1.2.3 From bb5b7c11263dbbe78253cd05945a6bf8f55add8e Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 15 Dec 2009 20:56:42 -0800 Subject: tcp: Revert per-route SACK/DSACK/TIMESTAMP changes. It creates a regression, triggering badness for SYN_RECV sockets, for example: [19148.022102] Badness at net/ipv4/inet_connection_sock.c:293 [19148.022570] NIP: c02a0914 LR: c02a0904 CTR: 00000000 [19148.023035] REGS: eeecbd30 TRAP: 0700 Not tainted (2.6.32) [19148.023496] MSR: 00029032 CR: 24002442 XER: 00000000 [19148.024012] TASK = eee9a820[1756] 'privoxy' THREAD: eeeca000 This is likely caused by the change in the 'estab' parameter passed to tcp_parse_options() when invoked by the functions in net/ipv4/tcp_minisocks.c But even if that is fixed, the ->conn_request() changes made in this patch series is fundamentally wrong. They try to use the listening socket's 'dst' to probe the route settings. The listening socket doesn't even have a route, and you can't get the right route (the child request one) until much later after we setup all of the state, and it must be done by hand. This stuff really isn't ready, so the best thing to do is a full revert. This reverts the following commits: f55017a93f1a74d50244b1254b9a2bd7ac9bbf7d 022c3f7d82f0f1c68018696f2f027b87b9bb45c2 1aba721eba1d84a2defce45b950272cee1e6c72a cda42ebd67ee5fdf09d7057b5a4584d36fe8a335 345cda2fd695534be5a4494f1b59da9daed33663 dc343475ed062e13fc260acccaab91d7d80fd5b2 05eaade2782fb0c90d3034fd7a7d5a16266182bb 6a2a2d6bf8581216e08be15fcb563cfd6c430e1e Signed-off-by: David S. 
Miller --- include/linux/rtnetlink.h | 6 ++---- include/net/dst.h | 2 +- include/net/tcp.h | 3 +-- net/ipv4/syncookies.c | 27 +++++++++++++-------------- net/ipv4/tcp_input.c | 24 ++++++++---------------- net/ipv4/tcp_ipv4.c | 21 +++++++++------------ net/ipv4/tcp_minisocks.c | 10 +++++----- net/ipv4/tcp_output.c | 18 +++++------------- net/ipv6/syncookies.c | 28 +++++++++++++--------------- net/ipv6/tcp_ipv6.c | 3 +-- 10 files changed, 58 insertions(+), 84 deletions(-) (limited to 'include') diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 14fc906ed602..05330fc5b436 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -368,11 +368,9 @@ enum { #define RTAX_MAX (__RTAX_MAX - 1) #define RTAX_FEATURE_ECN 0x00000001 -#define RTAX_FEATURE_NO_SACK 0x00000002 -#define RTAX_FEATURE_NO_TSTAMP 0x00000004 +#define RTAX_FEATURE_SACK 0x00000002 +#define RTAX_FEATURE_TIMESTAMP 0x00000004 #define RTAX_FEATURE_ALLFRAG 0x00000008 -#define RTAX_FEATURE_NO_WSCALE 0x00000010 -#define RTAX_FEATURE_NO_DSACK 0x00000020 struct rta_session { __u8 proto; diff --git a/include/net/dst.h b/include/net/dst.h index 387cb3cfde7e..39c4a5963e12 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -113,7 +113,7 @@ dst_metric(const struct dst_entry *dst, int metric) static inline u32 dst_feature(const struct dst_entry *dst, u32 feature) { - return (dst ? dst_metric(dst, RTAX_FEATURES) & feature : 0); + return dst_metric(dst, RTAX_FEATURES) & feature; } static inline u32 dst_mtu(const struct dst_entry *dst) diff --git a/include/net/tcp.h b/include/net/tcp.h index 1b6f7d348cee..34f5cc24d903 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -408,8 +408,7 @@ extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, extern void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, u8 **hvpp, - int estab, - struct dst_entry *dst); + int estab); extern u8 *tcp_parse_md5sig_option(struct tcphdr *th); diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 26399ad2a289..66fd80ef2473 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -277,6 +277,13 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV); + /* check for timestamp cookie support */ + memset(&tcp_opt, 0, sizeof(tcp_opt)); + tcp_parse_options(skb, &tcp_opt, &hash_location, 0); + + if (tcp_opt.saw_tstamp) + cookie_check_timestamp(&tcp_opt); + ret = NULL; req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */ if (!req) @@ -292,6 +299,12 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, ireq->loc_addr = ip_hdr(skb)->daddr; ireq->rmt_addr = ip_hdr(skb)->saddr; ireq->ecn_ok = 0; + ireq->snd_wscale = tcp_opt.snd_wscale; + ireq->rcv_wscale = tcp_opt.rcv_wscale; + ireq->sack_ok = tcp_opt.sack_ok; + ireq->wscale_ok = tcp_opt.wscale_ok; + ireq->tstamp_ok = tcp_opt.saw_tstamp; + req->ts_recent = tcp_opt.saw_tstamp ? 
tcp_opt.rcv_tsval : 0; /* We throwed the options of the initial SYN away, so we hope * the ACK carries the same options again (see RFC1122 4.2.3.8) @@ -340,20 +353,6 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, } } - /* check for timestamp cookie support */ - memset(&tcp_opt, 0, sizeof(tcp_opt)); - tcp_parse_options(skb, &tcp_opt, &hash_location, 0, &rt->u.dst); - - if (tcp_opt.saw_tstamp) - cookie_check_timestamp(&tcp_opt); - - ireq->snd_wscale = tcp_opt.snd_wscale; - ireq->rcv_wscale = tcp_opt.rcv_wscale; - ireq->sack_ok = tcp_opt.sack_ok; - ireq->wscale_ok = tcp_opt.wscale_ok; - ireq->tstamp_ok = tcp_opt.saw_tstamp; - req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0; - /* Try to redo what tcp_v4_send_synack did. */ req->window_clamp = tp->window_clamp ? :dst_metric(&rt->u.dst, RTAX_WINDOW); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 12cab7d74dba..28e029632493 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3727,7 +3727,7 @@ old_ack: * the fast version below fails. */ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, - u8 **hvpp, int estab, struct dst_entry *dst) + u8 **hvpp, int estab) { unsigned char *ptr; struct tcphdr *th = tcp_hdr(skb); @@ -3766,8 +3766,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, break; case TCPOPT_WINDOW: if (opsize == TCPOLEN_WINDOW && th->syn && - !estab && sysctl_tcp_window_scaling && - !dst_feature(dst, RTAX_FEATURE_NO_WSCALE)) { + !estab && sysctl_tcp_window_scaling) { __u8 snd_wscale = *(__u8 *)ptr; opt_rx->wscale_ok = 1; if (snd_wscale > 14) { @@ -3783,8 +3782,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, case TCPOPT_TIMESTAMP: if ((opsize == TCPOLEN_TIMESTAMP) && ((estab && opt_rx->tstamp_ok) || - (!estab && sysctl_tcp_timestamps && - !dst_feature(dst, RTAX_FEATURE_NO_TSTAMP)))) { + (!estab && sysctl_tcp_timestamps))) { opt_rx->saw_tstamp = 1; opt_rx->rcv_tsval = get_unaligned_be32(ptr); opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4); @@ -3792,8 +3790,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, break; case TCPOPT_SACK_PERM: if (opsize == TCPOLEN_SACK_PERM && th->syn && - !estab && sysctl_tcp_sack && - !dst_feature(dst, RTAX_FEATURE_NO_SACK)) { + !estab && sysctl_tcp_sack) { opt_rx->sack_ok = 1; tcp_sack_reset(opt_rx); } @@ -3878,7 +3875,7 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th, if (tcp_parse_aligned_timestamp(tp, th)) return 1; } - tcp_parse_options(skb, &tp->rx_opt, hvpp, 1, NULL); + tcp_parse_options(skb, &tp->rx_opt, hvpp, 1); return 1; } @@ -4133,10 +4130,8 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq) { struct tcp_sock *tp = tcp_sk(sk); - struct dst_entry *dst = __sk_dst_get(sk); - if (tcp_is_sack(tp) && sysctl_tcp_dsack && - !dst_feature(dst, RTAX_FEATURE_NO_DSACK)) { + if (tcp_is_sack(tp) && sysctl_tcp_dsack) { int mib_idx; if (before(seq, tp->rcv_nxt)) @@ -4165,15 +4160,13 @@ static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq) static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); - struct dst_entry *dst = __sk_dst_get(sk); if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); tcp_enter_quickack_mode(sk); - if (tcp_is_sack(tp) && 
sysctl_tcp_dsack && - !dst_feature(dst, RTAX_FEATURE_NO_DSACK)) { + if (tcp_is_sack(tp) && sysctl_tcp_dsack) { u32 end_seq = TCP_SKB_CB(skb)->end_seq; if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) @@ -5428,11 +5421,10 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, u8 *hash_location; struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); - struct dst_entry *dst = __sk_dst_get(sk); struct tcp_cookie_values *cvp = tp->cookie_values; int saved_clamp = tp->rx_opt.mss_clamp; - tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0, dst); + tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0); if (th->ack) { /* rfc793: diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 15e96030ce47..65b8ebfd078a 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1262,20 +1262,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops; #endif - ireq = inet_rsk(req); - ireq->loc_addr = daddr; - ireq->rmt_addr = saddr; - ireq->no_srccheck = inet_sk(sk)->transparent; - ireq->opt = tcp_v4_save_options(sk, skb); - - dst = inet_csk_route_req(sk, req); - if(!dst) - goto drop_and_free; - tcp_clear_options(&tmp_opt); tmp_opt.mss_clamp = TCP_MSS_DEFAULT; tmp_opt.user_mss = tp->rx_opt.user_mss; - tcp_parse_options(skb, &tmp_opt, &hash_location, 0, dst); + tcp_parse_options(skb, &tmp_opt, &hash_location, 0); if (tmp_opt.cookie_plus > 0 && tmp_opt.saw_tstamp && @@ -1319,8 +1309,14 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) tmp_opt.tstamp_ok = tmp_opt.saw_tstamp; tcp_openreq_init(req, &tmp_opt, skb); + ireq = inet_rsk(req); + ireq->loc_addr = daddr; + ireq->rmt_addr = saddr; + ireq->no_srccheck = inet_sk(sk)->transparent; + ireq->opt = tcp_v4_save_options(sk, skb); + if (security_inet_conn_request(sk, skb, req)) - goto drop_and_release; + goto drop_and_free; if (!want_cookie) TCP_ECN_create_request(req, tcp_hdr(skb)); @@ -1345,6 +1341,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) */ if (tmp_opt.saw_tstamp && tcp_death_row.sysctl_tw_recycle && + (dst = inet_csk_route_req(sk, req)) != NULL && (peer = rt_get_peer((struct rtable *)dst)) != NULL && peer->v4daddr == saddr) { if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL && diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 87accec8d097..f206ee5dda80 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -95,9 +95,9 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb, struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); int paws_reject = 0; + tmp_opt.saw_tstamp = 0; if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) { - tmp_opt.tstamp_ok = 1; - tcp_parse_options(skb, &tmp_opt, &hash_location, 1, NULL); + tcp_parse_options(skb, &tmp_opt, &hash_location, 0); if (tmp_opt.saw_tstamp) { tmp_opt.ts_recent = tcptw->tw_ts_recent; @@ -526,9 +526,9 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK); int paws_reject = 0; - if ((th->doff > (sizeof(*th) >> 2)) && (req->ts_recent)) { - tmp_opt.tstamp_ok = 1; - tcp_parse_options(skb, &tmp_opt, &hash_location, 1, NULL); + tmp_opt.saw_tstamp = 0; + if (th->doff > (sizeof(struct tcphdr)>>2)) { + tcp_parse_options(skb, &tmp_opt, &hash_location, 0); if (tmp_opt.saw_tstamp) { tmp_opt.ts_recent = req->ts_recent; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c 
index 93316a96d820..383ce237640f 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -553,7 +553,6 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb, struct tcp_md5sig_key **md5) { struct tcp_sock *tp = tcp_sk(sk); struct tcp_cookie_values *cvp = tp->cookie_values; - struct dst_entry *dst = __sk_dst_get(sk); unsigned remaining = MAX_TCP_OPTION_SPACE; u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ? tcp_cookie_size_check(cvp->cookie_desired) : @@ -581,22 +580,18 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb, opts->mss = tcp_advertise_mss(sk); remaining -= TCPOLEN_MSS_ALIGNED; - if (likely(sysctl_tcp_timestamps && - !dst_feature(dst, RTAX_FEATURE_NO_TSTAMP) && - *md5 == NULL)) { + if (likely(sysctl_tcp_timestamps && *md5 == NULL)) { opts->options |= OPTION_TS; opts->tsval = TCP_SKB_CB(skb)->when; opts->tsecr = tp->rx_opt.ts_recent; remaining -= TCPOLEN_TSTAMP_ALIGNED; } - if (likely(sysctl_tcp_window_scaling && - !dst_feature(dst, RTAX_FEATURE_NO_WSCALE))) { + if (likely(sysctl_tcp_window_scaling)) { opts->ws = tp->rx_opt.rcv_wscale; opts->options |= OPTION_WSCALE; remaining -= TCPOLEN_WSCALE_ALIGNED; } - if (likely(sysctl_tcp_sack && - !dst_feature(dst, RTAX_FEATURE_NO_SACK))) { + if (likely(sysctl_tcp_sack)) { opts->options |= OPTION_SACK_ADVERTISE; if (unlikely(!(OPTION_TS & opts->options))) remaining -= TCPOLEN_SACKPERM_ALIGNED; @@ -2527,9 +2522,7 @@ static void tcp_connect_init(struct sock *sk) * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT. */ tp->tcp_header_len = sizeof(struct tcphdr) + - (sysctl_tcp_timestamps && - (!dst_feature(dst, RTAX_FEATURE_NO_TSTAMP) ? - TCPOLEN_TSTAMP_ALIGNED : 0)); + (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0); #ifdef CONFIG_TCP_MD5SIG if (tp->af_specific->md5_lookup(sk, sk) != NULL) @@ -2555,8 +2548,7 @@ static void tcp_connect_init(struct sock *sk) tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), &tp->rcv_wnd, &tp->window_clamp, - (sysctl_tcp_window_scaling && - !dst_feature(dst, RTAX_FEATURE_NO_WSCALE)), + sysctl_tcp_window_scaling, &rcv_wscale); tp->rx_opt.rcv_wscale = rcv_wscale; diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 5b9af508b8f2..7208a06576c6 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -185,6 +185,13 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV); + /* check for timestamp cookie support */ + memset(&tcp_opt, 0, sizeof(tcp_opt)); + tcp_parse_options(skb, &tcp_opt, &hash_location, 0); + + if (tcp_opt.saw_tstamp) + cookie_check_timestamp(&tcp_opt); + ret = NULL; req = inet6_reqsk_alloc(&tcp6_request_sock_ops); if (!req) @@ -218,6 +225,12 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) req->expires = 0UL; req->retrans = 0; ireq->ecn_ok = 0; + ireq->snd_wscale = tcp_opt.snd_wscale; + ireq->rcv_wscale = tcp_opt.rcv_wscale; + ireq->sack_ok = tcp_opt.sack_ok; + ireq->wscale_ok = tcp_opt.wscale_ok; + ireq->tstamp_ok = tcp_opt.saw_tstamp; + req->ts_recent = tcp_opt.saw_tstamp ? 
tcp_opt.rcv_tsval : 0; treq->rcv_isn = ntohl(th->seq) - 1; treq->snt_isn = cookie; @@ -253,21 +266,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) goto out_free; } - /* check for timestamp cookie support */ - memset(&tcp_opt, 0, sizeof(tcp_opt)); - tcp_parse_options(skb, &tcp_opt, &hash_location, 0, dst); - - if (tcp_opt.saw_tstamp) - cookie_check_timestamp(&tcp_opt); - - req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0; - - ireq->snd_wscale = tcp_opt.snd_wscale; - ireq->rcv_wscale = tcp_opt.rcv_wscale; - ireq->sack_ok = tcp_opt.sack_ok; - ireq->wscale_ok = tcp_opt.wscale_ok; - ireq->tstamp_ok = tcp_opt.saw_tstamp; - req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW); tcp_select_initial_window(tcp_full_space(sk), req->mss, &req->rcv_wnd, &req->window_clamp, diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index ee9cf62458d4..febfd595a40d 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1169,7 +1169,6 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) struct inet6_request_sock *treq; struct ipv6_pinfo *np = inet6_sk(sk); struct tcp_sock *tp = tcp_sk(sk); - struct dst_entry *dst = __sk_dst_get(sk); __u32 isn = TCP_SKB_CB(skb)->when; #ifdef CONFIG_SYN_COOKIES int want_cookie = 0; @@ -1208,7 +1207,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) tcp_clear_options(&tmp_opt); tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); tmp_opt.user_mss = tp->rx_opt.user_mss; - tcp_parse_options(skb, &tmp_opt, &hash_location, 0, dst); + tcp_parse_options(skb, &tmp_opt, &hash_location, 0); if (tmp_opt.cookie_plus > 0 && tmp_opt.saw_tstamp && -- cgit v1.2.3 From 588f9ce6ca61ecb4663ee6ef2f75d2d96c73151e Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 16 Dec 2009 12:19:57 +0100 Subject: HWPOISON: Be more aggressive at freeing non LRU caches shake_page handles more types of page caches than lru_drain_all() - per cpu page allocator pages - per CPU LRU Stops early when the page became free. Used in followon patches. Signed-off-by: Andi Kleen --- include/linux/mm.h | 1 + mm/memory-failure.c | 22 ++++++++++++++++++++++ 2 files changed, 23 insertions(+) (limited to 'include') diff --git a/include/linux/mm.h b/include/linux/mm.h index 9d65ae4ba0e0..68c84bb2ad3f 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1335,6 +1335,7 @@ extern void memory_failure(unsigned long pfn, int trapno); extern int __memory_failure(unsigned long pfn, int trapno, int ref); extern int sysctl_memory_failure_early_kill; extern int sysctl_memory_failure_recovery; +extern void shake_page(struct page *p); extern atomic_long_t mce_bad_pages; #endif /* __KERNEL__ */ diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 50d4f8d7024a..38fcbb22eab9 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -82,6 +82,28 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno, return ret; } +/* + * When a unknown page type is encountered drain as many buffers as possible + * in the hope to turn the page into a LRU or free page, which we can handle. + */ +void shake_page(struct page *p) +{ + if (!PageSlab(p)) { + lru_add_drain_all(); + if (PageLRU(p)) + return; + drain_all_pages(); + if (PageLRU(p) || is_free_buddy_page(p)) + return; + } + /* + * Could call shrink_slab here (which would also + * shrink other caches). Unfortunately that might + * also access the corrupted page, which could be fatal. 
+ */ +} +EXPORT_SYMBOL_GPL(shake_page); + /* * Kill all processes that have a poisoned page mapped and then isolate * the page. -- cgit v1.2.3 From 82ba011b9041dd31c15e4f63797b08aa0a288e61 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 16 Dec 2009 12:19:57 +0100 Subject: HWPOISON: Turn ref argument into flags argument Now that "ref" is just a boolean turn it into a flags argument. First step is only a single flag that makes the code's intention more clear, but more may follow. Signed-off-by: Andi Kleen --- include/linux/mm.h | 5 ++++- mm/madvise.c | 2 +- mm/memory-failure.c | 5 +++-- 3 files changed, 8 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/mm.h b/include/linux/mm.h index 68c84bb2ad3f..135e19198cd3 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1331,8 +1331,11 @@ extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim, size_t size); extern void refund_locked_memory(struct mm_struct *mm, size_t size); +enum mf_flags { + MF_COUNT_INCREASED = 1 << 0, +}; extern void memory_failure(unsigned long pfn, int trapno); -extern int __memory_failure(unsigned long pfn, int trapno, int ref); +extern int __memory_failure(unsigned long pfn, int trapno, int flags); extern int sysctl_memory_failure_early_kill; extern int sysctl_memory_failure_recovery; extern void shake_page(struct page *p); diff --git a/mm/madvise.c b/mm/madvise.c index 18970aec0d2f..6ca34f0cd4aa 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -237,7 +237,7 @@ static int madvise_hwpoison(unsigned long start, unsigned long end) printk(KERN_INFO "Injecting memory failure for page %lx at %lx\n", page_to_pfn(p), start); /* Ignore return value for now */ - __memory_failure(page_to_pfn(p), 0, 1); + __memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED); } return ret; } diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 4253e14fa709..3338c443272c 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -737,7 +737,7 @@ static void hwpoison_user_mappings(struct page *p, unsigned long pfn, ret != SWAP_SUCCESS, pfn); } -int __memory_failure(unsigned long pfn, int trapno, int ref) +int __memory_failure(unsigned long pfn, int trapno, int flags) { unsigned long lru_flag; struct page_state *ps; @@ -773,7 +773,8 @@ int __memory_failure(unsigned long pfn, int trapno, int ref) * In fact it's dangerous to directly bump up page count from 0, * that may make page_freeze_refs()/page_unfreeze_refs() mismatch. */ - if (!ref && !get_page_unless_zero(compound_head(p))) { + if (!(flags & MF_COUNT_INCREASED) && + !get_page_unless_zero(compound_head(p))) { action_result(pfn, "free or high order kernel", IGNORED); return PageBuddy(compound_head(p)) ? 0 : -EBUSY; } -- cgit v1.2.3 From 847ce401df392b0704369fd3f75df614ac1414b4 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Wed, 16 Dec 2009 12:19:58 +0100 Subject: HWPOISON: Add unpoisoning support The unpoisoning interface is useful for stress testing tools to reclaim poisoned pages (to prevent OOM) There is no hardware level unpoisioning, so this cannot be used for real memory errors, only for software injected errors. Note that it may leak pages silently - those who have been removed from LRU cache, but not isolated from page cache/swap cache at hwpoison time. Especially the stress test of dirty swap cache pages shall reboot system before exhausting memory. 
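For illustration only, and not part of the patch: a stress-testing tool could drive the two injection files together, poisoning a page frame and then reclaiming it again. The debugfs mount point in the sketch is an assumption (the documentation hunk in this patch writes the paths as /sys/debug/hwpoison/); adjust it to wherever debugfs is actually mounted.

/*
 * Hypothetical user-space sketch: inject a software poison for a PFN,
 * then unpoison it again via the interface added by this patch.  Paths
 * assume debugfs at /sys/kernel/debug and the hwpoison_inject module.
 */
#include <stdio.h>
#include <stdlib.h>

static int write_pfn(const char *path, unsigned long long pfn)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%llu\n", pfn);	/* the fops parse a %lli value */
	return fclose(f);
}

int main(int argc, char **argv)
{
	unsigned long long pfn;

	if (argc < 2)
		return 1;
	pfn = strtoull(argv[1], NULL, 0);
	write_pfn("/sys/kernel/debug/hwpoison/corrupt-pfn", pfn);
	return write_pfn("/sys/kernel/debug/hwpoison/unpoison-pfn", pfn) ? 1 : 0;
}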
AK: Fix comments, add documentation, add printks, rename symbol Signed-off-by: Wu Fengguang Signed-off-by: Andi Kleen --- Documentation/vm/hwpoison.txt | 16 ++++++++-- include/linux/mm.h | 1 + include/linux/page-flags.h | 2 +- mm/hwpoison-inject.c | 36 +++++++++++++++++++---- mm/memory-failure.c | 68 +++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 114 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/Documentation/vm/hwpoison.txt b/Documentation/vm/hwpoison.txt index 3ffadf8da61f..f047e75acb23 100644 --- a/Documentation/vm/hwpoison.txt +++ b/Documentation/vm/hwpoison.txt @@ -98,10 +98,22 @@ madvise(MADV_POISON, ....) hwpoison-inject module through debugfs - /sys/debug/hwpoison/corrupt-pfn -Inject hwpoison fault at PFN echoed into this file +/sys/debug/hwpoison/ +corrupt-pfn + +Inject hwpoison fault at PFN echoed into this file. + +unpoison-pfn + +Software-unpoison page at PFN echoed into this file. This +way a page can be reused again. +This only works for Linux injected failures, not for real +memory failures. + +Note these injection interfaces are not stable and might change between +kernel versions Architecture specific MCE injector diff --git a/include/linux/mm.h b/include/linux/mm.h index 135e19198cd3..8cdb941fc7b5 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1336,6 +1336,7 @@ enum mf_flags { }; extern void memory_failure(unsigned long pfn, int trapno); extern int __memory_failure(unsigned long pfn, int trapno, int flags); +extern int unpoison_memory(unsigned long pfn); extern int sysctl_memory_failure_early_kill; extern int sysctl_memory_failure_recovery; extern void shake_page(struct page *p); diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 49e907bd067f..f9df6308af95 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -275,7 +275,7 @@ PAGEFLAG_FALSE(Uncached) #ifdef CONFIG_MEMORY_FAILURE PAGEFLAG(HWPoison, hwpoison) -TESTSETFLAG(HWPoison, hwpoison) +TESTSCFLAG(HWPoison, hwpoison) #define __PG_HWPOISON (1UL << PG_hwpoison) #else PAGEFLAG_FALSE(HWPoison) diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c index e1d85137f086..6e35e563bf50 100644 --- a/mm/hwpoison-inject.c +++ b/mm/hwpoison-inject.c @@ -4,7 +4,7 @@ #include #include -static struct dentry *hwpoison_dir, *corrupt_pfn; +static struct dentry *hwpoison_dir; static int hwpoison_inject(void *data, u64 val) { @@ -14,7 +14,16 @@ static int hwpoison_inject(void *data, u64 val) return __memory_failure(val, 18, 0); } +static int hwpoison_unpoison(void *data, u64 val) +{ + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + return unpoison_memory(val); +} + DEFINE_SIMPLE_ATTRIBUTE(hwpoison_fops, NULL, hwpoison_inject, "%lli\n"); +DEFINE_SIMPLE_ATTRIBUTE(unpoison_fops, NULL, hwpoison_unpoison, "%lli\n"); static void pfn_inject_exit(void) { @@ -24,16 +33,31 @@ static void pfn_inject_exit(void) static int pfn_inject_init(void) { + struct dentry *dentry; + hwpoison_dir = debugfs_create_dir("hwpoison", NULL); if (hwpoison_dir == NULL) return -ENOMEM; - corrupt_pfn = debugfs_create_file("corrupt-pfn", 0600, hwpoison_dir, + + /* + * Note that the below poison/unpoison interfaces do not involve + * hardware status change, hence do not require hardware support. + * They are mainly for testing hwpoison in software level. 
+ */ + dentry = debugfs_create_file("corrupt-pfn", 0600, hwpoison_dir, NULL, &hwpoison_fops); - if (corrupt_pfn == NULL) { - pfn_inject_exit(); - return -ENOMEM; - } + if (!dentry) + goto fail; + + dentry = debugfs_create_file("unpoison-pfn", 0600, hwpoison_dir, + NULL, &unpoison_fops); + if (!dentry) + goto fail; + return 0; +fail: + pfn_inject_exit(); + return -ENOMEM; } module_init(pfn_inject_init); diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 5055b940df5f..ed6e91c87a54 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -838,6 +838,16 @@ int __memory_failure(unsigned long pfn, int trapno, int flags) * and in many cases impossible, so we just avoid it here. */ lock_page_nosync(p); + + /* + * unpoison always clear PG_hwpoison inside page lock + */ + if (!PageHWPoison(p)) { + action_result(pfn, "unpoisoned", IGNORED); + res = 0; + goto out; + } + wait_on_page_writeback(p); /* @@ -893,3 +903,61 @@ void memory_failure(unsigned long pfn, int trapno) { __memory_failure(pfn, trapno, 0); } + +/** + * unpoison_memory - Unpoison a previously poisoned page + * @pfn: Page number of the to be unpoisoned page + * + * Software-unpoison a page that has been poisoned by + * memory_failure() earlier. + * + * This is only done on the software-level, so it only works + * for linux injected failures, not real hardware failures + * + * Returns 0 for success, otherwise -errno. + */ +int unpoison_memory(unsigned long pfn) +{ + struct page *page; + struct page *p; + int freeit = 0; + + if (!pfn_valid(pfn)) + return -ENXIO; + + p = pfn_to_page(pfn); + page = compound_head(p); + + if (!PageHWPoison(p)) { + pr_debug("MCE: Page was already unpoisoned %#lx\n", pfn); + return 0; + } + + if (!get_page_unless_zero(page)) { + if (TestClearPageHWPoison(p)) + atomic_long_dec(&mce_bad_pages); + pr_debug("MCE: Software-unpoisoned free page %#lx\n", pfn); + return 0; + } + + lock_page_nosync(page); + /* + * This test is racy because PG_hwpoison is set outside of page lock. + * That's acceptable because that won't trigger kernel panic. Instead, + * the PG_hwpoison page will be caught and isolated on the entrance to + * the free buddy page pool. + */ + if (TestClearPageHWPoison(p)) { + pr_debug("MCE: Software-unpoisoned page %#lx\n", pfn); + atomic_long_dec(&mce_bad_pages); + freeit = 1; + } + unlock_page(page); + + put_page(page); + if (freeit) + put_page(page); + + return 0; +} +EXPORT_SYMBOL(unpoison_memory); -- cgit v1.2.3 From 1a9b5b7fe0c5dad8a635288882d36785dea742f9 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Wed, 16 Dec 2009 12:19:59 +0100 Subject: mm: export stable page flags Rename get_uflags() to stable_page_flags() and make it a global function for use in the hwpoison page flags filter, which need to compare user page flags with the value provided by user space. Also move KPF_* to kernel-page-flags.h for use by user space tools. 
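For context only, not part of the patch: the KPF_* values moved into kernel-page-flags.h are the bit positions user space reads back from /proc/kpageflags, one 64-bit flags word per page frame indexed by PFN. A minimal reader might look like the sketch below; treat it as an illustration of the interface rather than a reference tool.

/*
 * Hypothetical sketch: print two of the stable page flags for one PFN
 * by reading /proc/kpageflags (one u64 of flags per page frame).
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define KPF_LRU		5
#define KPF_HWPOISON	19

int main(int argc, char **argv)
{
	unsigned long long pfn;
	uint64_t flags;
	int fd;

	if (argc < 2)
		return 1;
	pfn = strtoull(argv[1], NULL, 0);
	fd = open("/proc/kpageflags", O_RDONLY);
	if (fd < 0 ||
	    pread(fd, &flags, sizeof(flags), (off_t)(pfn * 8)) != sizeof(flags))
		return 1;
	printf("pfn %llu: lru=%d hwpoison=%d\n", pfn,
	       (int)((flags >> KPF_LRU) & 1), (int)((flags >> KPF_HWPOISON) & 1));
	close(fd);
	return 0;
}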
Acked-by: Matt Mackall Signed-off-by: Andi Kleen CC: Nick Piggin CC: Christoph Lameter Signed-off-by: Wu Fengguang Signed-off-by: Andi Kleen --- fs/proc/page.c | 45 +++----------------------------------- include/linux/kernel-page-flags.h | 46 +++++++++++++++++++++++++++++++++++++++ include/linux/page-flags.h | 2 ++ 3 files changed, 51 insertions(+), 42 deletions(-) create mode 100644 include/linux/kernel-page-flags.h (limited to 'include') diff --git a/fs/proc/page.c b/fs/proc/page.c index 5033ce0d254b..180cf5a0bd67 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include "internal.h" @@ -71,52 +72,12 @@ static const struct file_operations proc_kpagecount_operations = { * physical page flags. */ -/* These macros are used to decouple internal flags from exported ones */ - -#define KPF_LOCKED 0 -#define KPF_ERROR 1 -#define KPF_REFERENCED 2 -#define KPF_UPTODATE 3 -#define KPF_DIRTY 4 -#define KPF_LRU 5 -#define KPF_ACTIVE 6 -#define KPF_SLAB 7 -#define KPF_WRITEBACK 8 -#define KPF_RECLAIM 9 -#define KPF_BUDDY 10 - -/* 11-20: new additions in 2.6.31 */ -#define KPF_MMAP 11 -#define KPF_ANON 12 -#define KPF_SWAPCACHE 13 -#define KPF_SWAPBACKED 14 -#define KPF_COMPOUND_HEAD 15 -#define KPF_COMPOUND_TAIL 16 -#define KPF_HUGE 17 -#define KPF_UNEVICTABLE 18 -#define KPF_HWPOISON 19 -#define KPF_NOPAGE 20 - -#define KPF_KSM 21 - -/* kernel hacking assistances - * WARNING: subject to change, never rely on them! - */ -#define KPF_RESERVED 32 -#define KPF_MLOCKED 33 -#define KPF_MAPPEDTODISK 34 -#define KPF_PRIVATE 35 -#define KPF_PRIVATE_2 36 -#define KPF_OWNER_PRIVATE 37 -#define KPF_ARCH 38 -#define KPF_UNCACHED 39 - static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit) { return ((kflags >> kbit) & 1) << ubit; } -static u64 get_uflags(struct page *page) +u64 stable_page_flags(struct page *page) { u64 k; u64 u; @@ -219,7 +180,7 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf, else ppage = NULL; - if (put_user(get_uflags(ppage), out)) { + if (put_user(stable_page_flags(ppage), out)) { ret = -EFAULT; break; } diff --git a/include/linux/kernel-page-flags.h b/include/linux/kernel-page-flags.h new file mode 100644 index 000000000000..bd92a89f4b0a --- /dev/null +++ b/include/linux/kernel-page-flags.h @@ -0,0 +1,46 @@ +#ifndef LINUX_KERNEL_PAGE_FLAGS_H +#define LINUX_KERNEL_PAGE_FLAGS_H + +/* + * Stable page flag bits exported to user space + */ + +#define KPF_LOCKED 0 +#define KPF_ERROR 1 +#define KPF_REFERENCED 2 +#define KPF_UPTODATE 3 +#define KPF_DIRTY 4 +#define KPF_LRU 5 +#define KPF_ACTIVE 6 +#define KPF_SLAB 7 +#define KPF_WRITEBACK 8 +#define KPF_RECLAIM 9 +#define KPF_BUDDY 10 + +/* 11-20: new additions in 2.6.31 */ +#define KPF_MMAP 11 +#define KPF_ANON 12 +#define KPF_SWAPCACHE 13 +#define KPF_SWAPBACKED 14 +#define KPF_COMPOUND_HEAD 15 +#define KPF_COMPOUND_TAIL 16 +#define KPF_HUGE 17 +#define KPF_UNEVICTABLE 18 +#define KPF_HWPOISON 19 +#define KPF_NOPAGE 20 + +#define KPF_KSM 21 + +/* kernel hacking assistances + * WARNING: subject to change, never rely on them! 
+ */ +#define KPF_RESERVED 32 +#define KPF_MLOCKED 33 +#define KPF_MAPPEDTODISK 34 +#define KPF_PRIVATE 35 +#define KPF_PRIVATE_2 36 +#define KPF_OWNER_PRIVATE 37 +#define KPF_ARCH 38 +#define KPF_UNCACHED 39 + +#endif /* LINUX_KERNEL_PAGE_FLAGS_H */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index f9df6308af95..feee2ba8d06a 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -282,6 +282,8 @@ PAGEFLAG_FALSE(HWPoison) #define __PG_HWPOISON 0 #endif +u64 stable_page_flags(struct page *page); + static inline int PageUptodate(struct page *page) { int ret = test_bit(PG_uptodate, &(page)->flags); -- cgit v1.2.3 From e42d9d5d47961fb5db0be65b56dd52fe7b2421f1 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Wed, 16 Dec 2009 12:19:59 +0100 Subject: memcg: rename and export try_get_mem_cgroup_from_page() So that the hwpoison injector can get mem_cgroup for arbitrary page and thus know whether it is owned by some mem_cgroup task(s). [AK: Merged with latest git tree] CC: KOSAKI Motohiro CC: Hugh Dickins CC: Daisuke Nishimura CC: Balbir Singh Acked-by: KAMEZAWA Hiroyuki Signed-off-by: Wu Fengguang Signed-off-by: Andi Kleen --- include/linux/memcontrol.h | 6 ++++++ mm/memcontrol.c | 11 ++++------- 2 files changed, 10 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index bf9213b2db8f..fc9bae82ac42 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -68,6 +68,7 @@ extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask); int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem); +extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page); extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); static inline @@ -189,6 +190,11 @@ mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to) { } +static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) +{ + return NULL; +} + static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem) { return 1; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index e0c2066495e3..b5ac61ce7346 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1379,25 +1379,22 @@ static struct mem_cgroup *mem_cgroup_lookup(unsigned short id) return container_of(css, struct mem_cgroup, css); } -static struct mem_cgroup *try_get_mem_cgroup_from_swapcache(struct page *page) +struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) { - struct mem_cgroup *mem; + struct mem_cgroup *mem = NULL; struct page_cgroup *pc; unsigned short id; swp_entry_t ent; VM_BUG_ON(!PageLocked(page)); - if (!PageSwapCache(page)) - return NULL; - pc = lookup_page_cgroup(page); lock_page_cgroup(pc); if (PageCgroupUsed(pc)) { mem = pc->mem_cgroup; if (mem && !css_tryget(&mem->css)) mem = NULL; - } else { + } else if (PageSwapCache(page)) { ent.val = page_private(page); id = lookup_swap_cgroup(ent); rcu_read_lock(); @@ -1743,7 +1740,7 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm, */ if (!PageSwapCache(page)) goto charge_cur_mm; - mem = try_get_mem_cgroup_from_swapcache(page); + mem = try_get_mem_cgroup_from_page(page); if (!mem) goto charge_cur_mm; *ptr = mem; -- cgit v1.2.3 From d324236b3333e87c8825b35f2104184734020d35 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Wed, 16 Dec 2009 12:19:59 +0100 Subject: memcg: add accessor to mem_cgroup.css So 
that an outside user can free the reference count grabbed by try_get_mem_cgroup_from_page(). CC: KOSAKI Motohiro CC: Hugh Dickins CC: Daisuke Nishimura CC: Balbir Singh Acked-by: KAMEZAWA Hiroyuki Signed-off-by: Wu Fengguang Signed-off-by: Andi Kleen --- include/linux/memcontrol.h | 7 +++++++ mm/memcontrol.c | 5 +++++ 2 files changed, 12 insertions(+) (limited to 'include') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index fc9bae82ac42..2c30a1116d84 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -81,6 +81,8 @@ int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup) return cgroup == mem; } +extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem); + extern int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr); extern void mem_cgroup_end_migration(struct mem_cgroup *mem, @@ -206,6 +208,11 @@ static inline int task_in_mem_cgroup(struct task_struct *task, return 1; } +static inline struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem) +{ + return NULL; +} + static inline int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr) { diff --git a/mm/memcontrol.c b/mm/memcontrol.c index b5ac61ce7346..9eee80d6d490 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -282,6 +282,11 @@ mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid) return &mem->info.nodeinfo[nid]->zoneinfo[zid]; } +struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem) +{ + return &mem->css; +} + static struct mem_cgroup_per_zone * page_cgroup_zoneinfo(struct page_cgroup *pc) { -- cgit v1.2.3 From facb6011f3993947283fa15d039dacb4ad140230 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 16 Dec 2009 12:20:00 +0100 Subject: HWPOISON: Add soft page offline support This is a simpler, gentler variant of memory_failure() for soft page offlining controlled from user space. It doesn't kill anything, just tries to invalidate and if that doesn't work migrate the page away. This is useful for predictive failure analysis, where a page has a high rate of corrected errors, but hasn't gone bad yet. Instead it can be offlined early and avoided. The offlining is controlled from sysfs, including a new generic entry point for hard page offlining for symmetry too. We use the page isolate facility to prevent re-allocation race. Normally this is only used by memory hotplug. To avoid races with memory allocation I am using lock_system_sleep(). This avoids the situation where memory hotplug is about to isolate a page range and then hwpoison undoes that work. This is a big hammer currently, but the simplest solution currently. When the page is not free or LRU we try to free pages from slab and other caches. The slab freeing is currently quite dumb and does not try to focus on the specific slab cache which might own the page. This could be potentially improved later. Thanks to Fengguang Wu and Haicheng Li for some fixes. 
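For illustration only, not part of the patch: with the sysfs entry documented below, a policy daemon that has decided a page frame is suspect could request the offline roughly as follows. The file takes a hex physical address per the ABI document added by this patch; everything else in the snippet is an assumption.

/*
 * Hypothetical sketch: ask the kernel to soft-offline the page backing
 * a given physical address through the new sysfs attribute.
 */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	unsigned long long paddr;
	FILE *f;

	if (argc < 2)
		return 1;
	paddr = strtoull(argv[1], NULL, 0);
	f = fopen("/sys/devices/system/memory/soft_offline_page", "w");
	if (!f)
		return 1;
	fprintf(f, "0x%llx\n", paddr);	/* hex physical address, see ABI doc */
	return fclose(f) ? 1 : 0;
}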
[Added fix from Andrew Morton to adapt to new migrate_pages prototype] Signed-off-by: Andi Kleen --- .../ABI/testing/sysfs-memory-page-offline | 44 +++++ drivers/base/memory.c | 61 +++++++ include/linux/mm.h | 3 +- mm/hwpoison-inject.c | 2 +- mm/memory-failure.c | 194 ++++++++++++++++++++- 5 files changed, 297 insertions(+), 7 deletions(-) create mode 100644 Documentation/ABI/testing/sysfs-memory-page-offline (limited to 'include') diff --git a/Documentation/ABI/testing/sysfs-memory-page-offline b/Documentation/ABI/testing/sysfs-memory-page-offline new file mode 100644 index 000000000000..e14703f12fdf --- /dev/null +++ b/Documentation/ABI/testing/sysfs-memory-page-offline @@ -0,0 +1,44 @@ +What: /sys/devices/system/memory/soft_offline_page +Date: Sep 2009 +KernelVersion: 2.6.33 +Contact: andi@firstfloor.org +Description: + Soft-offline the memory page containing the physical address + written into this file. Input is a hex number specifying the + physical address of the page. The kernel will then attempt + to soft-offline it, by moving the contents elsewhere or + dropping it if possible. The kernel will then be placed + on the bad page list and never be reused. + + The offlining is done in kernel specific granuality. + Normally it's the base page size of the kernel, but + this might change. + + The page must be still accessible, not poisoned. The + kernel will never kill anything for this, but rather + fail the offline. Return value is the size of the + number, or a error when the offlining failed. Reading + the file is not allowed. + +What: /sys/devices/system/memory/hard_offline_page +Date: Sep 2009 +KernelVersion: 2.6.33 +Contact: andi@firstfloor.org +Description: + Hard-offline the memory page containing the physical + address written into this file. Input is a hex number + specifying the physical address of the page. The + kernel will then attempt to hard-offline the page, by + trying to drop the page or killing any owner or + triggering IO errors if needed. Note this may kill + any processes owning the page. The kernel will avoid + to access this page assuming it's poisoned by the + hardware. + + The offlining is done in kernel specific granuality. + Normally it's the base page size of the kernel, but + this might change. + + Return value is the size of the number, or a error when + the offlining failed. + Reading the file is not allowed. diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 989429cfed88..c4c8f2e1dd15 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -341,6 +341,64 @@ static inline int memory_probe_init(void) } #endif +#ifdef CONFIG_MEMORY_FAILURE +/* + * Support for offlining pages of memory + */ + +/* Soft offline a page */ +static ssize_t +store_soft_offline_page(struct class *class, const char *buf, size_t count) +{ + int ret; + u64 pfn; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (strict_strtoull(buf, 0, &pfn) < 0) + return -EINVAL; + pfn >>= PAGE_SHIFT; + if (!pfn_valid(pfn)) + return -ENXIO; + ret = soft_offline_page(pfn_to_page(pfn), 0); + return ret == 0 ? count : ret; +} + +/* Forcibly offline a page, including killing processes. */ +static ssize_t +store_hard_offline_page(struct class *class, const char *buf, size_t count) +{ + int ret; + u64 pfn; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (strict_strtoull(buf, 0, &pfn) < 0) + return -EINVAL; + pfn >>= PAGE_SHIFT; + ret = __memory_failure(pfn, 0, 0); + return ret ? 
ret : count; +} + +static CLASS_ATTR(soft_offline_page, 0644, NULL, store_soft_offline_page); +static CLASS_ATTR(hard_offline_page, 0644, NULL, store_hard_offline_page); + +static __init int memory_fail_init(void) +{ + int err; + + err = sysfs_create_file(&memory_sysdev_class.kset.kobj, + &class_attr_soft_offline_page.attr); + if (!err) + err = sysfs_create_file(&memory_sysdev_class.kset.kobj, + &class_attr_hard_offline_page.attr); + return err; +} +#else +static inline int memory_fail_init(void) +{ + return 0; +} +#endif + /* * Note that phys_device is optional. It is here to allow for * differentiation between which *physical* devices each @@ -471,6 +529,9 @@ int __init memory_dev_init(void) } err = memory_probe_init(); + if (!ret) + ret = err; + err = memory_fail_init(); if (!ret) ret = err; err = block_size_init(); diff --git a/include/linux/mm.h b/include/linux/mm.h index 8cdb941fc7b5..849b4a61bd8f 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1339,8 +1339,9 @@ extern int __memory_failure(unsigned long pfn, int trapno, int flags); extern int unpoison_memory(unsigned long pfn); extern int sysctl_memory_failure_early_kill; extern int sysctl_memory_failure_recovery; -extern void shake_page(struct page *p); +extern void shake_page(struct page *p, int access); extern atomic_long_t mce_bad_pages; +extern int soft_offline_page(struct page *page, int flags); #endif /* __KERNEL__ */ #endif /* _LINUX_MM_H */ diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c index c597f46ac18a..a77fe3f9e211 100644 --- a/mm/hwpoison-inject.c +++ b/mm/hwpoison-inject.c @@ -29,7 +29,7 @@ static int hwpoison_inject(void *data, u64 val) return 0; if (!PageLRU(p)) - shake_page(p); + shake_page(p, 0); /* * This implies unable to support non-LRU pages. */ diff --git a/mm/memory-failure.c b/mm/memory-failure.c index b5c3b6bd511f..bcce28755832 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -41,6 +41,9 @@ #include #include #include +#include +#include +#include #include "internal.h" int sysctl_memory_failure_early_kill __read_mostly = 0; @@ -201,7 +204,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno, * When a unknown page type is encountered drain as many buffers as possible * in the hope to turn the page into a LRU or free page, which we can handle. */ -void shake_page(struct page *p) +void shake_page(struct page *p, int access) { if (!PageSlab(p)) { lru_add_drain_all(); @@ -211,11 +214,19 @@ void shake_page(struct page *p) if (PageLRU(p) || is_free_buddy_page(p)) return; } + /* - * Could call shrink_slab here (which would also - * shrink other caches). Unfortunately that might - * also access the corrupted page, which could be fatal. + * Only all shrink_slab here (which would also + * shrink other caches) if access is not potentially fatal. */ + if (access) { + int nr; + do { + nr = shrink_slab(1000, GFP_KERNEL, 1000); + if (page_count(p) == 0) + break; + } while (nr > 10); + } } EXPORT_SYMBOL_GPL(shake_page); @@ -949,7 +960,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags) * walked by the page reclaim code, however that's not a big loss. */ if (!PageLRU(p)) - shake_page(p); + shake_page(p, 0); if (!PageLRU(p)) { /* * shake_page could have turned it free. 
@@ -1099,3 +1110,176 @@ int unpoison_memory(unsigned long pfn) return 0; } EXPORT_SYMBOL(unpoison_memory); + +static struct page *new_page(struct page *p, unsigned long private, int **x) +{ + return alloc_pages(GFP_HIGHUSER_MOVABLE, 0); +} + +/* + * Safely get reference count of an arbitrary page. + * Returns 0 for a free page, -EIO for a zero refcount page + * that is not free, and 1 for any other page type. + * For 1 the page is returned with increased page count, otherwise not. + */ +static int get_any_page(struct page *p, unsigned long pfn, int flags) +{ + int ret; + + if (flags & MF_COUNT_INCREASED) + return 1; + + /* + * The lock_system_sleep prevents a race with memory hotplug, + * because the isolation assumes there's only a single user. + * This is a big hammer, a better would be nicer. + */ + lock_system_sleep(); + + /* + * Isolate the page, so that it doesn't get reallocated if it + * was free. + */ + set_migratetype_isolate(p); + if (!get_page_unless_zero(compound_head(p))) { + if (is_free_buddy_page(p)) { + pr_debug("get_any_page: %#lx free buddy page\n", pfn); + /* Set hwpoison bit while page is still isolated */ + SetPageHWPoison(p); + ret = 0; + } else { + pr_debug("get_any_page: %#lx: unknown zero refcount page type %lx\n", + pfn, p->flags); + ret = -EIO; + } + } else { + /* Not a free page */ + ret = 1; + } + unset_migratetype_isolate(p); + unlock_system_sleep(); + return ret; +} + +/** + * soft_offline_page - Soft offline a page. + * @page: page to offline + * @flags: flags. Same as memory_failure(). + * + * Returns 0 on success, otherwise negated errno. + * + * Soft offline a page, by migration or invalidation, + * without killing anything. This is for the case when + * a page is not corrupted yet (so it's still valid to access), + * but has had a number of corrected errors and is better taken + * out. + * + * The actual policy on when to do that is maintained by + * user space. + * + * This should never impact any application or cause data loss, + * however it might take some time. + * + * This is not a 100% solution for all memory, but tries to be + * ``good enough'' for the majority of memory. + */ +int soft_offline_page(struct page *page, int flags) +{ + int ret; + unsigned long pfn = page_to_pfn(page); + + ret = get_any_page(page, pfn, flags); + if (ret < 0) + return ret; + if (ret == 0) + goto done; + + /* + * Page cache page we can handle? + */ + if (!PageLRU(page)) { + /* + * Try to free it. + */ + put_page(page); + shake_page(page, 1); + + /* + * Did it turn free? + */ + ret = get_any_page(page, pfn, 0); + if (ret < 0) + return ret; + if (ret == 0) + goto done; + } + if (!PageLRU(page)) { + pr_debug("soft_offline: %#lx: unknown non LRU page type %lx\n", + pfn, page->flags); + return -EIO; + } + + lock_page(page); + wait_on_page_writeback(page); + + /* + * Synchronized using the page lock with memory_failure() + */ + if (PageHWPoison(page)) { + unlock_page(page); + put_page(page); + pr_debug("soft offline: %#lx page already poisoned\n", pfn); + return -EBUSY; + } + + /* + * Try to invalidate first. This should work for + * non dirty unmapped page cache pages. + */ + ret = invalidate_inode_page(page); + unlock_page(page); + + /* + * Drop count because page migration doesn't like raised + * counts. The page could get re-allocated, but if it becomes + * LRU the isolation will just fail. + * RED-PEN would be better to keep it isolated here, but we + * would need to fix isolation locking first. 
+ */ + put_page(page); + if (ret == 1) { + ret = 0; + pr_debug("soft_offline: %#lx: invalidated\n", pfn); + goto done; + } + + /* + * Simple invalidation didn't work. + * Try to migrate to a new page instead. migrate.c + * handles a large number of cases for us. + */ + ret = isolate_lru_page(page); + if (!ret) { + LIST_HEAD(pagelist); + + list_add(&page->lru, &pagelist); + ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0); + if (ret) { + pr_debug("soft offline: %#lx: migration failed %d, type %lx\n", + pfn, ret, page->flags); + if (ret > 0) + ret = -EIO; + } + } else { + pr_debug("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n", + pfn, ret, page_count(page), page->flags); + } + if (ret) + return ret; + +done: + atomic_long_add(1, &mce_bad_pages); + SetPageHWPoison(page); + /* keep elevated page count for bad page */ + return ret; +} -- cgit v1.2.3 From afcf938ee0aac4ef95b1a23bac704c6fbeb26de6 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 16 Dec 2009 12:20:00 +0100 Subject: HWPOISON: Add a madvise() injector for soft page offlining Process based injection is much easier to handle for test programs, who can first bring a page into a specific state and then test. So add a new MADV_SOFT_OFFLINE to soft offline a page, similar to the existing hard offline injector. Signed-off-by: Andi Kleen --- include/asm-generic/mman-common.h | 1 + mm/madvise.c | 15 ++++++++++++--- 2 files changed, 13 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-generic/mman-common.h b/include/asm-generic/mman-common.h index 20111265afd8..3da9e2742fa0 100644 --- a/include/asm-generic/mman-common.h +++ b/include/asm-generic/mman-common.h @@ -40,6 +40,7 @@ #define MADV_DONTFORK 10 /* don't inherit across fork */ #define MADV_DOFORK 11 /* do inherit across fork */ #define MADV_HWPOISON 100 /* poison a page for testing */ +#define MADV_SOFT_OFFLINE 101 /* soft offline page for testing */ #define MADV_MERGEABLE 12 /* KSM may merge identical pages */ #define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */ diff --git a/mm/madvise.c b/mm/madvise.c index 7964e36ba915..319528b8db74 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -222,7 +223,7 @@ static long madvise_remove(struct vm_area_struct *vma, /* * Error injection support for memory error handling. 
*/ -static int madvise_hwpoison(unsigned long start, unsigned long end) +static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end) { int ret = 0; @@ -233,6 +234,14 @@ static int madvise_hwpoison(unsigned long start, unsigned long end) int ret = get_user_pages_fast(start, 1, 0, &p); if (ret != 1) return ret; + if (bhv == MADV_SOFT_OFFLINE) { + printk(KERN_INFO "Soft offlining page %lx at %lx\n", + page_to_pfn(p), start); + ret = soft_offline_page(p, MF_COUNT_INCREASED); + if (ret) + break; + continue; + } printk(KERN_INFO "Injecting memory failure for page %lx at %lx\n", page_to_pfn(p), start); /* Ignore return value for now */ @@ -333,8 +342,8 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior) size_t len; #ifdef CONFIG_MEMORY_FAILURE - if (behavior == MADV_HWPOISON) - return madvise_hwpoison(start, start+len_in); + if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE) + return madvise_hwpoison(behavior, start, start+len_in); #endif if (!madvise_behavior_valid(behavior)) return error; -- cgit v1.2.3 From 11e3d1adbe0246fc8d6c06f7e42aff5bead25670 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Fri, 11 Dec 2009 11:06:00 -0300 Subject: V4L/DVB (13643): soc-camera: remove no longer needed struct members Signed-off-by: Guennadi Liakhovetski Signed-off-by: Mauro Carvalho Chehab --- include/media/soc_camera.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include') diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h index 3d74e60032dd..c5afc8cf8e1a 100644 --- a/include/media/soc_camera.h +++ b/include/media/soc_camera.h @@ -24,8 +24,6 @@ struct soc_camera_device { struct device *pdev; /* Platform device */ s32 user_width; s32 user_height; - unsigned short width_min; - unsigned short height_min; unsigned short y_skip_top; /* Lines to skip at the top */ unsigned char iface; /* Host number */ unsigned char devnum; /* Device number per host */ -- cgit v1.2.3 From 325361088b73269f4cc96256276a142addbf3454 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Fri, 11 Dec 2009 11:14:46 -0300 Subject: V4L/DVB (13644): v4l: add new v4l2-subdev sensor operations, use g_skip_top_lines in soc-camera Introduce new v4l2-subdev sensor operations, move .enum_framesizes() and .enum_frameintervals() methods to it, add a new .g_skip_top_lines() method and switch soc-camera to use it instead of .y_skip_top soc_camera_device member, which can now be removed. 
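For illustration only, not part of the patch: a camera host driver would typically reach the new operation through v4l2_subdev_call(), treating a sensor without the op as skipping zero lines. A minimal sketch, with the subdevice pointer assumed to have been obtained by the host elsewhere:

/*
 * Hypothetical host-side sketch: query how many lines to skip at the
 * top of the frame via the new v4l2-subdev sensor operation.
 */
#include <media/v4l2-subdev.h>

static u32 host_skip_top_lines(struct v4l2_subdev *sd)
{
	u32 skip = 0;

	/* Sensors without the op return -ENOIOCTLCMD; treat that as zero. */
	if (v4l2_subdev_call(sd, sensor, g_skip_top_lines, &skip))
		skip = 0;

	return skip;
}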
Signed-off-by: Guennadi Liakhovetski Reviewed-by: Hans Verkuil Reviewed-by: Sergio Aguirre Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/mt9m001.c | 30 ++++++++++++++++++++++------- drivers/media/video/mt9m111.c | 1 - drivers/media/video/mt9t031.c | 24 +++++++++++++++++++---- drivers/media/video/mt9v022.c | 32 ++++++++++++++++++++++--------- drivers/media/video/pxa_camera.c | 9 +++++++-- drivers/media/video/soc_camera_platform.c | 1 - include/media/soc_camera.h | 1 - include/media/v4l2-subdev.h | 22 ++++++++++++++++----- 8 files changed, 90 insertions(+), 30 deletions(-) (limited to 'include') diff --git a/drivers/media/video/mt9m001.c b/drivers/media/video/mt9m001.c index 45388d2ce2fd..17be2d46fd40 100644 --- a/drivers/media/video/mt9m001.c +++ b/drivers/media/video/mt9m001.c @@ -82,6 +82,7 @@ struct mt9m001 { int model; /* V4L2_IDENT_MT9M001* codes from v4l2-chip-ident.h */ unsigned int gain; unsigned int exposure; + unsigned short y_skip_top; /* Lines to skip at the top */ unsigned char autoexposure; }; @@ -222,7 +223,7 @@ static int mt9m001_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) soc_camera_limit_side(&rect.top, &rect.height, MT9M001_ROW_SKIP, MT9M001_MIN_HEIGHT, MT9M001_MAX_HEIGHT); - total_h = rect.height + icd->y_skip_top + vblank; + total_h = rect.height + mt9m001->y_skip_top + vblank; /* Blanking and start values - default... */ ret = reg_write(client, MT9M001_HORIZONTAL_BLANKING, hblank); @@ -239,7 +240,7 @@ static int mt9m001_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) ret = reg_write(client, MT9M001_WINDOW_WIDTH, rect.width - 1); if (!ret) ret = reg_write(client, MT9M001_WINDOW_HEIGHT, - rect.height + icd->y_skip_top - 1); + rect.height + mt9m001->y_skip_top - 1); if (!ret && mt9m001->autoexposure) { ret = reg_write(client, MT9M001_SHUTTER_WIDTH, total_h); if (!ret) { @@ -327,13 +328,13 @@ static int mt9m001_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) static int mt9m001_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) { struct i2c_client *client = sd->priv; - struct soc_camera_device *icd = client->dev.platform_data; + struct mt9m001 *mt9m001 = to_mt9m001(client); struct v4l2_pix_format *pix = &f->fmt.pix; v4l_bound_align_image(&pix->width, MT9M001_MIN_WIDTH, MT9M001_MAX_WIDTH, 1, - &pix->height, MT9M001_MIN_HEIGHT + icd->y_skip_top, - MT9M001_MAX_HEIGHT + icd->y_skip_top, 0, 0); + &pix->height, MT9M001_MIN_HEIGHT + mt9m001->y_skip_top, + MT9M001_MAX_HEIGHT + mt9m001->y_skip_top, 0, 0); if (pix->pixelformat == V4L2_PIX_FMT_SBGGR8 || pix->pixelformat == V4L2_PIX_FMT_SBGGR16) @@ -552,7 +553,7 @@ static int mt9m001_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) if (ctrl->value) { const u16 vblank = 25; unsigned int total_h = mt9m001->rect.height + - icd->y_skip_top + vblank; + mt9m001->y_skip_top + vblank; if (reg_write(client, MT9M001_SHUTTER_WIDTH, total_h) < 0) return -EIO; @@ -655,6 +656,16 @@ static void mt9m001_video_remove(struct soc_camera_device *icd) icl->free_bus(icl); } +static int mt9m001_g_skip_top_lines(struct v4l2_subdev *sd, u32 *lines) +{ + struct i2c_client *client = sd->priv; + struct mt9m001 *mt9m001 = to_mt9m001(client); + + *lines = mt9m001->y_skip_top; + + return 0; +} + static struct v4l2_subdev_core_ops mt9m001_subdev_core_ops = { .g_ctrl = mt9m001_g_ctrl, .s_ctrl = mt9m001_s_ctrl, @@ -675,9 +686,14 @@ static struct v4l2_subdev_video_ops mt9m001_subdev_video_ops = { .cropcap = mt9m001_cropcap, }; +static struct v4l2_subdev_sensor_ops mt9m001_subdev_sensor_ops = { + .g_skip_top_lines = 
mt9m001_g_skip_top_lines, +}; + static struct v4l2_subdev_ops mt9m001_subdev_ops = { .core = &mt9m001_subdev_core_ops, .video = &mt9m001_subdev_video_ops, + .sensor = &mt9m001_subdev_sensor_ops, }; static int mt9m001_probe(struct i2c_client *client, @@ -714,8 +730,8 @@ static int mt9m001_probe(struct i2c_client *client, /* Second stage probe - when a capture adapter is there */ icd->ops = &mt9m001_ops; - icd->y_skip_top = 0; + mt9m001->y_skip_top = 0; mt9m001->rect.left = MT9M001_COLUMN_SKIP; mt9m001->rect.top = MT9M001_ROW_SKIP; mt9m001->rect.width = MT9M001_MAX_WIDTH; diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c index 90da699601ea..30db625455e4 100644 --- a/drivers/media/video/mt9m111.c +++ b/drivers/media/video/mt9m111.c @@ -1019,7 +1019,6 @@ static int mt9m111_probe(struct i2c_client *client, /* Second stage probe - when a capture adapter is there */ icd->ops = &mt9m111_ops; - icd->y_skip_top = 0; mt9m111->rect.left = MT9M111_MIN_DARK_COLS; mt9m111->rect.top = MT9M111_MIN_DARK_ROWS; diff --git a/drivers/media/video/mt9t031.c b/drivers/media/video/mt9t031.c index 6966f644977e..1adbb7b2b580 100644 --- a/drivers/media/video/mt9t031.c +++ b/drivers/media/video/mt9t031.c @@ -74,6 +74,7 @@ struct mt9t031 { u16 xskip; u16 yskip; unsigned int gain; + unsigned short y_skip_top; /* Lines to skip at the top */ unsigned int exposure; unsigned char autoexposure; }; @@ -301,9 +302,9 @@ static int mt9t031_set_params(struct soc_camera_device *icd, ret = reg_write(client, MT9T031_WINDOW_WIDTH, rect->width - 1); if (ret >= 0) ret = reg_write(client, MT9T031_WINDOW_HEIGHT, - rect->height + icd->y_skip_top - 1); + rect->height + mt9t031->y_skip_top - 1); if (ret >= 0 && mt9t031->autoexposure) { - unsigned int total_h = rect->height + icd->y_skip_top + vblank; + unsigned int total_h = rect->height + mt9t031->y_skip_top + vblank; ret = set_shutter(client, total_h); if (ret >= 0) { const u32 shutter_max = MT9T031_MAX_HEIGHT + vblank; @@ -657,7 +658,7 @@ static int mt9t031_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) const u16 vblank = MT9T031_VERTICAL_BLANK; const u32 shutter_max = MT9T031_MAX_HEIGHT + vblank; unsigned int total_h = mt9t031->rect.height + - icd->y_skip_top + vblank; + mt9t031->y_skip_top + vblank; if (set_shutter(client, total_h) < 0) return -EIO; @@ -714,6 +715,16 @@ static int mt9t031_video_probe(struct i2c_client *client) return ret; } +static int mt9t031_g_skip_top_lines(struct v4l2_subdev *sd, u32 *lines) +{ + struct i2c_client *client = sd->priv; + struct mt9t031 *mt9t031 = to_mt9t031(client); + + *lines = mt9t031->y_skip_top; + + return 0; +} + static struct v4l2_subdev_core_ops mt9t031_subdev_core_ops = { .g_ctrl = mt9t031_g_ctrl, .s_ctrl = mt9t031_s_ctrl, @@ -734,9 +745,14 @@ static struct v4l2_subdev_video_ops mt9t031_subdev_video_ops = { .cropcap = mt9t031_cropcap, }; +static struct v4l2_subdev_sensor_ops mt9t031_subdev_sensor_ops = { + .g_skip_top_lines = mt9t031_g_skip_top_lines, +}; + static struct v4l2_subdev_ops mt9t031_subdev_ops = { .core = &mt9t031_subdev_core_ops, .video = &mt9t031_subdev_video_ops, + .sensor = &mt9t031_subdev_sensor_ops, }; static int mt9t031_probe(struct i2c_client *client, @@ -773,8 +789,8 @@ static int mt9t031_probe(struct i2c_client *client, /* Second stage probe - when a capture adapter is there */ icd->ops = &mt9t031_ops; - icd->y_skip_top = 0; + mt9t031->y_skip_top = 0; mt9t031->rect.left = MT9T031_COLUMN_SKIP; mt9t031->rect.top = MT9T031_ROW_SKIP; mt9t031->rect.width = MT9T031_MAX_WIDTH; diff --git 
a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c index 995607f9d3ba..b71898f1b085 100644 --- a/drivers/media/video/mt9v022.c +++ b/drivers/media/video/mt9v022.c @@ -97,6 +97,7 @@ struct mt9v022 { __u32 fourcc; int model; /* V4L2_IDENT_MT9V022* codes from v4l2-chip-ident.h */ u16 chip_control; + unsigned short y_skip_top; /* Lines to skip at the top */ }; static struct mt9v022 *to_mt9v022(const struct i2c_client *client) @@ -265,7 +266,6 @@ static int mt9v022_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) struct i2c_client *client = sd->priv; struct mt9v022 *mt9v022 = to_mt9v022(client); struct v4l2_rect rect = a->c; - struct soc_camera_device *icd = client->dev.platform_data; int ret; /* Bayer format - even size lengths */ @@ -287,10 +287,10 @@ static int mt9v022_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) if (ret >= 0) { if (ret & 1) /* Autoexposure */ ret = reg_write(client, MT9V022_MAX_TOTAL_SHUTTER_WIDTH, - rect.height + icd->y_skip_top + 43); + rect.height + mt9v022->y_skip_top + 43); else ret = reg_write(client, MT9V022_TOTAL_SHUTTER_WIDTH, - rect.height + icd->y_skip_top + 43); + rect.height + mt9v022->y_skip_top + 43); } /* Setup frame format: defaults apart from width and height */ if (!ret) @@ -309,7 +309,7 @@ static int mt9v022_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) ret = reg_write(client, MT9V022_WINDOW_WIDTH, rect.width); if (!ret) ret = reg_write(client, MT9V022_WINDOW_HEIGHT, - rect.height + icd->y_skip_top); + rect.height + mt9v022->y_skip_top); if (ret < 0) return ret; @@ -410,15 +410,15 @@ static int mt9v022_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) static int mt9v022_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) { struct i2c_client *client = sd->priv; - struct soc_camera_device *icd = client->dev.platform_data; + struct mt9v022 *mt9v022 = to_mt9v022(client); struct v4l2_pix_format *pix = &f->fmt.pix; int align = pix->pixelformat == V4L2_PIX_FMT_SBGGR8 || pix->pixelformat == V4L2_PIX_FMT_SBGGR16; v4l_bound_align_image(&pix->width, MT9V022_MIN_WIDTH, MT9V022_MAX_WIDTH, align, - &pix->height, MT9V022_MIN_HEIGHT + icd->y_skip_top, - MT9V022_MAX_HEIGHT + icd->y_skip_top, align, 0); + &pix->height, MT9V022_MIN_HEIGHT + mt9v022->y_skip_top, + MT9V022_MAX_HEIGHT + mt9v022->y_skip_top, align, 0); return 0; } @@ -787,6 +787,16 @@ static void mt9v022_video_remove(struct soc_camera_device *icd) icl->free_bus(icl); } +static int mt9v022_g_skip_top_lines(struct v4l2_subdev *sd, u32 *lines) +{ + struct i2c_client *client = sd->priv; + struct mt9v022 *mt9v022 = to_mt9v022(client); + + *lines = mt9v022->y_skip_top; + + return 0; +} + static struct v4l2_subdev_core_ops mt9v022_subdev_core_ops = { .g_ctrl = mt9v022_g_ctrl, .s_ctrl = mt9v022_s_ctrl, @@ -807,9 +817,14 @@ static struct v4l2_subdev_video_ops mt9v022_subdev_video_ops = { .cropcap = mt9v022_cropcap, }; +static struct v4l2_subdev_sensor_ops mt9v022_subdev_sensor_ops = { + .g_skip_top_lines = mt9v022_g_skip_top_lines, +}; + static struct v4l2_subdev_ops mt9v022_subdev_ops = { .core = &mt9v022_subdev_core_ops, .video = &mt9v022_subdev_video_ops, + .sensor = &mt9v022_subdev_sensor_ops, }; static int mt9v022_probe(struct i2c_client *client, @@ -851,8 +866,7 @@ static int mt9v022_probe(struct i2c_client *client, * MT9V022 _really_ corrupts the first read out line. 
* TODO: verify on i.MX31 */ - icd->y_skip_top = 1; - + mt9v022->y_skip_top = 1; mt9v022->rect.left = MT9V022_COLUMN_SKIP; mt9v022->rect.top = MT9V022_ROW_SKIP; mt9v022->rect.width = MT9V022_MAX_WIDTH; diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c index 51b683c63b70..4df09a693ec7 100644 --- a/drivers/media/video/pxa_camera.c +++ b/drivers/media/video/pxa_camera.c @@ -1051,8 +1051,13 @@ static void pxa_camera_setup_cicr(struct soc_camera_device *icd, { struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct pxa_camera_dev *pcdev = ici->priv; + struct v4l2_subdev *sd = soc_camera_to_subdev(icd); unsigned long dw, bpp; - u32 cicr0, cicr1, cicr2, cicr3, cicr4 = 0; + u32 cicr0, cicr1, cicr2, cicr3, cicr4 = 0, y_skip_top; + int ret = v4l2_subdev_call(sd, sensor, g_skip_top_lines, &y_skip_top); + + if (ret < 0) + y_skip_top = 0; /* Datawidth is now guaranteed to be equal to one of the three values. * We fix bit-per-pixel equal to data-width... */ @@ -1118,7 +1123,7 @@ static void pxa_camera_setup_cicr(struct soc_camera_device *icd, cicr2 = 0; cicr3 = CICR3_LPF_VAL(icd->user_height - 1) | - CICR3_BFW_VAL(min((unsigned short)255, icd->y_skip_top)); + CICR3_BFW_VAL(min((u32)255, y_skip_top)); cicr4 |= pcdev->mclk_divisor; __raw_writel(cicr1, pcdev->base + CICR1); diff --git a/drivers/media/video/soc_camera_platform.c b/drivers/media/video/soc_camera_platform.c index b6a575ce5da2..c7c91518c391 100644 --- a/drivers/media/video/soc_camera_platform.c +++ b/drivers/media/video/soc_camera_platform.c @@ -128,7 +128,6 @@ static int soc_camera_platform_probe(struct platform_device *pdev) /* Set the control device reference */ dev_set_drvdata(&icd->dev, &pdev->dev); - icd->y_skip_top = 0; icd->ops = &soc_camera_platform_ops; ici = to_soc_camera_host(icd->dev.parent); diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h index c5afc8cf8e1a..218639f133d3 100644 --- a/include/media/soc_camera.h +++ b/include/media/soc_camera.h @@ -24,7 +24,6 @@ struct soc_camera_device { struct device *pdev; /* Platform device */ s32 user_width; s32 user_height; - unsigned short y_skip_top; /* Lines to skip at the top */ unsigned char iface; /* Host number */ unsigned char devnum; /* Device number per host */ unsigned char buswidth; /* See comment in .c */ diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h index 7a3408f9d0f2..a128458dc9eb 100644 --- a/include/media/v4l2-subdev.h +++ b/include/media/v4l2-subdev.h @@ -263,6 +263,17 @@ struct v4l2_subdev_video_ops { struct v4l2_dv_timings *timings); }; +/** + * struct v4l2_subdev_sensor_ops - v4l2-subdev sensor operations + * @g_skip_top_lines: number of lines at the top of the image to be skipped. + * This is needed for some sensors, which always corrupt + * several top lines of the output image, or which send their + * metadata in them. 
+ */ +struct v4l2_subdev_sensor_ops { + int (*g_skip_top_lines)(struct v4l2_subdev *sd, u32 *lines); +}; + /* interrupt_service_routine: Called by the bridge chip's interrupt service handler, when an IR interrupt status has be raised due to this subdev, @@ -347,11 +358,12 @@ struct v4l2_subdev_ir_ops { }; struct v4l2_subdev_ops { - const struct v4l2_subdev_core_ops *core; - const struct v4l2_subdev_tuner_ops *tuner; - const struct v4l2_subdev_audio_ops *audio; - const struct v4l2_subdev_video_ops *video; - const struct v4l2_subdev_ir_ops *ir; + const struct v4l2_subdev_core_ops *core; + const struct v4l2_subdev_tuner_ops *tuner; + const struct v4l2_subdev_audio_ops *audio; + const struct v4l2_subdev_video_ops *video; + const struct v4l2_subdev_ir_ops *ir; + const struct v4l2_subdev_sensor_ops *sensor; }; #define V4L2_SUBDEV_NAME_SIZE 32 -- cgit v1.2.3 From 5d28d525452f170e30bc038955439731462a5228 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Fri, 11 Dec 2009 11:15:05 -0300 Subject: V4L/DVB (13645): soc-camera: fix multi-line comment coding style Signed-off-by: Guennadi Liakhovetski Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/mt9m001.c | 36 +++++++++++++------- drivers/media/video/mt9t031.c | 24 ++++++++----- drivers/media/video/mt9v022.c | 48 +++++++++++++++++--------- drivers/media/video/mx1_camera.c | 36 +++++++++++++------- drivers/media/video/mx3_camera.c | 18 ++++++---- drivers/media/video/pxa_camera.c | 54 ++++++++++++++++++++---------- drivers/media/video/sh_mobile_ceu_camera.c | 18 ++++++---- drivers/media/video/soc_camera.c | 24 ++++++++----- drivers/media/video/tw9910.c | 8 ++--- include/media/ov772x.h | 3 +- 10 files changed, 178 insertions(+), 91 deletions(-) (limited to 'include') diff --git a/drivers/media/video/mt9m001.c b/drivers/media/video/mt9m001.c index 17be2d46fd40..cc9066000c2d 100644 --- a/drivers/media/video/mt9m001.c +++ b/drivers/media/video/mt9m001.c @@ -17,9 +17,11 @@ #include #include -/* mt9m001 i2c address 0x5d +/* + * mt9m001 i2c address 0x5d * The platform has to define ctruct i2c_board_info objects and link to them - * from struct soc_camera_link */ + * from struct soc_camera_link + */ /* mt9m001 selected register addresses */ #define MT9M001_CHIP_VERSION 0x00 @@ -47,8 +49,10 @@ #define MT9M001_ROW_SKIP 12 static const struct soc_camera_data_format mt9m001_colour_formats[] = { - /* Order important: first natively supported, - * second supported with a GPIO extender */ + /* + * Order important: first natively supported, + * second supported with a GPIO extender + */ { .name = "Bayer (sRGB) 10 bit", .depth = 10, @@ -230,8 +234,10 @@ static int mt9m001_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) if (!ret) ret = reg_write(client, MT9M001_VERTICAL_BLANKING, vblank); - /* The caller provides a supported format, as verified per - * call to icd->try_fmt() */ + /* + * The caller provides a supported format, as verified per + * call to icd->try_fmt() + */ if (!ret) ret = reg_write(client, MT9M001_COLUMN_START, rect.left); if (!ret) @@ -569,8 +575,10 @@ static int mt9m001_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) return 0; } -/* Interface active, can use i2c. If it fails, it can indeed mean, that - * this wasn't our capture interface, so, we wait for the right one */ +/* + * Interface active, can use i2c. 
If it fails, it can indeed mean, that + * this wasn't our capture interface, so, we wait for the right one + */ static int mt9m001_video_probe(struct soc_camera_device *icd, struct i2c_client *client) { @@ -580,8 +588,10 @@ static int mt9m001_video_probe(struct soc_camera_device *icd, unsigned long flags; int ret; - /* We must have a parent by now. And it cannot be a wrong one. - * So this entire test is completely redundant. */ + /* + * We must have a parent by now. And it cannot be a wrong one. + * So this entire test is completely redundant. + */ if (!icd->dev.parent || to_soc_camera_host(icd->dev.parent)->nr != icd->iface) return -ENODEV; @@ -737,8 +747,10 @@ static int mt9m001_probe(struct i2c_client *client, mt9m001->rect.width = MT9M001_MAX_WIDTH; mt9m001->rect.height = MT9M001_MAX_HEIGHT; - /* Simulated autoexposure. If enabled, we calculate shutter width - * ourselves in the driver based on vertical blanking and frame width */ + /* + * Simulated autoexposure. If enabled, we calculate shutter width + * ourselves in the driver based on vertical blanking and frame width + */ mt9m001->autoexposure = 1; ret = mt9m001_video_probe(icd, client); diff --git a/drivers/media/video/mt9t031.c b/drivers/media/video/mt9t031.c index 1adbb7b2b580..e3f664f21c48 100644 --- a/drivers/media/video/mt9t031.c +++ b/drivers/media/video/mt9t031.c @@ -17,9 +17,11 @@ #include #include -/* mt9t031 i2c address 0x5d +/* + * mt9t031 i2c address 0x5d * The platform has to define i2c_board_info and link to it from - * struct soc_camera_link */ + * struct soc_camera_link + */ /* mt9t031 selected register addresses */ #define MT9T031_CHIP_VERSION 0x00 @@ -292,8 +294,10 @@ static int mt9t031_set_params(struct soc_camera_device *icd, dev_dbg(&client->dev, "new physical left %u, top %u\n", rect->left, rect->top); - /* The caller provides a supported format, as guaranteed by - * icd->try_fmt_cap(), soc_camera_s_crop() and soc_camera_cropcap() */ + /* + * The caller provides a supported format, as guaranteed by + * icd->try_fmt_cap(), soc_camera_s_crop() and soc_camera_cropcap() + */ if (ret >= 0) ret = reg_write(client, MT9T031_COLUMN_START, rect->left); if (ret >= 0) @@ -674,8 +678,10 @@ static int mt9t031_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) return 0; } -/* Interface active, can use i2c. If it fails, it can indeed mean, that - * this wasn't our capture interface, so, we wait for the right one */ +/* + * Interface active, can use i2c. If it fails, it can indeed mean, that + * this wasn't our capture interface, so, we wait for the right one + */ static int mt9t031_video_probe(struct i2c_client *client) { struct soc_camera_device *icd = client->dev.platform_data; @@ -796,8 +802,10 @@ static int mt9t031_probe(struct i2c_client *client, mt9t031->rect.width = MT9T031_MAX_WIDTH; mt9t031->rect.height = MT9T031_MAX_HEIGHT; - /* Simulated autoexposure. If enabled, we calculate shutter width - * ourselves in the driver based on vertical blanking and frame width */ + /* + * Simulated autoexposure. 
If enabled, we calculate shutter width + * ourselves in the driver based on vertical blanking and frame width + */ mt9t031->autoexposure = 1; mt9t031->xskip = 1; diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c index b71898f1b085..f60a9a107f20 100644 --- a/drivers/media/video/mt9v022.c +++ b/drivers/media/video/mt9v022.c @@ -18,9 +18,11 @@ #include #include -/* mt9v022 i2c address 0x48, 0x4c, 0x58, 0x5c +/* + * mt9v022 i2c address 0x48, 0x4c, 0x58, 0x5c * The platform has to define ctruct i2c_board_info objects and link to them - * from struct soc_camera_link */ + * from struct soc_camera_link + */ static char *sensor_type; module_param(sensor_type, charp, S_IRUGO); @@ -63,8 +65,10 @@ MODULE_PARM_DESC(sensor_type, "Sensor type: \"colour\" or \"monochrome\""); #define MT9V022_ROW_SKIP 4 static const struct soc_camera_data_format mt9v022_colour_formats[] = { - /* Order important: first natively supported, - * second supported with a GPIO extender */ + /* + * Order important: first natively supported, + * second supported with a GPIO extender + */ { .name = "Bayer (sRGB) 10 bit", .depth = 10, @@ -144,9 +148,11 @@ static int mt9v022_init(struct i2c_client *client) struct mt9v022 *mt9v022 = to_mt9v022(client); int ret; - /* Almost the default mode: master, parallel, simultaneous, and an + /* + * Almost the default mode: master, parallel, simultaneous, and an * undocumented bit 0x200, which is present in table 7, but not in 8, - * plus snapshot mode to disable scan for now */ + * plus snapshot mode to disable scan for now + */ mt9v022->chip_control |= 0x10; ret = reg_write(client, MT9V022_CHIP_CONTROL, mt9v022->chip_control); if (!ret) @@ -298,8 +304,10 @@ static int mt9v022_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) if (!ret) ret = reg_write(client, MT9V022_ROW_START, rect.top); if (!ret) - /* Default 94, Phytec driver says: - * "width + horizontal blank >= 660" */ + /* + * Default 94, Phytec driver says: + * "width + horizontal blank >= 660" + */ ret = reg_write(client, MT9V022_HORIZONTAL_BLANKING, rect.width > 660 - 43 ? 43 : 660 - rect.width); @@ -376,8 +384,10 @@ static int mt9v022_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) }; int ret; - /* The caller provides a supported format, as verified per call to - * icd->try_fmt(), datawidth is from our supported format list */ + /* + * The caller provides a supported format, as verified per call to + * icd->try_fmt(), datawidth is from our supported format list + */ switch (pix->pixelformat) { case V4L2_PIX_FMT_GREY: case V4L2_PIX_FMT_Y16: @@ -635,8 +645,10 @@ static int mt9v022_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) 48 + range / 2) / range + 16; if (gain >= 32) gain &= ~1; - /* The user wants to set gain manually, hope, she - * knows, what she's doing... Switch AGC off. */ + /* + * The user wants to set gain manually, hope, she + * knows, what she's doing... Switch AGC off. + */ if (reg_clear(client, MT9V022_AEC_AGC_ENABLE, 0x2) < 0) return -EIO; @@ -655,8 +667,10 @@ static int mt9v022_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) unsigned long range = qctrl->maximum - qctrl->minimum; unsigned long shutter = ((ctrl->value - qctrl->minimum) * 479 + range / 2) / range + 1; - /* The user wants to set shutter width manually, hope, - * she knows, what she's doing... Switch AEC off. */ + /* + * The user wants to set shutter width manually, hope, + * she knows, what she's doing... Switch AEC off. 
+ */ if (reg_clear(client, MT9V022_AEC_AGC_ENABLE, 0x1) < 0) return -EIO; @@ -689,8 +703,10 @@ static int mt9v022_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) return 0; } -/* Interface active, can use i2c. If it fails, it can indeed mean, that - * this wasn't our capture interface, so, we wait for the right one */ +/* + * Interface active, can use i2c. If it fails, it can indeed mean, that + * this wasn't our capture interface, so, we wait for the right one + */ static int mt9v022_video_probe(struct soc_camera_device *icd, struct i2c_client *client) { diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c index 72802291e812..4c1a439373c5 100644 --- a/drivers/media/video/mx1_camera.c +++ b/drivers/media/video/mx1_camera.c @@ -99,9 +99,11 @@ struct mx1_buffer { int inwork; }; -/* i.MX1/i.MXL is only supposed to handle one camera on its Camera Sensor +/* + * i.MX1/i.MXL is only supposed to handle one camera on its Camera Sensor * Interface. If anyone ever builds hardware to enable more than - * one camera, they will have to modify this driver too */ + * one camera, they will have to modify this driver too + */ struct mx1_camera_dev { struct soc_camera_host soc_host; struct soc_camera_device *icd; @@ -151,8 +153,10 @@ static void free_buffer(struct videobuf_queue *vq, struct mx1_buffer *buf) dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__, vb, vb->baddr, vb->bsize); - /* This waits until this buffer is out of danger, i.e., until it is no - * longer in STATE_QUEUED or STATE_ACTIVE */ + /* + * This waits until this buffer is out of danger, i.e., until it is no + * longer in STATE_QUEUED or STATE_ACTIVE + */ videobuf_waiton(vb, 0, 0); videobuf_dma_contig_free(vq, vb); @@ -174,8 +178,10 @@ static int mx1_videobuf_prepare(struct videobuf_queue *vq, BUG_ON(NULL == icd->current_fmt); - /* I think, in buf_prepare you only have to protect global data, - * the actual buffer is yours */ + /* + * I think, in buf_prepare you only have to protect global data, + * the actual buffer is yours + */ buf->inwork = 1; if (buf->fmt != icd->current_fmt || @@ -381,8 +387,10 @@ static int mclk_get_divisor(struct mx1_camera_dev *pcdev) lcdclk = clk_get_rate(pcdev->clk); - /* We verify platform_mclk_10khz != 0, so if anyone breaks it, here - * they get a nice Oops */ + /* + * We verify platform_mclk_10khz != 0, so if anyone breaks it, here + * they get a nice Oops + */ div = (lcdclk + 2 * mclk - 1) / (2 * mclk) - 1; dev_dbg(pcdev->icd->dev.parent, @@ -420,8 +428,10 @@ static void mx1_camera_deactivate(struct mx1_camera_dev *pcdev) clk_disable(pcdev->clk); } -/* The following two functions absolutely depend on the fact, that - * there can be only one camera on i.MX1/i.MXL camera sensor interface */ +/* + * The following two functions absolutely depend on the fact, that + * there can be only one camera on i.MX1/i.MXL camera sensor interface + */ static int mx1_camera_add_device(struct soc_camera_device *icd) { struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); @@ -578,10 +588,12 @@ static int mx1_camera_reqbufs(struct soc_camera_file *icf, { int i; - /* This is for locking debugging only. I removed spinlocks and now I + /* + * This is for locking debugging only. I removed spinlocks and now I * check whether .prepare is ever called on a linked buffer, or whether * a dma IRQ can occur for an in-work or unlinked buffer. 
Until now - * it hadn't triggered */ + * it hadn't triggered + */ for (i = 0; i < p->count; i++) { struct mx1_buffer *buf = container_of(icf->vb_vidq.bufs[i], struct mx1_buffer, vb); diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c index 7db82bdf6f31..ae7d48324493 100644 --- a/drivers/media/video/mx3_camera.c +++ b/drivers/media/video/mx3_camera.c @@ -564,8 +564,10 @@ static int test_platform_param(struct mx3_camera_dev *mx3_cam, SOCAM_DATA_ACTIVE_HIGH | SOCAM_DATA_ACTIVE_LOW; - /* If requested data width is supported by the platform, use it or any - * possible lower value - i.MX31 is smart enough to schift bits */ + /* + * If requested data width is supported by the platform, use it or any + * possible lower value - i.MX31 is smart enough to schift bits + */ switch (buswidth) { case 15: if (!(mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_15)) @@ -1027,8 +1029,10 @@ static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt) common_flags &= ~SOCAM_PCLK_SAMPLE_FALLING; } - /* Make the camera work in widest common mode, we'll take care of - * the rest */ + /* + * Make the camera work in widest common mode, we'll take care of + * the rest + */ if (common_flags & SOCAM_DATAWIDTH_15) common_flags = (common_flags & ~SOCAM_DATAWIDTH_MASK) | SOCAM_DATAWIDTH_15; @@ -1152,8 +1156,10 @@ static int __devinit mx3_camera_probe(struct platform_device *pdev) if (!(mx3_cam->platform_flags & (MX3_CAMERA_DATAWIDTH_4 | MX3_CAMERA_DATAWIDTH_8 | MX3_CAMERA_DATAWIDTH_10 | MX3_CAMERA_DATAWIDTH_15))) { - /* Platform hasn't set available data widths. This is bad. - * Warn and use a default. */ + /* + * Platform hasn't set available data widths. This is bad. + * Warn and use a default. + */ dev_warn(&pdev->dev, "WARNING! Platform hasn't set available " "data widths, using default 8 bit\n"); mx3_cam->platform_flags |= MX3_CAMERA_DATAWIDTH_8; diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c index 4df09a693ec7..f063f5981f43 100644 --- a/drivers/media/video/pxa_camera.c +++ b/drivers/media/video/pxa_camera.c @@ -197,9 +197,11 @@ struct pxa_buffer { struct pxa_camera_dev { struct soc_camera_host soc_host; - /* PXA27x is only supposed to handle one camera on its Quick Capture + /* + * PXA27x is only supposed to handle one camera on its Quick Capture * interface. 
If anyone ever builds hardware to enable more than - * one camera, they will have to modify this driver too */ + * one camera, they will have to modify this driver too + */ struct soc_camera_device *icd; struct clk *clk; @@ -267,8 +269,10 @@ static void free_buffer(struct videobuf_queue *vq, struct pxa_buffer *buf) dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__, &buf->vb, buf->vb.baddr, buf->vb.bsize); - /* This waits until this buffer is out of danger, i.e., until it is no - * longer in STATE_QUEUED or STATE_ACTIVE */ + /* + * This waits until this buffer is out of danger, i.e., until it is no + * longer in STATE_QUEUED or STATE_ACTIVE + */ videobuf_waiton(&buf->vb, 0, 0); videobuf_dma_unmap(vq, dma); videobuf_dma_free(dma); @@ -437,15 +441,19 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq, WARN_ON(!list_empty(&vb->queue)); #ifdef DEBUG - /* This can be useful if you want to see if we actually fill - * the buffer with something */ + /* + * This can be useful if you want to see if we actually fill + * the buffer with something + */ memset((void *)vb->baddr, 0xaa, vb->bsize); #endif BUG_ON(NULL == icd->current_fmt); - /* I think, in buf_prepare you only have to protect global data, - * the actual buffer is yours */ + /* + * I think, in buf_prepare you only have to protect global data, + * the actual buffer is yours + */ buf->inwork = 1; if (buf->fmt != icd->current_fmt || @@ -834,8 +842,10 @@ static void pxa_camera_init_videobuf(struct videobuf_queue *q, struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct pxa_camera_dev *pcdev = ici->priv; - /* We must pass NULL as dev pointer, then all pci_* dma operations - * transform to normal dma_* ones. */ + /* + * We must pass NULL as dev pointer, then all pci_* dma operations + * transform to normal dma_* ones. + */ videobuf_queue_sg_init(q, &pxa_videobuf_ops, NULL, &pcdev->lock, V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE, sizeof(struct pxa_buffer), icd); @@ -1059,8 +1069,10 @@ static void pxa_camera_setup_cicr(struct soc_camera_device *icd, if (ret < 0) y_skip_top = 0; - /* Datawidth is now guaranteed to be equal to one of the three values. - * We fix bit-per-pixel equal to data-width... */ + /* + * Datawidth is now guaranteed to be equal to one of the three values. + * We fix bit-per-pixel equal to data-width... + */ switch (flags & SOCAM_DATAWIDTH_MASK) { case SOCAM_DATAWIDTH_10: dw = 4; @@ -1071,8 +1083,10 @@ static void pxa_camera_setup_cicr(struct soc_camera_device *icd, bpp = 0x20; break; default: - /* Actually it can only be 8 now, - * default is just to silence compiler warnings */ + /* + * Actually it can only be 8 now, + * default is just to silence compiler warnings + */ case SOCAM_DATAWIDTH_8: dw = 2; bpp = 0; @@ -1524,10 +1538,12 @@ static int pxa_camera_reqbufs(struct soc_camera_file *icf, { int i; - /* This is for locking debugging only. I removed spinlocks and now I + /* + * This is for locking debugging only. I removed spinlocks and now I * check whether .prepare is ever called on a linked buffer, or whether * a dma IRQ can occur for an in-work or unlinked buffer. 
Until now - * it hadn't triggered */ + * it hadn't triggered + */ for (i = 0; i < p->count; i++) { struct pxa_buffer *buf = container_of(icf->vb_vidq.bufs[i], struct pxa_buffer, vb); @@ -1662,8 +1678,10 @@ static int __devinit pxa_camera_probe(struct platform_device *pdev) pcdev->platform_flags = pcdev->pdata->flags; if (!(pcdev->platform_flags & (PXA_CAMERA_DATAWIDTH_8 | PXA_CAMERA_DATAWIDTH_9 | PXA_CAMERA_DATAWIDTH_10))) { - /* Platform hasn't set available data widths. This is bad. - * Warn and use a default. */ + /* + * Platform hasn't set available data widths. This is bad. + * Warn and use a default. + */ dev_warn(&pdev->dev, "WARNING! Platform hasn't set available " "data widths, using default 10 bit\n"); pcdev->platform_flags |= PXA_CAMERA_DATAWIDTH_10; diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c index 7509990c65d7..3271944f261c 100644 --- a/drivers/media/video/sh_mobile_ceu_camera.c +++ b/drivers/media/video/sh_mobile_ceu_camera.c @@ -252,7 +252,8 @@ static int sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev) u32 status; int ret = 0; - /* The hardware is _very_ picky about this sequence. Especially + /* + * The hardware is _very_ picky about this sequence. Especially * the CEU_CETCR_MAGIC value. It seems like we need to acknowledge * several not-so-well documented interrupt sources in CETCR. */ @@ -321,8 +322,10 @@ static int sh_mobile_ceu_videobuf_prepare(struct videobuf_queue *vq, WARN_ON(!list_empty(&vb->queue)); #ifdef DEBUG - /* This can be useful if you want to see if we actually fill - * the buffer with something */ + /* + * This can be useful if you want to see if we actually fill + * the buffer with something + */ memset((void *)vb->baddr, 0xaa, vb->bsize); #endif @@ -709,7 +712,8 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd, ceu_write(pcdev, CFLCR, pcdev->cflcr); - /* A few words about byte order (observed in Big Endian mode) + /* + * A few words about byte order (observed in Big Endian mode) * * In data fetch mode bytes are received in chunks of 8 bytes. * D0, D1, D2, D3, D4, D5, D6, D7 (D0 received first) @@ -1573,10 +1577,12 @@ static int sh_mobile_ceu_reqbufs(struct soc_camera_file *icf, { int i; - /* This is for locking debugging only. I removed spinlocks and now I + /* + * This is for locking debugging only. I removed spinlocks and now I * check whether .prepare is ever called on a linked buffer, or whether * a dma IRQ can occur for an in-work or unlinked buffer. Until now - * it hadn't triggered */ + * it hadn't triggered + */ for (i = 0; i < p->count; i++) { struct sh_mobile_ceu_buffer *buf; diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c index 804c675f2cb9..5fdedc766401 100644 --- a/drivers/media/video/soc_camera.c +++ b/drivers/media/video/soc_camera.c @@ -621,8 +621,10 @@ static int soc_camera_streamoff(struct file *file, void *priv, mutex_lock(&icd->video_lock); - /* This calls buf_release from host driver's videobuf_queue_ops for all - * remaining buffers. When the last buffer is freed, stop capture */ + /* + * This calls buf_release from host driver's videobuf_queue_ops for all + * remaining buffers. 
When the last buffer is freed, stop capture + */ videobuf_streamoff(&icf->vb_vidq); v4l2_subdev_call(sd, video, s_stream, 0); @@ -1004,8 +1006,10 @@ epower: return ret; } -/* This is called on device_unregister, which only means we have to disconnect - * from the host, but not remove ourselves from the device list */ +/* + * This is called on device_unregister, which only means we have to disconnect + * from the host, but not remove ourselves from the device list + */ static int soc_camera_remove(struct device *dev) { struct soc_camera_device *icd = to_soc_camera_dev(dev); @@ -1205,8 +1209,10 @@ static int soc_camera_device_register(struct soc_camera_device *icd) } if (num < 0) - /* ok, we have 256 cameras on this host... - * man, stay reasonable... */ + /* + * ok, we have 256 cameras on this host... + * man, stay reasonable... + */ return -ENOMEM; icd->devnum = num; @@ -1333,9 +1339,11 @@ escdevreg: return ret; } -/* Only called on rmmod for each platform device, since they are not +/* + * Only called on rmmod for each platform device, since they are not * hot-pluggable. Now we know, that all our users - hosts and devices have - * been unloaded already */ + * been unloaded already + */ static int __devexit soc_camera_pdrv_remove(struct platform_device *pdev) { struct soc_camera_device *icd = platform_get_drvdata(pdev); diff --git a/drivers/media/video/tw9910.c b/drivers/media/video/tw9910.c index 7bf90a29daee..3cb9ba6caa41 100644 --- a/drivers/media/video/tw9910.c +++ b/drivers/media/video/tw9910.c @@ -180,9 +180,8 @@ */ /* VBICNTL */ -/* RTSEL : control the real time signal -* output from the MPOUT pin -*/ + +/* RTSEL : control the real time signal output from the MPOUT pin */ #define RTSEL_MASK 0x07 #define RTSEL_VLOSS 0x00 /* 0000 = Video loss */ #define RTSEL_HLOCK 0x01 /* 0001 = H-lock */ @@ -596,7 +595,8 @@ static int tw9910_g_register(struct v4l2_subdev *sd, if (ret < 0) return ret; - /* ret = int + /* + * ret = int * reg->val = __u64 */ reg->val = (__u64)ret; diff --git a/include/media/ov772x.h b/include/media/ov772x.h index 30d9629198ef..37bcd096dded 100644 --- a/include/media/ov772x.h +++ b/include/media/ov772x.h @@ -1,4 +1,5 @@ -/* ov772x Camera +/* + * ov772x Camera * * Copyright (C) 2008 Renesas Solutions Corp. * Kuninori Morimoto -- cgit v1.2.3 From ee81152ff007a94f90327809566a4b7ff771ffd3 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Fri, 11 Dec 2009 11:15:06 -0300 Subject: V4L/DVB (13647): v4l: Add a 10-bit monochrome and missing 8- and 10-bit Bayer fourcc codes The 16-bit monochrome fourcc code has been previously abused for a 10-bit format, add a new 10-bit code instead. Also add missing 8- and 10-bit Bayer fourcc codes for completeness. Signed-off-by: Guennadi Liakhovetski Signed-off-by: Mauro Carvalho Chehab --- include/linux/videodev2.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index 59047ebf452e..d4962a782b8a 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h @@ -294,6 +294,7 @@ struct v4l2_pix_format { /* Grey formats */ #define V4L2_PIX_FMT_GREY v4l2_fourcc('G', 'R', 'E', 'Y') /* 8 Greyscale */ +#define V4L2_PIX_FMT_Y10 v4l2_fourcc('Y', '1', '0', ' ') /* 10 Greyscale */ #define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */ /* Palette formats */ @@ -329,7 +330,11 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. 
*/ #define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */ #define V4L2_PIX_FMT_SGRBG8 v4l2_fourcc('G', 'R', 'B', 'G') /* 8 GRGR.. BGBG.. */ -#define V4L2_PIX_FMT_SGRBG10 v4l2_fourcc('B', 'A', '1', '0') /* 10bit raw bayer */ +#define V4L2_PIX_FMT_SRGGB8 v4l2_fourcc('R', 'G', 'G', 'B') /* 8 RGRG.. GBGB.. */ +#define V4L2_PIX_FMT_SBGGR10 v4l2_fourcc('B', 'G', '1', '0') /* 10 BGBG.. GRGR.. */ +#define V4L2_PIX_FMT_SGBRG10 v4l2_fourcc('G', 'B', '1', '0') /* 10 GBGB.. RGRG.. */ +#define V4L2_PIX_FMT_SGRBG10 v4l2_fourcc('B', 'A', '1', '0') /* 10 GRGR.. BGBG.. */ +#define V4L2_PIX_FMT_SRGGB10 v4l2_fourcc('R', 'G', '1', '0') /* 10 RGRG.. GBGB.. */ /* 10bit raw bayer DPCM compressed to 8 bits */ #define V4L2_PIX_FMT_SGRBG10DPCM8 v4l2_fourcc('B', 'D', '1', '0') /* -- cgit v1.2.3 From 3fd7ceffddd52c2f8c004a7999ccb705d592cbfd Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Fri, 11 Dec 2009 11:15:06 -0300 Subject: V4L/DVB (13648): soc-camera: add a private field to struct soc_camera_link Up to now, if a client driver needed platform data apart from those contained in struct soc_camera_link, it had to embed the struct into its own object. This makes the use of such a driver in configurations other than soc-camera Signed-off-by: Mauro Carvalho Chehab --- include/media/soc_camera.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h index 218639f133d3..831efffaf2ae 100644 --- a/include/media/soc_camera.h +++ b/include/media/soc_camera.h @@ -104,6 +104,8 @@ struct soc_camera_link { int i2c_adapter_id; struct i2c_board_info *board_info; const char *module_name; + void *priv; + /* * For non-I2C devices platform platform has to provide methods to * add a device to the system and to remove -- cgit v1.2.3 From 0f4482940a75b52db931e1fff181c9d267e462d2 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Fri, 11 Dec 2009 11:31:35 -0300 Subject: V4L/DVB (13650): soc-camera: switch drivers and platforms to use .priv in struct soc_camera_link After this change drivers can be further extended to not fail, if they don't get platform data, but to use defaults. 
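For illustration only (not part of this patch set), the converted pattern is: the board file keeps client-specific data in its own structure and points soc_camera_link::priv at it, and the client driver retrieves it via icl->priv instead of container_of(). This mirrors the ap325rxa and ov772x hunks that follow.

	static struct ov772x_camera_info ov7725_info = {
		.buswidth = SOCAM_DATAWIDTH_8,
	};

	static struct soc_camera_link ov7725_link = {
		.bus_id         = 0,
		.board_info     = &ap325rxa_i2c_camera[0],
		.i2c_adapter_id = 0,
		.module_name    = "ov772x",
		.priv           = &ov7725_info, /* client-specific data */
	};

	/* in the client driver's probe() */
	icl = to_soc_camera_link(icd);
	if (!icl || !icl->priv)
		return -EINVAL;
	info = icl->priv;
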
Signed-off-by: Guennadi Liakhovetski Signed-off-by: Mauro Carvalho Chehab --- arch/sh/boards/mach-ap325rxa/setup.c | 38 ++++++++++++++++++++---------------- arch/sh/boards/mach-migor/setup.c | 32 +++++++++++++++++------------- drivers/media/video/ov772x.c | 4 ++-- drivers/media/video/tw9910.c | 6 +++--- include/media/ov772x.h | 1 - include/media/soc_camera_platform.h | 1 - include/media/tw9910.h | 1 - 7 files changed, 44 insertions(+), 39 deletions(-) (limited to 'include') diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c index cf9dc12dfeb1..4c8602884573 100644 --- a/arch/sh/boards/mach-ap325rxa/setup.c +++ b/arch/sh/boards/mach-ap325rxa/setup.c @@ -324,12 +324,14 @@ static struct soc_camera_platform_info camera_info = { .bus_param = SOCAM_PCLK_SAMPLE_RISING | SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_MASTER | SOCAM_DATAWIDTH_8, .set_capture = camera_set_capture, - .link = { - .bus_id = 0, - .add_device = ap325rxa_camera_add, - .del_device = ap325rxa_camera_del, - .module_name = "soc_camera_platform", - }, +}; + +struct soc_camera_link camera_link = { + .bus_id = 0, + .add_device = ap325rxa_camera_add, + .del_device = ap325rxa_camera_del, + .module_name = "soc_camera_platform", + .priv = &camera_info, }; static void dummy_release(struct device *dev) @@ -347,7 +349,7 @@ static struct platform_device camera_device = { static int ap325rxa_camera_add(struct soc_camera_link *icl, struct device *dev) { - if (icl != &camera_info.link || camera_probe() <= 0) + if (icl != &camera_link || camera_probe() <= 0) return -ENODEV; camera_info.dev = dev; @@ -357,7 +359,7 @@ static int ap325rxa_camera_add(struct soc_camera_link *icl, static void ap325rxa_camera_del(struct soc_camera_link *icl) { - if (icl != &camera_info.link) + if (icl != &camera_link) return; platform_device_unregister(&camera_device); @@ -470,13 +472,15 @@ static struct ov772x_camera_info ov7725_info = { .buswidth = SOCAM_DATAWIDTH_8, .flags = OV772X_FLAG_VFLIP | OV772X_FLAG_HFLIP, .edgectrl = OV772X_AUTO_EDGECTRL(0xf, 0), - .link = { - .bus_id = 0, - .power = ov7725_power, - .board_info = &ap325rxa_i2c_camera[0], - .i2c_adapter_id = 0, - .module_name = "ov772x", - }, +}; + +static struct soc_camera_link ov7725_link = { + .bus_id = 0, + .power = ov7725_power, + .board_info = &ap325rxa_i2c_camera[0], + .i2c_adapter_id = 0, + .module_name = "ov772x", + .priv = &ov7725_info, }; static struct platform_device ap325rxa_camera[] = { @@ -484,13 +488,13 @@ static struct platform_device ap325rxa_camera[] = { .name = "soc-camera-pdrv", .id = 0, .dev = { - .platform_data = &ov7725_info.link, + .platform_data = &ov7725_link, }, }, { .name = "soc-camera-pdrv", .id = 1, .dev = { - .platform_data = &camera_info.link, + .platform_data = &camera_link, }, }, }; diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c index 9099b6da9957..507c77be476d 100644 --- a/arch/sh/boards/mach-migor/setup.c +++ b/arch/sh/boards/mach-migor/setup.c @@ -432,23 +432,27 @@ static struct i2c_board_info migor_i2c_camera[] = { static struct ov772x_camera_info ov7725_info = { .buswidth = SOCAM_DATAWIDTH_8, - .link = { - .power = ov7725_power, - .board_info = &migor_i2c_camera[0], - .i2c_adapter_id = 0, - .module_name = "ov772x", - }, +}; + +static struct soc_camera_link ov7725_link = { + .power = ov7725_power, + .board_info = &migor_i2c_camera[0], + .i2c_adapter_id = 0, + .module_name = "ov772x", + .priv = &ov7725_info, }; static struct tw9910_video_info tw9910_info = { .buswidth = 
SOCAM_DATAWIDTH_8, .mpout = TW9910_MPO_FIELD, - .link = { - .power = tw9910_power, - .board_info = &migor_i2c_camera[1], - .i2c_adapter_id = 0, - .module_name = "tw9910", - } +}; + +static struct soc_camera_link tw9910_link = { + .power = tw9910_power, + .board_info = &migor_i2c_camera[1], + .i2c_adapter_id = 0, + .module_name = "tw9910", + .priv = &tw9910_info, }; static struct platform_device migor_camera[] = { @@ -456,13 +460,13 @@ static struct platform_device migor_camera[] = { .name = "soc-camera-pdrv", .id = 0, .dev = { - .platform_data = &ov7725_info.link, + .platform_data = &ov7725_link, }, }, { .name = "soc-camera-pdrv", .id = 1, .dev = { - .platform_data = &tw9910_info.link, + .platform_data = &tw9910_link, }, }, }; diff --git a/drivers/media/video/ov772x.c b/drivers/media/video/ov772x.c index 205229333466..dcb690cb5ae7 100644 --- a/drivers/media/video/ov772x.c +++ b/drivers/media/video/ov772x.c @@ -1143,10 +1143,10 @@ static int ov772x_probe(struct i2c_client *client, } icl = to_soc_camera_link(icd); - if (!icl) + if (!icl || !icl->priv) return -EINVAL; - info = container_of(icl, struct ov772x_camera_info, link); + info = icl->priv; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { dev_err(&adapter->dev, diff --git a/drivers/media/video/tw9910.c b/drivers/media/video/tw9910.c index 3cb9ba6caa41..35373d8bf6a0 100644 --- a/drivers/media/video/tw9910.c +++ b/drivers/media/video/tw9910.c @@ -955,10 +955,10 @@ static int tw9910_probe(struct i2c_client *client, } icl = to_soc_camera_link(icd); - if (!icl) + if (!icl || !icl->priv) return -EINVAL; - info = container_of(icl, struct tw9910_video_info, link); + info = icl->priv; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { dev_err(&client->dev, @@ -976,7 +976,7 @@ static int tw9910_probe(struct i2c_client *client, v4l2_i2c_subdev_init(&priv->subdev, client, &tw9910_subdev_ops); icd->ops = &tw9910_ops; - icd->iface = info->link.bus_id; + icd->iface = icl->bus_id; ret = tw9910_video_probe(icd, client); if (ret) { diff --git a/include/media/ov772x.h b/include/media/ov772x.h index 37bcd096dded..14c77efd6a85 100644 --- a/include/media/ov772x.h +++ b/include/media/ov772x.h @@ -55,7 +55,6 @@ struct ov772x_edge_ctrl { struct ov772x_camera_info { unsigned long buswidth; unsigned long flags; - struct soc_camera_link link; struct ov772x_edge_ctrl edgectrl; }; diff --git a/include/media/soc_camera_platform.h b/include/media/soc_camera_platform.h index bb70401b8141..88b3b5747f62 100644 --- a/include/media/soc_camera_platform.h +++ b/include/media/soc_camera_platform.h @@ -23,7 +23,6 @@ struct soc_camera_platform_info { unsigned long bus_param; struct device *dev; int (*set_capture)(struct soc_camera_platform_info *info, int enable); - struct soc_camera_link link; }; #endif /* __SOC_CAMERA_H__ */ diff --git a/include/media/tw9910.h b/include/media/tw9910.h index 73231e7880d8..5e2895a05e6b 100644 --- a/include/media/tw9910.h +++ b/include/media/tw9910.h @@ -32,7 +32,6 @@ enum tw9910_mpout_pin { struct tw9910_video_info { unsigned long buswidth; enum tw9910_mpout_pin mpout; - struct soc_camera_link link; }; -- cgit v1.2.3 From 9a74251d8bee7a25fee89a0be3ccea73e01c1a05 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Fri, 11 Dec 2009 11:41:28 -0300 Subject: V4L/DVB (13658): v4l: add a media-bus API for configuring v4l2 subdev pixel and frame formats Video subdevices, like cameras, decoders, connect to video bridges over specialised busses. 
Data is being transferred over these busses in various formats, which only loosely correspond to fourcc codes, describing how video data is stored in RAM. This is not a one-to-one correspondence, therefore we cannot use fourcc codes to configure subdevice output data formats. This patch adds codes for several such on-the-bus formats and an API, similar to the familiar .s_fmt(), .g_fmt(), .try_fmt(), .enum_fmt() API for configuring those codes. After all users of the old API in struct v4l2_subdev_video_ops are converted, it will be removed. Also add helper routines to support generic pass-through mode for the soc-camera framework. create mode 100644 drivers/media/video/soc_mediabus.c create mode 100644 include/media/soc_mediabus.h create mode 100644 include/media/v4l2-mediabus.h Signed-off-by: Guennadi Liakhovetski Acked-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/Makefile | 2 +- drivers/media/video/soc_mediabus.c | 157 +++++++++++++++++++++++++++++++++++++ include/media/soc_mediabus.h | 65 +++++++++++++++ include/media/v4l2-mediabus.h | 61 ++++++++++++++ include/media/v4l2-subdev.h | 18 ++++- 5 files changed, 301 insertions(+), 2 deletions(-) create mode 100644 drivers/media/video/soc_mediabus.c create mode 100644 include/media/soc_mediabus.h create mode 100644 include/media/v4l2-mediabus.h (limited to 'include') diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile index 7a2dcc34111c..e7bc8daae064 100644 --- a/drivers/media/video/Makefile +++ b/drivers/media/video/Makefile @@ -149,7 +149,7 @@ obj-$(CONFIG_VIDEO_VIVI) += vivi.o obj-$(CONFIG_VIDEO_CX23885) += cx23885/ obj-$(CONFIG_VIDEO_OMAP2) += omap2cam.o -obj-$(CONFIG_SOC_CAMERA) += soc_camera.o +obj-$(CONFIG_SOC_CAMERA) += soc_camera.o soc_mediabus.o obj-$(CONFIG_SOC_CAMERA_PLATFORM) += soc_camera_platform.o # soc-camera host drivers have to be linked after camera drivers obj-$(CONFIG_VIDEO_MX1) += mx1_camera.o diff --git a/drivers/media/video/soc_mediabus.c b/drivers/media/video/soc_mediabus.c new file mode 100644 index 000000000000..f8d5c87dc2aa --- /dev/null +++ b/drivers/media/video/soc_mediabus.c @@ -0,0 +1,157 @@ +/* + * soc-camera media bus helper routines + * + * Copyright (C) 2009, Guennadi Liakhovetski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include + +#include +#include +#include + +#define MBUS_IDX(f) (V4L2_MBUS_FMT_ ## f - V4L2_MBUS_FMT_FIXED - 1) + +static const struct soc_mbus_pixelfmt mbus_fmt[] = { + [MBUS_IDX(YUYV8_2X8_LE)] = { + .fourcc = V4L2_PIX_FMT_YUYV, + .name = "YUYV", + .bits_per_sample = 8, + .packing = SOC_MBUS_PACKING_2X8_PADHI, + .order = SOC_MBUS_ORDER_LE, + }, [MBUS_IDX(YVYU8_2X8_LE)] = { + .fourcc = V4L2_PIX_FMT_YVYU, + .name = "YVYU", + .bits_per_sample = 8, + .packing = SOC_MBUS_PACKING_2X8_PADHI, + .order = SOC_MBUS_ORDER_LE, + }, [MBUS_IDX(YUYV8_2X8_BE)] = { + .fourcc = V4L2_PIX_FMT_UYVY, + .name = "UYVY", + .bits_per_sample = 8, + .packing = SOC_MBUS_PACKING_2X8_PADHI, + .order = SOC_MBUS_ORDER_LE, + }, [MBUS_IDX(YVYU8_2X8_BE)] = { + .fourcc = V4L2_PIX_FMT_VYUY, + .name = "VYUY", + .bits_per_sample = 8, + .packing = SOC_MBUS_PACKING_2X8_PADHI, + .order = SOC_MBUS_ORDER_LE, + }, [MBUS_IDX(RGB555_2X8_PADHI_LE)] = { + .fourcc = V4L2_PIX_FMT_RGB555, + .name = "RGB555", + .bits_per_sample = 8, + .packing = SOC_MBUS_PACKING_2X8_PADHI, + .order = SOC_MBUS_ORDER_LE, + }, [MBUS_IDX(RGB555_2X8_PADHI_BE)] = { + .fourcc = V4L2_PIX_FMT_RGB555X, + .name = "RGB555X", + .bits_per_sample = 8, + .packing = SOC_MBUS_PACKING_2X8_PADHI, + .order = SOC_MBUS_ORDER_LE, + }, [MBUS_IDX(RGB565_2X8_LE)] = { + .fourcc = V4L2_PIX_FMT_RGB565, + .name = "RGB565", + .bits_per_sample = 8, + .packing = SOC_MBUS_PACKING_2X8_PADHI, + .order = SOC_MBUS_ORDER_LE, + }, [MBUS_IDX(RGB565_2X8_BE)] = { + .fourcc = V4L2_PIX_FMT_RGB565X, + .name = "RGB565X", + .bits_per_sample = 8, + .packing = SOC_MBUS_PACKING_2X8_PADHI, + .order = SOC_MBUS_ORDER_LE, + }, [MBUS_IDX(SBGGR8_1X8)] = { + .fourcc = V4L2_PIX_FMT_SBGGR8, + .name = "Bayer 8 BGGR", + .bits_per_sample = 8, + .packing = SOC_MBUS_PACKING_NONE, + .order = SOC_MBUS_ORDER_LE, + }, [MBUS_IDX(SBGGR10_1X10)] = { + .fourcc = V4L2_PIX_FMT_SBGGR10, + .name = "Bayer 10 BGGR", + .bits_per_sample = 10, + .packing = SOC_MBUS_PACKING_EXTEND16, + .order = SOC_MBUS_ORDER_LE, + }, [MBUS_IDX(GREY8_1X8)] = { + .fourcc = V4L2_PIX_FMT_GREY, + .name = "Grey", + .bits_per_sample = 8, + .packing = SOC_MBUS_PACKING_NONE, + .order = SOC_MBUS_ORDER_LE, + }, [MBUS_IDX(Y10_1X10)] = { + .fourcc = V4L2_PIX_FMT_Y10, + .name = "Grey 10bit", + .bits_per_sample = 10, + .packing = SOC_MBUS_PACKING_EXTEND16, + .order = SOC_MBUS_ORDER_LE, + }, [MBUS_IDX(SBGGR10_2X8_PADHI_LE)] = { + .fourcc = V4L2_PIX_FMT_SBGGR10, + .name = "Bayer 10 BGGR", + .bits_per_sample = 8, + .packing = SOC_MBUS_PACKING_2X8_PADHI, + .order = SOC_MBUS_ORDER_LE, + }, [MBUS_IDX(SBGGR10_2X8_PADLO_LE)] = { + .fourcc = V4L2_PIX_FMT_SBGGR10, + .name = "Bayer 10 BGGR", + .bits_per_sample = 8, + .packing = SOC_MBUS_PACKING_2X8_PADLO, + .order = SOC_MBUS_ORDER_LE, + }, [MBUS_IDX(SBGGR10_2X8_PADHI_BE)] = { + .fourcc = V4L2_PIX_FMT_SBGGR10, + .name = "Bayer 10 BGGR", + .bits_per_sample = 8, + .packing = SOC_MBUS_PACKING_2X8_PADHI, + .order = SOC_MBUS_ORDER_BE, + }, [MBUS_IDX(SBGGR10_2X8_PADLO_BE)] = { + .fourcc = V4L2_PIX_FMT_SBGGR10, + .name = "Bayer 10 BGGR", + .bits_per_sample = 8, + .packing = SOC_MBUS_PACKING_2X8_PADLO, + .order = SOC_MBUS_ORDER_BE, + }, +}; + +s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf) +{ + switch (mf->packing) { + case SOC_MBUS_PACKING_NONE: + return width * mf->bits_per_sample / 8; + case SOC_MBUS_PACKING_2X8_PADHI: + case SOC_MBUS_PACKING_2X8_PADLO: + case SOC_MBUS_PACKING_EXTEND16: + return width * 2; + } + return -EINVAL; +} +EXPORT_SYMBOL(soc_mbus_bytes_per_line); + +const struct 
soc_mbus_pixelfmt *soc_mbus_get_fmtdesc( + enum v4l2_mbus_pixelcode code) +{ + if ((unsigned int)(code - V4L2_MBUS_FMT_FIXED) > ARRAY_SIZE(mbus_fmt)) + return NULL; + return mbus_fmt + code - V4L2_MBUS_FMT_FIXED - 1; +} +EXPORT_SYMBOL(soc_mbus_get_fmtdesc); + +static int __init soc_mbus_init(void) +{ + return 0; +} + +static void __exit soc_mbus_exit(void) +{ +} + +module_init(soc_mbus_init); +module_exit(soc_mbus_exit); + +MODULE_DESCRIPTION("soc-camera media bus interface"); +MODULE_AUTHOR("Guennadi Liakhovetski "); +MODULE_LICENSE("GPL v2"); diff --git a/include/media/soc_mediabus.h b/include/media/soc_mediabus.h new file mode 100644 index 000000000000..037cd7be001e --- /dev/null +++ b/include/media/soc_mediabus.h @@ -0,0 +1,65 @@ +/* + * SoC-camera Media Bus API extensions + * + * Copyright (C) 2009, Guennadi Liakhovetski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef SOC_MEDIABUS_H +#define SOC_MEDIABUS_H + +#include + +#include + +/** + * enum soc_mbus_packing - data packing types on the media-bus + * @SOC_MBUS_PACKING_NONE: no packing, bit-for-bit transfer to RAM + * @SOC_MBUS_PACKING_2X8_PADHI: 16 bits transferred in 2 8-bit samples, in the + * possibly incomplete byte high bits are padding + * @SOC_MBUS_PACKING_2X8_PADLO: as above, but low bits are padding + * @SOC_MBUS_PACKING_EXTEND16: sample width (e.g., 10 bits) has to be extended + * to 16 bits + */ +enum soc_mbus_packing { + SOC_MBUS_PACKING_NONE, + SOC_MBUS_PACKING_2X8_PADHI, + SOC_MBUS_PACKING_2X8_PADLO, + SOC_MBUS_PACKING_EXTEND16, +}; + +/** + * enum soc_mbus_order - sample order on the media bus + * @SOC_MBUS_ORDER_LE: least significant sample first + * @SOC_MBUS_ORDER_BE: most significant sample first + */ +enum soc_mbus_order { + SOC_MBUS_ORDER_LE, + SOC_MBUS_ORDER_BE, +}; + +/** + * struct soc_mbus_pixelfmt - Data format on the media bus + * @name: Name of the format + * @fourcc: Fourcc code, that will be obtained if the data is + * stored in memory in the following way: + * @packing: Type of sample-packing, that has to be used + * @order: Sample order when storing in memory + * @bits_per_sample: How many bits the bridge has to sample + */ +struct soc_mbus_pixelfmt { + const char *name; + u32 fourcc; + enum soc_mbus_packing packing; + enum soc_mbus_order order; + u8 bits_per_sample; +}; + +const struct soc_mbus_pixelfmt *soc_mbus_get_fmtdesc( + enum v4l2_mbus_pixelcode code); +s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf); + +#endif diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h new file mode 100644 index 000000000000..0dbe02ada259 --- /dev/null +++ b/include/media/v4l2-mediabus.h @@ -0,0 +1,61 @@ +/* + * Media Bus API header + * + * Copyright (C) 2009, Guennadi Liakhovetski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef V4L2_MEDIABUS_H +#define V4L2_MEDIABUS_H + +/* + * These pixel codes uniquely identify data formats on the media bus. Mostly + * they correspond to similarly named V4L2_PIX_FMT_* formats, format 0 is + * reserved, V4L2_MBUS_FMT_FIXED shall be used by host-client pairs, where the + * data format is fixed. 
Additionally, "2X8" means that one pixel is transferred + * in two 8-bit samples, "BE" or "LE" specify in which order those samples are + * transferred over the bus: "LE" means that the least significant bits are + * transferred first, "BE" means that the most significant bits are transferred + * first, and "PADHI" and "PADLO" define which bits - low or high, in the + * incomplete high byte, are filled with padding bits. + */ +enum v4l2_mbus_pixelcode { + V4L2_MBUS_FMT_FIXED = 1, + V4L2_MBUS_FMT_YUYV8_2X8_LE, + V4L2_MBUS_FMT_YVYU8_2X8_LE, + V4L2_MBUS_FMT_YUYV8_2X8_BE, + V4L2_MBUS_FMT_YVYU8_2X8_BE, + V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE, + V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE, + V4L2_MBUS_FMT_RGB565_2X8_LE, + V4L2_MBUS_FMT_RGB565_2X8_BE, + V4L2_MBUS_FMT_SBGGR8_1X8, + V4L2_MBUS_FMT_SBGGR10_1X10, + V4L2_MBUS_FMT_GREY8_1X8, + V4L2_MBUS_FMT_Y10_1X10, + V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE, + V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE, + V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE, + V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE, +}; + +/** + * struct v4l2_mbus_framefmt - frame format on the media bus + * @width: frame width + * @height: frame height + * @code: data format code + * @field: used interlacing type + * @colorspace: colorspace of the data + */ +struct v4l2_mbus_framefmt { + __u32 width; + __u32 height; + enum v4l2_mbus_pixelcode code; + enum v4l2_field field; + enum v4l2_colorspace colorspace; +}; + +#endif diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h index a128458dc9eb..9ba99cd39ee7 100644 --- a/include/media/v4l2-subdev.h +++ b/include/media/v4l2-subdev.h @@ -22,6 +22,7 @@ #define _V4L2_SUBDEV_H #include +#include /* generic v4l2_device notify callback notification values */ #define V4L2_SUBDEV_IR_RX_NOTIFY _IOW('v', 0, u32) @@ -207,7 +208,7 @@ struct v4l2_subdev_audio_ops { s_std_output: set v4l2_std_id for video OUTPUT devices. This is ignored by video input devices. - s_crystal_freq: sets the frequency of the crystal used to generate the + s_crystal_freq: sets the frequency of the crystal used to generate the clocks in Hz. An extra flags field allows device specific configuration regarding clock frequency dividers, etc. If not used, then set flags to 0. If the frequency is not supported, then -EINVAL is returned. @@ -230,6 +231,13 @@ struct v4l2_subdev_audio_ops { g_dv_timings(): Get custom dv timings in the sub device. + enum_mbus_fmt: enumerate pixel formats, provided by a video data source + + g_mbus_fmt: get the current pixel format, provided by a video data source + + try_mbus_fmt: try to set a pixel format on a video data source + + s_mbus_fmt: set a pixel format on a video data source */ struct v4l2_subdev_video_ops { int (*s_routing)(struct v4l2_subdev *sd, u32 input, u32 output, u32 config); @@ -261,6 +269,14 @@ struct v4l2_subdev_video_ops { struct v4l2_dv_timings *timings); int (*g_dv_timings)(struct v4l2_subdev *sd, struct v4l2_dv_timings *timings); + int (*enum_mbus_fmt)(struct v4l2_subdev *sd, int index, + enum v4l2_mbus_pixelcode *code); + int (*g_mbus_fmt)(struct v4l2_subdev *sd, + struct v4l2_mbus_framefmt *fmt); + int (*try_mbus_fmt)(struct v4l2_subdev *sd, + struct v4l2_mbus_framefmt *fmt); + int (*s_mbus_fmt)(struct v4l2_subdev *sd, + struct v4l2_mbus_framefmt *fmt); }; /** -- cgit v1.2.3 From 760697beca338599a65484389c7abbe54aedb664 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Fri, 11 Dec 2009 11:46:49 -0300 Subject: V4L/DVB (13659): soc-camera: convert to the new mediabus API Convert soc-camera core and all soc-camera drivers to the new mediabus API. 
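To make the relationship between the pieces introduced above concrete, here is a minimal sketch, not taken from the patches themselves, of how a bridge/host driver is expected to combine them: the user-visible v4l2_pix_format request is translated into a v4l2_mbus_framefmt, negotiated with the client through the new try_mbus_fmt operation, and the agreed media-bus code is then mapped back onto a memory layout with soc_mbus_get_fmtdesc() and soc_mbus_bytes_per_line(). The function name example_negotiate_format() and its error handling are hypothetical; the types, operations and helpers are the ones added by this series.

#include <linux/videodev2.h>	/* struct v4l2_pix_format */
#include <media/v4l2-subdev.h>	/* v4l2_subdev_call(), *_mbus_fmt ops */
#include <media/soc_mediabus.h>	/* soc_mbus_* helpers */

/*
 * Hypothetical helper, for illustration only: negotiate a frame format
 * with a client subdevice via the media-bus API and derive the memory
 * layout of the resulting capture buffer.
 */
static int example_negotiate_format(struct v4l2_subdev *sd,
				    struct v4l2_pix_format *pix,
				    enum v4l2_mbus_pixelcode code)
{
	const struct soc_mbus_pixelfmt *host_fmt;
	struct v4l2_mbus_framefmt mf;
	s32 bytes_per_line;
	int ret;

	/* Express the request in media-bus terms instead of a fourcc */
	mf.width	= pix->width;
	mf.height	= pix->height;
	mf.field	= pix->field;
	mf.colorspace	= pix->colorspace;
	mf.code		= code;

	/* Let the client adjust the geometry to what it can produce */
	ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
	if (ret < 0)
		return ret;

	/* Map the agreed media-bus code back onto a memory format */
	host_fmt = soc_mbus_get_fmtdesc(mf.code);
	if (!host_fmt)
		return -EINVAL;

	bytes_per_line = soc_mbus_bytes_per_line(mf.width, host_fmt);
	if (bytes_per_line < 0)
		return bytes_per_line;

	pix->width		= mf.width;
	pix->height		= mf.height;
	pix->field		= mf.field;
	pix->colorspace		= mf.colorspace;
	pix->pixelformat	= host_fmt->fourcc;
	pix->bytesperline	= bytes_per_line;
	pix->sizeimage		= bytes_per_line * mf.height;

	return 0;
}

For example, for V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE the mbus_fmt table above selects SOC_MBUS_PACKING_2X8_PADHI, so soc_mbus_bytes_per_line() returns width * 2: each 10-bit sample occupies two bytes in memory, with the unused high bits of the second byte acting as padding. The host drivers converted below (for instance mx1_camera_try_fmt() and mx3_camera_try_fmt()) follow essentially this pattern, with additional platform-specific limits applied to width and height.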
This also takes soc-camera client drivers one step closer to also be usable with generic v4l2-subdev host drivers. Signed-off-by: Guennadi Liakhovetski Acked-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- arch/sh/boards/mach-ap325rxa/setup.c | 3 +- drivers/media/video/mt9m001.c | 141 ++++++---- drivers/media/video/mt9m111.c | 188 ++++++++------ drivers/media/video/mt9t031.c | 66 ++--- drivers/media/video/mt9v022.c | 150 ++++++----- drivers/media/video/mx1_camera.c | 90 +++++-- drivers/media/video/mx3_camera.c | 278 +++++++++++--------- drivers/media/video/ov772x.c | 229 ++++++++-------- drivers/media/video/ov9640.c | 107 +++++--- drivers/media/video/pxa_camera.c | 272 ++++++++++--------- drivers/media/video/rj54n1cb0c.c | 201 ++++++++++---- drivers/media/video/sh_mobile_ceu_camera.c | 403 +++++++++++++++++------------ drivers/media/video/soc_camera.c | 78 +++--- drivers/media/video/soc_camera_platform.c | 39 ++- drivers/media/video/tw9910.c | 91 +++---- include/media/soc_camera.h | 25 +- include/media/soc_camera_platform.h | 2 +- 17 files changed, 1383 insertions(+), 980 deletions(-) (limited to 'include') diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c index 4c8602884573..7a9f69663f1a 100644 --- a/arch/sh/boards/mach-ap325rxa/setup.c +++ b/arch/sh/boards/mach-ap325rxa/setup.c @@ -316,8 +316,9 @@ static struct soc_camera_platform_info camera_info = { .format_name = "UYVY", .format_depth = 16, .format = { - .pixelformat = V4L2_PIX_FMT_UYVY, + .code = V4L2_MBUS_FMT_YUYV8_2X8_BE, .colorspace = V4L2_COLORSPACE_SMPTE170M, + .field = V4L2_FIELD_NONE, .width = 640, .height = 480, }, diff --git a/drivers/media/video/mt9m001.c b/drivers/media/video/mt9m001.c index cc9066000c2d..b62c0bd3f8ea 100644 --- a/drivers/media/video/mt9m001.c +++ b/drivers/media/video/mt9m001.c @@ -48,41 +48,46 @@ #define MT9M001_COLUMN_SKIP 20 #define MT9M001_ROW_SKIP 12 -static const struct soc_camera_data_format mt9m001_colour_formats[] = { +/* MT9M001 has only one fixed colorspace per pixelcode */ +struct mt9m001_datafmt { + enum v4l2_mbus_pixelcode code; + enum v4l2_colorspace colorspace; +}; + +/* Find a data format by a pixel code in an array */ +static const struct mt9m001_datafmt *mt9m001_find_datafmt( + enum v4l2_mbus_pixelcode code, const struct mt9m001_datafmt *fmt, + int n) +{ + int i; + for (i = 0; i < n; i++) + if (fmt[i].code == code) + return fmt + i; + + return NULL; +} + +static const struct mt9m001_datafmt mt9m001_colour_fmts[] = { /* * Order important: first natively supported, * second supported with a GPIO extender */ - { - .name = "Bayer (sRGB) 10 bit", - .depth = 10, - .fourcc = V4L2_PIX_FMT_SBGGR16, - .colorspace = V4L2_COLORSPACE_SRGB, - }, { - .name = "Bayer (sRGB) 8 bit", - .depth = 8, - .fourcc = V4L2_PIX_FMT_SBGGR8, - .colorspace = V4L2_COLORSPACE_SRGB, - } + {V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_COLORSPACE_SRGB}, + {V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB}, }; -static const struct soc_camera_data_format mt9m001_monochrome_formats[] = { +static const struct mt9m001_datafmt mt9m001_monochrome_fmts[] = { /* Order important - see above */ - { - .name = "Monochrome 10 bit", - .depth = 10, - .fourcc = V4L2_PIX_FMT_Y16, - }, { - .name = "Monochrome 8 bit", - .depth = 8, - .fourcc = V4L2_PIX_FMT_GREY, - }, + {V4L2_MBUS_FMT_Y10_1X10, V4L2_COLORSPACE_JPEG}, + {V4L2_MBUS_FMT_GREY8_1X8, V4L2_COLORSPACE_JPEG}, }; struct mt9m001 { struct v4l2_subdev subdev; struct v4l2_rect rect; /* Sensor window */ - __u32 fourcc; + const struct mt9m001_datafmt *fmt; + 
const struct mt9m001_datafmt *fmts; + int num_fmts; int model; /* V4L2_IDENT_MT9M001* codes from v4l2-chip-ident.h */ unsigned int gain; unsigned int exposure; @@ -209,8 +214,7 @@ static int mt9m001_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) const u16 hblank = 9, vblank = 25; unsigned int total_h; - if (mt9m001->fourcc == V4L2_PIX_FMT_SBGGR8 || - mt9m001->fourcc == V4L2_PIX_FMT_SBGGR16) + if (mt9m001->fmts == mt9m001_colour_fmts) /* * Bayer format - even number of rows for simplicity, * but let the user play with the top row. @@ -290,32 +294,32 @@ static int mt9m001_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a) return 0; } -static int mt9m001_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) +static int mt9m001_g_fmt(struct v4l2_subdev *sd, + struct v4l2_mbus_framefmt *mf) { struct i2c_client *client = sd->priv; struct mt9m001 *mt9m001 = to_mt9m001(client); - struct v4l2_pix_format *pix = &f->fmt.pix; - pix->width = mt9m001->rect.width; - pix->height = mt9m001->rect.height; - pix->pixelformat = mt9m001->fourcc; - pix->field = V4L2_FIELD_NONE; - pix->colorspace = V4L2_COLORSPACE_SRGB; + mf->width = mt9m001->rect.width; + mf->height = mt9m001->rect.height; + mf->code = mt9m001->fmt->code; + mf->colorspace = mt9m001->fmt->colorspace; + mf->field = V4L2_FIELD_NONE; return 0; } -static int mt9m001_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) +static int mt9m001_s_fmt(struct v4l2_subdev *sd, + struct v4l2_mbus_framefmt *mf) { struct i2c_client *client = sd->priv; struct mt9m001 *mt9m001 = to_mt9m001(client); - struct v4l2_pix_format *pix = &f->fmt.pix; struct v4l2_crop a = { .c = { .left = mt9m001->rect.left, .top = mt9m001->rect.top, - .width = pix->width, - .height = pix->height, + .width = mf->width, + .height = mf->height, }, }; int ret; @@ -323,28 +327,39 @@ static int mt9m001_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) /* No support for scaling so far, just crop. 
TODO: use skipping */ ret = mt9m001_s_crop(sd, &a); if (!ret) { - pix->width = mt9m001->rect.width; - pix->height = mt9m001->rect.height; - mt9m001->fourcc = pix->pixelformat; + mf->width = mt9m001->rect.width; + mf->height = mt9m001->rect.height; + mt9m001->fmt = mt9m001_find_datafmt(mf->code, + mt9m001->fmts, mt9m001->num_fmts); + mf->colorspace = mt9m001->fmt->colorspace; } return ret; } -static int mt9m001_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) +static int mt9m001_try_fmt(struct v4l2_subdev *sd, + struct v4l2_mbus_framefmt *mf) { struct i2c_client *client = sd->priv; struct mt9m001 *mt9m001 = to_mt9m001(client); - struct v4l2_pix_format *pix = &f->fmt.pix; + const struct mt9m001_datafmt *fmt; - v4l_bound_align_image(&pix->width, MT9M001_MIN_WIDTH, + v4l_bound_align_image(&mf->width, MT9M001_MIN_WIDTH, MT9M001_MAX_WIDTH, 1, - &pix->height, MT9M001_MIN_HEIGHT + mt9m001->y_skip_top, + &mf->height, MT9M001_MIN_HEIGHT + mt9m001->y_skip_top, MT9M001_MAX_HEIGHT + mt9m001->y_skip_top, 0, 0); - if (pix->pixelformat == V4L2_PIX_FMT_SBGGR8 || - pix->pixelformat == V4L2_PIX_FMT_SBGGR16) - pix->height = ALIGN(pix->height - 1, 2); + if (mt9m001->fmts == mt9m001_colour_fmts) + mf->height = ALIGN(mf->height - 1, 2); + + fmt = mt9m001_find_datafmt(mf->code, mt9m001->fmts, + mt9m001->num_fmts); + if (!fmt) { + fmt = mt9m001->fmt; + mf->code = fmt->code; + } + + mf->colorspace = fmt->colorspace; return 0; } @@ -608,11 +623,11 @@ static int mt9m001_video_probe(struct soc_camera_device *icd, case 0x8411: case 0x8421: mt9m001->model = V4L2_IDENT_MT9M001C12ST; - icd->formats = mt9m001_colour_formats; + mt9m001->fmts = mt9m001_colour_fmts; break; case 0x8431: mt9m001->model = V4L2_IDENT_MT9M001C12STM; - icd->formats = mt9m001_monochrome_formats; + mt9m001->fmts = mt9m001_monochrome_fmts; break; default: dev_err(&client->dev, @@ -620,7 +635,7 @@ static int mt9m001_video_probe(struct soc_camera_device *icd, return -ENODEV; } - icd->num_formats = 0; + mt9m001->num_fmts = 0; /* * This is a 10bit sensor, so by default we only allow 10bit. @@ -633,14 +648,14 @@ static int mt9m001_video_probe(struct soc_camera_device *icd, flags = SOCAM_DATAWIDTH_10; if (flags & SOCAM_DATAWIDTH_10) - icd->num_formats++; + mt9m001->num_fmts++; else - icd->formats++; + mt9m001->fmts++; if (flags & SOCAM_DATAWIDTH_8) - icd->num_formats++; + mt9m001->num_fmts++; - mt9m001->fourcc = icd->formats->fourcc; + mt9m001->fmt = &mt9m001->fmts[0]; dev_info(&client->dev, "Detected a MT9M001 chip ID %x (%s)\n", data, data == 0x8431 ? 
"C12STM" : "C12ST"); @@ -686,14 +701,28 @@ static struct v4l2_subdev_core_ops mt9m001_subdev_core_ops = { #endif }; +static int mt9m001_enum_fmt(struct v4l2_subdev *sd, int index, + enum v4l2_mbus_pixelcode *code) +{ + struct i2c_client *client = sd->priv; + struct mt9m001 *mt9m001 = to_mt9m001(client); + + if ((unsigned int)index >= mt9m001->num_fmts) + return -EINVAL; + + *code = mt9m001->fmts[index].code; + return 0; +} + static struct v4l2_subdev_video_ops mt9m001_subdev_video_ops = { .s_stream = mt9m001_s_stream, - .s_fmt = mt9m001_s_fmt, - .g_fmt = mt9m001_g_fmt, - .try_fmt = mt9m001_try_fmt, + .s_mbus_fmt = mt9m001_s_fmt, + .g_mbus_fmt = mt9m001_g_fmt, + .try_mbus_fmt = mt9m001_try_fmt, .s_crop = mt9m001_s_crop, .g_crop = mt9m001_g_crop, .cropcap = mt9m001_cropcap, + .enum_mbus_fmt = mt9m001_enum_fmt, }; static struct v4l2_subdev_sensor_ops mt9m001_subdev_sensor_ops = { diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c index 30db625455e4..d35f536f9fc3 100644 --- a/drivers/media/video/mt9m111.c +++ b/drivers/media/video/mt9m111.c @@ -123,23 +123,34 @@ #define MT9M111_MAX_HEIGHT 1024 #define MT9M111_MAX_WIDTH 1280 -#define COL_FMT(_name, _depth, _fourcc, _colorspace) \ - { .name = _name, .depth = _depth, .fourcc = _fourcc, \ - .colorspace = _colorspace } -#define RGB_FMT(_name, _depth, _fourcc) \ - COL_FMT(_name, _depth, _fourcc, V4L2_COLORSPACE_SRGB) -#define JPG_FMT(_name, _depth, _fourcc) \ - COL_FMT(_name, _depth, _fourcc, V4L2_COLORSPACE_JPEG) - -static const struct soc_camera_data_format mt9m111_colour_formats[] = { - JPG_FMT("CbYCrY 16 bit", 16, V4L2_PIX_FMT_UYVY), - JPG_FMT("CrYCbY 16 bit", 16, V4L2_PIX_FMT_VYUY), - JPG_FMT("YCbYCr 16 bit", 16, V4L2_PIX_FMT_YUYV), - JPG_FMT("YCrYCb 16 bit", 16, V4L2_PIX_FMT_YVYU), - RGB_FMT("RGB 565", 16, V4L2_PIX_FMT_RGB565), - RGB_FMT("RGB 555", 16, V4L2_PIX_FMT_RGB555), - RGB_FMT("Bayer (sRGB) 10 bit", 10, V4L2_PIX_FMT_SBGGR16), - RGB_FMT("Bayer (sRGB) 8 bit", 8, V4L2_PIX_FMT_SBGGR8), +/* MT9M111 has only one fixed colorspace per pixelcode */ +struct mt9m111_datafmt { + enum v4l2_mbus_pixelcode code; + enum v4l2_colorspace colorspace; +}; + +/* Find a data format by a pixel code in an array */ +static const struct mt9m111_datafmt *mt9m111_find_datafmt( + enum v4l2_mbus_pixelcode code, const struct mt9m111_datafmt *fmt, + int n) +{ + int i; + for (i = 0; i < n; i++) + if (fmt[i].code == code) + return fmt + i; + + return NULL; +} + +static const struct mt9m111_datafmt mt9m111_colour_fmts[] = { + {V4L2_MBUS_FMT_YUYV8_2X8_LE, V4L2_COLORSPACE_JPEG}, + {V4L2_MBUS_FMT_YVYU8_2X8_LE, V4L2_COLORSPACE_JPEG}, + {V4L2_MBUS_FMT_YUYV8_2X8_BE, V4L2_COLORSPACE_JPEG}, + {V4L2_MBUS_FMT_YVYU8_2X8_BE, V4L2_COLORSPACE_JPEG}, + {V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB}, + {V4L2_MBUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB}, + {V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB}, + {V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB}, }; enum mt9m111_context { @@ -152,7 +163,7 @@ struct mt9m111 { int model; /* V4L2_IDENT_MT9M11x* codes from v4l2-chip-ident.h */ enum mt9m111_context context; struct v4l2_rect rect; - u32 pixfmt; + const struct mt9m111_datafmt *fmt; unsigned int gain; unsigned char autoexposure; unsigned char datawidth; @@ -258,8 +269,8 @@ static int mt9m111_setup_rect(struct i2c_client *client, int width = rect->width; int height = rect->height; - if (mt9m111->pixfmt == V4L2_PIX_FMT_SBGGR8 || - mt9m111->pixfmt == V4L2_PIX_FMT_SBGGR16) + if (mt9m111->fmt->code == V4L2_MBUS_FMT_SBGGR8_1X8 || + 
mt9m111->fmt->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE) is_raw_format = 1; else is_raw_format = 0; @@ -307,7 +318,8 @@ static int mt9m111_setup_pixfmt(struct i2c_client *client, u16 outfmt) static int mt9m111_setfmt_bayer8(struct i2c_client *client) { - return mt9m111_setup_pixfmt(client, MT9M111_OUTFMT_PROCESSED_BAYER); + return mt9m111_setup_pixfmt(client, MT9M111_OUTFMT_PROCESSED_BAYER | + MT9M111_OUTFMT_RGB); } static int mt9m111_setfmt_bayer10(struct i2c_client *client) @@ -401,8 +413,8 @@ static int mt9m111_make_rect(struct i2c_client *client, { struct mt9m111 *mt9m111 = to_mt9m111(client); - if (mt9m111->pixfmt == V4L2_PIX_FMT_SBGGR8 || - mt9m111->pixfmt == V4L2_PIX_FMT_SBGGR16) { + if (mt9m111->fmt->code == V4L2_MBUS_FMT_SBGGR8_1X8 || + mt9m111->fmt->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE) { /* Bayer format - even size lengths */ rect->width = ALIGN(rect->width, 2); rect->height = ALIGN(rect->height, 2); @@ -460,120 +472,139 @@ static int mt9m111_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a) return 0; } -static int mt9m111_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) +static int mt9m111_g_fmt(struct v4l2_subdev *sd, + struct v4l2_mbus_framefmt *mf) { struct i2c_client *client = sd->priv; struct mt9m111 *mt9m111 = to_mt9m111(client); - struct v4l2_pix_format *pix = &f->fmt.pix; - pix->width = mt9m111->rect.width; - pix->height = mt9m111->rect.height; - pix->pixelformat = mt9m111->pixfmt; - pix->field = V4L2_FIELD_NONE; - pix->colorspace = V4L2_COLORSPACE_SRGB; + mf->width = mt9m111->rect.width; + mf->height = mt9m111->rect.height; + mf->code = mt9m111->fmt->code; + mf->field = V4L2_FIELD_NONE; return 0; } -static int mt9m111_set_pixfmt(struct i2c_client *client, u32 pixfmt) +static int mt9m111_set_pixfmt(struct i2c_client *client, + enum v4l2_mbus_pixelcode code) { struct mt9m111 *mt9m111 = to_mt9m111(client); int ret; - switch (pixfmt) { - case V4L2_PIX_FMT_SBGGR8: + switch (code) { + case V4L2_MBUS_FMT_SBGGR8_1X8: ret = mt9m111_setfmt_bayer8(client); break; - case V4L2_PIX_FMT_SBGGR16: + case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE: ret = mt9m111_setfmt_bayer10(client); break; - case V4L2_PIX_FMT_RGB555: + case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE: ret = mt9m111_setfmt_rgb555(client); break; - case V4L2_PIX_FMT_RGB565: + case V4L2_MBUS_FMT_RGB565_2X8_LE: ret = mt9m111_setfmt_rgb565(client); break; - case V4L2_PIX_FMT_UYVY: + case V4L2_MBUS_FMT_YUYV8_2X8_BE: mt9m111->swap_yuv_y_chromas = 0; mt9m111->swap_yuv_cb_cr = 0; ret = mt9m111_setfmt_yuv(client); break; - case V4L2_PIX_FMT_VYUY: + case V4L2_MBUS_FMT_YVYU8_2X8_BE: mt9m111->swap_yuv_y_chromas = 0; mt9m111->swap_yuv_cb_cr = 1; ret = mt9m111_setfmt_yuv(client); break; - case V4L2_PIX_FMT_YUYV: + case V4L2_MBUS_FMT_YUYV8_2X8_LE: mt9m111->swap_yuv_y_chromas = 1; mt9m111->swap_yuv_cb_cr = 0; ret = mt9m111_setfmt_yuv(client); break; - case V4L2_PIX_FMT_YVYU: + case V4L2_MBUS_FMT_YVYU8_2X8_LE: mt9m111->swap_yuv_y_chromas = 1; mt9m111->swap_yuv_cb_cr = 1; ret = mt9m111_setfmt_yuv(client); break; default: dev_err(&client->dev, "Pixel format not handled : %x\n", - pixfmt); + code); ret = -EINVAL; } - if (!ret) - mt9m111->pixfmt = pixfmt; - return ret; } -static int mt9m111_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) +static int mt9m111_s_fmt(struct v4l2_subdev *sd, + struct v4l2_mbus_framefmt *mf) { struct i2c_client *client = sd->priv; + const struct mt9m111_datafmt *fmt; struct mt9m111 *mt9m111 = to_mt9m111(client); - struct v4l2_pix_format *pix = &f->fmt.pix; struct v4l2_rect rect = { .left = 
mt9m111->rect.left, .top = mt9m111->rect.top, - .width = pix->width, - .height = pix->height, + .width = mf->width, + .height = mf->height, }; int ret; + fmt = mt9m111_find_datafmt(mf->code, mt9m111_colour_fmts, + ARRAY_SIZE(mt9m111_colour_fmts)); + if (!fmt) + return -EINVAL; + dev_dbg(&client->dev, - "%s fmt=%x left=%d, top=%d, width=%d, height=%d\n", __func__, - pix->pixelformat, rect.left, rect.top, rect.width, rect.height); + "%s code=%x left=%d, top=%d, width=%d, height=%d\n", __func__, + mf->code, rect.left, rect.top, rect.width, rect.height); ret = mt9m111_make_rect(client, &rect); if (!ret) - ret = mt9m111_set_pixfmt(client, pix->pixelformat); - if (!ret) - mt9m111->rect = rect; + ret = mt9m111_set_pixfmt(client, mf->code); + if (!ret) { + mt9m111->rect = rect; + mt9m111->fmt = fmt; + mf->colorspace = fmt->colorspace; + } + return ret; } -static int mt9m111_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) +static int mt9m111_try_fmt(struct v4l2_subdev *sd, + struct v4l2_mbus_framefmt *mf) { - struct v4l2_pix_format *pix = &f->fmt.pix; - bool bayer = pix->pixelformat == V4L2_PIX_FMT_SBGGR8 || - pix->pixelformat == V4L2_PIX_FMT_SBGGR16; + struct i2c_client *client = sd->priv; + struct mt9m111 *mt9m111 = to_mt9m111(client); + const struct mt9m111_datafmt *fmt; + bool bayer = mf->code == V4L2_MBUS_FMT_SBGGR8_1X8 || + mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE; + + fmt = mt9m111_find_datafmt(mf->code, mt9m111_colour_fmts, + ARRAY_SIZE(mt9m111_colour_fmts)); + if (!fmt) { + fmt = mt9m111->fmt; + mf->code = fmt->code; + } /* * With Bayer format enforce even side lengths, but let the user play * with the starting pixel */ - if (pix->height > MT9M111_MAX_HEIGHT) - pix->height = MT9M111_MAX_HEIGHT; - else if (pix->height < 2) - pix->height = 2; + if (mf->height > MT9M111_MAX_HEIGHT) + mf->height = MT9M111_MAX_HEIGHT; + else if (mf->height < 2) + mf->height = 2; else if (bayer) - pix->height = ALIGN(pix->height, 2); + mf->height = ALIGN(mf->height, 2); - if (pix->width > MT9M111_MAX_WIDTH) - pix->width = MT9M111_MAX_WIDTH; - else if (pix->width < 2) - pix->width = 2; + if (mf->width > MT9M111_MAX_WIDTH) + mf->width = MT9M111_MAX_WIDTH; + else if (mf->width < 2) + mf->width = 2; else if (bayer) - pix->width = ALIGN(pix->width, 2); + mf->width = ALIGN(mf->width, 2); + + mf->colorspace = fmt->colorspace; return 0; } @@ -863,7 +894,7 @@ static int mt9m111_restore_state(struct i2c_client *client) struct mt9m111 *mt9m111 = to_mt9m111(client); mt9m111_set_context(client, mt9m111->context); - mt9m111_set_pixfmt(client, mt9m111->pixfmt); + mt9m111_set_pixfmt(client, mt9m111->fmt->code); mt9m111_setup_rect(client, &mt9m111->rect); mt9m111_set_flip(client, mt9m111->hflip, MT9M111_RMB_MIRROR_COLS); mt9m111_set_flip(client, mt9m111->vflip, MT9M111_RMB_MIRROR_ROWS); @@ -952,9 +983,6 @@ static int mt9m111_video_probe(struct soc_camera_device *icd, goto ei2c; } - icd->formats = mt9m111_colour_formats; - icd->num_formats = ARRAY_SIZE(mt9m111_colour_formats); - dev_info(&client->dev, "Detected a MT9M11x chip ID %x\n", data); ei2c: @@ -971,13 +999,24 @@ static struct v4l2_subdev_core_ops mt9m111_subdev_core_ops = { #endif }; +static int mt9m111_enum_fmt(struct v4l2_subdev *sd, int index, + enum v4l2_mbus_pixelcode *code) +{ + if ((unsigned int)index >= ARRAY_SIZE(mt9m111_colour_fmts)) + return -EINVAL; + + *code = mt9m111_colour_fmts[index].code; + return 0; +} + static struct v4l2_subdev_video_ops mt9m111_subdev_video_ops = { - .s_fmt = mt9m111_s_fmt, - .g_fmt = mt9m111_g_fmt, - .try_fmt = 
mt9m111_try_fmt, + .s_mbus_fmt = mt9m111_s_fmt, + .g_mbus_fmt = mt9m111_g_fmt, + .try_mbus_fmt = mt9m111_try_fmt, .s_crop = mt9m111_s_crop, .g_crop = mt9m111_g_crop, .cropcap = mt9m111_cropcap, + .enum_mbus_fmt = mt9m111_enum_fmt, }; static struct v4l2_subdev_ops mt9m111_subdev_ops = { @@ -1024,6 +1063,7 @@ static int mt9m111_probe(struct i2c_client *client, mt9m111->rect.top = MT9M111_MIN_DARK_ROWS; mt9m111->rect.width = MT9M111_MAX_WIDTH; mt9m111->rect.height = MT9M111_MAX_HEIGHT; + mt9m111->fmt = &mt9m111_colour_fmts[0]; ret = mt9m111_video_probe(icd, client); if (ret) { diff --git a/drivers/media/video/mt9t031.c b/drivers/media/video/mt9t031.c index e3f664f21c48..69c227f65bcb 100644 --- a/drivers/media/video/mt9t031.c +++ b/drivers/media/video/mt9t031.c @@ -60,15 +60,6 @@ SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_DATA_ACTIVE_HIGH | \ SOCAM_MASTER | SOCAM_DATAWIDTH_10) -static const struct soc_camera_data_format mt9t031_colour_formats[] = { - { - .name = "Bayer (sRGB) 10 bit", - .depth = 10, - .fourcc = V4L2_PIX_FMT_SGRBG10, - .colorspace = V4L2_COLORSPACE_SRGB, - } -}; - struct mt9t031 { struct v4l2_subdev subdev; struct v4l2_rect rect; /* Sensor window */ @@ -378,27 +369,27 @@ static int mt9t031_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a) return 0; } -static int mt9t031_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) +static int mt9t031_g_fmt(struct v4l2_subdev *sd, + struct v4l2_mbus_framefmt *mf) { struct i2c_client *client = sd->priv; struct mt9t031 *mt9t031 = to_mt9t031(client); - struct v4l2_pix_format *pix = &f->fmt.pix; - pix->width = mt9t031->rect.width / mt9t031->xskip; - pix->height = mt9t031->rect.height / mt9t031->yskip; - pix->pixelformat = V4L2_PIX_FMT_SGRBG10; - pix->field = V4L2_FIELD_NONE; - pix->colorspace = V4L2_COLORSPACE_SRGB; + mf->width = mt9t031->rect.width / mt9t031->xskip; + mf->height = mt9t031->rect.height / mt9t031->yskip; + mf->code = V4L2_MBUS_FMT_SBGGR10_1X10; + mf->colorspace = V4L2_COLORSPACE_SRGB; + mf->field = V4L2_FIELD_NONE; return 0; } -static int mt9t031_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) +static int mt9t031_s_fmt(struct v4l2_subdev *sd, + struct v4l2_mbus_framefmt *mf) { struct i2c_client *client = sd->priv; struct mt9t031 *mt9t031 = to_mt9t031(client); struct soc_camera_device *icd = client->dev.platform_data; - struct v4l2_pix_format *pix = &f->fmt.pix; u16 xskip, yskip; struct v4l2_rect rect = mt9t031->rect; @@ -406,8 +397,11 @@ static int mt9t031_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) * try_fmt has put width and height within limits. * S_FMT: use binning and skipping for scaling */ - xskip = mt9t031_skip(&rect.width, pix->width, MT9T031_MAX_WIDTH); - yskip = mt9t031_skip(&rect.height, pix->height, MT9T031_MAX_HEIGHT); + xskip = mt9t031_skip(&rect.width, mf->width, MT9T031_MAX_WIDTH); + yskip = mt9t031_skip(&rect.height, mf->height, MT9T031_MAX_HEIGHT); + + mf->code = V4L2_MBUS_FMT_SBGGR10_1X10; + mf->colorspace = V4L2_COLORSPACE_SRGB; /* mt9t031_set_params() doesn't change width and height */ return mt9t031_set_params(icd, &rect, xskip, yskip); @@ -417,13 +411,15 @@ static int mt9t031_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) * If a user window larger than sensor window is requested, we'll increase the * sensor window. 
*/ -static int mt9t031_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) +static int mt9t031_try_fmt(struct v4l2_subdev *sd, + struct v4l2_mbus_framefmt *mf) { - struct v4l2_pix_format *pix = &f->fmt.pix; - v4l_bound_align_image( - &pix->width, MT9T031_MIN_WIDTH, MT9T031_MAX_WIDTH, 1, - &pix->height, MT9T031_MIN_HEIGHT, MT9T031_MAX_HEIGHT, 1, 0); + &mf->width, MT9T031_MIN_WIDTH, MT9T031_MAX_WIDTH, 1, + &mf->height, MT9T031_MIN_HEIGHT, MT9T031_MAX_HEIGHT, 1, 0); + + mf->code = V4L2_MBUS_FMT_SBGGR10_1X10; + mf->colorspace = V4L2_COLORSPACE_SRGB; return 0; } @@ -684,7 +680,6 @@ static int mt9t031_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) */ static int mt9t031_video_probe(struct i2c_client *client) { - struct soc_camera_device *icd = client->dev.platform_data; struct mt9t031 *mt9t031 = to_mt9t031(client); s32 data; int ret; @@ -699,8 +694,6 @@ static int mt9t031_video_probe(struct i2c_client *client) switch (data) { case 0x1621: mt9t031->model = V4L2_IDENT_MT9T031; - icd->formats = mt9t031_colour_formats; - icd->num_formats = ARRAY_SIZE(mt9t031_colour_formats); break; default: dev_err(&client->dev, @@ -741,14 +734,25 @@ static struct v4l2_subdev_core_ops mt9t031_subdev_core_ops = { #endif }; +static int mt9t031_enum_fmt(struct v4l2_subdev *sd, int index, + enum v4l2_mbus_pixelcode *code) +{ + if (index) + return -EINVAL; + + *code = V4L2_MBUS_FMT_SBGGR10_1X10; + return 0; +} + static struct v4l2_subdev_video_ops mt9t031_subdev_video_ops = { .s_stream = mt9t031_s_stream, - .s_fmt = mt9t031_s_fmt, - .g_fmt = mt9t031_g_fmt, - .try_fmt = mt9t031_try_fmt, + .s_mbus_fmt = mt9t031_s_fmt, + .g_mbus_fmt = mt9t031_g_fmt, + .try_mbus_fmt = mt9t031_try_fmt, .s_crop = mt9t031_s_crop, .g_crop = mt9t031_g_crop, .cropcap = mt9t031_cropcap, + .enum_mbus_fmt = mt9t031_enum_fmt, }; static struct v4l2_subdev_sensor_ops mt9t031_subdev_sensor_ops = { diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c index f60a9a107f20..91df7ec91fb6 100644 --- a/drivers/media/video/mt9v022.c +++ b/drivers/media/video/mt9v022.c @@ -64,41 +64,46 @@ MODULE_PARM_DESC(sensor_type, "Sensor type: \"colour\" or \"monochrome\""); #define MT9V022_COLUMN_SKIP 1 #define MT9V022_ROW_SKIP 4 -static const struct soc_camera_data_format mt9v022_colour_formats[] = { +/* MT9V022 has only one fixed colorspace per pixelcode */ +struct mt9v022_datafmt { + enum v4l2_mbus_pixelcode code; + enum v4l2_colorspace colorspace; +}; + +/* Find a data format by a pixel code in an array */ +static const struct mt9v022_datafmt *mt9v022_find_datafmt( + enum v4l2_mbus_pixelcode code, const struct mt9v022_datafmt *fmt, + int n) +{ + int i; + for (i = 0; i < n; i++) + if (fmt[i].code == code) + return fmt + i; + + return NULL; +} + +static const struct mt9v022_datafmt mt9v022_colour_fmts[] = { /* * Order important: first natively supported, * second supported with a GPIO extender */ - { - .name = "Bayer (sRGB) 10 bit", - .depth = 10, - .fourcc = V4L2_PIX_FMT_SBGGR16, - .colorspace = V4L2_COLORSPACE_SRGB, - }, { - .name = "Bayer (sRGB) 8 bit", - .depth = 8, - .fourcc = V4L2_PIX_FMT_SBGGR8, - .colorspace = V4L2_COLORSPACE_SRGB, - } + {V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_COLORSPACE_SRGB}, + {V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB}, }; -static const struct soc_camera_data_format mt9v022_monochrome_formats[] = { +static const struct mt9v022_datafmt mt9v022_monochrome_fmts[] = { /* Order important - see above */ - { - .name = "Monochrome 10 bit", - .depth = 10, - .fourcc = V4L2_PIX_FMT_Y16, - }, { - .name = "Monochrome 8 
bit", - .depth = 8, - .fourcc = V4L2_PIX_FMT_GREY, - }, + {V4L2_MBUS_FMT_Y10_1X10, V4L2_COLORSPACE_JPEG}, + {V4L2_MBUS_FMT_GREY8_1X8, V4L2_COLORSPACE_JPEG}, }; struct mt9v022 { struct v4l2_subdev subdev; struct v4l2_rect rect; /* Sensor window */ - __u32 fourcc; + const struct mt9v022_datafmt *fmt; + const struct mt9v022_datafmt *fmts; + int num_fmts; int model; /* V4L2_IDENT_MT9V022* codes from v4l2-chip-ident.h */ u16 chip_control; unsigned short y_skip_top; /* Lines to skip at the top */ @@ -275,8 +280,7 @@ static int mt9v022_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) int ret; /* Bayer format - even size lengths */ - if (mt9v022->fourcc == V4L2_PIX_FMT_SBGGR8 || - mt9v022->fourcc == V4L2_PIX_FMT_SBGGR16) { + if (mt9v022->fmts == mt9v022_colour_fmts) { rect.width = ALIGN(rect.width, 2); rect.height = ALIGN(rect.height, 2); /* Let the user play with the starting pixel */ @@ -354,32 +358,32 @@ static int mt9v022_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a) return 0; } -static int mt9v022_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) +static int mt9v022_g_fmt(struct v4l2_subdev *sd, + struct v4l2_mbus_framefmt *mf) { struct i2c_client *client = sd->priv; struct mt9v022 *mt9v022 = to_mt9v022(client); - struct v4l2_pix_format *pix = &f->fmt.pix; - pix->width = mt9v022->rect.width; - pix->height = mt9v022->rect.height; - pix->pixelformat = mt9v022->fourcc; - pix->field = V4L2_FIELD_NONE; - pix->colorspace = V4L2_COLORSPACE_SRGB; + mf->width = mt9v022->rect.width; + mf->height = mt9v022->rect.height; + mf->code = mt9v022->fmt->code; + mf->colorspace = mt9v022->fmt->colorspace; + mf->field = V4L2_FIELD_NONE; return 0; } -static int mt9v022_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) +static int mt9v022_s_fmt(struct v4l2_subdev *sd, + struct v4l2_mbus_framefmt *mf) { struct i2c_client *client = sd->priv; struct mt9v022 *mt9v022 = to_mt9v022(client); - struct v4l2_pix_format *pix = &f->fmt.pix; struct v4l2_crop a = { .c = { .left = mt9v022->rect.left, .top = mt9v022->rect.top, - .width = pix->width, - .height = pix->height, + .width = mf->width, + .height = mf->height, }, }; int ret; @@ -388,14 +392,14 @@ static int mt9v022_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) * The caller provides a supported format, as verified per call to * icd->try_fmt(), datawidth is from our supported format list */ - switch (pix->pixelformat) { - case V4L2_PIX_FMT_GREY: - case V4L2_PIX_FMT_Y16: + switch (mf->code) { + case V4L2_MBUS_FMT_GREY8_1X8: + case V4L2_MBUS_FMT_Y10_1X10: if (mt9v022->model != V4L2_IDENT_MT9V022IX7ATM) return -EINVAL; break; - case V4L2_PIX_FMT_SBGGR8: - case V4L2_PIX_FMT_SBGGR16: + case V4L2_MBUS_FMT_SBGGR8_1X8: + case V4L2_MBUS_FMT_SBGGR10_1X10: if (mt9v022->model != V4L2_IDENT_MT9V022IX7ATC) return -EINVAL; break; @@ -409,27 +413,39 @@ static int mt9v022_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) /* No support for scaling on this camera, just crop. 
*/ ret = mt9v022_s_crop(sd, &a); if (!ret) { - pix->width = mt9v022->rect.width; - pix->height = mt9v022->rect.height; - mt9v022->fourcc = pix->pixelformat; + mf->width = mt9v022->rect.width; + mf->height = mt9v022->rect.height; + mt9v022->fmt = mt9v022_find_datafmt(mf->code, + mt9v022->fmts, mt9v022->num_fmts); + mf->colorspace = mt9v022->fmt->colorspace; } return ret; } -static int mt9v022_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) +static int mt9v022_try_fmt(struct v4l2_subdev *sd, + struct v4l2_mbus_framefmt *mf) { struct i2c_client *client = sd->priv; struct mt9v022 *mt9v022 = to_mt9v022(client); - struct v4l2_pix_format *pix = &f->fmt.pix; - int align = pix->pixelformat == V4L2_PIX_FMT_SBGGR8 || - pix->pixelformat == V4L2_PIX_FMT_SBGGR16; + const struct mt9v022_datafmt *fmt; + int align = mf->code == V4L2_MBUS_FMT_SBGGR8_1X8 || + mf->code == V4L2_MBUS_FMT_SBGGR10_1X10; - v4l_bound_align_image(&pix->width, MT9V022_MIN_WIDTH, + v4l_bound_align_image(&mf->width, MT9V022_MIN_WIDTH, MT9V022_MAX_WIDTH, align, - &pix->height, MT9V022_MIN_HEIGHT + mt9v022->y_skip_top, + &mf->height, MT9V022_MIN_HEIGHT + mt9v022->y_skip_top, MT9V022_MAX_HEIGHT + mt9v022->y_skip_top, align, 0); + fmt = mt9v022_find_datafmt(mf->code, mt9v022->fmts, + mt9v022->num_fmts); + if (!fmt) { + fmt = mt9v022->fmt; + mf->code = fmt->code; + } + + mf->colorspace = fmt->colorspace; + return 0; } @@ -749,17 +765,17 @@ static int mt9v022_video_probe(struct soc_camera_device *icd, !strcmp("color", sensor_type))) { ret = reg_write(client, MT9V022_PIXEL_OPERATION_MODE, 4 | 0x11); mt9v022->model = V4L2_IDENT_MT9V022IX7ATC; - icd->formats = mt9v022_colour_formats; + mt9v022->fmts = mt9v022_colour_fmts; } else { ret = reg_write(client, MT9V022_PIXEL_OPERATION_MODE, 0x11); mt9v022->model = V4L2_IDENT_MT9V022IX7ATM; - icd->formats = mt9v022_monochrome_formats; + mt9v022->fmts = mt9v022_monochrome_fmts; } if (ret < 0) goto ei2c; - icd->num_formats = 0; + mt9v022->num_fmts = 0; /* * This is a 10bit sensor, so by default we only allow 10bit. @@ -772,14 +788,14 @@ static int mt9v022_video_probe(struct soc_camera_device *icd, flags = SOCAM_DATAWIDTH_10; if (flags & SOCAM_DATAWIDTH_10) - icd->num_formats++; + mt9v022->num_fmts++; else - icd->formats++; + mt9v022->fmts++; if (flags & SOCAM_DATAWIDTH_8) - icd->num_formats++; + mt9v022->num_fmts++; - mt9v022->fourcc = icd->formats->fourcc; + mt9v022->fmt = &mt9v022->fmts[0]; dev_info(&client->dev, "Detected a MT9V022 chip ID %x, %s sensor\n", data, mt9v022->model == V4L2_IDENT_MT9V022IX7ATM ? 
@@ -823,14 +839,28 @@ static struct v4l2_subdev_core_ops mt9v022_subdev_core_ops = { #endif }; +static int mt9v022_enum_fmt(struct v4l2_subdev *sd, int index, + enum v4l2_mbus_pixelcode *code) +{ + struct i2c_client *client = sd->priv; + struct mt9v022 *mt9v022 = to_mt9v022(client); + + if ((unsigned int)index >= mt9v022->num_fmts) + return -EINVAL; + + *code = mt9v022->fmts[index].code; + return 0; +} + static struct v4l2_subdev_video_ops mt9v022_subdev_video_ops = { .s_stream = mt9v022_s_stream, - .s_fmt = mt9v022_s_fmt, - .g_fmt = mt9v022_g_fmt, - .try_fmt = mt9v022_try_fmt, + .s_mbus_fmt = mt9v022_s_fmt, + .g_mbus_fmt = mt9v022_g_fmt, + .try_mbus_fmt = mt9v022_try_fmt, .s_crop = mt9v022_s_crop, .g_crop = mt9v022_g_crop, .cropcap = mt9v022_cropcap, + .enum_mbus_fmt = mt9v022_enum_fmt, }; static struct v4l2_subdev_sensor_ops mt9v022_subdev_sensor_ops = { diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c index 4c1a439373c5..2ba14fb5b031 100644 --- a/drivers/media/video/mx1_camera.c +++ b/drivers/media/video/mx1_camera.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include @@ -94,9 +95,9 @@ /* buffer for one video frame */ struct mx1_buffer { /* common v4l buffer stuff -- must be first */ - struct videobuf_buffer vb; - const struct soc_camera_data_format *fmt; - int inwork; + struct videobuf_buffer vb; + enum v4l2_mbus_pixelcode code; + int inwork; }; /* @@ -128,9 +129,13 @@ static int mx1_videobuf_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size) { struct soc_camera_device *icd = vq->priv_data; + int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width, + icd->current_fmt->host_fmt); - *size = icd->user_width * icd->user_height * - ((icd->current_fmt->depth + 7) >> 3); + if (bytes_per_line < 0) + return bytes_per_line; + + *size = bytes_per_line * icd->user_height; if (!*count) *count = 32; @@ -169,6 +174,11 @@ static int mx1_videobuf_prepare(struct videobuf_queue *vq, struct soc_camera_device *icd = vq->priv_data; struct mx1_buffer *buf = container_of(vb, struct mx1_buffer, vb); int ret; + int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width, + icd->current_fmt->host_fmt); + + if (bytes_per_line < 0) + return bytes_per_line; dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__, vb, vb->baddr, vb->bsize); @@ -184,18 +194,18 @@ static int mx1_videobuf_prepare(struct videobuf_queue *vq, */ buf->inwork = 1; - if (buf->fmt != icd->current_fmt || + if (buf->code != icd->current_fmt->code || vb->width != icd->user_width || vb->height != icd->user_height || vb->field != field) { - buf->fmt = icd->current_fmt; + buf->code = icd->current_fmt->code; vb->width = icd->user_width; vb->height = icd->user_height; vb->field = field; vb->state = VIDEOBUF_NEEDS_INIT; } - vb->size = vb->width * vb->height * ((buf->fmt->depth + 7) >> 3); + vb->size = bytes_per_line * vb->height; if (0 != vb->baddr && vb->bsize < vb->size) { ret = -EINVAL; goto out; @@ -497,12 +507,10 @@ static int mx1_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt) /* MX1 supports only 8bit buswidth */ common_flags = soc_camera_bus_param_compatible(camera_flags, - CSI_BUS_FLAGS); + CSI_BUS_FLAGS); if (!common_flags) return -EINVAL; - icd->buswidth = 8; - /* Make choises, based on platform choice */ if ((common_flags & SOCAM_VSYNC_ACTIVE_HIGH) && (common_flags & SOCAM_VSYNC_ACTIVE_LOW)) { @@ -555,7 +563,8 @@ static int mx1_camera_set_fmt(struct soc_camera_device *icd, struct v4l2_subdev *sd = soc_camera_to_subdev(icd); const 
struct soc_camera_format_xlate *xlate; struct v4l2_pix_format *pix = &f->fmt.pix; - int ret; + struct v4l2_mbus_framefmt mf; + int ret, buswidth; xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); if (!xlate) { @@ -564,12 +573,33 @@ static int mx1_camera_set_fmt(struct soc_camera_device *icd, return -EINVAL; } - ret = v4l2_subdev_call(sd, video, s_fmt, f); - if (!ret) { - icd->buswidth = xlate->buswidth; - icd->current_fmt = xlate->host_fmt; + buswidth = xlate->host_fmt->bits_per_sample; + if (buswidth > 8) { + dev_warn(icd->dev.parent, + "bits-per-sample %d for format %x unsupported\n", + buswidth, pix->pixelformat); + return -EINVAL; } + mf.width = pix->width; + mf.height = pix->height; + mf.field = pix->field; + mf.colorspace = pix->colorspace; + mf.code = xlate->code; + + ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf); + if (ret < 0) + return ret; + + if (mf.code != xlate->code) + return -EINVAL; + + pix->width = mf.width; + pix->height = mf.height; + pix->field = mf.field; + pix->colorspace = mf.colorspace; + icd->current_fmt = xlate; + return ret; } @@ -577,10 +607,36 @@ static int mx1_camera_try_fmt(struct soc_camera_device *icd, struct v4l2_format *f) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); + const struct soc_camera_format_xlate *xlate; + struct v4l2_pix_format *pix = &f->fmt.pix; + struct v4l2_mbus_framefmt mf; + int ret; /* TODO: limit to mx1 hardware capabilities */ + xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); + if (!xlate) { + dev_warn(icd->dev.parent, "Format %x not found\n", + pix->pixelformat); + return -EINVAL; + } + + mf.width = pix->width; + mf.height = pix->height; + mf.field = pix->field; + mf.colorspace = pix->colorspace; + mf.code = xlate->code; + /* limit to sensor capabilities */ - return v4l2_subdev_call(sd, video, try_fmt, f); + ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf); + if (ret < 0) + return ret; + + pix->width = mf.width; + pix->height = mf.height; + pix->field = mf.field; + pix->colorspace = mf.colorspace; + + return 0; } static int mx1_camera_reqbufs(struct soc_camera_file *icf, diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c index ae7d48324493..bd297f567dc7 100644 --- a/drivers/media/video/mx3_camera.c +++ b/drivers/media/video/mx3_camera.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -63,7 +64,7 @@ struct mx3_camera_buffer { /* common v4l buffer stuff -- must be first */ struct videobuf_buffer vb; - const struct soc_camera_data_format *fmt; + enum v4l2_mbus_pixelcode code; /* One descriptot per scatterlist (per frame) */ struct dma_async_tx_descriptor *txd; @@ -118,8 +119,6 @@ struct dma_chan_request { enum ipu_channel id; }; -static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt); - static u32 csi_reg_read(struct mx3_camera_dev *mx3, off_t reg) { return __raw_readl(mx3->base + reg); @@ -211,17 +210,16 @@ static int mx3_videobuf_setup(struct videobuf_queue *vq, unsigned int *count, struct soc_camera_device *icd = vq->priv_data; struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct mx3_camera_dev *mx3_cam = ici->priv; - /* - * bits-per-pixel (depth) as specified in camera's pixel format does - * not necessarily match what the camera interface writes to RAM, but - * it should be good enough for now. 
- */ - unsigned int bpp = DIV_ROUND_UP(icd->current_fmt->depth, 8); + int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width, + icd->current_fmt->host_fmt); + + if (bytes_per_line < 0) + return bytes_per_line; if (!mx3_cam->idmac_channel[0]) return -EINVAL; - *size = icd->user_width * icd->user_height * bpp; + *size = bytes_per_line * icd->user_height; if (!*count) *count = 32; @@ -241,21 +239,26 @@ static int mx3_videobuf_prepare(struct videobuf_queue *vq, struct mx3_camera_dev *mx3_cam = ici->priv; struct mx3_camera_buffer *buf = container_of(vb, struct mx3_camera_buffer, vb); - /* current_fmt _must_ always be set */ - size_t new_size = icd->user_width * icd->user_height * - ((icd->current_fmt->depth + 7) >> 3); + size_t new_size; int ret; + int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width, + icd->current_fmt->host_fmt); + + if (bytes_per_line < 0) + return bytes_per_line; + + new_size = bytes_per_line * icd->user_height; /* * I think, in buf_prepare you only have to protect global data, * the actual buffer is yours */ - if (buf->fmt != icd->current_fmt || + if (buf->code != icd->current_fmt->code || vb->width != icd->user_width || vb->height != icd->user_height || vb->field != field) { - buf->fmt = icd->current_fmt; + buf->code = icd->current_fmt->code; vb->width = icd->user_width; vb->height = icd->user_height; vb->field = field; @@ -348,13 +351,13 @@ static void mx3_videobuf_queue(struct videobuf_queue *vq, struct dma_async_tx_descriptor *txd = buf->txd; struct idmac_channel *ichan = to_idmac_chan(txd->chan); struct idmac_video_param *video = &ichan->params.video; - const struct soc_camera_data_format *data_fmt = icd->current_fmt; dma_cookie_t cookie; + u32 fourcc = icd->current_fmt->host_fmt->fourcc; BUG_ON(!irqs_disabled()); /* This is the configuration of one sg-element */ - video->out_pixel_fmt = fourcc_to_ipu_pix(data_fmt->fourcc); + video->out_pixel_fmt = fourcc_to_ipu_pix(fourcc); video->out_width = icd->user_width; video->out_height = icd->user_height; video->out_stride = icd->user_width; @@ -568,28 +571,33 @@ static int test_platform_param(struct mx3_camera_dev *mx3_cam, * If requested data width is supported by the platform, use it or any * possible lower value - i.MX31 is smart enough to schift bits */ + if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_15) + *flags |= SOCAM_DATAWIDTH_15 | SOCAM_DATAWIDTH_10 | + SOCAM_DATAWIDTH_8 | SOCAM_DATAWIDTH_4; + else if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_10) + *flags |= SOCAM_DATAWIDTH_10 | SOCAM_DATAWIDTH_8 | + SOCAM_DATAWIDTH_4; + else if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_8) + *flags |= SOCAM_DATAWIDTH_8 | SOCAM_DATAWIDTH_4; + else if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_4) + *flags |= SOCAM_DATAWIDTH_4; + switch (buswidth) { case 15: - if (!(mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_15)) + if (!(*flags & SOCAM_DATAWIDTH_15)) return -EINVAL; - *flags |= SOCAM_DATAWIDTH_15 | SOCAM_DATAWIDTH_10 | - SOCAM_DATAWIDTH_8 | SOCAM_DATAWIDTH_4; break; case 10: - if (!(mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_10)) + if (!(*flags & SOCAM_DATAWIDTH_10)) return -EINVAL; - *flags |= SOCAM_DATAWIDTH_10 | SOCAM_DATAWIDTH_8 | - SOCAM_DATAWIDTH_4; break; case 8: - if (!(mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_8)) + if (!(*flags & SOCAM_DATAWIDTH_8)) return -EINVAL; - *flags |= SOCAM_DATAWIDTH_8 | SOCAM_DATAWIDTH_4; break; case 4: - if (!(mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_4)) + if (!(*flags & SOCAM_DATAWIDTH_4)) return -EINVAL; - *flags |= SOCAM_DATAWIDTH_4; break; 
default: dev_warn(mx3_cam->soc_host.v4l2_dev.dev, @@ -638,91 +646,92 @@ static bool chan_filter(struct dma_chan *chan, void *arg) pdata->dma_dev == chan->device->dev; } -static const struct soc_camera_data_format mx3_camera_formats[] = { +static const struct soc_mbus_pixelfmt mx3_camera_formats[] = { { - .name = "Bayer (sRGB) 8 bit", - .depth = 8, - .fourcc = V4L2_PIX_FMT_SBGGR8, - .colorspace = V4L2_COLORSPACE_SRGB, + .fourcc = V4L2_PIX_FMT_SBGGR8, + .name = "Bayer BGGR (sRGB) 8 bit", + .bits_per_sample = 8, + .packing = SOC_MBUS_PACKING_NONE, + .order = SOC_MBUS_ORDER_LE, }, { - .name = "Monochrome 8 bit", - .depth = 8, - .fourcc = V4L2_PIX_FMT_GREY, - .colorspace = V4L2_COLORSPACE_JPEG, + .fourcc = V4L2_PIX_FMT_GREY, + .name = "Monochrome 8 bit", + .bits_per_sample = 8, + .packing = SOC_MBUS_PACKING_NONE, + .order = SOC_MBUS_ORDER_LE, }, }; -static bool buswidth_supported(struct soc_camera_host *ici, int depth) +/* This will be corrected as we get more formats */ +static bool mx3_camera_packing_supported(const struct soc_mbus_pixelfmt *fmt) { - struct mx3_camera_dev *mx3_cam = ici->priv; - - switch (depth) { - case 4: - return !!(mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_4); - case 8: - return !!(mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_8); - case 10: - return !!(mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_10); - case 15: - return !!(mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_15); - } - return false; + return fmt->packing == SOC_MBUS_PACKING_NONE || + (fmt->bits_per_sample == 8 && + fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) || + (fmt->bits_per_sample > 8 && + fmt->packing == SOC_MBUS_PACKING_EXTEND16); } static int mx3_camera_get_formats(struct soc_camera_device *icd, int idx, struct soc_camera_format_xlate *xlate) { - struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); - int formats = 0, buswidth, ret; + struct v4l2_subdev *sd = soc_camera_to_subdev(icd); + struct device *dev = icd->dev.parent; + int formats = 0, ret; + enum v4l2_mbus_pixelcode code; + const struct soc_mbus_pixelfmt *fmt; - buswidth = icd->formats[idx].depth; + ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code); + if (ret < 0) + /* No more formats */ + return 0; - if (!buswidth_supported(ici, buswidth)) + fmt = soc_mbus_get_fmtdesc(code); + if (!fmt) { + dev_err(icd->dev.parent, + "Invalid format code #%d: %d\n", idx, code); return 0; + } - ret = mx3_camera_try_bus_param(icd, buswidth); + /* This also checks support for the requested bits-per-sample */ + ret = mx3_camera_try_bus_param(icd, fmt->bits_per_sample); if (ret < 0) return 0; - switch (icd->formats[idx].fourcc) { - case V4L2_PIX_FMT_SGRBG10: + switch (code) { + case V4L2_MBUS_FMT_SBGGR10_1X10: formats++; if (xlate) { - xlate->host_fmt = &mx3_camera_formats[0]; - xlate->cam_fmt = icd->formats + idx; - xlate->buswidth = buswidth; + xlate->host_fmt = &mx3_camera_formats[0]; + xlate->code = code; xlate++; - dev_dbg(icd->dev.parent, - "Providing format %s using %s\n", - mx3_camera_formats[0].name, - icd->formats[idx].name); + dev_dbg(dev, "Providing format %s using code %d\n", + mx3_camera_formats[0].name, code); } - goto passthrough; - case V4L2_PIX_FMT_Y16: + break; + case V4L2_MBUS_FMT_Y10_1X10: formats++; if (xlate) { - xlate->host_fmt = &mx3_camera_formats[1]; - xlate->cam_fmt = icd->formats + idx; - xlate->buswidth = buswidth; + xlate->host_fmt = &mx3_camera_formats[1]; + xlate->code = code; xlate++; - dev_dbg(icd->dev.parent, - "Providing format %s using %s\n", - mx3_camera_formats[0].name, - 
icd->formats[idx].name); + dev_dbg(dev, "Providing format %s using code %d\n", + mx3_camera_formats[1].name, code); } + break; default: -passthrough: - /* Generic pass-through */ - formats++; - if (xlate) { - xlate->host_fmt = icd->formats + idx; - xlate->cam_fmt = icd->formats + idx; - xlate->buswidth = buswidth; - xlate++; - dev_dbg(icd->dev.parent, - "Providing format %s in pass-through mode\n", - icd->formats[idx].name); - } + if (!mx3_camera_packing_supported(fmt)) + return 0; + } + + /* Generic pass-through */ + formats++; + if (xlate) { + xlate->host_fmt = fmt; + xlate->code = code; + xlate++; + dev_dbg(dev, "Providing format %x in pass-through mode\n", + xlate->host_fmt->fourcc); } return formats; @@ -806,8 +815,7 @@ static int mx3_camera_set_crop(struct soc_camera_device *icd, struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct mx3_camera_dev *mx3_cam = ici->priv; struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - struct v4l2_format f = {.type = V4L2_BUF_TYPE_VIDEO_CAPTURE}; - struct v4l2_pix_format *pix = &f.fmt.pix; + struct v4l2_mbus_framefmt mf; int ret; soc_camera_limit_side(&rect->left, &rect->width, 0, 2, 4096); @@ -818,19 +826,19 @@ static int mx3_camera_set_crop(struct soc_camera_device *icd, return ret; /* The capture device might have changed its output */ - ret = v4l2_subdev_call(sd, video, g_fmt, &f); + ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf); if (ret < 0) return ret; - if (pix->width & 7) { + if (mf.width & 7) { /* Ouch! We can only handle 8-byte aligned width... */ - stride_align(&pix->width); - ret = v4l2_subdev_call(sd, video, s_fmt, &f); + stride_align(&mf.width); + ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf); if (ret < 0) return ret; } - if (pix->width != icd->user_width || pix->height != icd->user_height) { + if (mf.width != icd->user_width || mf.height != icd->user_height) { /* * We now know pixel formats and can decide upon DMA-channel(s) * So far only direct camera-to-memory is supported @@ -841,14 +849,14 @@ static int mx3_camera_set_crop(struct soc_camera_device *icd, return ret; } - configure_geometry(mx3_cam, pix->width, pix->height); + configure_geometry(mx3_cam, mf.width, mf.height); } dev_dbg(icd->dev.parent, "Sensor cropped %dx%d\n", - pix->width, pix->height); + mf.width, mf.height); - icd->user_width = pix->width; - icd->user_height = pix->height; + icd->user_width = mf.width; + icd->user_height = mf.height; return ret; } @@ -861,6 +869,7 @@ static int mx3_camera_set_fmt(struct soc_camera_device *icd, struct v4l2_subdev *sd = soc_camera_to_subdev(icd); const struct soc_camera_format_xlate *xlate; struct v4l2_pix_format *pix = &f->fmt.pix; + struct v4l2_mbus_framefmt mf; int ret; xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); @@ -885,11 +894,24 @@ static int mx3_camera_set_fmt(struct soc_camera_device *icd, configure_geometry(mx3_cam, pix->width, pix->height); - ret = v4l2_subdev_call(sd, video, s_fmt, f); - if (!ret) { - icd->buswidth = xlate->buswidth; - icd->current_fmt = xlate->host_fmt; - } + mf.width = pix->width; + mf.height = pix->height; + mf.field = pix->field; + mf.colorspace = pix->colorspace; + mf.code = xlate->code; + + ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf); + if (ret < 0) + return ret; + + if (mf.code != xlate->code) + return -EINVAL; + + pix->width = mf.width; + pix->height = mf.height; + pix->field = mf.field; + pix->colorspace = mf.colorspace; + icd->current_fmt = xlate; dev_dbg(icd->dev.parent, "Sensor set %dx%d\n", pix->width, pix->height); @@ -902,8 +924,8 @@ 
static int mx3_camera_try_fmt(struct soc_camera_device *icd, struct v4l2_subdev *sd = soc_camera_to_subdev(icd); const struct soc_camera_format_xlate *xlate; struct v4l2_pix_format *pix = &f->fmt.pix; + struct v4l2_mbus_framefmt mf; __u32 pixfmt = pix->pixelformat; - enum v4l2_field field; int ret; xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); @@ -918,23 +940,37 @@ static int mx3_camera_try_fmt(struct soc_camera_device *icd, if (pix->width > 4096) pix->width = 4096; - pix->bytesperline = pix->width * - DIV_ROUND_UP(xlate->host_fmt->depth, 8); + pix->bytesperline = soc_mbus_bytes_per_line(pix->width, + xlate->host_fmt); + if (pix->bytesperline < 0) + return pix->bytesperline; pix->sizeimage = pix->height * pix->bytesperline; - /* camera has to see its format, but the user the original one */ - pix->pixelformat = xlate->cam_fmt->fourcc; /* limit to sensor capabilities */ - ret = v4l2_subdev_call(sd, video, try_fmt, f); - pix->pixelformat = xlate->host_fmt->fourcc; + mf.width = pix->width; + mf.height = pix->height; + mf.field = pix->field; + mf.colorspace = pix->colorspace; + mf.code = xlate->code; - field = pix->field; + ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf); + if (ret < 0) + return ret; - if (field == V4L2_FIELD_ANY) { + pix->width = mf.width; + pix->height = mf.height; + pix->colorspace = mf.colorspace; + + switch (mf.field) { + case V4L2_FIELD_ANY: pix->field = V4L2_FIELD_NONE; - } else if (field != V4L2_FIELD_NONE) { - dev_err(icd->dev.parent, "Field type %d unsupported.\n", field); - return -EINVAL; + break; + case V4L2_FIELD_NONE: + break; + default: + dev_err(icd->dev.parent, "Field type %d unsupported.\n", + mf.field); + ret = -EINVAL; } return ret; @@ -970,18 +1006,26 @@ static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt) struct mx3_camera_dev *mx3_cam = ici->priv; unsigned long bus_flags, camera_flags, common_flags; u32 dw, sens_conf; - int ret = test_platform_param(mx3_cam, icd->buswidth, &bus_flags); + const struct soc_mbus_pixelfmt *fmt; + int buswidth; + int ret; const struct soc_camera_format_xlate *xlate; struct device *dev = icd->dev.parent; + fmt = soc_mbus_get_fmtdesc(icd->current_fmt->code); + if (!fmt) + return -EINVAL; + + buswidth = fmt->bits_per_sample; + ret = test_platform_param(mx3_cam, buswidth, &bus_flags); + xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); if (!xlate) { dev_warn(dev, "Format %x not found\n", pixfmt); return -EINVAL; } - dev_dbg(dev, "requested bus width %d bit: %d\n", - icd->buswidth, ret); + dev_dbg(dev, "requested bus width %d bit: %d\n", buswidth, ret); if (ret < 0) return ret; @@ -1082,7 +1126,7 @@ static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt) sens_conf |= 1 << CSI_SENS_CONF_DATA_POL_SHIFT; /* Just do what we're asked to do */ - switch (xlate->host_fmt->depth) { + switch (xlate->host_fmt->bits_per_sample) { case 4: dw = 0 << CSI_SENS_CONF_DATA_WIDTH_SHIFT; break; diff --git a/drivers/media/video/ov772x.c b/drivers/media/video/ov772x.c index dcb690cb5ae7..3a45e945a528 100644 --- a/drivers/media/video/ov772x.c +++ b/drivers/media/video/ov772x.c @@ -24,6 +24,7 @@ #include #include #include +#include #include /* @@ -382,7 +383,8 @@ struct regval_list { }; struct ov772x_color_format { - const struct soc_camera_data_format *format; + enum v4l2_mbus_pixelcode code; + enum v4l2_colorspace colorspace; u8 dsp3; u8 com3; u8 com7; @@ -399,7 +401,7 @@ struct ov772x_win_size { struct ov772x_priv { struct v4l2_subdev subdev; struct ov772x_camera_info *info; - const struct 
ov772x_color_format *fmt; + const struct ov772x_color_format *cfmt; const struct ov772x_win_size *win; int model; unsigned short flag_vflip:1; @@ -434,93 +436,57 @@ static const struct regval_list ov772x_vga_regs[] = { }; /* - * supported format list - */ - -#define SETFOURCC(type) .name = (#type), .fourcc = (V4L2_PIX_FMT_ ## type) -static const struct soc_camera_data_format ov772x_fmt_lists[] = { - { - SETFOURCC(YUYV), - .depth = 16, - .colorspace = V4L2_COLORSPACE_JPEG, - }, - { - SETFOURCC(YVYU), - .depth = 16, - .colorspace = V4L2_COLORSPACE_JPEG, - }, - { - SETFOURCC(UYVY), - .depth = 16, - .colorspace = V4L2_COLORSPACE_JPEG, - }, - { - SETFOURCC(RGB555), - .depth = 16, - .colorspace = V4L2_COLORSPACE_SRGB, - }, - { - SETFOURCC(RGB555X), - .depth = 16, - .colorspace = V4L2_COLORSPACE_SRGB, - }, - { - SETFOURCC(RGB565), - .depth = 16, - .colorspace = V4L2_COLORSPACE_SRGB, - }, - { - SETFOURCC(RGB565X), - .depth = 16, - .colorspace = V4L2_COLORSPACE_SRGB, - }, -}; - -/* - * color format list + * supported color format list */ static const struct ov772x_color_format ov772x_cfmts[] = { { - .format = &ov772x_fmt_lists[0], - .dsp3 = 0x0, - .com3 = SWAP_YUV, - .com7 = OFMT_YUV, + .code = V4L2_MBUS_FMT_YUYV8_2X8_LE, + .colorspace = V4L2_COLORSPACE_JPEG, + .dsp3 = 0x0, + .com3 = SWAP_YUV, + .com7 = OFMT_YUV, }, { - .format = &ov772x_fmt_lists[1], - .dsp3 = UV_ON, - .com3 = SWAP_YUV, - .com7 = OFMT_YUV, + .code = V4L2_MBUS_FMT_YVYU8_2X8_LE, + .colorspace = V4L2_COLORSPACE_JPEG, + .dsp3 = UV_ON, + .com3 = SWAP_YUV, + .com7 = OFMT_YUV, }, { - .format = &ov772x_fmt_lists[2], - .dsp3 = 0x0, - .com3 = 0x0, - .com7 = OFMT_YUV, + .code = V4L2_MBUS_FMT_YUYV8_2X8_BE, + .colorspace = V4L2_COLORSPACE_JPEG, + .dsp3 = 0x0, + .com3 = 0x0, + .com7 = OFMT_YUV, }, { - .format = &ov772x_fmt_lists[3], - .dsp3 = 0x0, - .com3 = SWAP_RGB, - .com7 = FMT_RGB555 | OFMT_RGB, + .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE, + .colorspace = V4L2_COLORSPACE_SRGB, + .dsp3 = 0x0, + .com3 = SWAP_RGB, + .com7 = FMT_RGB555 | OFMT_RGB, }, { - .format = &ov772x_fmt_lists[4], - .dsp3 = 0x0, - .com3 = 0x0, - .com7 = FMT_RGB555 | OFMT_RGB, + .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE, + .colorspace = V4L2_COLORSPACE_SRGB, + .dsp3 = 0x0, + .com3 = 0x0, + .com7 = FMT_RGB555 | OFMT_RGB, }, { - .format = &ov772x_fmt_lists[5], - .dsp3 = 0x0, - .com3 = SWAP_RGB, - .com7 = FMT_RGB565 | OFMT_RGB, + .code = V4L2_MBUS_FMT_RGB565_2X8_LE, + .colorspace = V4L2_COLORSPACE_SRGB, + .dsp3 = 0x0, + .com3 = SWAP_RGB, + .com7 = FMT_RGB565 | OFMT_RGB, }, { - .format = &ov772x_fmt_lists[6], - .dsp3 = 0x0, - .com3 = 0x0, - .com7 = FMT_RGB565 | OFMT_RGB, + .code = V4L2_MBUS_FMT_RGB565_2X8_BE, + .colorspace = V4L2_COLORSPACE_SRGB, + .dsp3 = 0x0, + .com3 = 0x0, + .com7 = FMT_RGB565 | OFMT_RGB, }, }; @@ -642,15 +608,15 @@ static int ov772x_s_stream(struct v4l2_subdev *sd, int enable) return 0; } - if (!priv->win || !priv->fmt) { + if (!priv->win || !priv->cfmt) { dev_err(&client->dev, "norm or win select error\n"); return -EPERM; } ov772x_mask_set(client, COM2, SOFT_SLEEP_MODE, 0); - dev_dbg(&client->dev, "format %s, win %s\n", - priv->fmt->format->name, priv->win->name); + dev_dbg(&client->dev, "format %d, win %s\n", + priv->cfmt->code, priv->win->name); return 0; } @@ -806,8 +772,8 @@ static const struct ov772x_win_size *ov772x_select_win(u32 width, u32 height) return win; } -static int ov772x_set_params(struct i2c_client *client, - u32 *width, u32 *height, u32 pixfmt) +static int ov772x_set_params(struct i2c_client *client, u32 *width, u32 *height, + enum 
v4l2_mbus_pixelcode code) { struct ov772x_priv *priv = to_ov772x(client); int ret = -EINVAL; @@ -817,14 +783,14 @@ static int ov772x_set_params(struct i2c_client *client, /* * select format */ - priv->fmt = NULL; + priv->cfmt = NULL; for (i = 0; i < ARRAY_SIZE(ov772x_cfmts); i++) { - if (pixfmt == ov772x_cfmts[i].format->fourcc) { - priv->fmt = ov772x_cfmts + i; + if (code == ov772x_cfmts[i].code) { + priv->cfmt = ov772x_cfmts + i; break; } } - if (!priv->fmt) + if (!priv->cfmt) goto ov772x_set_fmt_error; /* @@ -894,7 +860,7 @@ static int ov772x_set_params(struct i2c_client *client, /* * set DSP_CTRL3 */ - val = priv->fmt->dsp3; + val = priv->cfmt->dsp3; if (val) { ret = ov772x_mask_set(client, DSP_CTRL3, UV_MASK, val); @@ -905,7 +871,7 @@ static int ov772x_set_params(struct i2c_client *client, /* * set COM3 */ - val = priv->fmt->com3; + val = priv->cfmt->com3; if (priv->info->flags & OV772X_FLAG_VFLIP) val |= VFLIP_IMG; if (priv->info->flags & OV772X_FLAG_HFLIP) @@ -923,9 +889,9 @@ static int ov772x_set_params(struct i2c_client *client, /* * set COM7 */ - val = priv->win->com7_bit | priv->fmt->com7; + val = priv->win->com7_bit | priv->cfmt->com7; ret = ov772x_mask_set(client, - COM7, (SLCT_MASK | FMT_MASK | OFMT_MASK), + COM7, SLCT_MASK | FMT_MASK | OFMT_MASK, val); if (ret < 0) goto ov772x_set_fmt_error; @@ -951,7 +917,7 @@ ov772x_set_fmt_error: ov772x_reset(client); priv->win = NULL; - priv->fmt = NULL; + priv->cfmt = NULL; return ret; } @@ -981,54 +947,79 @@ static int ov772x_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a) return 0; } -static int ov772x_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) +static int ov772x_g_fmt(struct v4l2_subdev *sd, + struct v4l2_mbus_framefmt *mf) { struct i2c_client *client = sd->priv; struct ov772x_priv *priv = to_ov772x(client); - struct v4l2_pix_format *pix = &f->fmt.pix; - if (!priv->win || !priv->fmt) { + if (!priv->win || !priv->cfmt) { u32 width = VGA_WIDTH, height = VGA_HEIGHT; int ret = ov772x_set_params(client, &width, &height, - V4L2_PIX_FMT_YUYV); + V4L2_MBUS_FMT_YUYV8_2X8_LE); if (ret < 0) return ret; } - f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - - pix->width = priv->win->width; - pix->height = priv->win->height; - pix->pixelformat = priv->fmt->format->fourcc; - pix->colorspace = priv->fmt->format->colorspace; - pix->field = V4L2_FIELD_NONE; + mf->width = priv->win->width; + mf->height = priv->win->height; + mf->code = priv->cfmt->code; + mf->colorspace = priv->cfmt->colorspace; + mf->field = V4L2_FIELD_NONE; return 0; } -static int ov772x_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) +static int ov772x_s_fmt(struct v4l2_subdev *sd, + struct v4l2_mbus_framefmt *mf) { struct i2c_client *client = sd->priv; - struct v4l2_pix_format *pix = &f->fmt.pix; + struct ov772x_priv *priv = to_ov772x(client); + int ret = ov772x_set_params(client, &mf->width, &mf->height, + mf->code); + + if (!ret) + mf->colorspace = priv->cfmt->colorspace; - return ov772x_set_params(client, &pix->width, &pix->height, - pix->pixelformat); + return ret; } static int ov772x_try_fmt(struct v4l2_subdev *sd, - struct v4l2_format *f) + struct v4l2_mbus_framefmt *mf) { - struct v4l2_pix_format *pix = &f->fmt.pix; + struct i2c_client *client = sd->priv; + struct ov772x_priv *priv = to_ov772x(client); const struct ov772x_win_size *win; + int i; /* * select suitable win */ - win = ov772x_select_win(pix->width, pix->height); + win = ov772x_select_win(mf->width, mf->height); + + mf->width = win->width; + mf->height = win->height; + mf->field = V4L2_FIELD_NONE; - 
pix->width = win->width; - pix->height = win->height; - pix->field = V4L2_FIELD_NONE; + for (i = 0; i < ARRAY_SIZE(ov772x_cfmts); i++) + if (mf->code == ov772x_cfmts[i].code) + break; + + if (i == ARRAY_SIZE(ov772x_cfmts)) { + /* Unsupported format requested. Propose either */ + if (priv->cfmt) { + /* the current one or */ + mf->colorspace = priv->cfmt->colorspace; + mf->code = priv->cfmt->code; + } else { + /* the default one */ + mf->colorspace = ov772x_cfmts[0].colorspace; + mf->code = ov772x_cfmts[0].code; + } + } else { + /* Also return the colorspace */ + mf->colorspace = ov772x_cfmts[i].colorspace; + } return 0; } @@ -1057,9 +1048,6 @@ static int ov772x_video_probe(struct soc_camera_device *icd, return -ENODEV; } - icd->formats = ov772x_fmt_lists; - icd->num_formats = ARRAY_SIZE(ov772x_fmt_lists); - /* * check and show product ID and manufacturer ID */ @@ -1109,13 +1097,24 @@ static struct v4l2_subdev_core_ops ov772x_subdev_core_ops = { #endif }; +static int ov772x_enum_fmt(struct v4l2_subdev *sd, int index, + enum v4l2_mbus_pixelcode *code) +{ + if ((unsigned int)index >= ARRAY_SIZE(ov772x_cfmts)) + return -EINVAL; + + *code = ov772x_cfmts[index].code; + return 0; +} + static struct v4l2_subdev_video_ops ov772x_subdev_video_ops = { .s_stream = ov772x_s_stream, - .g_fmt = ov772x_g_fmt, - .s_fmt = ov772x_s_fmt, - .try_fmt = ov772x_try_fmt, + .g_mbus_fmt = ov772x_g_fmt, + .s_mbus_fmt = ov772x_s_fmt, + .try_mbus_fmt = ov772x_try_fmt, .cropcap = ov772x_cropcap, .g_crop = ov772x_g_crop, + .enum_mbus_fmt = ov772x_enum_fmt, }; static struct v4l2_subdev_ops ov772x_subdev_ops = { diff --git a/drivers/media/video/ov9640.c b/drivers/media/video/ov9640.c index c81ae2192887..47bf60ceb7a2 100644 --- a/drivers/media/video/ov9640.c +++ b/drivers/media/video/ov9640.c @@ -154,19 +154,10 @@ static const struct ov9640_reg ov9640_regs_rgb[] = { { OV9640_MTXS, 0x65 }, }; -/* - * TODO: this sensor also supports RGB555 and RGB565 formats, but support for - * them has not yet been sufficiently tested and so it is not included with - * this version of the driver. To test and debug these formats add two entries - * to the below array, see ov722x.c for an example. 
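The sensor-side counterpart, shown above for ov772x, reduces to three pieces: a table of supported media-bus codes with their colorspaces, a try_fmt that falls back to the current (or default) entry when an unknown code is requested, and an enum_mbus_fmt operation exposing that table. A condensed sketch, not part of the patch, with the hypothetical names sensor_fmt/sensor_cfmts standing in for the driver's own structures:

struct sensor_fmt {                                  /* hypothetical name */
        enum v4l2_mbus_pixelcode code;
        enum v4l2_colorspace colorspace;
};

static const struct sensor_fmt sensor_cfmts[] = {    /* hypothetical name */
        { V4L2_MBUS_FMT_YUYV8_2X8_LE,  V4L2_COLORSPACE_JPEG },
        { V4L2_MBUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB },
};

static int sensor_try_fmt(struct v4l2_subdev *sd,
                          struct v4l2_mbus_framefmt *mf)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(sensor_cfmts); i++)
                if (mf->code == sensor_cfmts[i].code)
                        break;
        if (i == ARRAY_SIZE(sensor_cfmts))
                i = 0;  /* unsupported code: propose a default (ov772x
                         * prefers its current format when one is set) */

        mf->code       = sensor_cfmts[i].code;
        mf->colorspace = sensor_cfmts[i].colorspace;
        mf->field      = V4L2_FIELD_NONE;
        return 0;
}

static int sensor_enum_fmt(struct v4l2_subdev *sd, int index,
                           enum v4l2_mbus_pixelcode *code)
{
        if ((unsigned int)index >= ARRAY_SIZE(sensor_cfmts))
                return -EINVAL;
        *code = sensor_cfmts[index].code;
        return 0;
}

These are then wired up as .try_mbus_fmt, .s_mbus_fmt, .g_mbus_fmt and .enum_mbus_fmt instead of the old .try_fmt/.s_fmt/.g_fmt, as in the ov772x_subdev_video_ops hunk above.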
- */ -static const struct soc_camera_data_format ov9640_fmt_lists[] = { - { - .name = "UYVY", - .fourcc = V4L2_PIX_FMT_UYVY, - .depth = 16, - .colorspace = V4L2_COLORSPACE_JPEG, - }, +static enum v4l2_mbus_pixelcode ov9640_codes[] = { + V4L2_MBUS_FMT_YUYV8_2X8_BE, + V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE, + V4L2_MBUS_FMT_RGB565_2X8_LE, }; static const struct v4l2_queryctrl ov9640_controls[] = { @@ -434,20 +425,22 @@ static void ov9640_res_roundup(u32 *width, u32 *height) } /* Prepare necessary register changes depending on color encoding */ -static void ov9640_alter_regs(u32 pixfmt, struct ov9640_reg_alt *alt) +static void ov9640_alter_regs(enum v4l2_mbus_pixelcode code, + struct ov9640_reg_alt *alt) { - switch (pixfmt) { - case V4L2_PIX_FMT_UYVY: + switch (code) { + default: + case V4L2_MBUS_FMT_YUYV8_2X8_BE: alt->com12 = OV9640_COM12_YUV_AVG; alt->com13 = OV9640_COM13_Y_DELAY_EN | OV9640_COM13_YUV_DLY(0x01); break; - case V4L2_PIX_FMT_RGB555: + case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE: alt->com7 = OV9640_COM7_RGB; alt->com13 = OV9640_COM13_RGB_AVG; alt->com15 = OV9640_COM15_RGB_555; break; - case V4L2_PIX_FMT_RGB565: + case V4L2_MBUS_FMT_RGB565_2X8_LE: alt->com7 = OV9640_COM7_RGB; alt->com13 = OV9640_COM13_RGB_AVG; alt->com15 = OV9640_COM15_RGB_565; @@ -456,8 +449,8 @@ static void ov9640_alter_regs(u32 pixfmt, struct ov9640_reg_alt *alt) } /* Setup registers according to resolution and color encoding */ -static int ov9640_write_regs(struct i2c_client *client, - u32 width, u32 pixfmt, struct ov9640_reg_alt *alts) +static int ov9640_write_regs(struct i2c_client *client, u32 width, + enum v4l2_mbus_pixelcode code, struct ov9640_reg_alt *alts) { const struct ov9640_reg *ov9640_regs, *matrix_regs; int ov9640_regs_len, matrix_regs_len; @@ -500,7 +493,7 @@ static int ov9640_write_regs(struct i2c_client *client, } /* select color matrix configuration for given color encoding */ - if (pixfmt == V4L2_PIX_FMT_UYVY) { + if (code == V4L2_MBUS_FMT_YUYV8_2X8_BE) { matrix_regs = ov9640_regs_yuv; matrix_regs_len = ARRAY_SIZE(ov9640_regs_yuv); } else { @@ -562,15 +555,17 @@ static int ov9640_prog_dflt(struct i2c_client *client) } /* set the format we will capture in */ -static int ov9640_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) +static int ov9640_s_fmt(struct v4l2_subdev *sd, + struct v4l2_mbus_framefmt *mf) { struct i2c_client *client = sd->priv; - struct v4l2_pix_format *pix = &f->fmt.pix; struct ov9640_reg_alt alts = {0}; + enum v4l2_colorspace cspace; + enum v4l2_mbus_pixelcode code = mf->code; int ret; - ov9640_res_roundup(&pix->width, &pix->height); - ov9640_alter_regs(pix->pixelformat, &alts); + ov9640_res_roundup(&mf->width, &mf->height); + ov9640_alter_regs(mf->code, &alts); ov9640_reset(client); @@ -578,19 +573,57 @@ static int ov9640_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) if (ret) return ret; - return ov9640_write_regs(client, pix->width, pix->pixelformat, &alts); + switch (code) { + case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE: + case V4L2_MBUS_FMT_RGB565_2X8_LE: + cspace = V4L2_COLORSPACE_SRGB; + break; + default: + code = V4L2_MBUS_FMT_YUYV8_2X8_BE; + case V4L2_MBUS_FMT_YUYV8_2X8_BE: + cspace = V4L2_COLORSPACE_JPEG; + } + + ret = ov9640_write_regs(client, mf->width, code, &alts); + if (!ret) { + mf->code = code; + mf->colorspace = cspace; + } + + return ret; } -static int ov9640_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) +static int ov9640_try_fmt(struct v4l2_subdev *sd, + struct v4l2_mbus_framefmt *mf) { - struct v4l2_pix_format *pix = &f->fmt.pix; + 
ov9640_res_roundup(&mf->width, &mf->height); - ov9640_res_roundup(&pix->width, &pix->height); - pix->field = V4L2_FIELD_NONE; + mf->field = V4L2_FIELD_NONE; + + switch (mf->code) { + case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE: + case V4L2_MBUS_FMT_RGB565_2X8_LE: + mf->colorspace = V4L2_COLORSPACE_SRGB; + break; + default: + mf->code = V4L2_MBUS_FMT_YUYV8_2X8_BE; + case V4L2_MBUS_FMT_YUYV8_2X8_BE: + mf->colorspace = V4L2_COLORSPACE_JPEG; + } return 0; } +static int ov9640_enum_fmt(struct v4l2_subdev *sd, int index, + enum v4l2_mbus_pixelcode *code) +{ + if ((unsigned int)index >= ARRAY_SIZE(ov9640_codes)) + return -EINVAL; + + *code = ov9640_codes[index]; + return 0; +} + static int ov9640_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) { a->c.left = 0; @@ -637,9 +670,6 @@ static int ov9640_video_probe(struct soc_camera_device *icd, goto err; } - icd->formats = ov9640_fmt_lists; - icd->num_formats = ARRAY_SIZE(ov9640_fmt_lists); - /* * check and show product ID and manufacturer ID */ @@ -702,11 +732,12 @@ static struct v4l2_subdev_core_ops ov9640_core_ops = { }; static struct v4l2_subdev_video_ops ov9640_video_ops = { - .s_stream = ov9640_s_stream, - .s_fmt = ov9640_s_fmt, - .try_fmt = ov9640_try_fmt, - .cropcap = ov9640_cropcap, - .g_crop = ov9640_g_crop, + .s_stream = ov9640_s_stream, + .s_mbus_fmt = ov9640_s_fmt, + .try_mbus_fmt = ov9640_try_fmt, + .enum_mbus_fmt = ov9640_enum_fmt, + .cropcap = ov9640_cropcap, + .g_crop = ov9640_g_crop, }; diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c index f063f5981f43..294f860ce2b0 100644 --- a/drivers/media/video/pxa_camera.c +++ b/drivers/media/video/pxa_camera.c @@ -32,6 +32,7 @@ #include #include #include +#include #include @@ -183,16 +184,12 @@ struct pxa_cam_dma { /* buffer for one video frame */ struct pxa_buffer { /* common v4l buffer stuff -- must be first */ - struct videobuf_buffer vb; - - const struct soc_camera_data_format *fmt; - + struct videobuf_buffer vb; + enum v4l2_mbus_pixelcode code; /* our descriptor lists for Y, U and V channels */ - struct pxa_cam_dma dmas[3]; - - int inwork; - - enum pxa_camera_active_dma active_dma; + struct pxa_cam_dma dmas[3]; + int inwork; + enum pxa_camera_active_dma active_dma; }; struct pxa_camera_dev { @@ -243,11 +240,15 @@ static int pxa_videobuf_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size) { struct soc_camera_device *icd = vq->priv_data; + int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width, + icd->current_fmt->host_fmt); + + if (bytes_per_line < 0) + return bytes_per_line; dev_dbg(icd->dev.parent, "count=%d, size=%d\n", *count, *size); - *size = roundup(icd->user_width * icd->user_height * - ((icd->current_fmt->depth + 7) >> 3), 8); + *size = bytes_per_line * icd->user_height; if (0 == *count) *count = 32; @@ -433,6 +434,11 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq, struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb); int ret; int size_y, size_u = 0, size_v = 0; + int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width, + icd->current_fmt->host_fmt); + + if (bytes_per_line < 0) + return bytes_per_line; dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__, vb, vb->baddr, vb->bsize); @@ -456,18 +462,18 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq, */ buf->inwork = 1; - if (buf->fmt != icd->current_fmt || + if (buf->code != icd->current_fmt->code || vb->width != icd->user_width || vb->height != icd->user_height || vb->field != field) { - buf->fmt = icd->current_fmt; + buf->code = 
icd->current_fmt->code; vb->width = icd->user_width; vb->height = icd->user_height; vb->field = field; vb->state = VIDEOBUF_NEEDS_INIT; } - vb->size = vb->width * vb->height * ((buf->fmt->depth + 7) >> 3); + vb->size = bytes_per_line * vb->height; if (0 != vb->baddr && vb->bsize < vb->size) { ret = -EINVAL; goto out; @@ -1157,9 +1163,15 @@ static int pxa_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt) struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct pxa_camera_dev *pcdev = ici->priv; unsigned long bus_flags, camera_flags, common_flags; - int ret = test_platform_param(pcdev, icd->buswidth, &bus_flags); + const struct soc_mbus_pixelfmt *fmt; + int ret; struct pxa_cam *cam = icd->host_priv; + fmt = soc_mbus_get_fmtdesc(icd->current_fmt->code); + if (!fmt) + return -EINVAL; + + ret = test_platform_param(pcdev, fmt->bits_per_sample, &bus_flags); if (ret < 0) return ret; @@ -1223,59 +1235,49 @@ static int pxa_camera_try_bus_param(struct soc_camera_device *icd, return soc_camera_bus_param_compatible(camera_flags, bus_flags) ? 0 : -EINVAL; } -static const struct soc_camera_data_format pxa_camera_formats[] = { +static const struct soc_mbus_pixelfmt pxa_camera_formats[] = { { - .name = "Planar YUV422 16 bit", - .depth = 16, - .fourcc = V4L2_PIX_FMT_YUV422P, - .colorspace = V4L2_COLORSPACE_JPEG, + .fourcc = V4L2_PIX_FMT_YUV422P, + .name = "Planar YUV422 16 bit", + .bits_per_sample = 8, + .packing = SOC_MBUS_PACKING_2X8_PADHI, + .order = SOC_MBUS_ORDER_LE, }, }; -static bool buswidth_supported(struct soc_camera_device *icd, int depth) +/* This will be corrected as we get more formats */ +static bool pxa_camera_packing_supported(const struct soc_mbus_pixelfmt *fmt) { - struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); - struct pxa_camera_dev *pcdev = ici->priv; - - switch (depth) { - case 8: - return !!(pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_8); - case 9: - return !!(pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_9); - case 10: - return !!(pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_10); - } - return false; -} - -static int required_buswidth(const struct soc_camera_data_format *fmt) -{ - switch (fmt->fourcc) { - case V4L2_PIX_FMT_UYVY: - case V4L2_PIX_FMT_VYUY: - case V4L2_PIX_FMT_YUYV: - case V4L2_PIX_FMT_YVYU: - case V4L2_PIX_FMT_RGB565: - case V4L2_PIX_FMT_RGB555: - return 8; - default: - return fmt->depth; - } + return fmt->packing == SOC_MBUS_PACKING_NONE || + (fmt->bits_per_sample == 8 && + fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) || + (fmt->bits_per_sample > 8 && + fmt->packing == SOC_MBUS_PACKING_EXTEND16); } static int pxa_camera_get_formats(struct soc_camera_device *icd, int idx, struct soc_camera_format_xlate *xlate) { + struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct device *dev = icd->dev.parent; - int formats = 0, buswidth, ret; + int formats = 0, ret; struct pxa_cam *cam; + enum v4l2_mbus_pixelcode code; + const struct soc_mbus_pixelfmt *fmt; - buswidth = required_buswidth(icd->formats + idx); + ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code); + if (ret < 0) + /* No more formats */ + return 0; - if (!buswidth_supported(icd, buswidth)) + fmt = soc_mbus_get_fmtdesc(code); + if (!fmt) { + dev_err(dev, "Invalid format code #%d: %d\n", idx, code); return 0; + } - ret = pxa_camera_try_bus_param(icd, buswidth); + /* This also checks support for the requested bits-per-sample */ + ret = pxa_camera_try_bus_param(icd, fmt->bits_per_sample); if (ret < 0) return 0; @@ -1289,45 +1291,40 @@ static int 
pxa_camera_get_formats(struct soc_camera_device *icd, int idx, cam = icd->host_priv; } - switch (icd->formats[idx].fourcc) { - case V4L2_PIX_FMT_UYVY: + switch (code) { + case V4L2_MBUS_FMT_YUYV8_2X8_BE: formats++; if (xlate) { - xlate->host_fmt = &pxa_camera_formats[0]; - xlate->cam_fmt = icd->formats + idx; - xlate->buswidth = buswidth; + xlate->host_fmt = &pxa_camera_formats[0]; + xlate->code = code; xlate++; - dev_dbg(dev, "Providing format %s using %s\n", - pxa_camera_formats[0].name, - icd->formats[idx].name); + dev_dbg(dev, "Providing format %s using code %d\n", + pxa_camera_formats[0].name, code); } - case V4L2_PIX_FMT_VYUY: - case V4L2_PIX_FMT_YUYV: - case V4L2_PIX_FMT_YVYU: - case V4L2_PIX_FMT_RGB565: - case V4L2_PIX_FMT_RGB555: - formats++; - if (xlate) { - xlate->host_fmt = icd->formats + idx; - xlate->cam_fmt = icd->formats + idx; - xlate->buswidth = buswidth; - xlate++; + case V4L2_MBUS_FMT_YVYU8_2X8_BE: + case V4L2_MBUS_FMT_YUYV8_2X8_LE: + case V4L2_MBUS_FMT_YVYU8_2X8_LE: + case V4L2_MBUS_FMT_RGB565_2X8_LE: + case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE: + if (xlate) dev_dbg(dev, "Providing format %s packed\n", - icd->formats[idx].name); - } + fmt->name); break; default: - /* Generic pass-through */ - formats++; - if (xlate) { - xlate->host_fmt = icd->formats + idx; - xlate->cam_fmt = icd->formats + idx; - xlate->buswidth = icd->formats[idx].depth; - xlate++; + if (!pxa_camera_packing_supported(fmt)) + return 0; + if (xlate) dev_dbg(dev, "Providing format %s in pass-through mode\n", - icd->formats[idx].name); - } + fmt->name); + } + + /* Generic pass-through */ + formats++; + if (xlate) { + xlate->host_fmt = fmt; + xlate->code = code; + xlate++; } return formats; @@ -1339,11 +1336,11 @@ static void pxa_camera_put_formats(struct soc_camera_device *icd) icd->host_priv = NULL; } -static int pxa_camera_check_frame(struct v4l2_pix_format *pix) +static int pxa_camera_check_frame(u32 width, u32 height) { /* limit to pxa hardware capabilities */ - return pix->height < 32 || pix->height > 2048 || pix->width < 48 || - pix->width > 2048 || (pix->width & 0x01); + return height < 32 || height > 2048 || width < 48 || width > 2048 || + (width & 0x01); } static int pxa_camera_set_crop(struct soc_camera_device *icd, @@ -1358,9 +1355,9 @@ static int pxa_camera_set_crop(struct soc_camera_device *icd, .master_clock = pcdev->mclk, .pixel_clock_max = pcdev->ciclk / 4, }; - struct v4l2_format f; - struct v4l2_pix_format *pix = &f.fmt.pix, pix_tmp; + struct v4l2_mbus_framefmt mf; struct pxa_cam *cam = icd->host_priv; + u32 fourcc = icd->current_fmt->host_fmt->fourcc; int ret; /* If PCLK is used to latch data from the sensor, check sense */ @@ -1377,27 +1374,23 @@ static int pxa_camera_set_crop(struct soc_camera_device *icd, return ret; } - f.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - - ret = v4l2_subdev_call(sd, video, g_fmt, &f); + ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf); if (ret < 0) return ret; - pix_tmp = *pix; - if (pxa_camera_check_frame(pix)) { + if (pxa_camera_check_frame(mf.width, mf.height)) { /* * Camera cropping produced a frame beyond our capabilities. * FIXME: just extract a subframe, that we can process. */ - v4l_bound_align_image(&pix->width, 48, 2048, 1, - &pix->height, 32, 2048, 0, - icd->current_fmt->fourcc == V4L2_PIX_FMT_YUV422P ? - 4 : 0); - ret = v4l2_subdev_call(sd, video, s_fmt, &f); + v4l_bound_align_image(&mf.width, 48, 2048, 1, + &mf.height, 32, 2048, 0, + fourcc == V4L2_PIX_FMT_YUV422P ? 
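With icd->formats/num_formats gone, a host driver's get_formats() now asks the subdevice for the idx-th media-bus code and validates it itself; pxa_camera_get_formats() above and sh_mobile_ceu_get_formats() further down follow the same shape. A trimmed sketch of just the pass-through case (the per-code special cases are dropped), not part of the patch:

static int host_get_formats_sketch(struct soc_camera_device *icd, int idx,
                                   struct soc_camera_format_xlate *xlate)
{
        struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
        const struct soc_mbus_pixelfmt *fmt;
        enum v4l2_mbus_pixelcode code;
        int ret;

        ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
        if (ret < 0)
                return 0;                       /* no more formats */

        fmt = soc_mbus_get_fmtdesc(code);       /* generic description */
        if (!fmt)
                return 0;                       /* unknown code */

        /* generic pass-through: one user format per sensor code */
        if (xlate) {
                xlate->host_fmt = fmt;
                xlate->code     = code;
        }
        return 1;
}

The old cam_fmt pointer and buswidth in the xlate entry disappear with this scheme: the bus width is now taken from fmt->bits_per_sample when negotiating bus parameters, as the set_bus_param() hunks above show.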
4 : 0); + ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf); if (ret < 0) return ret; - if (pxa_camera_check_frame(pix)) { + if (pxa_camera_check_frame(mf.width, mf.height)) { dev_warn(icd->dev.parent, "Inconsistent state. Use S_FMT to repair\n"); return -EINVAL; @@ -1414,10 +1407,10 @@ static int pxa_camera_set_crop(struct soc_camera_device *icd, recalculate_fifo_timeout(pcdev, sense.pixel_clock); } - icd->user_width = pix->width; - icd->user_height = pix->height; + icd->user_width = mf.width; + icd->user_height = mf.height; - pxa_camera_setup_cicr(icd, cam->flags, icd->current_fmt->fourcc); + pxa_camera_setup_cicr(icd, cam->flags, fourcc); return ret; } @@ -1429,14 +1422,13 @@ static int pxa_camera_set_fmt(struct soc_camera_device *icd, struct pxa_camera_dev *pcdev = ici->priv; struct device *dev = icd->dev.parent; struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - const struct soc_camera_data_format *cam_fmt = NULL; const struct soc_camera_format_xlate *xlate = NULL; struct soc_camera_sense sense = { .master_clock = pcdev->mclk, .pixel_clock_max = pcdev->ciclk / 4, }; struct v4l2_pix_format *pix = &f->fmt.pix; - struct v4l2_format cam_f = *f; + struct v4l2_mbus_framefmt mf; int ret; xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); @@ -1445,26 +1437,31 @@ static int pxa_camera_set_fmt(struct soc_camera_device *icd, return -EINVAL; } - cam_fmt = xlate->cam_fmt; - /* If PCLK is used to latch data from the sensor, check sense */ if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN) + /* The caller holds a mutex. */ icd->sense = &sense; - cam_f.fmt.pix.pixelformat = cam_fmt->fourcc; - ret = v4l2_subdev_call(sd, video, s_fmt, &cam_f); - cam_f.fmt.pix.pixelformat = pix->pixelformat; - *pix = cam_f.fmt.pix; + mf.width = pix->width; + mf.height = pix->height; + mf.field = pix->field; + mf.colorspace = pix->colorspace; + mf.code = xlate->code; + + ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf); + + if (mf.code != xlate->code) + return -EINVAL; icd->sense = NULL; if (ret < 0) { dev_warn(dev, "Failed to configure for format %x\n", pix->pixelformat); - } else if (pxa_camera_check_frame(pix)) { + } else if (pxa_camera_check_frame(mf.width, mf.height)) { dev_warn(dev, "Camera driver produced an unsupported frame %dx%d\n", - pix->width, pix->height); + mf.width, mf.height); ret = -EINVAL; } else if (sense.flags & SOCAM_SENSE_PCLK_CHANGED) { if (sense.pixel_clock > sense.pixel_clock_max) { @@ -1476,10 +1473,14 @@ static int pxa_camera_set_fmt(struct soc_camera_device *icd, recalculate_fifo_timeout(pcdev, sense.pixel_clock); } - if (!ret) { - icd->buswidth = xlate->buswidth; - icd->current_fmt = xlate->host_fmt; - } + if (ret < 0) + return ret; + + pix->width = mf.width; + pix->height = mf.height; + pix->field = mf.field; + pix->colorspace = mf.colorspace; + icd->current_fmt = xlate; return ret; } @@ -1487,17 +1488,16 @@ static int pxa_camera_set_fmt(struct soc_camera_device *icd, static int pxa_camera_try_fmt(struct soc_camera_device *icd, struct v4l2_format *f) { - struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct v4l2_subdev *sd = soc_camera_to_subdev(icd); const struct soc_camera_format_xlate *xlate; struct v4l2_pix_format *pix = &f->fmt.pix; + struct v4l2_mbus_framefmt mf; __u32 pixfmt = pix->pixelformat; - enum v4l2_field field; int ret; xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); if (!xlate) { - dev_warn(ici->v4l2_dev.dev, "Format %x not found\n", pixfmt); + dev_warn(icd->dev.parent, "Format %x not found\n", pixfmt); return -EINVAL; } @@ -1511,22 +1511,36 
@@ static int pxa_camera_try_fmt(struct soc_camera_device *icd, &pix->height, 32, 2048, 0, pixfmt == V4L2_PIX_FMT_YUV422P ? 4 : 0); - pix->bytesperline = pix->width * - DIV_ROUND_UP(xlate->host_fmt->depth, 8); + pix->bytesperline = soc_mbus_bytes_per_line(pix->width, + xlate->host_fmt); + if (pix->bytesperline < 0) + return pix->bytesperline; pix->sizeimage = pix->height * pix->bytesperline; - /* camera has to see its format, but the user the original one */ - pix->pixelformat = xlate->cam_fmt->fourcc; /* limit to sensor capabilities */ - ret = v4l2_subdev_call(sd, video, try_fmt, f); - pix->pixelformat = pixfmt; + mf.width = pix->width; + mf.height = pix->height; + mf.field = pix->field; + mf.colorspace = pix->colorspace; + mf.code = xlate->code; - field = pix->field; + ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf); + if (ret < 0) + return ret; - if (field == V4L2_FIELD_ANY) { - pix->field = V4L2_FIELD_NONE; - } else if (field != V4L2_FIELD_NONE) { - dev_err(icd->dev.parent, "Field type %d unsupported.\n", field); + pix->width = mf.width; + pix->height = mf.height; + pix->colorspace = mf.colorspace; + + switch (mf.field) { + case V4L2_FIELD_ANY: + case V4L2_FIELD_NONE: + pix->field = V4L2_FIELD_NONE; + break; + default: + /* TODO: support interlaced at least in pass-through mode */ + dev_err(icd->dev.parent, "Field type %d unsupported.\n", + mf.field); return -EINVAL; } diff --git a/drivers/media/video/rj54n1cb0c.c b/drivers/media/video/rj54n1cb0c.c index 373f2a30a677..7b08bff443f5 100644 --- a/drivers/media/video/rj54n1cb0c.c +++ b/drivers/media/video/rj54n1cb0c.c @@ -16,6 +16,7 @@ #include #include #include +#include #define RJ54N1_DEV_CODE 0x0400 #define RJ54N1_DEV_CODE2 0x0401 @@ -85,18 +86,35 @@ /* I2C addresses: 0x50, 0x51, 0x60, 0x61 */ -static const struct soc_camera_data_format rj54n1_colour_formats[] = { - { - .name = "YUYV", - .depth = 16, - .fourcc = V4L2_PIX_FMT_YUYV, - .colorspace = V4L2_COLORSPACE_JPEG, - }, { - .name = "RGB565", - .depth = 16, - .fourcc = V4L2_PIX_FMT_RGB565, - .colorspace = V4L2_COLORSPACE_SRGB, - } +/* RJ54N1CB0C has only one fixed colorspace per pixelcode */ +struct rj54n1_datafmt { + enum v4l2_mbus_pixelcode code; + enum v4l2_colorspace colorspace; +}; + +/* Find a data format by a pixel code in an array */ +static const struct rj54n1_datafmt *rj54n1_find_datafmt( + enum v4l2_mbus_pixelcode code, const struct rj54n1_datafmt *fmt, + int n) +{ + int i; + for (i = 0; i < n; i++) + if (fmt[i].code == code) + return fmt + i; + + return NULL; +} + +static const struct rj54n1_datafmt rj54n1_colour_fmts[] = { + {V4L2_MBUS_FMT_YUYV8_2X8_LE, V4L2_COLORSPACE_JPEG}, + {V4L2_MBUS_FMT_YVYU8_2X8_LE, V4L2_COLORSPACE_JPEG}, + {V4L2_MBUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB}, + {V4L2_MBUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_SRGB}, + {V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB}, + {V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE, V4L2_COLORSPACE_SRGB}, + {V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE, V4L2_COLORSPACE_SRGB}, + {V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE, V4L2_COLORSPACE_SRGB}, + {V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_COLORSPACE_SRGB}, }; struct rj54n1_clock_div { @@ -109,12 +127,12 @@ struct rj54n1_clock_div { struct rj54n1 { struct v4l2_subdev subdev; + const struct rj54n1_datafmt *fmt; struct v4l2_rect rect; /* Sensor window */ unsigned short width; /* Output window */ unsigned short height; unsigned short resize; /* Sensor * 1024 / resize = Output */ struct rj54n1_clock_div clk_div; - u32 fourcc; unsigned short scale; u8 bank; }; @@ -440,6 +458,16 @@ static int 
reg_write_multiple(struct i2c_client *client, return 0; } +static int rj54n1_enum_fmt(struct v4l2_subdev *sd, int index, + enum v4l2_mbus_pixelcode *code) +{ + if ((unsigned int)index >= ARRAY_SIZE(rj54n1_colour_fmts)) + return -EINVAL; + + *code = rj54n1_colour_fmts[index].code; + return 0; +} + static int rj54n1_s_stream(struct v4l2_subdev *sd, int enable) { /* TODO: start / stop streaming */ @@ -527,16 +555,17 @@ static int rj54n1_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a) return 0; } -static int rj54n1_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) +static int rj54n1_g_fmt(struct v4l2_subdev *sd, + struct v4l2_mbus_framefmt *mf) { struct i2c_client *client = sd->priv; struct rj54n1 *rj54n1 = to_rj54n1(client); - struct v4l2_pix_format *pix = &f->fmt.pix; - pix->pixelformat = rj54n1->fourcc; - pix->field = V4L2_FIELD_NONE; - pix->width = rj54n1->width; - pix->height = rj54n1->height; + mf->code = rj54n1->fmt->code; + mf->colorspace = rj54n1->fmt->colorspace; + mf->field = V4L2_FIELD_NONE; + mf->width = rj54n1->width; + mf->height = rj54n1->height; return 0; } @@ -787,26 +816,44 @@ static int rj54n1_reg_init(struct i2c_client *client) } /* FIXME: streaming output only up to 800x600 is functional */ -static int rj54n1_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) +static int rj54n1_try_fmt(struct v4l2_subdev *sd, + struct v4l2_mbus_framefmt *mf) { - struct v4l2_pix_format *pix = &f->fmt.pix; + struct i2c_client *client = sd->priv; + struct rj54n1 *rj54n1 = to_rj54n1(client); + const struct rj54n1_datafmt *fmt; + int align = mf->code == V4L2_MBUS_FMT_SBGGR10_1X10 || + mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE || + mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE || + mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE || + mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE; + + dev_dbg(&client->dev, "%s: code = %d, width = %u, height = %u\n", + __func__, mf->code, mf->width, mf->height); + + fmt = rj54n1_find_datafmt(mf->code, rj54n1_colour_fmts, + ARRAY_SIZE(rj54n1_colour_fmts)); + if (!fmt) { + fmt = rj54n1->fmt; + mf->code = fmt->code; + } - pix->field = V4L2_FIELD_NONE; + mf->field = V4L2_FIELD_NONE; + mf->colorspace = fmt->colorspace; - if (pix->width > 800) - pix->width = 800; - if (pix->height > 600) - pix->height = 600; + v4l_bound_align_image(&mf->width, 112, RJ54N1_MAX_WIDTH, align, + &mf->height, 84, RJ54N1_MAX_HEIGHT, align, 0); return 0; } -static int rj54n1_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) +static int rj54n1_s_fmt(struct v4l2_subdev *sd, + struct v4l2_mbus_framefmt *mf) { struct i2c_client *client = sd->priv; struct rj54n1 *rj54n1 = to_rj54n1(client); - struct v4l2_pix_format *pix = &f->fmt.pix; - unsigned int output_w, output_h, + const struct rj54n1_datafmt *fmt; + unsigned int output_w, output_h, max_w, max_h, input_w = rj54n1->rect.width, input_h = rj54n1->rect.height; int ret; @@ -814,7 +861,7 @@ static int rj54n1_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) * The host driver can call us without .try_fmt(), so, we have to take * care ourseleves */ - ret = rj54n1_try_fmt(sd, f); + ret = rj54n1_try_fmt(sd, mf); /* * Verify if the sensor has just been powered on. TODO: replace this @@ -832,49 +879,101 @@ static int rj54n1_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f) } /* RA_SEL_UL is only relevant for raw modes, ignored otherwise. 
*/ - switch (pix->pixelformat) { - case V4L2_PIX_FMT_YUYV: + switch (mf->code) { + case V4L2_MBUS_FMT_YUYV8_2X8_LE: ret = reg_write(client, RJ54N1_OUT_SEL, 0); if (!ret) ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8); break; - case V4L2_PIX_FMT_RGB565: + case V4L2_MBUS_FMT_YVYU8_2X8_LE: + ret = reg_write(client, RJ54N1_OUT_SEL, 0); + if (!ret) + ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8); + break; + case V4L2_MBUS_FMT_RGB565_2X8_LE: ret = reg_write(client, RJ54N1_OUT_SEL, 0x11); if (!ret) ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8); break; + case V4L2_MBUS_FMT_RGB565_2X8_BE: + ret = reg_write(client, RJ54N1_OUT_SEL, 0x11); + if (!ret) + ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8); + break; + case V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE: + ret = reg_write(client, RJ54N1_OUT_SEL, 4); + if (!ret) + ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8); + if (!ret) + ret = reg_write(client, RJ54N1_RA_SEL_UL, 0); + break; + case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE: + ret = reg_write(client, RJ54N1_OUT_SEL, 4); + if (!ret) + ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8); + if (!ret) + ret = reg_write(client, RJ54N1_RA_SEL_UL, 8); + break; + case V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE: + ret = reg_write(client, RJ54N1_OUT_SEL, 4); + if (!ret) + ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8); + if (!ret) + ret = reg_write(client, RJ54N1_RA_SEL_UL, 0); + break; + case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE: + ret = reg_write(client, RJ54N1_OUT_SEL, 4); + if (!ret) + ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8); + if (!ret) + ret = reg_write(client, RJ54N1_RA_SEL_UL, 8); + break; + case V4L2_MBUS_FMT_SBGGR10_1X10: + ret = reg_write(client, RJ54N1_OUT_SEL, 5); + break; default: ret = -EINVAL; } + /* Special case: a raw mode with 10 bits of data per clock tick */ + if (!ret) + ret = reg_set(client, RJ54N1_OCLK_SEL_EN, + (mf->code == V4L2_MBUS_FMT_SBGGR10_1X10) << 1, 2); + if (ret < 0) return ret; - /* Supported scales 1:1 - 1:16 */ - if (pix->width < input_w / 16) - pix->width = input_w / 16; - if (pix->height < input_h / 16) - pix->height = input_h / 16; + /* Supported scales 1:1 >= scale > 1:16 */ + max_w = mf->width * (16 * 1024 - 1) / 1024; + if (input_w > max_w) + input_w = max_w; + max_h = mf->height * (16 * 1024 - 1) / 1024; + if (input_h > max_h) + input_h = max_h; - output_w = pix->width; - output_h = pix->height; + output_w = mf->width; + output_h = mf->height; ret = rj54n1_sensor_scale(sd, &input_w, &input_h, &output_w, &output_h); if (ret < 0) return ret; - rj54n1->fourcc = pix->pixelformat; + fmt = rj54n1_find_datafmt(mf->code, rj54n1_colour_fmts, + ARRAY_SIZE(rj54n1_colour_fmts)); + + rj54n1->fmt = fmt; rj54n1->resize = ret; rj54n1->rect.width = input_w; rj54n1->rect.height = input_h; rj54n1->width = output_w; rj54n1->height = output_h; - pix->width = output_w; - pix->height = output_h; - pix->field = V4L2_FIELD_NONE; + mf->width = output_w; + mf->height = output_h; + mf->field = V4L2_FIELD_NONE; + mf->colorspace = fmt->colorspace; - return ret; + return 0; } static int rj54n1_g_chip_ident(struct v4l2_subdev *sd, @@ -1054,9 +1153,10 @@ static struct v4l2_subdev_core_ops rj54n1_subdev_core_ops = { static struct v4l2_subdev_video_ops rj54n1_subdev_video_ops = { .s_stream = rj54n1_s_stream, - .s_fmt = rj54n1_s_fmt, - .g_fmt = rj54n1_g_fmt, - .try_fmt = rj54n1_try_fmt, + .s_mbus_fmt = rj54n1_s_fmt, + .g_mbus_fmt = rj54n1_g_fmt, + .try_mbus_fmt = rj54n1_try_fmt, + .enum_mbus_fmt = rj54n1_enum_fmt, .g_crop = rj54n1_g_crop, .cropcap = rj54n1_cropcap, }; @@ -1153,7 +1253,7 @@ static int rj54n1_probe(struct 
i2c_client *client, rj54n1->rect.height = RJ54N1_MAX_HEIGHT; rj54n1->width = RJ54N1_MAX_WIDTH; rj54n1->height = RJ54N1_MAX_HEIGHT; - rj54n1->fourcc = V4L2_PIX_FMT_YUYV; + rj54n1->fmt = &rj54n1_colour_fmts[0]; rj54n1->resize = 1024; ret = rj54n1_video_probe(icd, client); @@ -1164,9 +1264,6 @@ static int rj54n1_probe(struct i2c_client *client, return ret; } - icd->formats = rj54n1_colour_formats; - icd->num_formats = ARRAY_SIZE(rj54n1_colour_formats); - return ret; } diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c index 2a38e1d90613..273ad34f9d8d 100644 --- a/drivers/media/video/sh_mobile_ceu_camera.c +++ b/drivers/media/video/sh_mobile_ceu_camera.c @@ -38,6 +38,8 @@ #include #include #include +#include +#include /* register offsets for sh7722 / sh7723 */ @@ -85,7 +87,7 @@ /* per video frame buffer */ struct sh_mobile_ceu_buffer { struct videobuf_buffer vb; /* v4l buffer must be first */ - const struct soc_camera_data_format *fmt; + enum v4l2_mbus_pixelcode code; }; struct sh_mobile_ceu_dev { @@ -114,8 +116,8 @@ struct sh_mobile_ceu_cam { struct v4l2_rect ceu_rect; unsigned int cam_width; unsigned int cam_height; - const struct soc_camera_data_format *extra_fmt; - const struct soc_camera_data_format *camera_fmt; + const struct soc_mbus_pixelfmt *extra_fmt; + enum v4l2_mbus_pixelcode code; }; static unsigned long make_bus_param(struct sh_mobile_ceu_dev *pcdev) @@ -197,10 +199,13 @@ static int sh_mobile_ceu_videobuf_setup(struct videobuf_queue *vq, struct soc_camera_device *icd = vq->priv_data; struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; - int bytes_per_pixel = (icd->current_fmt->depth + 7) >> 3; + int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width, + icd->current_fmt->host_fmt); - *size = PAGE_ALIGN(icd->user_width * icd->user_height * - bytes_per_pixel); + if (bytes_per_line < 0) + return bytes_per_line; + + *size = PAGE_ALIGN(bytes_per_line * icd->user_height); if (0 == *count) *count = 2; @@ -284,7 +289,7 @@ static int sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev) ceu_write(pcdev, CDBYR, phys_addr_bottom); } - switch (icd->current_fmt->fourcc) { + switch (icd->current_fmt->host_fmt->fourcc) { case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV21: case V4L2_PIX_FMT_NV16: @@ -311,8 +316,13 @@ static int sh_mobile_ceu_videobuf_prepare(struct videobuf_queue *vq, { struct soc_camera_device *icd = vq->priv_data; struct sh_mobile_ceu_buffer *buf; + int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width, + icd->current_fmt->host_fmt); int ret; + if (bytes_per_line < 0) + return bytes_per_line; + buf = container_of(vb, struct sh_mobile_ceu_buffer, vb); dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %zd\n", __func__, @@ -331,18 +341,18 @@ static int sh_mobile_ceu_videobuf_prepare(struct videobuf_queue *vq, BUG_ON(NULL == icd->current_fmt); - if (buf->fmt != icd->current_fmt || + if (buf->code != icd->current_fmt->code || vb->width != icd->user_width || vb->height != icd->user_height || vb->field != field) { - buf->fmt = icd->current_fmt; + buf->code = icd->current_fmt->code; vb->width = icd->user_width; vb->height = icd->user_height; vb->field = field; vb->state = VIDEOBUF_NEEDS_INIT; } - vb->size = vb->width * vb->height * ((buf->fmt->depth + 7) >> 3); + vb->size = vb->height * bytes_per_line; if (0 != vb->baddr && vb->bsize < vb->size) { ret = -EINVAL; goto out; @@ -564,19 +574,30 @@ static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd, in_width *= 
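Buffer sizing follows the same rule in both pxa_camera and sh_mobile_ceu: instead of width * depth / 8, the videobuf callbacks ask soc_mbus_bytes_per_line() for the line length of the current host format. A minimal sketch of that computation, not part of the patch:

/* Sketch: frame size as the reworked videobuf_setup() callbacks compute it */
static int frame_size_sketch(struct soc_camera_device *icd)
{
        int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
                                        icd->current_fmt->host_fmt);

        if (bytes_per_line < 0)
                return bytes_per_line;          /* unsupported packing */

        return bytes_per_line * icd->user_height;
}

(sh_mobile_ceu additionally page-aligns the result.)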
2; left_offset *= 2; } - width = cdwdr_width = out_width; + width = out_width; + cdwdr_width = out_width; } else { - unsigned int w_factor = (icd->current_fmt->depth + 7) >> 3; + int bytes_per_line = soc_mbus_bytes_per_line(out_width, + icd->current_fmt->host_fmt); + unsigned int w_factor; - width = out_width * w_factor / 2; + width = out_width; - if (!pcdev->is_16bit) - w_factor *= 2; + switch (icd->current_fmt->host_fmt->packing) { + case SOC_MBUS_PACKING_2X8_PADHI: + w_factor = 2; + break; + default: + w_factor = 1; + } - in_width = rect->width * w_factor / 2; - left_offset = left_offset * w_factor / 2; + in_width = rect->width * w_factor; + left_offset = left_offset * w_factor; - cdwdr_width = width * 2; + if (bytes_per_line < 0) + cdwdr_width = out_width; + else + cdwdr_width = bytes_per_line; } height = out_height; @@ -673,24 +694,24 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd, value = 0x00000010; /* data fetch by default */ yuv_lineskip = 0; - switch (icd->current_fmt->fourcc) { + switch (icd->current_fmt->host_fmt->fourcc) { case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV21: yuv_lineskip = 1; /* skip for NV12/21, no skip for NV16/61 */ /* fall-through */ case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV61: - switch (cam->camera_fmt->fourcc) { - case V4L2_PIX_FMT_UYVY: + switch (cam->code) { + case V4L2_MBUS_FMT_YUYV8_2X8_BE: value = 0x00000000; /* Cb0, Y0, Cr0, Y1 */ break; - case V4L2_PIX_FMT_VYUY: + case V4L2_MBUS_FMT_YVYU8_2X8_BE: value = 0x00000100; /* Cr0, Y0, Cb0, Y1 */ break; - case V4L2_PIX_FMT_YUYV: + case V4L2_MBUS_FMT_YUYV8_2X8_LE: value = 0x00000200; /* Y0, Cb0, Y1, Cr0 */ break; - case V4L2_PIX_FMT_YVYU: + case V4L2_MBUS_FMT_YVYU8_2X8_LE: value = 0x00000300; /* Y0, Cr0, Y1, Cb0 */ break; default: @@ -698,8 +719,8 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd, } } - if (icd->current_fmt->fourcc == V4L2_PIX_FMT_NV21 || - icd->current_fmt->fourcc == V4L2_PIX_FMT_NV61) + if (icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV21 || + icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV61) value ^= 0x00000100; /* swap U, V to change from NV1x->NVx1 */ value |= common_flags & SOCAM_VSYNC_ACTIVE_LOW ? 
1 << 1 : 0; @@ -746,7 +767,8 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd, return 0; } -static int sh_mobile_ceu_try_bus_param(struct soc_camera_device *icd) +static int sh_mobile_ceu_try_bus_param(struct soc_camera_device *icd, + unsigned char buswidth) { struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; @@ -755,48 +777,75 @@ static int sh_mobile_ceu_try_bus_param(struct soc_camera_device *icd) camera_flags = icd->ops->query_bus_param(icd); common_flags = soc_camera_bus_param_compatible(camera_flags, make_bus_param(pcdev)); - if (!common_flags) + if (!common_flags || buswidth > 16 || + (buswidth > 8 && !(common_flags & SOCAM_DATAWIDTH_16))) return -EINVAL; return 0; } -static const struct soc_camera_data_format sh_mobile_ceu_formats[] = { - { - .name = "NV12", - .depth = 12, - .fourcc = V4L2_PIX_FMT_NV12, - .colorspace = V4L2_COLORSPACE_JPEG, - }, - { - .name = "NV21", - .depth = 12, - .fourcc = V4L2_PIX_FMT_NV21, - .colorspace = V4L2_COLORSPACE_JPEG, - }, - { - .name = "NV16", - .depth = 16, - .fourcc = V4L2_PIX_FMT_NV16, - .colorspace = V4L2_COLORSPACE_JPEG, - }, +static const struct soc_mbus_pixelfmt sh_mobile_ceu_formats[] = { { - .name = "NV61", - .depth = 16, - .fourcc = V4L2_PIX_FMT_NV61, - .colorspace = V4L2_COLORSPACE_JPEG, + .fourcc = V4L2_PIX_FMT_NV12, + .name = "NV12", + .bits_per_sample = 12, + .packing = SOC_MBUS_PACKING_NONE, + .order = SOC_MBUS_ORDER_LE, + }, { + .fourcc = V4L2_PIX_FMT_NV21, + .name = "NV21", + .bits_per_sample = 12, + .packing = SOC_MBUS_PACKING_NONE, + .order = SOC_MBUS_ORDER_LE, + }, { + .fourcc = V4L2_PIX_FMT_NV16, + .name = "NV16", + .bits_per_sample = 16, + .packing = SOC_MBUS_PACKING_NONE, + .order = SOC_MBUS_ORDER_LE, + }, { + .fourcc = V4L2_PIX_FMT_NV61, + .name = "NV61", + .bits_per_sample = 16, + .packing = SOC_MBUS_PACKING_NONE, + .order = SOC_MBUS_ORDER_LE, }, }; +/* This will be corrected as we get more formats */ +static bool sh_mobile_ceu_packing_supported(const struct soc_mbus_pixelfmt *fmt) +{ + return fmt->packing == SOC_MBUS_PACKING_NONE || + (fmt->bits_per_sample == 8 && + fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) || + (fmt->bits_per_sample > 8 && + fmt->packing == SOC_MBUS_PACKING_EXTEND16); +} + static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, int idx, struct soc_camera_format_xlate *xlate) { + struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct device *dev = icd->dev.parent; int ret, k, n; int formats = 0; struct sh_mobile_ceu_cam *cam; + enum v4l2_mbus_pixelcode code; + const struct soc_mbus_pixelfmt *fmt; - ret = sh_mobile_ceu_try_bus_param(icd); + ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code); + if (ret < 0) + /* No more formats */ + return 0; + + fmt = soc_mbus_get_fmtdesc(code); + if (!fmt) { + dev_err(icd->dev.parent, + "Invalid format code #%d: %d\n", idx, code); + return -EINVAL; + } + + ret = sh_mobile_ceu_try_bus_param(icd, fmt->bits_per_sample); if (ret < 0) return 0; @@ -814,13 +863,13 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, int idx, if (!idx) cam->extra_fmt = NULL; - switch (icd->formats[idx].fourcc) { - case V4L2_PIX_FMT_UYVY: - case V4L2_PIX_FMT_VYUY: - case V4L2_PIX_FMT_YUYV: - case V4L2_PIX_FMT_YVYU: + switch (code) { + case V4L2_MBUS_FMT_YUYV8_2X8_BE: + case V4L2_MBUS_FMT_YVYU8_2X8_BE: + case V4L2_MBUS_FMT_YUYV8_2X8_LE: + case V4L2_MBUS_FMT_YVYU8_2X8_LE: if (cam->extra_fmt) - goto add_single_format; + break; /* * Our case is simple so far: for any of the 
above four camera @@ -831,32 +880,31 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, int idx, * the host_priv pointer and check whether the format you're * going to add now is already there. */ - cam->extra_fmt = (void *)sh_mobile_ceu_formats; + cam->extra_fmt = sh_mobile_ceu_formats; n = ARRAY_SIZE(sh_mobile_ceu_formats); formats += n; for (k = 0; xlate && k < n; k++) { - xlate->host_fmt = &sh_mobile_ceu_formats[k]; - xlate->cam_fmt = icd->formats + idx; - xlate->buswidth = icd->formats[idx].depth; + xlate->host_fmt = &sh_mobile_ceu_formats[k]; + xlate->code = code; xlate++; - dev_dbg(dev, "Providing format %s using %s\n", - sh_mobile_ceu_formats[k].name, - icd->formats[idx].name); + dev_dbg(dev, "Providing format %s using code %d\n", + sh_mobile_ceu_formats[k].name, code); } + break; default: -add_single_format: - /* Generic pass-through */ - formats++; - if (xlate) { - xlate->host_fmt = icd->formats + idx; - xlate->cam_fmt = icd->formats + idx; - xlate->buswidth = icd->formats[idx].depth; - xlate++; - dev_dbg(dev, - "Providing format %s in pass-through mode\n", - icd->formats[idx].name); - } + if (!sh_mobile_ceu_packing_supported(fmt)) + return 0; + } + + /* Generic pass-through */ + formats++; + if (xlate) { + xlate->host_fmt = fmt; + xlate->code = code; + xlate++; + dev_dbg(dev, "Providing format %s in pass-through mode\n", + xlate->host_fmt->name); } return formats; @@ -1036,17 +1084,15 @@ static int client_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *crop, static int get_camera_scales(struct v4l2_subdev *sd, struct v4l2_rect *rect, unsigned int *scale_h, unsigned int *scale_v) { - struct v4l2_format f; + struct v4l2_mbus_framefmt mf; int ret; - f.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - - ret = v4l2_subdev_call(sd, video, g_fmt, &f); + ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf); if (ret < 0) return ret; - *scale_h = calc_generic_scale(rect->width, f.fmt.pix.width); - *scale_v = calc_generic_scale(rect->height, f.fmt.pix.height); + *scale_h = calc_generic_scale(rect->width, mf.width); + *scale_v = calc_generic_scale(rect->height, mf.height); return 0; } @@ -1061,32 +1107,29 @@ static int get_camera_subwin(struct soc_camera_device *icd, if (!ceu_rect->width) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct device *dev = icd->dev.parent; - struct v4l2_format f; - struct v4l2_pix_format *pix = &f.fmt.pix; + struct v4l2_mbus_framefmt mf; int ret; /* First time */ - f.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - - ret = v4l2_subdev_call(sd, video, g_fmt, &f); + ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf); if (ret < 0) return ret; - dev_geo(dev, "camera fmt %ux%u\n", pix->width, pix->height); + dev_geo(dev, "camera fmt %ux%u\n", mf.width, mf.height); - if (pix->width > 2560) { + if (mf.width > 2560) { ceu_rect->width = 2560; - ceu_rect->left = (pix->width - 2560) / 2; + ceu_rect->left = (mf.width - 2560) / 2; } else { - ceu_rect->width = pix->width; + ceu_rect->width = mf.width; ceu_rect->left = 0; } - if (pix->height > 1920) { + if (mf.height > 1920) { ceu_rect->height = 1920; - ceu_rect->top = (pix->height - 1920) / 2; + ceu_rect->top = (mf.height - 1920) / 2; } else { - ceu_rect->height = pix->height; + ceu_rect->height = mf.height; ceu_rect->top = 0; } @@ -1103,13 +1146,12 @@ static int get_camera_subwin(struct soc_camera_device *icd, return 0; } -static int client_s_fmt(struct soc_camera_device *icd, struct v4l2_format *f, - bool ceu_can_scale) +static int client_s_fmt(struct soc_camera_device *icd, + struct v4l2_mbus_framefmt *mf, bool 
ceu_can_scale) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct device *dev = icd->dev.parent; - struct v4l2_pix_format *pix = &f->fmt.pix; - unsigned int width = pix->width, height = pix->height, tmp_w, tmp_h; + unsigned int width = mf->width, height = mf->height, tmp_w, tmp_h; unsigned int max_width, max_height; struct v4l2_cropcap cap; int ret; @@ -1123,29 +1165,29 @@ static int client_s_fmt(struct soc_camera_device *icd, struct v4l2_format *f, max_width = min(cap.bounds.width, 2560); max_height = min(cap.bounds.height, 1920); - ret = v4l2_subdev_call(sd, video, s_fmt, f); + ret = v4l2_subdev_call(sd, video, s_mbus_fmt, mf); if (ret < 0) return ret; - dev_geo(dev, "camera scaled to %ux%u\n", pix->width, pix->height); + dev_geo(dev, "camera scaled to %ux%u\n", mf->width, mf->height); - if ((width == pix->width && height == pix->height) || !ceu_can_scale) + if ((width == mf->width && height == mf->height) || !ceu_can_scale) return 0; /* Camera set a format, but geometry is not precise, try to improve */ - tmp_w = pix->width; - tmp_h = pix->height; + tmp_w = mf->width; + tmp_h = mf->height; /* width <= max_width && height <= max_height - guaranteed by try_fmt */ while ((width > tmp_w || height > tmp_h) && tmp_w < max_width && tmp_h < max_height) { tmp_w = min(2 * tmp_w, max_width); tmp_h = min(2 * tmp_h, max_height); - pix->width = tmp_w; - pix->height = tmp_h; - ret = v4l2_subdev_call(sd, video, s_fmt, f); + mf->width = tmp_w; + mf->height = tmp_h; + ret = v4l2_subdev_call(sd, video, s_mbus_fmt, mf); dev_geo(dev, "Camera scaled to %ux%u\n", - pix->width, pix->height); + mf->width, mf->height); if (ret < 0) { /* This shouldn't happen */ dev_err(dev, "Client failed to set format: %d\n", ret); @@ -1163,27 +1205,26 @@ static int client_s_fmt(struct soc_camera_device *icd, struct v4l2_format *f, */ static int client_scale(struct soc_camera_device *icd, struct v4l2_rect *rect, struct v4l2_rect *sub_rect, struct v4l2_rect *ceu_rect, - struct v4l2_format *f, bool ceu_can_scale) + struct v4l2_mbus_framefmt *mf, bool ceu_can_scale) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct sh_mobile_ceu_cam *cam = icd->host_priv; struct device *dev = icd->dev.parent; - struct v4l2_format f_tmp = *f; - struct v4l2_pix_format *pix_tmp = &f_tmp.fmt.pix; + struct v4l2_mbus_framefmt mf_tmp = *mf; unsigned int scale_h, scale_v; int ret; /* 5. Apply iterative camera S_FMT for camera user window. */ - ret = client_s_fmt(icd, &f_tmp, ceu_can_scale); + ret = client_s_fmt(icd, &mf_tmp, ceu_can_scale); if (ret < 0) return ret; dev_geo(dev, "5: camera scaled to %ux%u\n", - pix_tmp->width, pix_tmp->height); + mf_tmp.width, mf_tmp.height); /* 6. Retrieve camera output window (g_fmt) */ - /* unneeded - it is already in "f_tmp" */ + /* unneeded - it is already in "mf_tmp" */ /* 7. Calculate new camera scales. */ ret = get_camera_scales(sd, rect, &scale_h, &scale_v); @@ -1192,10 +1233,11 @@ static int client_scale(struct soc_camera_device *icd, struct v4l2_rect *rect, dev_geo(dev, "7: camera scales %u:%u\n", scale_h, scale_v); - cam->cam_width = pix_tmp->width; - cam->cam_height = pix_tmp->height; - f->fmt.pix.width = pix_tmp->width; - f->fmt.pix.height = pix_tmp->height; + cam->cam_width = mf_tmp.width; + cam->cam_height = mf_tmp.height; + mf->width = mf_tmp.width; + mf->height = mf_tmp.height; + mf->colorspace = mf_tmp.colorspace; /* * 8. 
Calculate new CEU crop - apply camera scales to previously
@@ -1259,8 +1301,7 @@ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
 	struct v4l2_rect *cam_rect = &cam_crop.c, *ceu_rect = &cam->ceu_rect;
 	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
 	struct device *dev = icd->dev.parent;
-	struct v4l2_format f;
-	struct v4l2_pix_format *pix = &f.fmt.pix;
+	struct v4l2_mbus_framefmt mf;
 	unsigned int scale_comb_h, scale_comb_v, scale_ceu_h, scale_ceu_v,
 		out_width, out_height;
 	u32 capsr, cflcr;
@@ -1309,25 +1350,24 @@ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
 	 * 5. Using actual input window and calculated combined scales calculate
 	 *    camera target output window.
 	 */
-	pix->width = scale_down(cam_rect->width, scale_comb_h);
-	pix->height = scale_down(cam_rect->height, scale_comb_v);
+	mf.width = scale_down(cam_rect->width, scale_comb_h);
+	mf.height = scale_down(cam_rect->height, scale_comb_v);
 
-	dev_geo(dev, "5: camera target %ux%u\n", pix->width, pix->height);
+	dev_geo(dev, "5: camera target %ux%u\n", mf.width, mf.height);
 
 	/* 6. - 9. */
-	pix->pixelformat = cam->camera_fmt->fourcc;
-	pix->colorspace = cam->camera_fmt->colorspace;
+	mf.code = cam->code;
+	mf.field = pcdev->is_interlaced ? V4L2_FIELD_INTERLACED :
+		V4L2_FIELD_NONE;
 
 	capsr = capture_save_reset(pcdev);
 	dev_dbg(dev, "CAPSR 0x%x, CFLCR 0x%x\n", capsr, pcdev->cflcr);
 
 	/* Make relative to camera rectangle */
-	rect->left -= cam_rect->left;
-	rect->top -= cam_rect->top;
+	rect->left -= cam_rect->left;
+	rect->top -= cam_rect->top;
 
-	f.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
-	ret = client_scale(icd, cam_rect, rect, ceu_rect, &f,
+	ret = client_scale(icd, cam_rect, rect, ceu_rect, &mf,
 			   pcdev->image_mode && !pcdev->is_interlaced);
 
 	dev_geo(dev, "6-9: %d\n", ret);
@@ -1375,8 +1415,7 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
 	struct sh_mobile_ceu_dev *pcdev = ici->priv;
 	struct sh_mobile_ceu_cam *cam = icd->host_priv;
 	struct v4l2_pix_format *pix = &f->fmt.pix;
-	struct v4l2_format cam_f = *f;
-	struct v4l2_pix_format *cam_pix = &cam_f.fmt.pix;
+	struct v4l2_mbus_framefmt mf;
 	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
 	struct device *dev = icd->dev.parent;
 	__u32 pixfmt = pix->pixelformat;
@@ -1445,9 +1484,11 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
 	 * 4. Calculate camera output window by applying combined scales to real
 	 *    input window.
 	 */
-	cam_pix->width = scale_down(cam_rect->width, scale_h);
-	cam_pix->height = scale_down(cam_rect->height, scale_v);
-	cam_pix->pixelformat = xlate->cam_fmt->fourcc;
+	mf.width = scale_down(cam_rect->width, scale_h);
+	mf.height = scale_down(cam_rect->height, scale_v);
+	mf.field = pix->field;
+	mf.colorspace = pix->colorspace;
+	mf.code = xlate->code;
 
 	switch (pixfmt) {
 	case V4L2_PIX_FMT_NV12:
@@ -1460,11 +1501,10 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
 		image_mode = false;
 	}
 
-	dev_geo(dev, "4: camera output %ux%u\n",
-		cam_pix->width, cam_pix->height);
+	dev_geo(dev, "4: camera output %ux%u\n", mf.width, mf.height);
 
 	/* 5. - 9. */
-	ret = client_scale(icd, cam_rect, &cam_subrect, &ceu_rect, &cam_f,
+	ret = client_scale(icd, cam_rect, &cam_subrect, &ceu_rect, &mf,
 			   image_mode && !is_interlaced);
 
 	dev_geo(dev, "5-9: client scale %d\n", ret);
@@ -1472,37 +1512,48 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
 	/* Done with the camera. Now see if we can improve the result */
 	dev_dbg(dev, "Camera %d fmt %ux%u, requested %ux%u\n",
-		ret, cam_pix->width, cam_pix->height, pix->width, pix->height);
+		ret, mf.width, mf.height, pix->width, pix->height);
 	if (ret < 0)
 		return ret;
 
+	if (mf.code != xlate->code)
+		return -EINVAL;
+
 	/* 10. Use CEU scaling to scale to the requested user window. */
 
 	/* We cannot scale up */
-	if (pix->width > cam_pix->width)
-		pix->width = cam_pix->width;
+	if (pix->width > mf.width)
+		pix->width = mf.width;
 	if (pix->width > ceu_rect.width)
 		pix->width = ceu_rect.width;
 
-	if (pix->height > cam_pix->height)
-		pix->height = cam_pix->height;
+	if (pix->height > mf.height)
+		pix->height = mf.height;
 	if (pix->height > ceu_rect.height)
 		pix->height = ceu_rect.height;
 
-	/* Let's rock: scale pix->{width x height} down to width x height */
-	scale_h = calc_scale(ceu_rect.width, &pix->width);
-	scale_v = calc_scale(ceu_rect.height, &pix->height);
+	pix->colorspace = mf.colorspace;
+
+	if (image_mode) {
+		/* Scale pix->{width x height} down to width x height */
+		scale_h = calc_scale(ceu_rect.width, &pix->width);
+		scale_v = calc_scale(ceu_rect.height, &pix->height);
+
+		pcdev->cflcr = scale_h | (scale_v << 16);
+	} else {
+		pix->width = ceu_rect.width;
+		pix->height = ceu_rect.height;
+		scale_h = scale_v = 0;
+		pcdev->cflcr = 0;
+	}
 
 	dev_geo(dev, "10: W: %u : 0x%x = %u, H: %u : 0x%x = %u\n",
 		ceu_rect.width, scale_h, pix->width,
 		ceu_rect.height, scale_v, pix->height);
 
-	pcdev->cflcr = scale_h | (scale_v << 16);
-
-	icd->buswidth = xlate->buswidth;
-	icd->current_fmt = xlate->host_fmt;
-	cam->camera_fmt = xlate->cam_fmt;
-	cam->ceu_rect = ceu_rect;
+	cam->code = xlate->code;
+	cam->ceu_rect = ceu_rect;
+	icd->current_fmt = xlate;
 	pcdev->is_interlaced = is_interlaced;
 	pcdev->image_mode = image_mode;
@@ -1516,6 +1567,7 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
 	const struct soc_camera_format_xlate *xlate;
 	struct v4l2_pix_format *pix = &f->fmt.pix;
 	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+	struct v4l2_mbus_framefmt mf;
 	__u32 pixfmt = pix->pixelformat;
 	int width, height;
 	int ret;
@@ -1534,18 +1586,27 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
 	width = pix->width;
 	height = pix->height;
 
-	pix->bytesperline = pix->width *
-		DIV_ROUND_UP(xlate->host_fmt->depth, 8);
-	pix->sizeimage = pix->height * pix->bytesperline;
-
-	pix->pixelformat = xlate->cam_fmt->fourcc;
+	pix->bytesperline = soc_mbus_bytes_per_line(width, xlate->host_fmt);
+	if (pix->bytesperline < 0)
+		return pix->bytesperline;
+	pix->sizeimage = height * pix->bytesperline;
 
 	/* limit to sensor capabilities */
-	ret = v4l2_subdev_call(sd, video, try_fmt, f);
-	pix->pixelformat = pixfmt;
+	mf.width = pix->width;
+	mf.height = pix->height;
+	mf.field = pix->field;
+	mf.code = xlate->code;
+	mf.colorspace = pix->colorspace;
+
+	ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
 	if (ret < 0)
 		return ret;
 
+	pix->width = mf.width;
+	pix->height = mf.height;
+	pix->field = mf.field;
+	pix->colorspace = mf.colorspace;
+
 	switch (pixfmt) {
 	case V4L2_PIX_FMT_NV12:
 	case V4L2_PIX_FMT_NV21:
@@ -1554,21 +1615,25 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
 		/* FIXME: check against rect_max after converting soc-camera */
 		/* We can scale precisely, need a bigger image from camera */
 		if (pix->width < width || pix->height < height) {
-			int tmp_w = pix->width, tmp_h = pix->height;
-			pix->width = 2560;
-			pix->height = 1920;
-			ret = v4l2_subdev_call(sd, video, try_fmt, f);
+			/*
+			 * We presume, the sensor behaves sanely, i.e., if
+			 * requested a bigger rectangle, it will not return a
+			 * smaller one.
+			 */
+			mf.width = 2560;
+			mf.height = 1920;
+			ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
 			if (ret < 0) {
 				/* Shouldn't actually happen... */
 				dev_err(icd->dev.parent,
-					"FIXME: try_fmt() returned %d\n", ret);
-				pix->width = tmp_w;
-				pix->height = tmp_h;
+					"FIXME: client try_fmt() = %d\n", ret);
+				return ret;
 			}
 		}
-		if (pix->width > width)
+		/* We will scale exactly */
+		if (mf.width > width)
 			pix->width = width;
-		if (pix->height > height)
+		if (mf.height > height)
 			pix->height = height;
 	}
@@ -1663,7 +1728,7 @@ static int sh_mobile_ceu_set_ctrl(struct soc_camera_device *icd,
 
 	switch (ctrl->id) {
 	case V4L2_CID_SHARPNESS:
-		switch (icd->current_fmt->fourcc) {
+		switch (icd->current_fmt->host_fmt->fourcc) {
 		case V4L2_PIX_FMT_NV12:
 		case V4L2_PIX_FMT_NV21:
 		case V4L2_PIX_FMT_NV16:
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index 5fdedc766401..6b3fbcca7747 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -31,6 +31,7 @@
 #include
 #include
 #include
+#include
 
 /* Default to VGA resolution */
 #define DEFAULT_WIDTH 640
@@ -40,18 +41,6 @@ static LIST_HEAD(hosts);
 static LIST_HEAD(devices);
 static DEFINE_MUTEX(list_lock); /* Protects the list of hosts */
 
-const struct soc_camera_data_format *soc_camera_format_by_fourcc(
-	struct soc_camera_device *icd, unsigned int fourcc)
-{
-	unsigned int i;
-
-	for (i = 0; i < icd->num_formats; i++)
-		if (icd->formats[i].fourcc == fourcc)
-			return icd->formats + i;
-	return NULL;
-}
-EXPORT_SYMBOL(soc_camera_format_by_fourcc);
-
 const struct soc_camera_format_xlate *soc_camera_xlate_by_fourcc(
 	struct soc_camera_device *icd, unsigned int fourcc)
 {
@@ -207,21 +196,26 @@ static int soc_camera_dqbuf(struct file *file, void *priv,
 /* Always entered with .video_lock held */
 static int soc_camera_init_user_formats(struct soc_camera_device *icd)
 {
+	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
 	struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
-	int i, fmts = 0, ret;
+	int i, fmts = 0, raw_fmts = 0, ret;
+	enum v4l2_mbus_pixelcode code;
+
+	while (!v4l2_subdev_call(sd, video, enum_mbus_fmt, raw_fmts, &code))
+		raw_fmts++;
 
 	if (!ici->ops->get_formats)
 		/*
 		 * Fallback mode - the host will have to serve all
 		 * sensor-provided formats one-to-one to the user
 		 */
-		fmts = icd->num_formats;
+		fmts = raw_fmts;
 	else
 		/*
 		 * First pass - only count formats this host-sensor
 		 * configuration can provide
 		 */
-		for (i = 0; i < icd->num_formats; i++) {
+		for (i = 0; i < raw_fmts; i++) {
 			ret = ici->ops->get_formats(icd, i, NULL);
 			if (ret < 0)
 				return ret;
@@ -242,11 +236,12 @@ static int soc_camera_init_user_formats(struct soc_camera_device *icd)
 
 	/* Second pass - actually fill data formats */
 	fmts = 0;
-	for (i = 0; i < icd->num_formats; i++)
+	for (i = 0; i < raw_fmts; i++)
 		if (!ici->ops->get_formats) {
-			icd->user_formats[i].host_fmt = icd->formats + i;
-			icd->user_formats[i].cam_fmt = icd->formats + i;
-			icd->user_formats[i].buswidth = icd->formats[i].depth;
+			v4l2_subdev_call(sd, video, enum_mbus_fmt, i, &code);
+			icd->user_formats[i].host_fmt =
+				soc_mbus_get_fmtdesc(code);
+			icd->user_formats[i].code = code;
 		} else {
 			ret = ici->ops->get_formats(icd, i,
 					&icd->user_formats[fmts]);
@@ -255,7 +250,7 @@ static int soc_camera_init_user_formats(struct soc_camera_device *icd)
 			fmts += ret;
 		}
 
-	icd->current_fmt = icd->user_formats[0].host_fmt;
+	icd->current_fmt = &icd->user_formats[0];
 
 	return 0;
 
@@ -281,7 +276,7 @@ static void soc_camera_free_user_formats(struct soc_camera_device *icd)
 #define pixfmtstr(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, \
 	((x) >> 24) & 0xff
 
-/* Called with .vb_lock held */
+/* Called with .vb_lock held, or from the first open(2), see comment there */
 static int soc_camera_set_fmt(struct soc_camera_file *icf,
 			      struct v4l2_format *f)
 {
@@ -302,7 +297,7 @@ static int soc_camera_set_fmt(struct soc_camera_file *icf,
 	if (ret < 0) {
 		return ret;
 	} else if (!icd->current_fmt ||
-		   icd->current_fmt->fourcc != pix->pixelformat) {
+		   icd->current_fmt->host_fmt->fourcc != pix->pixelformat) {
 		dev_err(&icd->dev,
 			"Host driver hasn't set up current format correctly!\n");
 		return -EINVAL;
@@ -310,6 +305,7 @@ static int soc_camera_set_fmt(struct soc_camera_file *icf,
 
 	icd->user_width = pix->width;
 	icd->user_height = pix->height;
+	icd->colorspace = pix->colorspace;
 	icf->vb_vidq.field =
 		icd->field = pix->field;
 
@@ -369,8 +365,9 @@ static int soc_camera_open(struct file *file)
 				.width = icd->user_width,
 				.height = icd->user_height,
 				.field = icd->field,
-				.pixelformat = icd->current_fmt->fourcc,
-				.colorspace = icd->current_fmt->colorspace,
+				.colorspace = icd->colorspace,
+				.pixelformat =
+					icd->current_fmt->host_fmt->fourcc,
 			},
 		};
 
@@ -390,7 +387,12 @@ static int soc_camera_open(struct file *file)
 			goto eiciadd;
 		}
 
-		/* Try to configure with default parameters */
+		/*
+		 * Try to configure with default parameters. Notice: this is the
+		 * very first open, so, we cannot race against other calls,
+		 * apart from someone else calling open() simultaneously, but
+		 * .video_lock is protecting us against it.
+		 */
 		ret = soc_camera_set_fmt(icf, &f);
 		if (ret < 0)
 			goto esfmt;
@@ -534,7 +536,7 @@ static int soc_camera_enum_fmt_vid_cap(struct file *file, void *priv,
 {
 	struct soc_camera_file *icf = file->private_data;
 	struct soc_camera_device *icd = icf->icd;
-	const struct soc_camera_data_format *format;
+	const struct soc_mbus_pixelfmt *format;
 
 	WARN_ON(priv != file->private_data);
 
@@ -543,7 +545,8 @@ static int soc_camera_enum_fmt_vid_cap(struct file *file, void *priv,
 
 	format = icd->user_formats[f->index].host_fmt;
 
-	strlcpy(f->description, format->name, sizeof(f->description));
+	if (format->name)
+		strlcpy(f->description, format->name, sizeof(f->description));
 	f->pixelformat = format->fourcc;
 	return 0;
 }
@@ -560,12 +563,15 @@ static int soc_camera_g_fmt_vid_cap(struct file *file, void *priv,
 	pix->width = icd->user_width;
 	pix->height = icd->user_height;
 	pix->field = icf->vb_vidq.field;
-	pix->pixelformat = icd->current_fmt->fourcc;
-	pix->bytesperline = pix->width *
-		DIV_ROUND_UP(icd->current_fmt->depth, 8);
+	pix->pixelformat = icd->current_fmt->host_fmt->fourcc;
+	pix->bytesperline = soc_mbus_bytes_per_line(pix->width,
+						icd->current_fmt->host_fmt);
+	pix->colorspace = icd->colorspace;
+	if (pix->bytesperline < 0)
+		return pix->bytesperline;
 	pix->sizeimage = pix->height * pix->bytesperline;
 	dev_dbg(&icd->dev, "current_fmt->fourcc: 0x%08x\n",
-		icd->current_fmt->fourcc);
+		icd->current_fmt->host_fmt->fourcc);
 	return 0;
 }
 
@@ -894,7 +900,7 @@ static int soc_camera_probe(struct device *dev)
 	struct soc_camera_link *icl = to_soc_camera_link(icd);
 	struct device *control = NULL;
 	struct v4l2_subdev *sd;
-	struct v4l2_format f = {.type = V4L2_BUF_TYPE_VIDEO_CAPTURE};
+	struct v4l2_mbus_framefmt mf;
 	int ret;
 
 	dev_info(dev, "Probing %s\n", dev_name(dev));
@@ -965,9 +971,11 @@ static int soc_camera_probe(struct device *dev)
 
 	/* Try to improve our guess of a reasonable window format */
 	sd = soc_camera_to_subdev(icd);
-	if (!v4l2_subdev_call(sd, video, g_fmt, &f)) {
-		icd->user_width = f.fmt.pix.width;
-		icd->user_height = f.fmt.pix.height;
+	if (!v4l2_subdev_call(sd, video, g_mbus_fmt, &mf)) {
+		icd->user_width = mf.width;
+		icd->user_height = mf.height;
+		icd->colorspace = mf.colorspace;
+		icd->field = mf.field;
 	}
 
 	/* Do we have to sysfs_remove_link() before device_unregister()? */
diff --git a/drivers/media/video/soc_camera_platform.c b/drivers/media/video/soc_camera_platform.c
index c7c91518c391..10b003a8be83 100644
--- a/drivers/media/video/soc_camera_platform.c
+++ b/drivers/media/video/soc_camera_platform.c
@@ -22,7 +22,6 @@
 
 struct soc_camera_platform_priv {
 	struct v4l2_subdev subdev;
-	struct soc_camera_data_format format;
 };
 
 static struct soc_camera_platform_priv *get_priv(struct platform_device *pdev)
@@ -58,36 +57,36 @@ soc_camera_platform_query_bus_param(struct soc_camera_device *icd)
 }
 
 static int soc_camera_platform_try_fmt(struct v4l2_subdev *sd,
-				       struct v4l2_format *f)
+				       struct v4l2_mbus_framefmt *mf)
 {
 	struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
-	struct v4l2_pix_format *pix = &f->fmt.pix;
 
-	pix->width = p->format.width;
-	pix->height = p->format.height;
+	mf->width = p->format.width;
+	mf->height = p->format.height;
+	mf->code = p->format.code;
+	mf->colorspace = p->format.colorspace;
+
 	return 0;
 }
 
-static void soc_camera_platform_video_probe(struct soc_camera_device *icd,
-					    struct platform_device *pdev)
+static struct v4l2_subdev_core_ops platform_subdev_core_ops;
+
+static int soc_camera_platform_enum_fmt(struct v4l2_subdev *sd, int index,
+					enum v4l2_mbus_pixelcode *code)
 {
-	struct soc_camera_platform_priv *priv = get_priv(pdev);
-	struct soc_camera_platform_info *p = pdev->dev.platform_data;
+	struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
 
-	priv->format.name = p->format_name;
-	priv->format.depth = p->format_depth;
-	priv->format.fourcc = p->format.pixelformat;
-	priv->format.colorspace = p->format.colorspace;
+	if (index)
+		return -EINVAL;
 
-	icd->formats = &priv->format;
-	icd->num_formats = 1;
+	*code = p->format.code;
+	return 0;
 }
 
-static struct v4l2_subdev_core_ops platform_subdev_core_ops;
-
 static struct v4l2_subdev_video_ops platform_subdev_video_ops = {
 	.s_stream = soc_camera_platform_s_stream,
-	.try_fmt = soc_camera_platform_try_fmt,
+	.try_mbus_fmt = soc_camera_platform_try_fmt,
+	.enum_mbus_fmt = soc_camera_platform_enum_fmt,
 };
 
 static struct v4l2_subdev_ops platform_subdev_ops = {
@@ -128,12 +127,10 @@ static int soc_camera_platform_probe(struct platform_device *pdev)
 	/* Set the control device reference */
 	dev_set_drvdata(&icd->dev, &pdev->dev);
 
-	icd->ops = &soc_camera_platform_ops;
+	icd->ops = &soc_camera_platform_ops;
 
 	ici = to_soc_camera_host(icd->dev.parent);
 
-	soc_camera_platform_video_probe(icd, pdev);
-
 	v4l2_subdev_init(&priv->subdev, &platform_subdev_ops);
 	v4l2_set_subdevdata(&priv->subdev, p);
 	strncpy(priv->subdev.name, dev_name(&pdev->dev), V4L2_SUBDEV_NAME_SIZE);
diff --git a/drivers/media/video/tw9910.c b/drivers/media/video/tw9910.c
index 8ec1031dacc3..341d0e035cbf 100644
--- a/drivers/media/video/tw9910.c
+++ b/drivers/media/video/tw9910.c
@@ -251,15 +251,6 @@ static const struct regval_list tw9910_default_regs[] =
 	ENDMARKER,
 };
 
-static const struct soc_camera_data_format tw9910_color_fmt[] = {
-	{
-		.name = "VYUY",
-		.fourcc = V4L2_PIX_FMT_VYUY,
-		.depth = 16,
-		.colorspace = V4L2_COLORSPACE_SMPTE170M,
-	}
-};
-
 static const struct tw9910_scale_ctrl tw9910_ntsc_scales[] = {
 	{
 		.name = "NTSC SQ",
@@ -814,11 +805,11 @@ static int tw9910_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
 	return 0;
 }
 
-static int tw9910_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
+static int tw9910_g_fmt(struct v4l2_subdev *sd,
+			struct v4l2_mbus_framefmt *mf)
 {
 	struct i2c_client *client = sd->priv;
 	struct tw9910_priv *priv = to_tw9910(client);
-	struct v4l2_pix_format *pix = &f->fmt.pix;
 
 	if (!priv->scale) {
 		int ret;
@@ -835,74 +826,76 @@ static int tw9910_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
 		return ret;
 	}
 
-	f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
-	pix->width = priv->scale->width;
-	pix->height = priv->scale->height;
-	pix->pixelformat = V4L2_PIX_FMT_VYUY;
-	pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
-	pix->field = V4L2_FIELD_INTERLACED;
+	mf->width = priv->scale->width;
+	mf->height = priv->scale->height;
+	mf->code = V4L2_MBUS_FMT_YVYU8_2X8_BE;
+	mf->colorspace = V4L2_COLORSPACE_JPEG;
+	mf->field = V4L2_FIELD_INTERLACED;
 
 	return 0;
 }
 
-static int tw9910_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
+static int tw9910_s_fmt(struct v4l2_subdev *sd,
+			struct v4l2_mbus_framefmt *mf)
 {
 	struct i2c_client *client = sd->priv;
 	struct tw9910_priv *priv = to_tw9910(client);
-	struct v4l2_pix_format *pix = &f->fmt.pix;
 	/* See tw9910_s_crop() - no proper cropping support */
 	struct v4l2_crop a = {
 		.c = {
 			.left = 0,
 			.top = 0,
-			.width = pix->width,
-			.height = pix->height,
+			.width = mf->width,
+			.height = mf->height,
 		},
 	};
-	int i, ret;
+	int ret;
+
+	WARN_ON(mf->field != V4L2_FIELD_ANY &&
+		mf->field != V4L2_FIELD_INTERLACED);
 
 	/*
	 * check color format
 	 */
-	for (i = 0; i < ARRAY_SIZE(tw9910_color_fmt); i++)
-		if (pix->pixelformat == tw9910_color_fmt[i].fourcc)
-			break;
-
-	if (i == ARRAY_SIZE(tw9910_color_fmt))
+	if (mf->code != V4L2_MBUS_FMT_YVYU8_2X8_BE)
 		return -EINVAL;
 
+	mf->colorspace = V4L2_COLORSPACE_JPEG;
+
 	ret = tw9910_s_crop(sd, &a);
 	if (!ret) {
-		pix->width = priv->scale->width;
-		pix->height = priv->scale->height;
+		mf->width = priv->scale->width;
+		mf->height = priv->scale->height;
 	}
 	return ret;
 }
 
-static int tw9910_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
+static int tw9910_try_fmt(struct v4l2_subdev *sd,
+			  struct v4l2_mbus_framefmt *mf)
 {
 	struct i2c_client *client = sd->priv;
 	struct soc_camera_device *icd = client->dev.platform_data;
-	struct v4l2_pix_format *pix = &f->fmt.pix;
 	const struct tw9910_scale_ctrl *scale;
 
-	if (V4L2_FIELD_ANY == pix->field) {
-		pix->field = V4L2_FIELD_INTERLACED;
-	} else if (V4L2_FIELD_INTERLACED != pix->field) {
-		dev_err(&client->dev, "Field type invalid.\n");
+	if (V4L2_FIELD_ANY == mf->field) {
+		mf->field = V4L2_FIELD_INTERLACED;
+	} else if (V4L2_FIELD_INTERLACED != mf->field) {
+		dev_err(&client->dev, "Field type %d invalid.\n", mf->field);
 		return -EINVAL;
 	}
 
+	mf->code = V4L2_MBUS_FMT_YVYU8_2X8_BE;
+	mf->colorspace = V4L2_COLORSPACE_JPEG;
+
 	/*
	 * select suitable norm
 	 */
-	scale = tw9910_select_norm(icd, pix->width, pix->height);
+	scale = tw9910_select_norm(icd, mf->width, mf->height);
 	if (!scale)
 		return -EINVAL;
 
-	pix->width = scale->width;
-	pix->height = scale->height;
+	mf->width = scale->width;
+	mf->height = scale->height;
 
 	return 0;
 }
@@ -930,9 +923,6 @@ static int tw9910_video_probe(struct soc_camera_device *icd,
 		return -ENODEV;
 	}
 
-	icd->formats = tw9910_color_fmt;
-	icd->num_formats = ARRAY_SIZE(tw9910_color_fmt);
-
 	/*
	 * check and show Product ID
 	 * So far only revisions 0 and 1 have been seen
@@ -973,14 +963,25 @@ static struct v4l2_subdev_core_ops tw9910_subdev_core_ops = {
 #endif
 };
 
+static int tw9910_enum_fmt(struct v4l2_subdev *sd, int index,
+			   enum v4l2_mbus_pixelcode *code)
+{
+	if (index)
+		return -EINVAL;
+
+	*code = V4L2_MBUS_FMT_YVYU8_2X8_BE;
+	return 0;
+}
+
 static struct v4l2_subdev_video_ops tw9910_subdev_video_ops = {
 	.s_stream = tw9910_s_stream,
-	.g_fmt = tw9910_g_fmt,
-	.s_fmt = tw9910_s_fmt,
-	.try_fmt = tw9910_try_fmt,
+	.g_mbus_fmt = tw9910_g_fmt,
+	.s_mbus_fmt = tw9910_s_fmt,
+	.try_mbus_fmt = tw9910_try_fmt,
 	.cropcap = tw9910_cropcap,
 	.g_crop = tw9910_g_crop,
 	.s_crop = tw9910_s_crop,
+	.enum_mbus_fmt = tw9910_enum_fmt,
 };
 
 static struct v4l2_subdev_ops tw9910_subdev_ops = {
diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h
index 831efffaf2ae..dcc5b86bcb6c 100644
--- a/include/media/soc_camera.h
+++ b/include/media/soc_camera.h
@@ -24,15 +24,13 @@ struct soc_camera_device {
 	struct device *pdev;		/* Platform device */
 	s32 user_width;
 	s32 user_height;
+	enum v4l2_colorspace colorspace;
 	unsigned char iface;		/* Host number */
 	unsigned char devnum;		/* Device number per host */
-	unsigned char buswidth;		/* See comment in .c */
 	struct soc_camera_sense *sense;	/* See comment in struct definition */
 	struct soc_camera_ops *ops;
 	struct video_device *vdev;
-	const struct soc_camera_data_format *current_fmt;
-	const struct soc_camera_data_format *formats;
-	int num_formats;
+	const struct soc_camera_format_xlate *current_fmt;
 	struct soc_camera_format_xlate *user_formats;
 	int num_user_formats;
 	enum v4l2_field field;		/* Preserve field over close() */
@@ -161,23 +159,13 @@ static inline struct v4l2_subdev *soc_camera_to_subdev(
 int soc_camera_host_register(struct soc_camera_host *ici);
 void soc_camera_host_unregister(struct soc_camera_host *ici);
 
-const struct soc_camera_data_format *soc_camera_format_by_fourcc(
-	struct soc_camera_device *icd, unsigned int fourcc);
 const struct soc_camera_format_xlate *soc_camera_xlate_by_fourcc(
 	struct soc_camera_device *icd, unsigned int fourcc);
 
-struct soc_camera_data_format {
-	const char *name;
-	unsigned int depth;
-	__u32 fourcc;
-	enum v4l2_colorspace colorspace;
-};
-
 /**
  * struct soc_camera_format_xlate - match between host and sensor formats
- * @cam_fmt: sensor format provided by the sensor
- * @host_fmt: host format after host translation from cam_fmt
- * @buswidth: bus width for this format
+ * @code: code of a sensor provided format
+ * @host_fmt: host format after host translation from code
  *
  * Host and sensor translation structure. Used in table of host and sensor
  * formats matchings in soc_camera_device. A host can override the generic list
  *
@@ -185,9 +173,8 @@ struct soc_camera_data_format {
  * format setup.
  */
 struct soc_camera_format_xlate {
-	const struct soc_camera_data_format *cam_fmt;
-	const struct soc_camera_data_format *host_fmt;
-	unsigned char buswidth;
+	enum v4l2_mbus_pixelcode code;
+	const struct soc_mbus_pixelfmt *host_fmt;
 };
 
 struct soc_camera_ops {
diff --git a/include/media/soc_camera_platform.h b/include/media/soc_camera_platform.h
index 88b3b5747f62..0ecefe227b76 100644
--- a/include/media/soc_camera_platform.h
+++ b/include/media/soc_camera_platform.h
@@ -19,7 +19,7 @@ struct device;
 struct soc_camera_platform_info {
 	const char *format_name;
 	unsigned long format_depth;
-	struct v4l2_pix_format format;
+	struct v4l2_mbus_framefmt format;
 	unsigned long bus_param;
 	struct device *dev;
 	int (*set_capture)(struct soc_camera_platform_info *info, int enable);
--
cgit v1.2.3


From a6b5f2008a3d54b5f5350a01121b718dd6bfead7 Mon Sep 17 00:00:00 2001
From: Guennadi Liakhovetski
Date: Fri, 11 Dec 2009 11:53:45 -0300
Subject: V4L/DVB (13661): rj54n1cb0c: Add cropping, auto white balance, restrict sizes, add platform data

It has been experimentally found that the sensor only supports up to
512x384 video output and also has some restrictions on minimum scale. We
disable non-working size ranges until, maybe, someone finds out how to
properly set them up. Also add cropping support, an auto white balance
control, and platform data to specify master clock frequency and polarity
of the IOCTL pin.

create mode 100644 include/media/rj54n1cb0c.h

Signed-off-by: Guennadi Liakhovetski
Signed-off-by: Mauro Carvalho Chehab
---
 arch/sh/boards/mach-kfr2r09/setup.c |  13 +-
 drivers/media/video/rj54n1cb0c.c    | 277 ++++++++++++++++++++++++++++++------
 include/media/rj54n1cb0c.h          |  19 +++
 3 files changed, 261 insertions(+), 48 deletions(-)
 create mode 100644 include/media/rj54n1cb0c.h

(limited to 'include')

diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
index 87438d6603d6..9038d768a525 100644
--- a/arch/sh/boards/mach-kfr2r09/setup.c
+++ b/arch/sh/boards/mach-kfr2r09/setup.c
@@ -19,6 +19,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include