From f5417612d787e6b619fd69616bbf95f1b895e900 Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Mon, 28 Nov 2005 18:09:44 +0000 Subject: [ARM] 3181/1: add PORT_ identifier for Hilscher netx uart Patch from Sascha Hauer This patch adds PORT_NETX for supporting the Hilscher netx embedded UARTs. Signed-off-by: Sascha Hauer Signed-off-by: Russell King --- include/linux/serial_core.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index a3ac92b19aca..e3710d7e260a 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -121,6 +121,9 @@ #define PORT_IP3106 70 +/* Hilscher netx */ +#define PORT_NETX 71 + #ifdef __KERNEL__ #include -- cgit v1.2.3 From 24117defabc849a6ad5081ad0fafd0664bf55f13 Mon Sep 17 00:00:00 2001 From: Pierre Ossman Date: Mon, 28 Nov 2005 21:00:29 +0000 Subject: [MMC] Fix protocol errors A review against MMC/SD specifications found some errors in the current implementation. Signed-off-by: Pierre Ossman Signed-off-by: Russell King --- drivers/mmc/mmc.c | 2 +- include/linux/mmc/protocol.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c index da528390acf8..d336a1d65dc7 100644 --- a/drivers/mmc/mmc.c +++ b/drivers/mmc/mmc.c @@ -816,7 +816,7 @@ static void mmc_discover_cards(struct mmc_host *host) cmd.opcode = SD_SEND_RELATIVE_ADDR; cmd.arg = 0; - cmd.flags = MMC_RSP_R1; + cmd.flags = MMC_RSP_R6; err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); if (err != MMC_ERR_NONE) diff --git a/include/linux/mmc/protocol.h b/include/linux/mmc/protocol.h index f819cae92266..a14dc306545b 100644 --- a/include/linux/mmc/protocol.h +++ b/include/linux/mmc/protocol.h @@ -63,7 +63,7 @@ /* class 5 */ #define MMC_ERASE_GROUP_START 35 /* ac [31:0] data addr R1 */ #define MMC_ERASE_GROUP_END 36 /* ac [31:0] data addr R1 */ -#define MMC_ERASE 37 /* ac R1b */ +#define MMC_ERASE 38 /* ac R1b */ /* class 9 */ #define MMC_FAST_IO 39 /* ac R4 */ @@ -74,7 +74,7 @@ /* class 8 */ #define MMC_APP_CMD 55 /* ac [31:16] RCA R1 */ -#define MMC_GEN_CMD 56 /* adtc [0] RD/WR R1b */ +#define MMC_GEN_CMD 56 /* adtc [0] RD/WR R1 */ /* SD commands type argument response */ /* class 8 */ -- cgit v1.2.3 From 6aab341e0a28aff100a09831c5300a2994b8b986 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Mon, 28 Nov 2005 14:34:23 -0800 Subject: mm: re-architect the VM_UNPAGED logic This replaces the (in my opinion horrible) VM_UNMAPPED logic with very explicit support for a "remapped page range" aka VM_PFNMAP. It allows a VM area to contain an arbitrary range of page table entries that the VM never touches, and never considers to be normal pages. Any user of "remap_pfn_range()" automatically gets this new functionality, and doesn't even have to mark the pages reserved or indeed mark them any other way. It just works. As a side effect, doing mmap() on /dev/mem works for arbitrary ranges. Sparc update from David in the next commit. 
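For illustration, a minimal sketch of how a driver typically ends up with such a VM_PFNMAP area: any mmap() handler built on remap_pfn_range() now picks up the new behaviour automatically. The "exampledev" name and its physical base address are assumptions made for this sketch, not code from this series; only the remap_pfn_range() call and its signature come from the patch.

static int exampledev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/* exampledev_phys_base is a hypothetical device base address */
	return remap_pfn_range(vma, vma->vm_start,
			       exampledev_phys_base >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}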
Signed-off-by: Linus Torvalds --- arch/powerpc/kernel/vdso.c | 6 +- drivers/char/mem.c | 2 +- fs/proc/task_mmu.c | 7 +- include/linux/mm.h | 5 +- mm/fremap.c | 22 ++---- mm/madvise.c | 2 +- mm/memory.c | 189 ++++++++++++++++++++++++--------------------- mm/mempolicy.c | 12 +-- mm/msync.c | 12 +-- mm/nommu.c | 2 +- mm/rmap.c | 14 +--- 11 files changed, 127 insertions(+), 146 deletions(-) (limited to 'include/linux') diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index b44b36e0c293..f0c47dab0903 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -145,8 +145,7 @@ static void dump_vdso_pages(struct vm_area_struct * vma) struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE); struct page *upg = (vma && vma->vm_mm) ? - follow_page(vma->vm_mm, vma->vm_start + - i*PAGE_SIZE, 0) + follow_page(vma, vma->vm_start + i*PAGE_SIZE, 0) : NULL; dump_one_vdso_page(pg, upg); } @@ -157,8 +156,7 @@ static void dump_vdso_pages(struct vm_area_struct * vma) struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE); struct page *upg = (vma && vma->vm_mm) ? - follow_page(vma->vm_mm, vma->vm_start + - i*PAGE_SIZE, 0) + follow_page(vma, vma->vm_start + i*PAGE_SIZE, 0) : NULL; dump_one_vdso_page(pg, upg); } diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 29c3b631445a..91dd669273e0 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -591,7 +591,7 @@ static inline size_t read_zero_pagealigned(char __user * buf, size_t size) if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0) goto out_up; - if (vma->vm_flags & (VM_SHARED | VM_HUGETLB | VM_UNPAGED)) + if (vma->vm_flags & (VM_SHARED | VM_HUGETLB)) break; count = vma->vm_end - addr; if (count > size) diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 9ab97cef0daa..50bd5a8f0446 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -402,12 +402,11 @@ struct numa_maps { /* * Calculate numa node maps for a vma */ -static struct numa_maps *get_numa_maps(const struct vm_area_struct *vma) +static struct numa_maps *get_numa_maps(struct vm_area_struct *vma) { + int i; struct page *page; unsigned long vaddr; - struct mm_struct *mm = vma->vm_mm; - int i; struct numa_maps *md = kmalloc(sizeof(struct numa_maps), GFP_KERNEL); if (!md) @@ -420,7 +419,7 @@ static struct numa_maps *get_numa_maps(const struct vm_area_struct *vma) md->node[i] =0; for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) { - page = follow_page(mm, vaddr, 0); + page = follow_page(vma, vaddr, 0); if (page) { int count = page_mapcount(page); diff --git a/include/linux/mm.h b/include/linux/mm.h index f0cdfd18db55..6a75a7a78bf1 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -145,7 +145,7 @@ extern unsigned int kobjsize(const void *objp); #define VM_GROWSDOWN 0x00000100 /* general info on the segment */ #define VM_GROWSUP 0x00000200 #define VM_SHM 0x00000000 /* Means nothing: delete it later */ -#define VM_UNPAGED 0x00000400 /* Pages managed without map count */ +#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */ #define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. 
*/ #define VM_EXECUTABLE 0x00001000 @@ -664,6 +664,7 @@ struct zap_details { unsigned long truncate_count; /* Compare vm_truncate_count */ }; +struct page *vm_normal_page(struct vm_area_struct *, unsigned long, pte_t); unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size, struct zap_details *); unsigned long unmap_vmas(struct mmu_gather **tlb, @@ -953,7 +954,7 @@ unsigned long vmalloc_to_pfn(void *addr); int remap_pfn_range(struct vm_area_struct *, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t); -struct page *follow_page(struct mm_struct *, unsigned long address, +struct page *follow_page(struct vm_area_struct *, unsigned long address, unsigned int foll_flags); #define FOLL_WRITE 0x01 /* check pte is writable */ #define FOLL_TOUCH 0x02 /* mark page accessed */ diff --git a/mm/fremap.c b/mm/fremap.c index 007cbad9331e..f851775e09c2 100644 --- a/mm/fremap.c +++ b/mm/fremap.c @@ -27,24 +27,20 @@ static int zap_pte(struct mm_struct *mm, struct vm_area_struct *vma, struct page *page = NULL; if (pte_present(pte)) { - unsigned long pfn = pte_pfn(pte); - flush_cache_page(vma, addr, pfn); + flush_cache_page(vma, addr, pte_pfn(pte)); pte = ptep_clear_flush(vma, addr, ptep); - if (unlikely(!pfn_valid(pfn))) { - print_bad_pte(vma, pte, addr); - goto out; + page = vm_normal_page(vma, addr, pte); + if (page) { + if (pte_dirty(pte)) + set_page_dirty(page); + page_remove_rmap(page); + page_cache_release(page); } - page = pfn_to_page(pfn); - if (pte_dirty(pte)) - set_page_dirty(page); - page_remove_rmap(page); - page_cache_release(page); } else { if (!pte_file(pte)) free_swap_and_cache(pte_to_swp_entry(pte)); pte_clear(mm, addr, ptep); } -out: return !!page; } @@ -65,8 +61,6 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma, pte_t pte_val; spinlock_t *ptl; - BUG_ON(vma->vm_flags & VM_UNPAGED); - pgd = pgd_offset(mm, addr); pud = pud_alloc(mm, pgd, addr); if (!pud) @@ -122,8 +116,6 @@ int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, pte_t pte_val; spinlock_t *ptl; - BUG_ON(vma->vm_flags & VM_UNPAGED); - pgd = pgd_offset(mm, addr); pud = pud_alloc(mm, pgd, addr); if (!pud) diff --git a/mm/madvise.c b/mm/madvise.c index 328a3bcce527..2b7cf0400a21 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -126,7 +126,7 @@ static long madvise_dontneed(struct vm_area_struct * vma, unsigned long start, unsigned long end) { *prev = vma; - if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_UNPAGED)) + if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP)) return -EINVAL; if (unlikely(vma->vm_flags & VM_NONLINEAR)) { diff --git a/mm/memory.c b/mm/memory.c index d1f46f4e4c8a..b57fbc636058 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -333,9 +333,9 @@ static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss) } /* - * This function is called to print an error when a pte in a - * !VM_UNPAGED region is found pointing to an invalid pfn (which - * is an error. + * This function is called to print an error when a bad pte + * is found. For example, we might have a PFN-mapped pte in + * a region that doesn't allow it. * * The calling function must still handle the error. */ @@ -350,19 +350,56 @@ void print_bad_pte(struct vm_area_struct *vma, pte_t pte, unsigned long vaddr) } /* - * page_is_anon applies strict checks for an anonymous page belonging to - * this vma at this address. 
It is used on VM_UNPAGED vmas, which are - * usually populated with shared originals (which must not be counted), - * but occasionally contain private COWed copies (when !VM_SHARED, or - * perhaps via ptrace when VM_SHARED). An mmap of /dev/mem might window - * free pages, pages from other processes, or from other parts of this: - * it's tricky, but try not to be deceived by foreign anonymous pages. + * This function gets the "struct page" associated with a pte. + * + * NOTE! Some mappings do not have "struct pages". A raw PFN mapping + * will have each page table entry just pointing to a raw page frame + * number, and as far as the VM layer is concerned, those do not have + * pages associated with them - even if the PFN might point to memory + * that otherwise is perfectly fine and has a "struct page". + * + * The way we recognize those mappings is through the rules set up + * by "remap_pfn_range()": the vma will have the VM_PFNMAP bit set, + * and the vm_pgoff will point to the first PFN mapped: thus every + * page that is a raw mapping will always honor the rule + * + * pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT) + * + * and if that isn't true, the page has been COW'ed (in which case it + * _does_ have a "struct page" associated with it even if it is in a + * VM_PFNMAP range). */ -static inline int page_is_anon(struct page *page, - struct vm_area_struct *vma, unsigned long addr) +struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte) { - return page && PageAnon(page) && page_mapped(page) && - page_address_in_vma(page, vma) == addr; + unsigned long pfn = pte_pfn(pte); + + if (vma->vm_flags & VM_PFNMAP) { + unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT; + if (pfn == vma->vm_pgoff + off) + return NULL; + } + + /* + * Add some anal sanity checks for now. Eventually, + * we should just do "return pfn_to_page(pfn)", but + * in the meantime we check that we get a valid pfn, + * and that the resulting page looks ok. + * + * Remove this test eventually! + */ + if (unlikely(!pfn_valid(pfn))) { + print_bad_pte(vma, pte, addr); + return NULL; + } + + /* + * NOTE! We still have PageReserved() pages in the page + * tables. + * + * The PAGE_ZERO() pages and various VDSO mappings can + * cause them to exist. + */ + return pfn_to_page(pfn); } /* @@ -379,7 +416,6 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, unsigned long vm_flags = vma->vm_flags; pte_t pte = *src_pte; struct page *page; - unsigned long pfn; /* pte contains position in swap or file, so copy. */ if (unlikely(!pte_present(pte))) { @@ -397,22 +433,6 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, goto out_set_pte; } - pfn = pte_pfn(pte); - page = pfn_valid(pfn)? pfn_to_page(pfn): NULL; - - if (unlikely(vm_flags & VM_UNPAGED)) - if (!page_is_anon(page, vma, addr)) - goto out_set_pte; - - /* - * If the pte points outside of valid memory but - * the region is not VM_UNPAGED, we have a problem. 
- */ - if (unlikely(!page)) { - print_bad_pte(vma, pte, addr); - goto out_set_pte; /* try to do something sane */ - } - /* * If it's a COW mapping, write protect it both * in the parent and the child @@ -429,9 +449,13 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, if (vm_flags & VM_SHARED) pte = pte_mkclean(pte); pte = pte_mkold(pte); - get_page(page); - page_dup_rmap(page); - rss[!!PageAnon(page)]++; + + page = vm_normal_page(vma, addr, pte); + if (page) { + get_page(page); + page_dup_rmap(page); + rss[!!PageAnon(page)]++; + } out_set_pte: set_pte_at(dst_mm, addr, dst_pte, pte); @@ -543,7 +567,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, * readonly mappings. The tradeoff is that copy_page_range is more * efficient than faulting. */ - if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_UNPAGED))) { + if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP))) { if (!vma->anon_vma) return 0; } @@ -584,19 +608,10 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, } if (pte_present(ptent)) { struct page *page; - unsigned long pfn; (*zap_work) -= PAGE_SIZE; - pfn = pte_pfn(ptent); - page = pfn_valid(pfn)? pfn_to_page(pfn): NULL; - - if (unlikely(vma->vm_flags & VM_UNPAGED)) { - if (!page_is_anon(page, vma, addr)) - page = NULL; - } else if (unlikely(!page)) - print_bad_pte(vma, ptent, addr); - + page = vm_normal_page(vma, addr, ptent); if (unlikely(details) && page) { /* * unmap_shared_mapping_pages() wants to @@ -852,7 +867,7 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address, /* * Do a quick page-table lookup for a single page. */ -struct page *follow_page(struct mm_struct *mm, unsigned long address, +struct page *follow_page(struct vm_area_struct *vma, unsigned long address, unsigned int flags) { pgd_t *pgd; @@ -860,8 +875,8 @@ struct page *follow_page(struct mm_struct *mm, unsigned long address, pmd_t *pmd; pte_t *ptep, pte; spinlock_t *ptl; - unsigned long pfn; struct page *page; + struct mm_struct *mm = vma->vm_mm; page = follow_huge_addr(mm, address, flags & FOLL_WRITE); if (!IS_ERR(page)) { @@ -897,11 +912,10 @@ struct page *follow_page(struct mm_struct *mm, unsigned long address, goto unlock; if ((flags & FOLL_WRITE) && !pte_write(pte)) goto unlock; - pfn = pte_pfn(pte); - if (!pfn_valid(pfn)) + page = vm_normal_page(vma, address, pte); + if (unlikely(!page)) goto unlock; - page = pfn_to_page(pfn); if (flags & FOLL_GET) get_page(page); if (flags & FOLL_TOUCH) { @@ -974,8 +988,10 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, return i ? : -EFAULT; } if (pages) { - pages[i] = pte_page(*pte); - get_page(pages[i]); + struct page *page = vm_normal_page(vma, start, *pte); + pages[i] = page; + if (page) + get_page(page); } pte_unmap(pte); if (vmas) @@ -1010,7 +1026,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, foll_flags |= FOLL_WRITE; cond_resched(); - while (!(page = follow_page(mm, start, foll_flags))) { + while (!(page = follow_page(vma, start, foll_flags))) { int ret; ret = __handle_mm_fault(mm, vma, start, foll_flags & FOLL_WRITE); @@ -1214,11 +1230,12 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, * in 2.6 the LRU scan won't even find its pages, so this * flag means no more than count its pages in reserved_vm, * and omit it from core dump, even when VM_IO turned off. - * VM_UNPAGED tells the core MM not to "manage" these pages - * (e.g. 
refcount, mapcount, try to swap them out): in - * particular, zap_pte_range does not try to free them. + * VM_PFNMAP tells the core MM that the base pages are just + * raw PFN mappings, and do not have a "struct page" associated + * with them. */ - vma->vm_flags |= VM_IO | VM_RESERVED | VM_UNPAGED; + vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP; + vma->vm_pgoff = pfn; BUG_ON(addr >= end); pfn -= addr >> PAGE_SHIFT; @@ -1273,6 +1290,26 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) return pte; } +static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va) +{ + /* + * If the source page was a PFN mapping, we don't have + * a "struct page" for it. We do a best-effort copy by + * just copying from the original user address. If that + * fails, we just zero-fill it. Live with it. + */ + if (unlikely(!src)) { + void *kaddr = kmap_atomic(dst, KM_USER0); + unsigned long left = __copy_from_user_inatomic(kaddr, (void __user *)va, PAGE_SIZE); + if (left) + memset(kaddr, 0, PAGE_SIZE); + kunmap_atomic(kaddr, KM_USER0); + return; + + } + copy_user_highpage(dst, src, va); +} + /* * This routine handles present pages, when users try to write * to a shared page. It is done by copying the page to a new address @@ -1296,28 +1333,13 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, spinlock_t *ptl, pte_t orig_pte) { struct page *old_page, *src_page, *new_page; - unsigned long pfn = pte_pfn(orig_pte); pte_t entry; int ret = VM_FAULT_MINOR; - if (unlikely(!pfn_valid(pfn))) { - /* - * Page table corrupted: show pte and kill process. - * Or it's an attempt to COW an out-of-map VM_UNPAGED - * entry, which copy_user_highpage does not support. - */ - print_bad_pte(vma, orig_pte, address); - ret = VM_FAULT_OOM; - goto unlock; - } - old_page = pfn_to_page(pfn); + old_page = vm_normal_page(vma, address, orig_pte); src_page = old_page; - - if (unlikely(vma->vm_flags & VM_UNPAGED)) - if (!page_is_anon(old_page, vma, address)) { - old_page = NULL; - goto gotten; - } + if (!old_page) + goto gotten; if (PageAnon(old_page) && !TestSetPageLocked(old_page)) { int reuse = can_share_swap_page(old_page); @@ -1351,7 +1373,7 @@ gotten: new_page = alloc_page_vma(GFP_HIGHUSER, vma, address); if (!new_page) goto oom; - copy_user_highpage(new_page, src_page, address); + cow_user_page(new_page, src_page, address); } /* @@ -1812,16 +1834,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, spinlock_t *ptl; pte_t entry; - /* - * A VM_UNPAGED vma will normally be filled with present ptes - * by remap_pfn_range, and never arrive here; but it might have - * holes, or if !VM_DONTEXPAND, mremap might have expanded it. - * It's weird enough handling anon pages in unpaged vmas, we do - * not want to worry about ZERO_PAGEs too (it may or may not - * matter if their counts wrap): just give them anon pages. - */ - - if (write_access || (vma->vm_flags & VM_UNPAGED)) { + if (write_access) { /* Allocate our own private page. 
*/ pte_unmap(page_table); @@ -1896,8 +1909,6 @@ static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma, int anon = 0; pte_unmap(page_table); - BUG_ON(vma->vm_flags & VM_UNPAGED); - if (vma->vm_file) { mapping = vma->vm_file->f_mapping; sequence = mapping->truncate_count; @@ -1930,7 +1941,7 @@ retry: page = alloc_page_vma(GFP_HIGHUSER, vma, address); if (!page) goto oom; - copy_user_highpage(page, new_page, address); + cow_user_page(page, new_page, address); page_cache_release(new_page); new_page = page; anon = 1; diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 5609a31bdf22..bec88c81244e 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -189,17 +189,15 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd, orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); do { - unsigned long pfn; + struct page *page; unsigned int nid; if (!pte_present(*pte)) continue; - pfn = pte_pfn(*pte); - if (!pfn_valid(pfn)) { - print_bad_pte(vma, *pte, addr); + page = vm_normal_page(vma, addr, *pte); + if (!page) continue; - } - nid = pfn_to_nid(pfn); + nid = page_to_nid(page); if (!node_isset(nid, *nodes)) break; } while (pte++, addr += PAGE_SIZE, addr != end); @@ -269,8 +267,6 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end, first = find_vma(mm, start); if (!first) return ERR_PTR(-EFAULT); - if (first->vm_flags & VM_UNPAGED) - return ERR_PTR(-EACCES); prev = NULL; for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) { if (!vma->vm_next && vma->vm_end < end) diff --git a/mm/msync.c b/mm/msync.c index b3f4caf3010b..1b5b6f662dcf 100644 --- a/mm/msync.c +++ b/mm/msync.c @@ -27,7 +27,6 @@ static void msync_pte_range(struct vm_area_struct *vma, pmd_t *pmd, again: pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); do { - unsigned long pfn; struct page *page; if (progress >= 64) { @@ -40,13 +39,9 @@ again: continue; if (!pte_maybe_dirty(*pte)) continue; - pfn = pte_pfn(*pte); - if (unlikely(!pfn_valid(pfn))) { - print_bad_pte(vma, *pte, addr); + page = vm_normal_page(vma, addr, *pte); + if (!page) continue; - } - page = pfn_to_page(pfn); - if (ptep_clear_flush_dirty(vma, addr, pte) || page_test_and_clear_dirty(page)) set_page_dirty(page); @@ -97,9 +92,8 @@ static void msync_page_range(struct vm_area_struct *vma, /* For hugepages we can't go walking the page table normally, * but that's ok, hugetlbfs is memory based, so we don't need * to do anything more on an msync(). - * Can't do anything with VM_UNPAGED regions either. */ - if (vma->vm_flags & (VM_HUGETLB|VM_UNPAGED)) + if (vma->vm_flags & VM_HUGETLB) return; BUG_ON(addr >= end); diff --git a/mm/nommu.c b/mm/nommu.c index 6deb6ab3d6ad..c1196812876b 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -1045,7 +1045,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) EXPORT_SYMBOL(find_vma); -struct page *follow_page(struct mm_struct *mm, unsigned long address, +struct page *follow_page(struct vm_area_struct *vma, unsigned long address, unsigned int foll_flags) { return NULL; diff --git a/mm/rmap.c b/mm/rmap.c index 2e034a0b89ab..6389cda02a20 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -226,8 +226,6 @@ vma_address(struct page *page, struct vm_area_struct *vma) /* * At what user virtual address is page expected in vma? checking that the * page matches the vma: currently only used on anon pages, by unuse_vma; - * and by extraordinary checks on anon pages in VM_UNPAGED vmas, taking - * care that an mmap of /dev/mem might window free and foreign pages. 
*/ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) { @@ -614,7 +612,6 @@ static void try_to_unmap_cluster(unsigned long cursor, struct page *page; unsigned long address; unsigned long end; - unsigned long pfn; address = (vma->vm_start + cursor) & CLUSTER_MASK; end = address + CLUSTER_SIZE; @@ -643,15 +640,8 @@ static void try_to_unmap_cluster(unsigned long cursor, for (; address < end; pte++, address += PAGE_SIZE) { if (!pte_present(*pte)) continue; - - pfn = pte_pfn(*pte); - if (unlikely(!pfn_valid(pfn))) { - print_bad_pte(vma, *pte, address); - continue; - } - - page = pfn_to_page(pfn); - BUG_ON(PageAnon(page)); + page = vm_normal_page(vma, address, *pte); + BUG_ON(!page || PageAnon(page)); if (ptep_clear_flush_young(vma, address, pte)) continue; -- cgit v1.2.3 From a9d9baa1e819b2f92f9cfa5240f766c535e636a6 Mon Sep 17 00:00:00 2001 From: Ashok Raj Date: Mon, 28 Nov 2005 13:43:46 -0800 Subject: [PATCH] clean up lock_cpu_hotplug() in cpufreq There are some callers in cpufreq hotplug notify path that the lowest function calls lock_cpu_hotplug(). The lock is already held during cpu_up() and cpu_down() calls when the notify calls are broadcast to registered clients. Ideally if possible, we could disable_preempt() at the highest caller and make sure we dont sleep in the path down in cpufreq->driver_target() calls but the calls are so intertwined and cumbersome to cleanup. Hence we consistently use lock_cpu_hotplug() and unlock_cpu_hotplug() in all places. - Removed export of cpucontrol semaphore and made it static. - removed explicit uses of up/down with lock_cpu_hotplug() so we can keep track of the the callers in same thread context and just keep refcounts without calling a down() that causes a deadlock. - Removed current_in_hotplug() uses - Removed PF_HOTPLUG_CPU in sched.h introduced for the current_in_hotplug() temporary workaround. Tested with insmod of cpufreq_stat.ko, and logical online/offline to make sure we dont have any hang situations. Signed-off-by: Ashok Raj Cc: Zwane Mwaikambo Cc: Shaohua Li Cc: "Siddha, Suresh B" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/cpufreq/cpufreq.c | 12 ++----- include/linux/cpu.h | 7 ++-- include/linux/sched.h | 1 - kernel/cpu.c | 83 ++++++++++++++++++++++++++++------------------- 4 files changed, 54 insertions(+), 49 deletions(-) (limited to 'include/linux') diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 1c0f62d0f938..815902c2c856 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1113,21 +1113,13 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy, { int retval = -EINVAL; - /* - * If we are already in context of hotplug thread, we dont need to - * acquire the hotplug lock. Otherwise acquire cpucontrol to prevent - * hotplug from removing this cpu that we are working on. - */ - if (!current_in_cpu_hotplug()) - lock_cpu_hotplug(); - + lock_cpu_hotplug(); dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu, target_freq, relation); if (cpu_online(policy->cpu) && cpufreq_driver->target) retval = cpufreq_driver->target(policy, target_freq, relation); - if (!current_in_cpu_hotplug()) - unlock_cpu_hotplug(); + unlock_cpu_hotplug(); return retval; } diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 43c44530ef9d..0ed1d4853c69 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -65,10 +65,9 @@ extern struct sysdev_class cpu_sysdev_class; #ifdef CONFIG_HOTPLUG_CPU /* Stop CPUs going up and down. 
*/ -extern struct semaphore cpucontrol; -#define lock_cpu_hotplug() down(&cpucontrol) -#define unlock_cpu_hotplug() up(&cpucontrol) -#define lock_cpu_hotplug_interruptible() down_interruptible(&cpucontrol) +extern void lock_cpu_hotplug(void); +extern void unlock_cpu_hotplug(void); +extern int lock_cpu_hotplug_interruptible(void); #define hotcpu_notifier(fn, pri) { \ static struct notifier_block fn##_nb = \ { .notifier_call = fn, .priority = pri }; \ diff --git a/include/linux/sched.h b/include/linux/sched.h index 2038bd27b041..b0ad6f30679e 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -908,7 +908,6 @@ do { if (atomic_dec_and_test(&(tsk)->usage)) __put_task_struct(tsk); } while(0) #define PF_SYNCWRITE 0x00200000 /* I am doing a sync write */ #define PF_BORROWED_MM 0x00400000 /* I am a kthread doing use_mm */ #define PF_RANDOMIZE 0x00800000 /* randomize virtual address space */ -#define PF_HOTPLUG_CPU 0x01000000 /* Currently performing CPU hotplug */ /* * Only the _current_ task can read/write to tsk->flags, but other diff --git a/kernel/cpu.c b/kernel/cpu.c index d61ba88f34e5..e882c6babf41 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -16,47 +16,76 @@ #include /* This protects CPUs going up and down... */ -DECLARE_MUTEX(cpucontrol); -EXPORT_SYMBOL_GPL(cpucontrol); +static DECLARE_MUTEX(cpucontrol); static struct notifier_block *cpu_chain; -/* - * Used to check by callers if they need to acquire the cpucontrol - * or not to protect a cpu from being removed. Its sometimes required to - * call these functions both for normal operations, and in response to - * a cpu being added/removed. If the context of the call is in the same - * thread context as a CPU hotplug thread, we dont need to take the lock - * since its already protected - * check drivers/cpufreq/cpufreq.c for its usage - Ashok Raj - */ +#ifdef CONFIG_HOTPLUG_CPU +static struct task_struct *lock_cpu_hotplug_owner; +static int lock_cpu_hotplug_depth; -int current_in_cpu_hotplug(void) +static int __lock_cpu_hotplug(int interruptible) { - return (current->flags & PF_HOTPLUG_CPU); + int ret = 0; + + if (lock_cpu_hotplug_owner != current) { + if (interruptible) + ret = down_interruptible(&cpucontrol); + else + down(&cpucontrol); + } + + /* + * Set only if we succeed in locking + */ + if (!ret) { + lock_cpu_hotplug_depth++; + lock_cpu_hotplug_owner = current; + } + + return ret; } -EXPORT_SYMBOL_GPL(current_in_cpu_hotplug); +void lock_cpu_hotplug(void) +{ + __lock_cpu_hotplug(0); +} +EXPORT_SYMBOL_GPL(lock_cpu_hotplug); +void unlock_cpu_hotplug(void) +{ + if (--lock_cpu_hotplug_depth == 0) { + lock_cpu_hotplug_owner = NULL; + up(&cpucontrol); + } +} +EXPORT_SYMBOL_GPL(unlock_cpu_hotplug); + +int lock_cpu_hotplug_interruptible(void) +{ + return __lock_cpu_hotplug(1); +} +EXPORT_SYMBOL_GPL(lock_cpu_hotplug_interruptible); +#endif /* CONFIG_HOTPLUG_CPU */ /* Need to know about CPUs going up/down? 
*/ int register_cpu_notifier(struct notifier_block *nb) { int ret; - if ((ret = down_interruptible(&cpucontrol)) != 0) + if ((ret = lock_cpu_hotplug_interruptible()) != 0) return ret; ret = notifier_chain_register(&cpu_chain, nb); - up(&cpucontrol); + unlock_cpu_hotplug(); return ret; } EXPORT_SYMBOL(register_cpu_notifier); void unregister_cpu_notifier(struct notifier_block *nb) { - down(&cpucontrol); + lock_cpu_hotplug(); notifier_chain_unregister(&cpu_chain, nb); - up(&cpucontrol); + unlock_cpu_hotplug(); } EXPORT_SYMBOL(unregister_cpu_notifier); @@ -112,13 +141,6 @@ int cpu_down(unsigned int cpu) goto out; } - /* - * Leave a trace in current->flags indicating we are already in - * process of performing CPU hotplug. Callers can check if cpucontrol - * is already acquired by current thread, and if so not cause - * a dead lock by not acquiring the lock - */ - current->flags |= PF_HOTPLUG_CPU; err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE, (void *)(long)cpu); if (err == NOTIFY_BAD) { @@ -171,7 +193,6 @@ out_thread: out_allowed: set_cpus_allowed(current, old_allowed); out: - current->flags &= ~PF_HOTPLUG_CPU; unlock_cpu_hotplug(); return err; } @@ -182,7 +203,7 @@ int __devinit cpu_up(unsigned int cpu) int ret; void *hcpu = (void *)(long)cpu; - if ((ret = down_interruptible(&cpucontrol)) != 0) + if ((ret = lock_cpu_hotplug_interruptible()) != 0) return ret; if (cpu_online(cpu) || !cpu_present(cpu)) { @@ -190,11 +211,6 @@ int __devinit cpu_up(unsigned int cpu) goto out; } - /* - * Leave a trace in current->flags indicating we are already in - * process of performing CPU hotplug. - */ - current->flags |= PF_HOTPLUG_CPU; ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu); if (ret == NOTIFY_BAD) { printk("%s: attempt to bring up CPU %u failed\n", @@ -217,7 +233,6 @@ out_notify: if (ret != 0) notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu); out: - current->flags &= ~PF_HOTPLUG_CPU; - up(&cpucontrol); + unlock_cpu_hotplug(); return ret; } -- cgit v1.2.3 From ff88a3b2f56ae4f3296ea957ea38f99f8bd0e5a8 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Mon, 28 Nov 2005 13:43:47 -0800 Subject: [PATCH] memory_sysdev_class is static So don't define it as extern in the header file. drivers/base/memory.c:28: error: static declaration of 'memory_sysdev_class' follows non-static declaration include/linux/memory.h:88: error: previous declaration of 'memory_sysdev_class' was here Cc: Greg KH Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memory.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/memory.h b/include/linux/memory.h index 9a424383e6c6..dc4081b6f161 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -85,7 +85,6 @@ struct notifier_block; extern int register_memory_notifier(struct notifier_block *nb); extern void unregister_memory_notifier(struct notifier_block *nb); -extern struct sysdev_class memory_sysdev_class; #endif /* CONFIG_MEMORY_HOTPLUG */ #define hotplug_memory_notifier(fn, pri) { \ -- cgit v1.2.3 From f7b7fd8f3ebbb2810d6893295aa984acd0fd30db Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Mon, 28 Nov 2005 13:44:07 -0800 Subject: [PATCH] temporarily disable swap token on memory pressure Some users (hi Zwane) have seen a problem when running a workload that eats nearly all of physical memory - th system does an OOM kill, even when there is still a lot of swap free. 
The problem appears to be a very big task that is holding the swap token, and the VM has a very hard time finding any other page in the system that is swappable. Instead of ignoring the swap token when sc->priority reaches 0, we could simply take the swap token away from the memory hog and make sure we don't give it back to the memory hog for a few seconds. This patch resolves the problem Zwane ran into. Signed-off-by: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/rmap.h | 4 ++-- include/linux/swap.h | 6 ++++++ mm/rmap.c | 26 ++++++++++---------------- mm/thrash.c | 10 +++++++--- mm/vmscan.c | 11 +++++++++-- 5 files changed, 34 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 35b30e6c8cf8..33261f1d2239 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -89,7 +89,7 @@ static inline void page_dup_rmap(struct page *page) /* * Called from mm/vmscan.c to handle paging out */ -int page_referenced(struct page *, int is_locked, int ignore_token); +int page_referenced(struct page *, int is_locked); int try_to_unmap(struct page *); /* @@ -109,7 +109,7 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *); #define anon_vma_prepare(vma) (0) #define anon_vma_link(vma) do {} while (0) -#define page_referenced(page,l,i) TestClearPageReferenced(page) +#define page_referenced(page,l) TestClearPageReferenced(page) #define try_to_unmap(page) SWAP_FAIL #endif /* CONFIG_MMU */ diff --git a/include/linux/swap.h b/include/linux/swap.h index 20c975642cab..508668f840b6 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -239,6 +239,11 @@ static inline void put_swap_token(struct mm_struct *mm) __put_swap_token(mm); } +static inline void disable_swap_token(void) +{ + put_swap_token(swap_token_mm); +} + #else /* CONFIG_SWAP */ #define total_swap_pages 0 @@ -283,6 +288,7 @@ static inline swp_entry_t get_swap_page(void) #define put_swap_token(x) do { } while(0) #define grab_swap_token() do { } while(0) #define has_swap_token(x) 0 +#define disable_swap_token() do { } while(0) #endif /* CONFIG_SWAP */ #endif /* __KERNEL__*/ diff --git a/mm/rmap.c b/mm/rmap.c index 6389cda02a20..491ac350048f 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -290,7 +290,7 @@ pte_t *page_check_address(struct page *page, struct mm_struct *mm, * repeatedly from either page_referenced_anon or page_referenced_file. */ static int page_referenced_one(struct page *page, - struct vm_area_struct *vma, unsigned int *mapcount, int ignore_token) + struct vm_area_struct *vma, unsigned int *mapcount) { struct mm_struct *mm = vma->vm_mm; unsigned long address; @@ -311,7 +311,7 @@ static int page_referenced_one(struct page *page, /* Pretend the page is referenced if the task has the swap token and is in the middle of a page fault. 
*/ - if (mm != current->mm && !ignore_token && has_swap_token(mm) && + if (mm != current->mm && has_swap_token(mm) && rwsem_is_locked(&mm->mmap_sem)) referenced++; @@ -321,7 +321,7 @@ out: return referenced; } -static int page_referenced_anon(struct page *page, int ignore_token) +static int page_referenced_anon(struct page *page) { unsigned int mapcount; struct anon_vma *anon_vma; @@ -334,8 +334,7 @@ static int page_referenced_anon(struct page *page, int ignore_token) mapcount = page_mapcount(page); list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { - referenced += page_referenced_one(page, vma, &mapcount, - ignore_token); + referenced += page_referenced_one(page, vma, &mapcount); if (!mapcount) break; } @@ -354,7 +353,7 @@ static int page_referenced_anon(struct page *page, int ignore_token) * * This function is only called from page_referenced for object-based pages. */ -static int page_referenced_file(struct page *page, int ignore_token) +static int page_referenced_file(struct page *page) { unsigned int mapcount; struct address_space *mapping = page->mapping; @@ -392,8 +391,7 @@ static int page_referenced_file(struct page *page, int ignore_token) referenced++; break; } - referenced += page_referenced_one(page, vma, &mapcount, - ignore_token); + referenced += page_referenced_one(page, vma, &mapcount); if (!mapcount) break; } @@ -410,13 +408,10 @@ static int page_referenced_file(struct page *page, int ignore_token) * Quick test_and_clear_referenced for all mappings to a page, * returns the number of ptes which referenced the page. */ -int page_referenced(struct page *page, int is_locked, int ignore_token) +int page_referenced(struct page *page, int is_locked) { int referenced = 0; - if (!swap_token_default_timeout) - ignore_token = 1; - if (page_test_and_clear_young(page)) referenced++; @@ -425,15 +420,14 @@ int page_referenced(struct page *page, int is_locked, int ignore_token) if (page_mapped(page) && page->mapping) { if (PageAnon(page)) - referenced += page_referenced_anon(page, ignore_token); + referenced += page_referenced_anon(page); else if (is_locked) - referenced += page_referenced_file(page, ignore_token); + referenced += page_referenced_file(page); else if (TestSetPageLocked(page)) referenced++; else { if (page->mapping) - referenced += page_referenced_file(page, - ignore_token); + referenced += page_referenced_file(page); unlock_page(page); } } diff --git a/mm/thrash.c b/mm/thrash.c index eff3c18c33a1..f4c560b4a2b7 100644 --- a/mm/thrash.c +++ b/mm/thrash.c @@ -57,14 +57,17 @@ void grab_swap_token(void) /* We have the token. Let others know we still need it. */ if (has_swap_token(current->mm)) { current->mm->recent_pagein = 1; + if (unlikely(!swap_token_default_timeout)) + disable_swap_token(); return; } if (time_after(jiffies, swap_token_check)) { - /* Can't get swapout protection if we exceed our RSS limit. */ - // if (current->mm->rss > current->mm->rlimit_rss) - // return; + if (!swap_token_default_timeout) { + swap_token_check = jiffies + SWAP_TOKEN_CHECK_INTERVAL; + return; + } /* ... or if we recently held the token. 
*/ if (time_before(jiffies, current->mm->swap_token_time)) @@ -95,6 +98,7 @@ void __put_swap_token(struct mm_struct *mm) { spin_lock(&swap_token_lock); if (likely(mm == swap_token_mm)) { + mm->swap_token_time = jiffies + SWAP_TOKEN_CHECK_INTERVAL; swap_token_mm = &init_mm; swap_token_check = jiffies; } diff --git a/mm/vmscan.c b/mm/vmscan.c index 28130541270f..078cf920208a 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -407,7 +407,7 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc) if (PageWriteback(page)) goto keep_locked; - referenced = page_referenced(page, 1, sc->priority <= 0); + referenced = page_referenced(page, 1); /* In active use or really unfreeable? Activate it. */ if (referenced && page_mapping_inuse(page)) goto activate_locked; @@ -756,7 +756,7 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc) if (page_mapped(page)) { if (!reclaim_mapped || (total_swap_pages == 0 && PageAnon(page)) || - page_referenced(page, 0, sc->priority <= 0)) { + page_referenced(page, 0)) { list_add(&page->lru, &l_active); continue; } @@ -960,6 +960,8 @@ int try_to_free_pages(struct zone **zones, gfp_t gfp_mask) sc.nr_reclaimed = 0; sc.priority = priority; sc.swap_cluster_max = SWAP_CLUSTER_MAX; + if (!priority) + disable_swap_token(); shrink_caches(zones, &sc); shrink_slab(sc.nr_scanned, gfp_mask, lru_pages); if (reclaim_state) { @@ -1056,6 +1058,10 @@ loop_again: int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ unsigned long lru_pages = 0; + /* The swap token gets in the way of swapout... */ + if (!priority) + disable_swap_token(); + all_zones_ok = 1; if (nr_pages == 0) { @@ -1360,6 +1366,7 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) sc.nr_reclaimed = 0; /* scan at the highest priority */ sc.priority = 0; + disable_swap_token(); if (nr_pages > SWAP_CLUSTER_MAX) sc.swap_cluster_max = nr_pages; -- cgit v1.2.3 From 987d24018dc83d27e491674c50ff2272f51eb719 Mon Sep 17 00:00:00 2001 From: Todd Poynor Date: Tue, 15 Nov 2005 23:28:20 +0000 Subject: [MTD] CFI: Use 16-bit access to autoselect/read device id data Recent models of Intel/Sharp and Spansion CFI flash now have significant bits in the upper byte of device ID codes, read via what Spansion calls "autoselect" and Intel calls "read device identifier". Currently these values are truncated to the low 8 bits in the mtd data structures, as all CFI read query info has previously been read one byte at a time. Add a new method for reading 16-bit info, currently just manufacturer and device codes; datasheets hint at future uses for upper bytes in other fields. Signed-off-by: Todd Poynor Signed-off-by: Thomas Gleixner --- drivers/mtd/chips/cfi_probe.c | 6 +++--- include/linux/mtd/cfi.h | 18 +++++++++++++++++- 2 files changed, 20 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c index 90eb30e06b7c..d65ccdebbda4 100644 --- a/drivers/mtd/chips/cfi_probe.c +++ b/drivers/mtd/chips/cfi_probe.c @@ -1,7 +1,7 @@ /* Common Flash Interface probe code. (C) 2000 Red Hat. GPL'd. 
- $Id: cfi_probe.c,v 1.84 2005/11/07 11:14:23 gleixner Exp $ + $Id: cfi_probe.c,v 1.85 2005/11/15 23:28:17 tpoynor Exp $ */ #include @@ -230,8 +230,8 @@ static int __xipram cfi_chip_setup(struct map_info *map, cfi_send_gen_cmd(0xaa, 0x555, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x55, 0x2aa, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL); - cfi->mfr = cfi_read_query(map, base); - cfi->id = cfi_read_query(map, base + ofs_factor); + cfi->mfr = cfi_read_query16(map, base); + cfi->id = cfi_read_query16(map, base + ofs_factor); /* Put it back into Read Mode */ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h index 3c9ea4b7adda..23a568910341 100644 --- a/include/linux/mtd/cfi.h +++ b/include/linux/mtd/cfi.h @@ -1,7 +1,7 @@ /* Common Flash Interface structures * See http://support.intel.com/design/flash/technote/index.htm - * $Id: cfi.h,v 1.56 2005/11/07 11:14:54 gleixner Exp $ + * $Id: cfi.h,v 1.57 2005/11/15 23:28:17 tpoynor Exp $ */ #ifndef __MTD_CFI_H__ @@ -426,6 +426,22 @@ static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr) } } +static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr) +{ + map_word val = map_read(map, addr); + + if (map_bankwidth_is_1(map)) { + return val.x[0] & 0xff; + } else if (map_bankwidth_is_2(map)) { + return cfi16_to_cpu(val.x[0]); + } else { + /* No point in a 64-bit byteswap since that would just be + swapping the responses from different chips, and we are + only interested in one chip (a representative sample) */ + return cfi32_to_cpu(val.x[0]); + } +} + static inline void cfi_udelay(int us) { if (us >= 1000) { -- cgit v1.2.3 From 238f58d898df941aa9d1cb390fb27ff4febe8965 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Tue, 29 Nov 2005 13:01:56 -0800 Subject: Support strange discontiguous PFN remappings These get created by some drivers that don't generally even want a pfn remapping at all, but would really mostly prefer to just map pages they've allocated individually instead. For now, create a helper function that turns such an incomplete PFN remapping call into a loop that does that explicit mapping. In the long run we almost certainly want to export a totally different interface for that, though. Signed-off-by: Linus Torvalds --- include/linux/mm.h | 1 + mm/memory.c | 92 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 93 insertions(+) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 6a75a7a78bf1..74f90d7eb5ef 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -163,6 +163,7 @@ extern unsigned int kobjsize(const void *objp); #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */ #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */ #define VM_MAPPED_COPY 0x01000000 /* T if mapped copy of data (nommu mmap) */ +#define VM_INCOMPLETE 0x02000000 /* Strange partial PFN mapping marker */ #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */ #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS diff --git a/mm/memory.c b/mm/memory.c index 74839b3a3999..990e7dc666f8 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1146,6 +1146,95 @@ int zeromap_page_range(struct vm_area_struct *vma, return err; } +/* + * This is the old fallback for page remapping. + * + * For historical reasons, it only allows reserved pages. 
Only + * old drivers should use this, and they needed to mark their + * pages reserved for the old functions anyway. + */ +static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *page, pgprot_t prot) +{ + int retval; + pgd_t * pgd; + pud_t * pud; + pmd_t * pmd; + pte_t * pte; + spinlock_t *ptl; + + retval = -EINVAL; + if (PageAnon(page) || !PageReserved(page)) + goto out; + retval = -ENOMEM; + flush_dcache_page(page); + pgd = pgd_offset(mm, addr); + pud = pud_alloc(mm, pgd, addr); + if (!pud) + goto out; + pmd = pmd_alloc(mm, pud, addr); + if (!pmd) + goto out; + pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); + if (!pte) + goto out; + retval = -EBUSY; + if (!pte_none(*pte)) + goto out_unlock; + + /* Ok, finally just insert the thing.. */ + get_page(page); + inc_mm_counter(mm, file_rss); + page_add_file_rmap(page); + set_pte_at(mm, addr, pte, mk_pte(page, prot)); + + retval = 0; +out_unlock: + pte_unmap_unlock(pte, ptl); +out: + return retval; +} + +/* + * Somebody does a pfn remapping that doesn't actually work as a vma. + * + * Do it as individual pages instead, and warn about it. It's bad form, + * and very inefficient. + */ +static int incomplete_pfn_remap(struct vm_area_struct *vma, + unsigned long start, unsigned long end, + unsigned long pfn, pgprot_t prot) +{ + static int warn = 10; + struct page *page; + int retval; + + if (!(vma->vm_flags & VM_INCOMPLETE)) { + if (warn) { + warn--; + printk("%s does an incomplete pfn remapping", current->comm); + dump_stack(); + } + } + vma->vm_flags |= VM_INCOMPLETE | VM_IO | VM_RESERVED; + + if (start < vma->vm_start || end > vma->vm_end) + return -EINVAL; + + if (!pfn_valid(pfn)) + return -EINVAL; + + retval = 0; + page = pfn_to_page(pfn); + while (start < end) { + retval = insert_page(vma->vm_mm, start, page, prot); + if (retval < 0) + break; + start += PAGE_SIZE; + page++; + } + return retval; +} + /* * maps a range of physical memory into the requested pages. the old * mappings are removed. any references to nonexistent pages results @@ -1220,6 +1309,9 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, struct mm_struct *mm = vma->vm_mm; int err; + if (addr != vma->vm_start || end != vma->vm_end) + return incomplete_pfn_remap(vma, addr, end, pfn, prot); + /* * Physically remapped pages are special. Tell the * rest of the world about it: -- cgit v1.2.3 From c9cfcddfd65735437a4cb8563d6b66a6da8a5ed6 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Tue, 29 Nov 2005 14:03:14 -0800 Subject: VM: add common helper function to create the page tables This logic was duplicated four times, for no good reason. 
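For illustration, the pattern the new helper gives callers (the function below is hypothetical; it simply mirrors the converted call sites in the diff that follows):

static int example_touch_pte(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte;

	/* one call replaces the open-coded pgd/pud/pmd walk */
	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		return -ENOMEM;
	/* ... examine or install *pte while holding ptl ... */
	pte_unmap_unlock(pte, ptl);
	return 0;
}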
Signed-off-by: Linus Torvalds --- fs/exec.c | 12 +----------- include/linux/mm.h | 2 ++ mm/fremap.c | 24 ++---------------------- mm/memory.c | 26 ++++++++++++++------------ 4 files changed, 19 insertions(+), 45 deletions(-) (limited to 'include/linux') diff --git a/fs/exec.c b/fs/exec.c index 1f8a9fd2c9ed..22533cce0611 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -306,9 +306,6 @@ void install_arg_page(struct vm_area_struct *vma, struct page *page, unsigned long address) { struct mm_struct *mm = vma->vm_mm; - pgd_t * pgd; - pud_t * pud; - pmd_t * pmd; pte_t * pte; spinlock_t *ptl; @@ -316,14 +313,7 @@ void install_arg_page(struct vm_area_struct *vma, goto out; flush_dcache_page(page); - pgd = pgd_offset(mm, address); - pud = pud_alloc(mm, pgd, address); - if (!pud) - goto out; - pmd = pmd_alloc(mm, pud, address); - if (!pmd) - goto out; - pte = pte_alloc_map_lock(mm, pmd, address, &ptl); + pte = get_locked_pte(mm, address, &ptl); if (!pte) goto out; if (!pte_none(*pte)) { diff --git a/include/linux/mm.h b/include/linux/mm.h index 74f90d7eb5ef..0e73f1539d08 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -742,6 +742,8 @@ struct shrinker; extern struct shrinker *set_shrinker(int, shrinker_t); extern void remove_shrinker(struct shrinker *shrinker); +extern pte_t *FASTCALL(get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl)); + int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address); int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address); int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address); diff --git a/mm/fremap.c b/mm/fremap.c index f851775e09c2..9f381e58bf44 100644 --- a/mm/fremap.c +++ b/mm/fremap.c @@ -55,20 +55,10 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma, pgoff_t size; int err = -ENOMEM; pte_t *pte; - pmd_t *pmd; - pud_t *pud; - pgd_t *pgd; pte_t pte_val; spinlock_t *ptl; - pgd = pgd_offset(mm, addr); - pud = pud_alloc(mm, pgd, addr); - if (!pud) - goto out; - pmd = pmd_alloc(mm, pud, addr); - if (!pmd) - goto out; - pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); + pte = get_locked_pte(mm, addr, &ptl); if (!pte) goto out; @@ -110,20 +100,10 @@ int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, { int err = -ENOMEM; pte_t *pte; - pmd_t *pmd; - pud_t *pud; - pgd_t *pgd; pte_t pte_val; spinlock_t *ptl; - pgd = pgd_offset(mm, addr); - pud = pud_alloc(mm, pgd, addr); - if (!pud) - goto out; - pmd = pmd_alloc(mm, pud, addr); - if (!pmd) - goto out; - pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); + pte = get_locked_pte(mm, addr, &ptl); if (!pte) goto out; diff --git a/mm/memory.c b/mm/memory.c index 990e7dc666f8..74f95ae0510b 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1146,6 +1146,18 @@ int zeromap_page_range(struct vm_area_struct *vma, return err; } +pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl) +{ + pgd_t * pgd = pgd_offset(mm, addr); + pud_t * pud = pud_alloc(mm, pgd, addr); + if (pud) { + pmd_t * pmd = pmd_alloc(mm, pud, addr); + if (pmd) + return pte_alloc_map_lock(mm, pmd, addr, ptl); + } + return NULL; +} + /* * This is the old fallback for page remapping. 
* @@ -1156,10 +1168,7 @@ int zeromap_page_range(struct vm_area_struct *vma, static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *page, pgprot_t prot) { int retval; - pgd_t * pgd; - pud_t * pud; - pmd_t * pmd; - pte_t * pte; + pte_t *pte; spinlock_t *ptl; retval = -EINVAL; @@ -1167,14 +1176,7 @@ static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *pa goto out; retval = -ENOMEM; flush_dcache_page(page); - pgd = pgd_offset(mm, addr); - pud = pud_alloc(mm, pgd, addr); - if (!pud) - goto out; - pmd = pmd_alloc(mm, pud, addr); - if (!pmd) - goto out; - pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); + pte = get_locked_pte(mm, addr, &ptl); if (!pte) goto out; retval = -EBUSY; -- cgit v1.2.3 From c219750b2e667f4f79f4d8faca5057dad793db87 Mon Sep 17 00:00:00 2001 From: Mitchell Blank Jr Date: Tue, 29 Nov 2005 16:13:55 -0800 Subject: [ATM]: atm_pcr_goal() doesn't modify its argument's contents -- mark it as const Signed-off-by: Mitchell Blank Jr Signed-off-by: Chas Williams Signed-off-by: David S. Miller --- include/linux/atmdev.h | 2 +- net/atm/atm_misc.c | 11 +++++++---- 2 files changed, 8 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h index e7d0593bb576..37e5ee485399 100644 --- a/include/linux/atmdev.h +++ b/include/linux/atmdev.h @@ -468,7 +468,7 @@ static inline void atm_dev_put(struct atm_dev *dev) int atm_charge(struct atm_vcc *vcc,int truesize); struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size, gfp_t gfp_flags); -int atm_pcr_goal(struct atm_trafprm *tp); +int atm_pcr_goal(const struct atm_trafprm *tp); void vcc_release_async(struct atm_vcc *vcc, int reply); diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c index 223c7ad5bd0f..02cc7e71efea 100644 --- a/net/atm/atm_misc.c +++ b/net/atm/atm_misc.c @@ -74,11 +74,14 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size, */ -int atm_pcr_goal(struct atm_trafprm *tp) +int atm_pcr_goal(const struct atm_trafprm *tp) { - if (tp->pcr && tp->pcr != ATM_MAX_PCR) return -tp->pcr; - if (tp->min_pcr && !tp->pcr) return tp->min_pcr; - if (tp->max_pcr != ATM_MAX_PCR) return -tp->max_pcr; + if (tp->pcr && tp->pcr != ATM_MAX_PCR) + return -tp->pcr; + if (tp->min_pcr && !tp->pcr) + return tp->min_pcr; + if (tp->max_pcr != ATM_MAX_PCR) + return -tp->max_pcr; return 0; } -- cgit v1.2.3 From 5045b6d34c6a9efa4a8a1815265ca9fcf44d6a7c Mon Sep 17 00:00:00 2001 From: Chas Williams Date: Tue, 29 Nov 2005 16:15:38 -0800 Subject: [ATM]: linux/config.h only needed for #ifdef __KERNEL__ section Signed-off-by: Chas Williams Signed-off-by: David S. Miller --- include/linux/atmdev.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h index 37e5ee485399..8fadb073c834 100644 --- a/include/linux/atmdev.h +++ b/include/linux/atmdev.h @@ -7,7 +7,6 @@ #define LINUX_ATMDEV_H -#include #include #include #include @@ -210,6 +209,7 @@ struct atm_cirange { #ifdef __KERNEL__ +#include #include /* wait_queue_head_t */ #include /* struct timeval */ #include -- cgit v1.2.3 From 64bf69ddff7637b7ed7acf9b2a823cc0ee519439 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Tue, 29 Nov 2005 16:16:41 -0800 Subject: [ATM]: deregistration removes device from atm_devs list immediately atm_dev_deregister() removes device from atm_dev list immediately to prevent operations on a phantom device. Decision to free device based only on ->refcnt now. 
Remove shutdown_atm_dev() use atm_dev_deregister() instead. atm_dev_deregister() also asynchronously releases all vccs related to device. Signed-off-by: Stanislaw Gruszka Signed-off-by: Chas Williams Signed-off-by: David S. Miller --- drivers/atm/atmtcp.c | 20 ++------------------ drivers/usb/atm/usbatm.c | 4 ++-- include/linux/atmdev.h | 14 +++++++------- net/atm/common.c | 30 +++++++++++++++++++++++++++--- net/atm/common.h | 2 ++ net/atm/resources.c | 39 ++++++++++++--------------------------- 6 files changed, 52 insertions(+), 57 deletions(-) (limited to 'include/linux') diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c index 57f1810fdccd..fc518d85543d 100644 --- a/drivers/atm/atmtcp.c +++ b/drivers/atm/atmtcp.c @@ -246,10 +246,6 @@ static void atmtcp_c_close(struct atm_vcc *vcc) { struct atm_dev *atmtcp_dev; struct atmtcp_dev_data *dev_data; - struct sock *s; - struct hlist_node *node; - struct atm_vcc *walk; - int i; atmtcp_dev = (struct atm_dev *) vcc->dev_data; dev_data = PRIV(atmtcp_dev); @@ -257,20 +253,8 @@ static void atmtcp_c_close(struct atm_vcc *vcc) if (dev_data->persist) return; atmtcp_dev->dev_data = NULL; kfree(dev_data); - shutdown_atm_dev(atmtcp_dev); + atm_dev_deregister(atmtcp_dev); vcc->dev_data = NULL; - read_lock(&vcc_sklist_lock); - for(i = 0; i < VCC_HTABLE_SIZE; ++i) { - struct hlist_head *head = &vcc_hash[i]; - - sk_for_each(s, node, head) { - walk = atm_sk(s); - if (walk->dev != atmtcp_dev) - continue; - wake_up(s->sk_sleep); - } - } - read_unlock(&vcc_sklist_lock); module_put(THIS_MODULE); } @@ -450,7 +434,7 @@ static int atmtcp_remove_persistent(int itf) if (PRIV(dev)->vcc) return 0; kfree(dev_data); atm_dev_put(dev); - shutdown_atm_dev(dev); + atm_dev_deregister(dev); return 0; } diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c index c466739428b2..2e6593e6c1bd 100644 --- a/drivers/usb/atm/usbatm.c +++ b/drivers/usb/atm/usbatm.c @@ -879,7 +879,7 @@ static int usbatm_atm_init(struct usbatm_data *instance) fail: instance->atm_dev = NULL; - shutdown_atm_dev(atm_dev); /* usbatm_atm_dev_close will eventually be called */ + atm_dev_deregister(atm_dev); /* usbatm_atm_dev_close will eventually be called */ return ret; } @@ -1164,7 +1164,7 @@ void usbatm_usb_disconnect(struct usb_interface *intf) /* ATM finalize */ if (instance->atm_dev) - shutdown_atm_dev(instance->atm_dev); + atm_dev_deregister(instance->atm_dev); usbatm_put_instance(instance); /* taken in usbatm_usb_probe */ } diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h index 8fadb073c834..b203ea82a0a8 100644 --- a/include/linux/atmdev.h +++ b/include/linux/atmdev.h @@ -274,7 +274,7 @@ enum { enum { - ATM_DF_CLOSE, /* close device when last VCC is closed */ + ATM_DF_REMOVED, /* device was removed from atm_devs list */ }; @@ -415,7 +415,6 @@ struct atm_dev *atm_dev_register(const char *type,const struct atmdev_ops *ops, int number,unsigned long *flags); /* number == -1: pick first available */ struct atm_dev *atm_dev_lookup(int number); void atm_dev_deregister(struct atm_dev *dev); -void shutdown_atm_dev(struct atm_dev *dev); void vcc_insert_socket(struct sock *sk); @@ -457,11 +456,12 @@ static inline void atm_dev_hold(struct atm_dev *dev) static inline void atm_dev_put(struct atm_dev *dev) { - atomic_dec(&dev->refcnt); - - if ((atomic_read(&dev->refcnt) == 1) && - test_bit(ATM_DF_CLOSE,&dev->flags)) - shutdown_atm_dev(dev); + if (atomic_dec_and_test(&dev->refcnt)) { + BUG_ON(!test_bit(ATM_DF_REMOVED, &dev->flags)); + if (dev->ops->dev_close) + dev->ops->dev_close(dev); + 
kfree(dev); + } } diff --git a/net/atm/common.c b/net/atm/common.c index 9e016f404e14..6656b111cc05 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -221,6 +221,29 @@ void vcc_release_async(struct atm_vcc *vcc, int reply) EXPORT_SYMBOL(vcc_release_async); +void atm_dev_release_vccs(struct atm_dev *dev) +{ + int i; + + write_lock_irq(&vcc_sklist_lock); + for (i = 0; i < VCC_HTABLE_SIZE; i++) { + struct hlist_head *head = &vcc_hash[i]; + struct hlist_node *node, *tmp; + struct sock *s; + struct atm_vcc *vcc; + + sk_for_each_safe(s, node, tmp, head) { + vcc = atm_sk(s); + if (vcc->dev == dev) { + vcc_release_async(vcc, -EPIPE); + sk_del_node_init(s); + } + } + } + write_unlock_irq(&vcc_sklist_lock); +} + + static int adjust_tp(struct atm_trafprm *tp,unsigned char aal) { int max_sdu; @@ -332,12 +355,13 @@ static int __vcc_connect(struct atm_vcc *vcc, struct atm_dev *dev, short vpi, return -EINVAL; if (vci > 0 && vci < ATM_NOT_RSV_VCI && !capable(CAP_NET_BIND_SERVICE)) return -EPERM; - error = 0; + error = -ENODEV; if (!try_module_get(dev->ops->owner)) - return -ENODEV; + return error; vcc->dev = dev; write_lock_irq(&vcc_sklist_lock); - if ((error = find_ci(vcc, &vpi, &vci))) { + if (test_bit(ATM_DF_REMOVED, &dev->flags) || + (error = find_ci(vcc, &vpi, &vci))) { write_unlock_irq(&vcc_sklist_lock); goto fail_module_put; } diff --git a/net/atm/common.h b/net/atm/common.h index e49ed41c0e33..4887c317cefe 100644 --- a/net/atm/common.h +++ b/net/atm/common.h @@ -47,4 +47,6 @@ static inline void atm_proc_exit(void) /* SVC */ int svc_change_qos(struct atm_vcc *vcc,struct atm_qos *qos); +void atm_dev_release_vccs(struct atm_dev *dev); + #endif diff --git a/net/atm/resources.c b/net/atm/resources.c index ad533b02b84f..c8c459fcb038 100644 --- a/net/atm/resources.c +++ b/net/atm/resources.c @@ -70,6 +70,7 @@ struct atm_dev *atm_dev_lookup(int number) return dev; } + struct atm_dev *atm_dev_register(const char *type, const struct atmdev_ops *ops, int number, unsigned long *flags) { @@ -123,37 +124,22 @@ struct atm_dev *atm_dev_register(const char *type, const struct atmdev_ops *ops, void atm_dev_deregister(struct atm_dev *dev) { - unsigned long warning_time; - - atm_proc_dev_deregister(dev); - + BUG_ON(test_bit(ATM_DF_REMOVED, &dev->flags)); + set_bit(ATM_DF_REMOVED, &dev->flags); + + /* + * if we remove current device from atm_devs list, new device + * with same number can appear, such we need deregister proc, + * release async all vccs and remove them from vccs list too + */ down(&atm_dev_mutex); list_del(&dev->dev_list); up(&atm_dev_mutex); - warning_time = jiffies; - while (atomic_read(&dev->refcnt) != 1) { - msleep(250); - if ((jiffies - warning_time) > 10 * HZ) { - printk(KERN_EMERG "atm_dev_deregister: waiting for " - "dev %d to become free. 
Usage count = %d\n", - dev->number, atomic_read(&dev->refcnt)); - warning_time = jiffies; - } - } - - kfree(dev); -} + atm_dev_release_vccs(dev); + atm_proc_dev_deregister(dev); -void shutdown_atm_dev(struct atm_dev *dev) -{ - if (atomic_read(&dev->refcnt) > 1) { - set_bit(ATM_DF_CLOSE, &dev->flags); - return; - } - if (dev->ops->dev_close) - dev->ops->dev_close(dev); - atm_dev_deregister(dev); + atm_dev_put(dev); } @@ -433,4 +419,3 @@ void *atm_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) EXPORT_SYMBOL(atm_dev_register); EXPORT_SYMBOL(atm_dev_deregister); EXPORT_SYMBOL(atm_dev_lookup); -EXPORT_SYMBOL(shutdown_atm_dev); -- cgit v1.2.3 From df69a60dc6afc2936d79054d30b481c1fd9720e5 Mon Sep 17 00:00:00 2001 From: Matt Helsley Date: Tue, 29 Nov 2005 19:34:31 -0800 Subject: [PATCH] process events connector: uid_t gid_t size issues The uid_t and gid_t fields appear to present a 32/64-bit userspace/kernel problem for some archs. This patch addresses the problem by fixing the size to the largest size for uid_t/gid_t used in the kernel. This preserves the total size of the event structure while ensuring that the layouts of the ID change event match in 32 and 64-bit kernels and applications. Signed-off-by: Matt Helsley Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/cn_proc.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cn_proc.h b/include/linux/cn_proc.h index 70ab56317380..c948f678e04e 100644 --- a/include/linux/cn_proc.h +++ b/include/linux/cn_proc.h @@ -86,12 +86,12 @@ struct proc_event { pid_t process_pid; pid_t process_tgid; union { - uid_t ruid; /* current->uid */ - gid_t rgid; /* current->gid */ + __u32 ruid; /* task uid */ + __u32 rgid; /* task gid */ } r; union { - uid_t euid; - gid_t egid; + __u32 euid; + __u32 egid; } e; } id; -- cgit v1.2.3
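As a rough illustration of the layout point behind this last patch (a hypothetical userspace check, not part of the patch, assuming the exported connector header is available): with fixed-width __u32 fields the id event keeps the same size and field offsets for 32-bit and 64-bit kernels and applications, regardless of how uid_t and gid_t happen to be defined.

#include <stdio.h>
#include <linux/cn_proc.h>

int main(void)
{
	struct proc_event ev;

	/* prints 4 and 4 on both 32-bit and 64-bit systems */
	printf("ruid: %zu bytes, euid: %zu bytes\n",
	       sizeof(ev.event_data.id.r.ruid),
	       sizeof(ev.event_data.id.e.euid));
	return 0;
}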